   1/**************************************************************************/
   2/*                                                                        */
   3/*  IBM System i and System p Virtual NIC Device Driver                   */
   4/*  Copyright (C) 2014 IBM Corp.                                          */
   5/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   6/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   7/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   8/*                                                                        */
   9/*  This program is free software; you can redistribute it and/or modify  */
  10/*  it under the terms of the GNU General Public License as published by  */
  11/*  the Free Software Foundation; either version 2 of the License, or     */
  12/*  (at your option) any later version.                                   */
  13/*                                                                        */
  14/*  This program is distributed in the hope that it will be useful,       */
  15/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
  16/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
  17/*  GNU General Public License for more details.                          */
  18/*                                                                        */
  19/*  You should have received a copy of the GNU General Public License     */
  20/*  along with this program.                                              */
  21/*                                                                        */
  22/* This module contains the implementation of a virtual ethernet device   */
  23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  24/* option of the RS/6000 Platform Architecture to interface with virtual  */
  25/* ethernet NICs that are presented to the partition by the hypervisor.   */
   26/*                                                                        */
  27/* Messages are passed between the VNIC driver and the VNIC server using  */
  28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  29/* issue and receive commands that initiate communication with the server */
  30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  31/* are used by the driver to notify the server that a packet is           */
  32/* ready for transmission or that a buffer has been added to receive a    */
  33/* packet. Subsequently, sCRQs are used by the server to notify the       */
  34/* driver that a packet transmission has been completed or that a packet  */
  35/* has been received and placed in a waiting buffer.                      */
  36/*                                                                        */
  37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  38/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  39/* or receive has been completed, the VNIC driver is required to use      */
  40/* "long term mapping". This entails that large, continuous DMA mapped    */
  41/* buffers are allocated on driver initialization and these buffers are   */
  42/* then continuously reused to pass skbs to and from the VNIC server.     */
  43/*                                                                        */
  44/**************************************************************************/
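/* A rough sketch of the long term mapping flow described above, for
 * orientation only (the helpers below are the real implementation):
 *
 *	alloc_long_term_buff(adapter, &ltb, size);
 *		-> dma_alloc_coherent() once, at init time
 *		-> send_request_map() registers the region with the server
 *	... skbs are copied into and out of ltb->buff at runtime ...
 *	free_long_term_buff(adapter, &ltb);
 *		-> send_request_unmap(), then dma_free_coherent()
 */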
  45
  46#include <linux/module.h>
  47#include <linux/moduleparam.h>
  48#include <linux/types.h>
  49#include <linux/errno.h>
  50#include <linux/completion.h>
  51#include <linux/ioport.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/kernel.h>
  54#include <linux/netdevice.h>
  55#include <linux/etherdevice.h>
  56#include <linux/skbuff.h>
  57#include <linux/init.h>
  58#include <linux/delay.h>
  59#include <linux/mm.h>
  60#include <linux/ethtool.h>
  61#include <linux/proc_fs.h>
  62#include <linux/if_arp.h>
  63#include <linux/in.h>
  64#include <linux/ip.h>
  65#include <linux/ipv6.h>
  66#include <linux/irq.h>
  67#include <linux/kthread.h>
  68#include <linux/seq_file.h>
  69#include <linux/interrupt.h>
  70#include <net/net_namespace.h>
  71#include <asm/hvcall.h>
  72#include <linux/atomic.h>
  73#include <asm/vio.h>
  74#include <asm/iommu.h>
  75#include <linux/uaccess.h>
  76#include <asm/firmware.h>
  77#include <linux/workqueue.h>
  78#include <linux/if_vlan.h>
  79#include <linux/utsname.h>
  80
  81#include "ibmvnic.h"
  82
  83static const char ibmvnic_driver_name[] = "ibmvnic";
  84static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  85
  86MODULE_AUTHOR("Santiago Leon");
  87MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  88MODULE_LICENSE("GPL");
  89MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  90
  91static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
  92static int ibmvnic_remove(struct vio_dev *);
  93static void release_sub_crqs(struct ibmvnic_adapter *, bool);
  94static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  95static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  96static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  97static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
  98static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
  99		       union sub_crq *sub_crq);
 100static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 101static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 102static int enable_scrq_irq(struct ibmvnic_adapter *,
 103			   struct ibmvnic_sub_crq_queue *);
 104static int disable_scrq_irq(struct ibmvnic_adapter *,
 105			    struct ibmvnic_sub_crq_queue *);
 106static int pending_scrq(struct ibmvnic_adapter *,
 107			struct ibmvnic_sub_crq_queue *);
 108static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
 109					struct ibmvnic_sub_crq_queue *);
 110static int ibmvnic_poll(struct napi_struct *napi, int data);
 111static void send_map_query(struct ibmvnic_adapter *adapter);
 112static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 113static void send_request_unmap(struct ibmvnic_adapter *, u8);
 114static int send_login(struct ibmvnic_adapter *adapter);
 115static void send_cap_queries(struct ibmvnic_adapter *adapter);
 116static int init_sub_crqs(struct ibmvnic_adapter *);
 117static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 118static int ibmvnic_init(struct ibmvnic_adapter *);
 119static void release_crq_queue(struct ibmvnic_adapter *);
 120static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
 121static int init_crq_queue(struct ibmvnic_adapter *adapter);
 122
 123struct ibmvnic_stat {
 124	char name[ETH_GSTRING_LEN];
 125	int offset;
 126};
 127
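/* Each entry of ibmvnic_stats names one firmware counter and records its
 * byte offset within struct ibmvnic_adapter.  IBMVNIC_STAT_OFF computes
 * that offset at compile time; IBMVNIC_GET_STAT adds the offset to the
 * adapter pointer and reads the u64 counter in place.
 */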
 128#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 129			     offsetof(struct ibmvnic_statistics, stat))
 130#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
 131
 132static const struct ibmvnic_stat ibmvnic_stats[] = {
 133	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 134	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 135	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 136	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 137	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 138	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 139	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 140	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 141	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 142	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 143	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 144	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 145	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 146	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 147	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 148	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 149	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 150	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 151	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 152	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 153	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 154	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 155};
 156
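/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call: registers the
 * DMA region described by @token/@length for the given unit address and
 * returns the new sub-CRQ's queue number and interrupt source through
 * @number and @irq.
 */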
 157static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 158			  unsigned long length, unsigned long *number,
 159			  unsigned long *irq)
 160{
 161	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 162	long rc;
 163
 164	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 165	*number = retbuf[0];
 166	*irq = retbuf[1];
 167
 168	return rc;
 169}
 170
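/* Allocate one long term buffer: reserve a coherent DMA region and
 * register it with the VNIC server via a REQUEST_MAP command.  The
 * adapter-wide map_id becomes the handle that later sub-CRQ descriptors
 * use to refer to this buffer; the mapping return code arrives in
 * adapter->fw_done_rc once the firmware response completes fw_done.
 */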
 171static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 172				struct ibmvnic_long_term_buff *ltb, int size)
 173{
 174	struct device *dev = &adapter->vdev->dev;
 175
 176	ltb->size = size;
 177	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
 178				       GFP_KERNEL);
 179
 180	if (!ltb->buff) {
 181		dev_err(dev, "Couldn't alloc long term buffer\n");
 182		return -ENOMEM;
 183	}
 184	ltb->map_id = adapter->map_id;
 185	adapter->map_id++;
 186
 187	init_completion(&adapter->fw_done);
 188	send_request_map(adapter, ltb->addr,
 189			 ltb->size, ltb->map_id);
 190	wait_for_completion(&adapter->fw_done);
 191
 192	if (adapter->fw_done_rc) {
  193		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
 194			adapter->fw_done_rc);
 195		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 196		return -1;
 197	}
 198	return 0;
 199}
 200
 201static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 202				struct ibmvnic_long_term_buff *ltb)
 203{
 204	struct device *dev = &adapter->vdev->dev;
 205
 206	if (!ltb->buff)
 207		return;
 208
 209	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 210	    adapter->reset_reason != VNIC_RESET_MOBILITY)
 211		send_request_unmap(adapter, ltb->map_id);
 212	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 213}
 214
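/* Zero a long term buffer and re-register it with the VNIC server.  If
 * the re-map fails, fall back to freeing the buffer and allocating a
 * fresh one of the same size.
 */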
 215static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
 216				struct ibmvnic_long_term_buff *ltb)
 217{
 218	memset(ltb->buff, 0, ltb->size);
 219
 220	init_completion(&adapter->fw_done);
 221	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 222	wait_for_completion(&adapter->fw_done);
 223
 224	if (adapter->fw_done_rc) {
 225		dev_info(&adapter->vdev->dev,
 226			 "Reset failed, attempting to free and reallocate buffer\n");
 227		free_long_term_buff(adapter, ltb);
 228		return alloc_long_term_buff(adapter, ltb, ltb->size);
 229	}
 230	return 0;
 231}
 232
 233static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 234{
 235	int i;
 236
 237	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 238	     i++)
 239		adapter->rx_pool[i].active = 0;
 240}
 241
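/* Post receive buffers to the VNIC server.  For every free slot in the
 * pool, an skb is staged against the long term buffer and described to
 * the server with an rx_add sub-CRQ entry carrying the slot's DMA
 * address, map_id and length; the correlator hands the rx_buff pointer
 * back to us when the buffer is filled.
 */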
 242static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 243			      struct ibmvnic_rx_pool *pool)
 244{
 245	int count = pool->size - atomic_read(&pool->available);
 246	struct device *dev = &adapter->vdev->dev;
 247	int buffers_added = 0;
 248	unsigned long lpar_rc;
 249	union sub_crq sub_crq;
 250	struct sk_buff *skb;
 251	unsigned int offset;
 252	dma_addr_t dma_addr;
 253	unsigned char *dst;
 254	u64 *handle_array;
 255	int shift = 0;
 256	int index;
 257	int i;
 258
 259	if (!pool->active)
 260		return;
 261
 262	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 263				      be32_to_cpu(adapter->login_rsp_buf->
 264				      off_rxadd_subcrqs));
 265
 266	for (i = 0; i < count; ++i) {
 267		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 268		if (!skb) {
 269			dev_err(dev, "Couldn't replenish rx buff\n");
 270			adapter->replenish_no_mem++;
 271			break;
 272		}
 273
 274		index = pool->free_map[pool->next_free];
 275
 276		if (pool->rx_buff[index].skb)
 277			dev_err(dev, "Inconsistent free_map!\n");
 278
 279		/* Copy the skb to the long term mapped DMA buffer */
 280		offset = index * pool->buff_size;
 281		dst = pool->long_term_buff.buff + offset;
 282		memset(dst, 0, pool->buff_size);
 283		dma_addr = pool->long_term_buff.addr + offset;
 284		pool->rx_buff[index].data = dst;
 285
 286		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 287		pool->rx_buff[index].dma = dma_addr;
 288		pool->rx_buff[index].skb = skb;
 289		pool->rx_buff[index].pool_index = pool->index;
 290		pool->rx_buff[index].size = pool->buff_size;
 291
 292		memset(&sub_crq, 0, sizeof(sub_crq));
 293		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 294		sub_crq.rx_add.correlator =
 295		    cpu_to_be64((u64)&pool->rx_buff[index]);
 296		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 297		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
 298
 299		/* The length field of the sCRQ is defined to be 24 bits so the
 300		 * buffer size needs to be left shifted by a byte before it is
 301		 * converted to big endian to prevent the last byte from being
 302		 * truncated.
 303		 */
 304#ifdef __LITTLE_ENDIAN__
 305		shift = 8;
 306#endif
 307		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 308
 309		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
 310				      &sub_crq);
 311		if (lpar_rc != H_SUCCESS)
 312			goto failure;
 313
 314		buffers_added++;
 315		adapter->replenish_add_buff_success++;
 316		pool->next_free = (pool->next_free + 1) % pool->size;
 317	}
 318	atomic_add(buffers_added, &pool->available);
 319	return;
 320
 321failure:
 322	dev_info(dev, "replenish pools failure\n");
 323	pool->free_map[pool->next_free] = index;
 324	pool->rx_buff[index].skb = NULL;
 325
 326	dev_kfree_skb_any(skb);
 327	adapter->replenish_add_buff_failure++;
 328	atomic_add(buffers_added, &pool->available);
 329
 330	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 331		/* Disable buffer pool replenishment and report carrier off if
 332		 * queue is closed or pending failover.
 333		 * Firmware guarantees that a signal will be sent to the
 334		 * driver, triggering a reset.
 335		 */
 336		deactivate_rx_pools(adapter);
 337		netif_carrier_off(adapter->netdev);
 338	}
 339}
 340
 341static void replenish_pools(struct ibmvnic_adapter *adapter)
 342{
 343	int i;
 344
 345	adapter->replenish_task_cycles++;
 346	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 347	     i++) {
 348		if (adapter->rx_pool[i].active)
 349			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 350	}
 351}
 352
 353static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 354{
 355	kfree(adapter->tx_stats_buffers);
 356	kfree(adapter->rx_stats_buffers);
 357	adapter->tx_stats_buffers = NULL;
 358	adapter->rx_stats_buffers = NULL;
 359}
 360
 361static int init_stats_buffers(struct ibmvnic_adapter *adapter)
 362{
 363	adapter->tx_stats_buffers =
 364				kcalloc(IBMVNIC_MAX_QUEUES,
 365					sizeof(struct ibmvnic_tx_queue_stats),
 366					GFP_KERNEL);
 367	if (!adapter->tx_stats_buffers)
 368		return -ENOMEM;
 369
 370	adapter->rx_stats_buffers =
 371				kcalloc(IBMVNIC_MAX_QUEUES,
 372					sizeof(struct ibmvnic_rx_queue_stats),
 373					GFP_KERNEL);
 374	if (!adapter->rx_stats_buffers)
 375		return -ENOMEM;
 376
 377	return 0;
 378}
 379
 380static void release_stats_token(struct ibmvnic_adapter *adapter)
 381{
 382	struct device *dev = &adapter->vdev->dev;
 383
 384	if (!adapter->stats_token)
 385		return;
 386
 387	dma_unmap_single(dev, adapter->stats_token,
 388			 sizeof(struct ibmvnic_statistics),
 389			 DMA_FROM_DEVICE);
 390	adapter->stats_token = 0;
 391}
 392
 393static int init_stats_token(struct ibmvnic_adapter *adapter)
 394{
 395	struct device *dev = &adapter->vdev->dev;
 396	dma_addr_t stok;
 397
 398	stok = dma_map_single(dev, &adapter->stats,
 399			      sizeof(struct ibmvnic_statistics),
 400			      DMA_FROM_DEVICE);
 401	if (dma_mapping_error(dev, stok)) {
 402		dev_err(dev, "Couldn't map stats buffer\n");
 403		return -1;
 404	}
 405
 406	adapter->stats_token = stok;
 407	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
 408	return 0;
 409}
 410
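/* Re-initialize every rx pool after a reset.  If the server negotiated
 * a different buffer size, the pool's long term buffer is reallocated
 * at the new size; otherwise it is simply zeroed and re-mapped.
 */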
 411static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 412{
 413	struct ibmvnic_rx_pool *rx_pool;
 414	int rx_scrqs;
 415	int i, j, rc;
 416	u64 *size_array;
 417
 418	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 419		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 420
 421	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 422	for (i = 0; i < rx_scrqs; i++) {
 423		rx_pool = &adapter->rx_pool[i];
 424
 425		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 426
 427		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
 428			free_long_term_buff(adapter, &rx_pool->long_term_buff);
 429			rx_pool->buff_size = be64_to_cpu(size_array[i]);
  430			rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
  431						  rx_pool->size *
  432						  rx_pool->buff_size);
 433		} else {
 434			rc = reset_long_term_buff(adapter,
 435						  &rx_pool->long_term_buff);
 436		}
 437
 438		if (rc)
 439			return rc;
 440
 441		for (j = 0; j < rx_pool->size; j++)
 442			rx_pool->free_map[j] = j;
 443
 444		memset(rx_pool->rx_buff, 0,
 445		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
 446
 447		atomic_set(&rx_pool->available, 0);
 448		rx_pool->next_alloc = 0;
 449		rx_pool->next_free = 0;
 450		rx_pool->active = 1;
 451	}
 452
 453	return 0;
 454}
 455
 456static void release_rx_pools(struct ibmvnic_adapter *adapter)
 457{
 458	struct ibmvnic_rx_pool *rx_pool;
 459	int i, j;
 460
 461	if (!adapter->rx_pool)
 462		return;
 463
 464	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 465		rx_pool = &adapter->rx_pool[i];
 466
 467		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 468
 469		kfree(rx_pool->free_map);
 470		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 471
 472		if (!rx_pool->rx_buff)
 473			continue;
 474
 475		for (j = 0; j < rx_pool->size; j++) {
 476			if (rx_pool->rx_buff[j].skb) {
  477				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
  478				rx_pool->rx_buff[j].skb = NULL;
 479			}
 480		}
 481
 482		kfree(rx_pool->rx_buff);
 483	}
 484
 485	kfree(adapter->rx_pool);
 486	adapter->rx_pool = NULL;
 487	adapter->num_active_rx_pools = 0;
 488}
 489
 490static int init_rx_pools(struct net_device *netdev)
 491{
 492	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 493	struct device *dev = &adapter->vdev->dev;
 494	struct ibmvnic_rx_pool *rx_pool;
 495	int rxadd_subcrqs;
 496	u64 *size_array;
 497	int i, j;
 498
 499	rxadd_subcrqs =
 500		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 501	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 502		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 503
 504	adapter->rx_pool = kcalloc(rxadd_subcrqs,
 505				   sizeof(struct ibmvnic_rx_pool),
 506				   GFP_KERNEL);
 507	if (!adapter->rx_pool) {
 508		dev_err(dev, "Failed to allocate rx pools\n");
 509		return -1;
 510	}
 511
 512	adapter->num_active_rx_pools = rxadd_subcrqs;
 513
 514	for (i = 0; i < rxadd_subcrqs; i++) {
 515		rx_pool = &adapter->rx_pool[i];
 516
 517		netdev_dbg(adapter->netdev,
 518			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
 519			   i, adapter->req_rx_add_entries_per_subcrq,
 520			   be64_to_cpu(size_array[i]));
 521
 522		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
 523		rx_pool->index = i;
 524		rx_pool->buff_size = be64_to_cpu(size_array[i]);
 525		rx_pool->active = 1;
 526
 527		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
 528					    GFP_KERNEL);
 529		if (!rx_pool->free_map) {
 530			release_rx_pools(adapter);
 531			return -1;
 532		}
 533
 534		rx_pool->rx_buff = kcalloc(rx_pool->size,
 535					   sizeof(struct ibmvnic_rx_buff),
 536					   GFP_KERNEL);
 537		if (!rx_pool->rx_buff) {
 538			dev_err(dev, "Couldn't alloc rx buffers\n");
 539			release_rx_pools(adapter);
 540			return -1;
 541		}
 542
 543		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
 544					 rx_pool->size * rx_pool->buff_size)) {
 545			release_rx_pools(adapter);
 546			return -1;
 547		}
 548
 549		for (j = 0; j < rx_pool->size; ++j)
 550			rx_pool->free_map[j] = j;
 551
 552		atomic_set(&rx_pool->available, 0);
 553		rx_pool->next_alloc = 0;
 554		rx_pool->next_free = 0;
 555	}
 556
 557	return 0;
 558}
 559
 560static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
 561			     struct ibmvnic_tx_pool *tx_pool)
 562{
 563	int rc, i;
 564
 565	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
 566	if (rc)
 567		return rc;
 568
 569	memset(tx_pool->tx_buff, 0,
 570	       tx_pool->num_buffers *
 571	       sizeof(struct ibmvnic_tx_buff));
 572
 573	for (i = 0; i < tx_pool->num_buffers; i++)
 574		tx_pool->free_map[i] = i;
 575
 576	tx_pool->consumer_index = 0;
 577	tx_pool->producer_index = 0;
 578
 579	return 0;
 580}
 581
 582static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 583{
 584	int tx_scrqs;
 585	int i, rc;
 586
 587	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 588	for (i = 0; i < tx_scrqs; i++) {
 589		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 590		if (rc)
 591			return rc;
 592		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 593		if (rc)
 594			return rc;
 595	}
 596
 597	return 0;
 598}
 599
 600static void release_vpd_data(struct ibmvnic_adapter *adapter)
 601{
 602	if (!adapter->vpd)
 603		return;
 604
 605	kfree(adapter->vpd->buff);
 606	kfree(adapter->vpd);
 607
 608	adapter->vpd = NULL;
 609}
 610
 611static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
 612				struct ibmvnic_tx_pool *tx_pool)
 613{
 614	kfree(tx_pool->tx_buff);
 615	kfree(tx_pool->free_map);
 616	free_long_term_buff(adapter, &tx_pool->long_term_buff);
 617}
 618
 619static void release_tx_pools(struct ibmvnic_adapter *adapter)
 620{
 621	int i;
 622
 623	if (!adapter->tx_pool)
 624		return;
 625
 626	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 627		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
 628		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
 629	}
 630
 631	kfree(adapter->tx_pool);
 632	adapter->tx_pool = NULL;
 633	kfree(adapter->tso_pool);
 634	adapter->tso_pool = NULL;
 635	adapter->num_active_tx_pools = 0;
 636}
 637
 638static int init_one_tx_pool(struct net_device *netdev,
 639			    struct ibmvnic_tx_pool *tx_pool,
 640			    int num_entries, int buf_size)
 641{
 642	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 643	int i;
 644
 645	tx_pool->tx_buff = kcalloc(num_entries,
 646				   sizeof(struct ibmvnic_tx_buff),
 647				   GFP_KERNEL);
 648	if (!tx_pool->tx_buff)
 649		return -1;
 650
 651	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
 652				 num_entries * buf_size))
 653		return -1;
 654
 655	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
 656	if (!tx_pool->free_map)
 657		return -1;
 658
 659	for (i = 0; i < num_entries; i++)
 660		tx_pool->free_map[i] = i;
 661
 662	tx_pool->consumer_index = 0;
 663	tx_pool->producer_index = 0;
 664	tx_pool->num_buffers = num_entries;
 665	tx_pool->buf_size = buf_size;
 666
 667	return 0;
 668}
 669
 670static int init_tx_pools(struct net_device *netdev)
 671{
 672	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 673	int tx_subcrqs;
 674	int i, rc;
 675
 676	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 677	adapter->tx_pool = kcalloc(tx_subcrqs,
 678				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 679	if (!adapter->tx_pool)
 680		return -1;
 681
 682	adapter->tso_pool = kcalloc(tx_subcrqs,
 683				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 684	if (!adapter->tso_pool)
 685		return -1;
 686
 687	adapter->num_active_tx_pools = tx_subcrqs;
 688
 689	for (i = 0; i < tx_subcrqs; i++) {
 690		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
 691				      adapter->req_tx_entries_per_subcrq,
 692				      adapter->req_mtu + VLAN_HLEN);
 693		if (rc) {
 694			release_tx_pools(adapter);
 695			return rc;
 696		}
 697
  698		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
  699				      IBMVNIC_TSO_BUFS,
  700				      IBMVNIC_TSO_BUF_SZ);
 701		if (rc) {
 702			release_tx_pools(adapter);
 703			return rc;
 704		}
 705	}
 706
 707	return 0;
 708}
 709
 710static void release_error_buffers(struct ibmvnic_adapter *adapter)
 711{
 712	struct device *dev = &adapter->vdev->dev;
 713	struct ibmvnic_error_buff *error_buff, *tmp;
 714	unsigned long flags;
 715
 716	spin_lock_irqsave(&adapter->error_list_lock, flags);
 717	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
 718		list_del(&error_buff->list);
 719		dma_unmap_single(dev, error_buff->dma, error_buff->len,
 720				 DMA_FROM_DEVICE);
 721		kfree(error_buff->buff);
 722		kfree(error_buff);
 723	}
 724	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
 725}
 726
 727static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
 728{
 729	int i;
 730
 731	if (adapter->napi_enabled)
 732		return;
 733
 734	for (i = 0; i < adapter->req_rx_queues; i++)
 735		napi_enable(&adapter->napi[i]);
 736
 737	adapter->napi_enabled = true;
 738}
 739
 740static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
 741{
 742	int i;
 743
 744	if (!adapter->napi_enabled)
 745		return;
 746
 747	for (i = 0; i < adapter->req_rx_queues; i++) {
 748		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
 749		napi_disable(&adapter->napi[i]);
 750	}
 751
 752	adapter->napi_enabled = false;
 753}
 754
 755static int init_napi(struct ibmvnic_adapter *adapter)
 756{
 757	int i;
 758
 759	adapter->napi = kcalloc(adapter->req_rx_queues,
 760				sizeof(struct napi_struct), GFP_KERNEL);
 761	if (!adapter->napi)
 762		return -ENOMEM;
 763
 764	for (i = 0; i < adapter->req_rx_queues; i++) {
 765		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
 766		netif_napi_add(adapter->netdev, &adapter->napi[i],
 767			       ibmvnic_poll, NAPI_POLL_WEIGHT);
 768	}
 769
 770	adapter->num_active_rx_napi = adapter->req_rx_queues;
 771	return 0;
 772}
 773
 774static void release_napi(struct ibmvnic_adapter *adapter)
 775{
 776	int i;
 777
 778	if (!adapter->napi)
 779		return;
 780
 781	for (i = 0; i < adapter->num_active_rx_napi; i++) {
 782		if (&adapter->napi[i]) {
 783			netdev_dbg(adapter->netdev,
 784				   "Releasing napi[%d]\n", i);
 785			netif_napi_del(&adapter->napi[i]);
 786		}
 787	}
 788
 789	kfree(adapter->napi);
 790	adapter->napi = NULL;
 791	adapter->num_active_rx_napi = 0;
 792}
 793
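/* Log in to the VNIC server.  A PARTIALSUCCESS response means the
 * server could not honor the requested queue counts, so the sub-CRQs
 * are released and renegotiated through a fresh capabilities exchange
 * before retrying, up to IBMVNIC_MAX_QUEUES attempts.
 */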
 794static int ibmvnic_login(struct net_device *netdev)
 795{
 796	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 797	unsigned long timeout = msecs_to_jiffies(30000);
 798	int retry_count = 0;
 799	bool retry;
 800	int rc;
 801
 802	do {
 803		retry = false;
 804		if (retry_count > IBMVNIC_MAX_QUEUES) {
 805			netdev_warn(netdev, "Login attempts exceeded\n");
 806			return -1;
 807		}
 808
 809		adapter->init_done_rc = 0;
 810		reinit_completion(&adapter->init_done);
 811		rc = send_login(adapter);
 812		if (rc) {
 813			netdev_warn(netdev, "Unable to login\n");
 814			return rc;
 815		}
 816
 817		if (!wait_for_completion_timeout(&adapter->init_done,
 818						 timeout)) {
 819			netdev_warn(netdev, "Login timed out\n");
 820			return -1;
 821		}
 822
 823		if (adapter->init_done_rc == PARTIALSUCCESS) {
 824			retry_count++;
 825			release_sub_crqs(adapter, 1);
 826
 827			retry = true;
 828			netdev_dbg(netdev,
 829				   "Received partial success, retrying...\n");
 830			adapter->init_done_rc = 0;
 831			reinit_completion(&adapter->init_done);
 832			send_cap_queries(adapter);
 833			if (!wait_for_completion_timeout(&adapter->init_done,
 834							 timeout)) {
 835				netdev_warn(netdev,
 836					    "Capabilities query timed out\n");
 837				return -1;
 838			}
 839
 840			rc = init_sub_crqs(adapter);
 841			if (rc) {
 842				netdev_warn(netdev,
 843					    "SCRQ initialization failed\n");
 844				return -1;
 845			}
 846
 847			rc = init_sub_crq_irqs(adapter);
 848			if (rc) {
 849				netdev_warn(netdev,
 850					    "SCRQ irq initialization failed\n");
 851				return -1;
 852			}
 853		} else if (adapter->init_done_rc) {
 854			netdev_warn(netdev, "Adapter login failed\n");
 855			return -1;
 856		}
 857	} while (retry);
 858
 859	/* handle pending MAC address changes after successful login */
 860	if (adapter->mac_change_pending) {
 861		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
 862		adapter->mac_change_pending = false;
 863	}
 864
 865	return 0;
 866}
 867
 868static void release_login_buffer(struct ibmvnic_adapter *adapter)
 869{
 870	kfree(adapter->login_buf);
 871	adapter->login_buf = NULL;
 872}
 873
 874static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
 875{
 876	kfree(adapter->login_rsp_buf);
 877	adapter->login_rsp_buf = NULL;
 878}
 879
 880static void release_resources(struct ibmvnic_adapter *adapter)
 881{
 882	release_vpd_data(adapter);
 883
 884	release_tx_pools(adapter);
 885	release_rx_pools(adapter);
 886
 887	release_error_buffers(adapter);
 888	release_napi(adapter);
 889	release_login_rsp_buffer(adapter);
 890}
 891
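/* Ask the VNIC server to change the logical link state.  A return code
 * of 1 indicates partial success; in that case the request is delayed
 * one second and re-sent until it either succeeds or fails outright.
 */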
 892static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
 893{
 894	struct net_device *netdev = adapter->netdev;
 895	unsigned long timeout = msecs_to_jiffies(30000);
 896	union ibmvnic_crq crq;
 897	bool resend;
 898	int rc;
 899
 900	netdev_dbg(netdev, "setting link state %d\n", link_state);
 901
 902	memset(&crq, 0, sizeof(crq));
 903	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 904	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 905	crq.logical_link_state.link_state = link_state;
 906
 907	do {
 908		resend = false;
 909
 910		reinit_completion(&adapter->init_done);
 911		rc = ibmvnic_send_crq(adapter, &crq);
 912		if (rc) {
 913			netdev_err(netdev, "Failed to set link state\n");
 914			return rc;
 915		}
 916
 917		if (!wait_for_completion_timeout(&adapter->init_done,
 918						 timeout)) {
 919			netdev_err(netdev, "timeout setting link state\n");
 920			return -1;
 921		}
 922
 923		if (adapter->init_done_rc == 1) {
  924			/* Partial success, delay and re-send */
 925			mdelay(1000);
 926			resend = true;
 927		}
 928	} while (resend);
 929
 930	return 0;
 931}
 932
 933static int set_real_num_queues(struct net_device *netdev)
 934{
 935	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 936	int rc;
 937
 938	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
 939		   adapter->req_tx_queues, adapter->req_rx_queues);
 940
 941	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
 942	if (rc) {
 943		netdev_err(netdev, "failed to set the number of tx queues\n");
 944		return rc;
 945	}
 946
 947	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
 948	if (rc)
 949		netdev_err(netdev, "failed to set the number of rx queues\n");
 950
 951	return rc;
 952}
 953
 954static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 955{
 956	struct device *dev = &adapter->vdev->dev;
 957	union ibmvnic_crq crq;
 958	int len = 0;
 959
 960	if (adapter->vpd->buff)
 961		len = adapter->vpd->len;
 962
 963	init_completion(&adapter->fw_done);
 964	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 965	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 966	ibmvnic_send_crq(adapter, &crq);
 967	wait_for_completion(&adapter->fw_done);
 968
 969	if (!adapter->vpd->len)
 970		return -ENODATA;
 971
 972	if (!adapter->vpd->buff)
 973		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
 974	else if (adapter->vpd->len != len)
 975		adapter->vpd->buff =
 976			krealloc(adapter->vpd->buff,
 977				 adapter->vpd->len, GFP_KERNEL);
 978
 979	if (!adapter->vpd->buff) {
  980		dev_err(dev, "Could not allocate VPD buffer\n");
 981		return -ENOMEM;
 982	}
 983
 984	adapter->vpd->dma_addr =
 985		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
 986			       DMA_FROM_DEVICE);
 987	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
 988		dev_err(dev, "Could not map VPD buffer\n");
 989		kfree(adapter->vpd->buff);
 990		adapter->vpd->buff = NULL;
 991		return -ENOMEM;
 992	}
 993
 994	reinit_completion(&adapter->fw_done);
 995	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
 996	crq.get_vpd.cmd = GET_VPD;
 997	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
 998	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
 999	ibmvnic_send_crq(adapter, &crq);
1000	wait_for_completion(&adapter->fw_done);
1001
1002	return 0;
1003}
1004
1005static int init_resources(struct ibmvnic_adapter *adapter)
1006{
1007	struct net_device *netdev = adapter->netdev;
1008	int rc;
1009
1010	rc = set_real_num_queues(netdev);
1011	if (rc)
1012		return rc;
1013
1014	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1015	if (!adapter->vpd)
1016		return -ENOMEM;
1017
1018	/* Vital Product Data (VPD) */
1019	rc = ibmvnic_get_vpd(adapter);
1020	if (rc) {
1021		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1022		return rc;
1023	}
1024
1025	adapter->map_id = 1;
1026
1027	rc = init_napi(adapter);
1028	if (rc)
1029		return rc;
1030
1031	send_map_query(adapter);
1032
1033	rc = init_rx_pools(netdev);
1034	if (rc)
1035		return rc;
1036
1037	rc = init_tx_pools(netdev);
1038	return rc;
1039}
1040
1041static int __ibmvnic_open(struct net_device *netdev)
1042{
1043	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1044	enum vnic_state prev_state = adapter->state;
1045	int i, rc;
1046
1047	adapter->state = VNIC_OPENING;
1048	replenish_pools(adapter);
1049	ibmvnic_napi_enable(adapter);
1050
1051	/* We're ready to receive frames, enable the sub-crq interrupts and
1052	 * set the logical link state to up
1053	 */
1054	for (i = 0; i < adapter->req_rx_queues; i++) {
1055		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1056		if (prev_state == VNIC_CLOSED)
1057			enable_irq(adapter->rx_scrq[i]->irq);
1058		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1059	}
1060
1061	for (i = 0; i < adapter->req_tx_queues; i++) {
1062		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1063		if (prev_state == VNIC_CLOSED)
1064			enable_irq(adapter->tx_scrq[i]->irq);
1065		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1066	}
1067
1068	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1069	if (rc) {
1070		for (i = 0; i < adapter->req_rx_queues; i++)
1071			napi_disable(&adapter->napi[i]);
1072		release_resources(adapter);
1073		return rc;
1074	}
1075
1076	netif_tx_start_all_queues(netdev);
1077
1078	if (prev_state == VNIC_CLOSED) {
1079		for (i = 0; i < adapter->req_rx_queues; i++)
1080			napi_schedule(&adapter->napi[i]);
1081	}
1082
1083	adapter->state = VNIC_OPEN;
1084	return rc;
1085}
1086
1087static int ibmvnic_open(struct net_device *netdev)
1088{
1089	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1090	int rc;
1091
1092	/* If device failover is pending, just set device state and return.
1093	 * Device operation will be handled by reset routine.
1094	 */
1095	if (adapter->failover_pending) {
1096		adapter->state = VNIC_OPEN;
1097		return 0;
1098	}
1099
1100	mutex_lock(&adapter->reset_lock);
1101
1102	if (adapter->state != VNIC_CLOSED) {
1103		rc = ibmvnic_login(netdev);
1104		if (rc) {
1105			mutex_unlock(&adapter->reset_lock);
1106			return rc;
1107		}
1108
1109		rc = init_resources(adapter);
1110		if (rc) {
1111			netdev_err(netdev, "failed to initialize resources\n");
1112			release_resources(adapter);
1113			mutex_unlock(&adapter->reset_lock);
1114			return rc;
1115		}
1116	}
1117
1118	rc = __ibmvnic_open(netdev);
1119	netif_carrier_on(netdev);
1120
1121	mutex_unlock(&adapter->reset_lock);
1122
1123	return rc;
1124}
1125
1126static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1127{
1128	struct ibmvnic_rx_pool *rx_pool;
1129	struct ibmvnic_rx_buff *rx_buff;
1130	u64 rx_entries;
1131	int rx_scrqs;
1132	int i, j;
1133
1134	if (!adapter->rx_pool)
1135		return;
1136
1137	rx_scrqs = adapter->num_active_rx_pools;
1138	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1139
1140	/* Free any remaining skbs in the rx buffer pools */
1141	for (i = 0; i < rx_scrqs; i++) {
1142		rx_pool = &adapter->rx_pool[i];
1143		if (!rx_pool || !rx_pool->rx_buff)
1144			continue;
1145
1146		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1147		for (j = 0; j < rx_entries; j++) {
1148			rx_buff = &rx_pool->rx_buff[j];
1149			if (rx_buff && rx_buff->skb) {
1150				dev_kfree_skb_any(rx_buff->skb);
1151				rx_buff->skb = NULL;
1152			}
1153		}
1154	}
1155}
1156
1157static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1158			      struct ibmvnic_tx_pool *tx_pool)
1159{
1160	struct ibmvnic_tx_buff *tx_buff;
1161	u64 tx_entries;
1162	int i;
1163
1164	if (!tx_pool || !tx_pool->tx_buff)
1165		return;
1166
1167	tx_entries = tx_pool->num_buffers;
1168
1169	for (i = 0; i < tx_entries; i++) {
1170		tx_buff = &tx_pool->tx_buff[i];
1171		if (tx_buff && tx_buff->skb) {
1172			dev_kfree_skb_any(tx_buff->skb);
1173			tx_buff->skb = NULL;
1174		}
1175	}
1176}
1177
1178static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1179{
1180	int tx_scrqs;
1181	int i;
1182
1183	if (!adapter->tx_pool || !adapter->tso_pool)
1184		return;
1185
1186	tx_scrqs = adapter->num_active_tx_pools;
1187
1188	/* Free any remaining skbs in the tx buffer pools */
1189	for (i = 0; i < tx_scrqs; i++) {
1190		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1191		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1192		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1193	}
1194}
1195
1196static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1197{
1198	struct net_device *netdev = adapter->netdev;
1199	int i;
1200
1201	if (adapter->tx_scrq) {
1202		for (i = 0; i < adapter->req_tx_queues; i++)
1203			if (adapter->tx_scrq[i]->irq) {
1204				netdev_dbg(netdev,
1205					   "Disabling tx_scrq[%d] irq\n", i);
1206				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1207				disable_irq(adapter->tx_scrq[i]->irq);
1208			}
1209	}
1210
1211	if (adapter->rx_scrq) {
1212		for (i = 0; i < adapter->req_rx_queues; i++) {
1213			if (adapter->rx_scrq[i]->irq) {
1214				netdev_dbg(netdev,
1215					   "Disabling rx_scrq[%d] irq\n", i);
1216				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1217				disable_irq(adapter->rx_scrq[i]->irq);
1218			}
1219		}
1220	}
1221}
1222
1223static void ibmvnic_cleanup(struct net_device *netdev)
1224{
1225	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1226
1227	/* ensure that transmissions are stopped if called by do_reset */
1228	if (adapter->resetting)
1229		netif_tx_disable(netdev);
1230	else
1231		netif_tx_stop_all_queues(netdev);
1232
1233	ibmvnic_napi_disable(adapter);
1234	ibmvnic_disable_irqs(adapter);
1235
1236	clean_rx_pools(adapter);
1237	clean_tx_pools(adapter);
1238}
1239
1240static int __ibmvnic_close(struct net_device *netdev)
1241{
1242	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1243	int rc = 0;
1244
1245	adapter->state = VNIC_CLOSING;
1246	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1247	if (rc)
1248		return rc;
1249	adapter->state = VNIC_CLOSED;
1250	return 0;
1251}
1252
1253static int ibmvnic_close(struct net_device *netdev)
1254{
1255	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1256	int rc;
1257
1258	/* If device failover is pending, just set device state and return.
1259	 * Device operation will be handled by reset routine.
1260	 */
1261	if (adapter->failover_pending) {
1262		adapter->state = VNIC_CLOSED;
1263		return 0;
1264	}
1265
1266	mutex_lock(&adapter->reset_lock);
1267	rc = __ibmvnic_close(netdev);
1268	ibmvnic_cleanup(netdev);
1269	mutex_unlock(&adapter->reset_lock);
1270
1271	return rc;
1272}
1273
1274/**
1275 * build_hdr_data - creates L2/L3/L4 header data buffer
 1276 * @hdr_field: bitfield determining needed headers
 1277 * @skb: socket buffer
 1278 * @hdr_len: array of individual header lengths, filled in here
 1279 * @hdr_data: buffer the headers are copied into
1280 *
1281 * Reads hdr_field to determine which headers are needed by firmware.
1282 * Builds a buffer containing these headers.  Saves individual header
1283 * lengths and total buffer length to be used to build descriptors.
1284 */
1285static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1286			  int *hdr_len, u8 *hdr_data)
1287{
1288	int len = 0;
1289	u8 *hdr;
1290
1291	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1292		hdr_len[0] = sizeof(struct vlan_ethhdr);
1293	else
1294		hdr_len[0] = sizeof(struct ethhdr);
1295
1296	if (skb->protocol == htons(ETH_P_IP)) {
1297		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1298		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1299			hdr_len[2] = tcp_hdrlen(skb);
1300		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1301			hdr_len[2] = sizeof(struct udphdr);
1302	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1303		hdr_len[1] = sizeof(struct ipv6hdr);
1304		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1305			hdr_len[2] = tcp_hdrlen(skb);
1306		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1307			hdr_len[2] = sizeof(struct udphdr);
1308	} else if (skb->protocol == htons(ETH_P_ARP)) {
1309		hdr_len[1] = arp_hdr_len(skb->dev);
1310		hdr_len[2] = 0;
1311	}
1312
1313	memset(hdr_data, 0, 120);
1314	if ((hdr_field >> 6) & 1) {
1315		hdr = skb_mac_header(skb);
1316		memcpy(hdr_data, hdr, hdr_len[0]);
1317		len += hdr_len[0];
1318	}
1319
1320	if ((hdr_field >> 5) & 1) {
1321		hdr = skb_network_header(skb);
1322		memcpy(hdr_data + len, hdr, hdr_len[1]);
1323		len += hdr_len[1];
1324	}
1325
1326	if ((hdr_field >> 4) & 1) {
1327		hdr = skb_transport_header(skb);
1328		memcpy(hdr_data + len, hdr, hdr_len[2]);
1329		len += hdr_len[2];
1330	}
1331	return len;
1332}
1333
1334/**
1335 * create_hdr_descs - create header and header extension descriptors
 1336 * @hdr_field: bitfield determining needed headers
 1337 * @hdr_data: buffer containing header data
 1338 * @len: length of data buffer
 1339 * @hdr_len: array of individual header lengths
 1340 * @scrq_arr: descriptor array
1341 *
1342 * Creates header and, if needed, header extension descriptors and
1343 * places them in a descriptor array, scrq_arr
1344 */
1345
1346static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1347			    union sub_crq *scrq_arr)
1348{
1349	union sub_crq hdr_desc;
1350	int tmp_len = len;
1351	int num_descs = 0;
1352	u8 *data, *cur;
1353	int tmp;
1354
1355	while (tmp_len > 0) {
1356		cur = hdr_data + len - tmp_len;
1357
1358		memset(&hdr_desc, 0, sizeof(hdr_desc));
1359		if (cur != hdr_data) {
1360			data = hdr_desc.hdr_ext.data;
1361			tmp = tmp_len > 29 ? 29 : tmp_len;
1362			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1363			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1364			hdr_desc.hdr_ext.len = tmp;
1365		} else {
1366			data = hdr_desc.hdr.data;
1367			tmp = tmp_len > 24 ? 24 : tmp_len;
1368			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1369			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1370			hdr_desc.hdr.len = tmp;
1371			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1372			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1373			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1374			hdr_desc.hdr.flag = hdr_field << 1;
1375		}
1376		memcpy(data, cur, tmp);
1377		tmp_len -= tmp;
1378		*scrq_arr = hdr_desc;
1379		scrq_arr++;
1380		num_descs++;
1381	}
1382
1383	return num_descs;
1384}
1385
1386/**
1387 * build_hdr_descs_arr - build a header descriptor array
1388 * @skb - socket buffer
1389 * @num_entries - number of descriptors to be sent
1390 * @subcrq - first TX descriptor
1391 * @hdr_field - bit field determining which headers will be sent
1392 *
1393 * This function will build a TX descriptor array with applicable
1394 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1395 */
1396
1397static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1398				int *num_entries, u8 hdr_field)
1399{
1400	int hdr_len[3] = {0, 0, 0};
1401	int tot_len;
1402	u8 *hdr_data = txbuff->hdr_data;
1403
1404	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1405				 txbuff->hdr_data);
1406	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1407			 txbuff->indir_arr + 1);
1408}
1409
1410static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1411				    struct net_device *netdev)
1412{
1413	/* For some backing devices, mishandling of small packets
1414	 * can result in a loss of connection or TX stall. Device
1415	 * architects recommend that no packet should be smaller
1416	 * than the minimum MTU value provided to the driver, so
1417	 * pad any packets to that length
1418	 */
1419	if (skb->len < netdev->min_mtu)
1420		return skb_put_padto(skb, netdev->min_mtu);
1421
1422	return 0;
1423}
1424
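/* Main transmit path.  Because the device requires long term mapped
 * buffers, the skb (head and any frags) is first copied into the tx
 * pool's long term buffer at the slot taken from free_map; a v1 TX
 * descriptor pointing at that slot is then posted to the tx sub-CRQ,
 * either directly or, when L2/L3/L4 headers must accompany the frame,
 * as an indirect array that also carries header descriptors.
 */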
1425static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1426{
1427	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1428	int queue_num = skb_get_queue_mapping(skb);
1429	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1430	struct device *dev = &adapter->vdev->dev;
1431	struct ibmvnic_tx_buff *tx_buff = NULL;
1432	struct ibmvnic_sub_crq_queue *tx_scrq;
1433	struct ibmvnic_tx_pool *tx_pool;
1434	unsigned int tx_send_failed = 0;
1435	unsigned int tx_map_failed = 0;
1436	unsigned int tx_dropped = 0;
1437	unsigned int tx_packets = 0;
1438	unsigned int tx_bytes = 0;
1439	dma_addr_t data_dma_addr;
1440	struct netdev_queue *txq;
1441	unsigned long lpar_rc;
1442	union sub_crq tx_crq;
1443	unsigned int offset;
1444	int num_entries = 1;
1445	unsigned char *dst;
1446	u64 *handle_array;
1447	int index = 0;
1448	u8 proto = 0;
1449	int ret = 0;
1450
1451	if (adapter->resetting) {
1452		if (!netif_subqueue_stopped(netdev, skb))
1453			netif_stop_subqueue(netdev, queue_num);
1454		dev_kfree_skb_any(skb);
1455
1456		tx_send_failed++;
1457		tx_dropped++;
1458		ret = NETDEV_TX_OK;
1459		goto out;
1460	}
1461
1462	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1463		tx_dropped++;
1464		tx_send_failed++;
1465		ret = NETDEV_TX_OK;
1466		goto out;
1467	}
1468	if (skb_is_gso(skb))
1469		tx_pool = &adapter->tso_pool[queue_num];
1470	else
1471		tx_pool = &adapter->tx_pool[queue_num];
1472
1473	tx_scrq = adapter->tx_scrq[queue_num];
1474	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1475	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1476		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1477
1478	index = tx_pool->free_map[tx_pool->consumer_index];
1479
1480	if (index == IBMVNIC_INVALID_MAP) {
1481		dev_kfree_skb_any(skb);
1482		tx_send_failed++;
1483		tx_dropped++;
1484		ret = NETDEV_TX_OK;
1485		goto out;
1486	}
1487
1488	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1489
1490	offset = index * tx_pool->buf_size;
1491	dst = tx_pool->long_term_buff.buff + offset;
1492	memset(dst, 0, tx_pool->buf_size);
1493	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1494
1495	if (skb_shinfo(skb)->nr_frags) {
1496		int cur, i;
1497
1498		/* Copy the head */
1499		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1500		cur = skb_headlen(skb);
1501
1502		/* Copy the frags */
1503		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1504			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1505
1506			memcpy(dst + cur,
1507			       page_address(skb_frag_page(frag)) +
1508			       frag->page_offset, skb_frag_size(frag));
1509			cur += skb_frag_size(frag);
1510		}
1511	} else {
1512		skb_copy_from_linear_data(skb, dst, skb->len);
1513	}
1514
1515	tx_pool->consumer_index =
1516	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1517
1518	tx_buff = &tx_pool->tx_buff[index];
1519	tx_buff->skb = skb;
1520	tx_buff->data_dma[0] = data_dma_addr;
1521	tx_buff->data_len[0] = skb->len;
1522	tx_buff->index = index;
1523	tx_buff->pool_index = queue_num;
1524	tx_buff->last_frag = true;
1525
1526	memset(&tx_crq, 0, sizeof(tx_crq));
1527	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1528	tx_crq.v1.type = IBMVNIC_TX_DESC;
1529	tx_crq.v1.n_crq_elem = 1;
1530	tx_crq.v1.n_sge = 1;
1531	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1532
1533	if (skb_is_gso(skb))
1534		tx_crq.v1.correlator =
1535			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1536	else
1537		tx_crq.v1.correlator = cpu_to_be32(index);
1538	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1539	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1540	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1541
1542	if (adapter->vlan_header_insertion) {
1543		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1544		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1545	}
1546
1547	if (skb->protocol == htons(ETH_P_IP)) {
1548		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1549		proto = ip_hdr(skb)->protocol;
1550	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1551		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1552		proto = ipv6_hdr(skb)->nexthdr;
1553	}
1554
1555	if (proto == IPPROTO_TCP)
1556		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1557	else if (proto == IPPROTO_UDP)
1558		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1559
1560	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1561		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1562		hdrs += 2;
1563	}
1564	if (skb_is_gso(skb)) {
1565		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1566		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1567		hdrs += 2;
1568	}
1569	/* determine if l2/3/4 headers are sent to firmware */
1570	if ((*hdrs >> 7) & 1) {
1571		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1572		tx_crq.v1.n_crq_elem = num_entries;
1573		tx_buff->num_entries = num_entries;
1574		tx_buff->indir_arr[0] = tx_crq;
1575		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1576						    sizeof(tx_buff->indir_arr),
1577						    DMA_TO_DEVICE);
1578		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1579			dev_kfree_skb_any(skb);
1580			tx_buff->skb = NULL;
1581			if (!firmware_has_feature(FW_FEATURE_CMO))
1582				dev_err(dev, "tx: unable to map descriptor array\n");
1583			tx_map_failed++;
1584			tx_dropped++;
1585			ret = NETDEV_TX_OK;
1586			goto tx_err_out;
1587		}
1588		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1589					       (u64)tx_buff->indir_dma,
1590					       (u64)num_entries);
1591	} else {
1592		tx_buff->num_entries = num_entries;
1593		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1594				      &tx_crq);
1595	}
1596	if (lpar_rc != H_SUCCESS) {
1597		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1598		dev_kfree_skb_any(skb);
1599		tx_buff->skb = NULL;
1600
1601		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1602			/* Disable TX and report carrier off if queue is closed
1603			 * or pending failover.
1604			 * Firmware guarantees that a signal will be sent to the
1605			 * driver, triggering a reset or some other action.
1606			 */
1607			netif_tx_stop_all_queues(netdev);
1608			netif_carrier_off(netdev);
1609		}
1610
1611		tx_send_failed++;
1612		tx_dropped++;
1613		ret = NETDEV_TX_OK;
1614		goto tx_err_out;
1615	}
1616
1617	if (atomic_add_return(num_entries, &tx_scrq->used)
1618					>= adapter->req_tx_entries_per_subcrq) {
1619		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1620		netif_stop_subqueue(netdev, queue_num);
1621	}
1622
1623	tx_packets++;
1624	tx_bytes += skb->len;
1625	txq->trans_start = jiffies;
1626	ret = NETDEV_TX_OK;
1627	goto out;
1628
1629tx_err_out:
1630	/* roll back consumer index and map array*/
1631	if (tx_pool->consumer_index == 0)
1632		tx_pool->consumer_index =
1633			tx_pool->num_buffers - 1;
1634	else
1635		tx_pool->consumer_index--;
1636	tx_pool->free_map[tx_pool->consumer_index] = index;
1637out:
1638	netdev->stats.tx_dropped += tx_dropped;
1639	netdev->stats.tx_bytes += tx_bytes;
1640	netdev->stats.tx_packets += tx_packets;
1641	adapter->tx_send_failed += tx_send_failed;
1642	adapter->tx_map_failed += tx_map_failed;
1643	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1644	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1645	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1646
1647	return ret;
1648}
1649
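/* Sync the multicast filter with netdev state: enable-all for
 * IFF_ALLMULTI, disable-all when the address list is empty, otherwise
 * one MULTICAST_CTRL CRQ per list entry.  In promiscuous mode the
 * function bails out early if the server lacks promiscuous support.
 */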
1650static void ibmvnic_set_multi(struct net_device *netdev)
1651{
1652	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1653	struct netdev_hw_addr *ha;
1654	union ibmvnic_crq crq;
1655
1656	memset(&crq, 0, sizeof(crq));
1657	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1658	crq.request_capability.cmd = REQUEST_CAPABILITY;
1659
1660	if (netdev->flags & IFF_PROMISC) {
1661		if (!adapter->promisc_supported)
1662			return;
1663	} else {
1664		if (netdev->flags & IFF_ALLMULTI) {
1665			/* Accept all multicast */
1666			memset(&crq, 0, sizeof(crq));
1667			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1668			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1669			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1670			ibmvnic_send_crq(adapter, &crq);
1671		} else if (netdev_mc_empty(netdev)) {
1672			/* Reject all multicast */
1673			memset(&crq, 0, sizeof(crq));
1674			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1675			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1676			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1677			ibmvnic_send_crq(adapter, &crq);
1678		} else {
1679			/* Accept one or more multicast(s) */
1680			netdev_for_each_mc_addr(ha, netdev) {
1681				memset(&crq, 0, sizeof(crq));
1682				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1683				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1684				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1685				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1686						ha->addr);
1687				ibmvnic_send_crq(adapter, &crq);
1688			}
1689		}
1690	}
1691}
1692
1693static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
1694{
1695	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1696	struct sockaddr *addr = p;
1697	union ibmvnic_crq crq;
1698
1699	if (!is_valid_ether_addr(addr->sa_data))
1700		return -EADDRNOTAVAIL;
1701
1702	memset(&crq, 0, sizeof(crq));
1703	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1704	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1705	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1706
1707	init_completion(&adapter->fw_done);
1708	ibmvnic_send_crq(adapter, &crq);
1709	wait_for_completion(&adapter->fw_done);
1710	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1711	return adapter->fw_done_rc ? -EIO : 0;
1712}
1713
1714static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1715{
1716	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1717	struct sockaddr *addr = p;
1718	int rc;
1719
1720	if (adapter->state == VNIC_PROBED) {
1721		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1722		adapter->mac_change_pending = true;
1723		return 0;
1724	}
1725
1726	rc = __ibmvnic_set_mac(netdev, addr);
1727
1728	return rc;
1729}
1730
1731/**
1732 * do_reset returns zero if we are able to keep processing reset events, or
1733 * non-zero if we hit a fatal error and must halt.
1734 */
1735static int do_reset(struct ibmvnic_adapter *adapter,
1736		    struct ibmvnic_rwi *rwi, u32 reset_state)
1737{
1738	u64 old_num_rx_queues, old_num_tx_queues;
1739	struct net_device *netdev = adapter->netdev;
1740	int i, rc;
1741
1742	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1743		   rwi->reset_reason);
1744
1745	netif_carrier_off(netdev);
1746	adapter->reset_reason = rwi->reset_reason;
1747
1748	old_num_rx_queues = adapter->req_rx_queues;
1749	old_num_tx_queues = adapter->req_tx_queues;
1750
1751	ibmvnic_cleanup(netdev);
1752
1753	if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
1754	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
1755		rc = __ibmvnic_close(netdev);
1756		if (rc)
1757			return rc;
1758	}
1759
1760	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1761	    adapter->wait_for_reset) {
1762		release_resources(adapter);
1763		release_sub_crqs(adapter, 1);
1764		release_crq_queue(adapter);
1765	}
1766
1767	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1768		/* remove the closed state so when we call open it appears
1769		 * we are coming from the probed state.
1770		 */
1771		adapter->state = VNIC_PROBED;
1772
1773		if (adapter->wait_for_reset) {
1774			rc = init_crq_queue(adapter);
1775		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1776			rc = ibmvnic_reenable_crq_queue(adapter);
1777			release_sub_crqs(adapter, 1);
1778		} else {
1779			rc = ibmvnic_reset_crq(adapter);
1780			if (!rc)
1781				rc = vio_enable_interrupts(adapter->vdev);
1782		}
1783
1784		if (rc) {
1785			netdev_err(adapter->netdev,
1786				   "Couldn't initialize crq. rc=%d\n", rc);
1787			return rc;
1788		}
1789
1790		rc = ibmvnic_init(adapter);
1791		if (rc)
1792			return IBMVNIC_INIT_FAILED;
1793
1794		/* If the adapter was in PROBE state prior to the reset,
1795		 * exit here.
1796		 */
1797		if (reset_state == VNIC_PROBED)
1798			return 0;
1799
1800		rc = ibmvnic_login(netdev);
1801		if (rc) {
1802			adapter->state = VNIC_PROBED;
1803			return 0;
1804		}
1805
1806		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1807		    adapter->wait_for_reset) {
1808			rc = init_resources(adapter);
1809			if (rc)
1810				return rc;
1811		} else if (adapter->req_rx_queues != old_num_rx_queues ||
1812			   adapter->req_tx_queues != old_num_tx_queues) {
1813			adapter->map_id = 1;
1814			release_rx_pools(adapter);
1815			release_tx_pools(adapter);
1816			init_rx_pools(netdev);
1817			init_tx_pools(netdev);
1818
1819			release_napi(adapter);
1820			init_napi(adapter);
1821		} else {
1822			rc = reset_tx_pools(adapter);
1823			if (rc)
1824				return rc;
1825
1826			rc = reset_rx_pools(adapter);
1827			if (rc)
1828				return rc;
1829		}
1830		ibmvnic_disable_irqs(adapter);
1831	}
1832	adapter->state = VNIC_CLOSED;
1833
1834	if (reset_state == VNIC_CLOSED)
1835		return 0;
1836
1837	rc = __ibmvnic_open(netdev);
1838	if (rc) {
1839		if (list_empty(&adapter->rwi_list))
1840			adapter->state = VNIC_CLOSED;
1841		else
1842			adapter->state = reset_state;
1843
1844		return 0;
1845	}
1846
1847	/* kick napi */
1848	for (i = 0; i < adapter->req_rx_queues; i++)
1849		napi_schedule(&adapter->napi[i]);
1850
1851	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1852	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1853		netdev_notify_peers(netdev);
1854
1855	netif_carrier_on(netdev);
1856
1857	return 0;
1858}
1859
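/* Pop the oldest reset work item (rwi) off the adapter's list under
 * rwi_lock, or return NULL if no resets are queued.  The caller owns the
 * returned rwi and must kfree() it.
 */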
1860static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1861{
1862	struct ibmvnic_rwi *rwi;
1863
1864	mutex_lock(&adapter->rwi_lock);
1865
1866	if (!list_empty(&adapter->rwi_list)) {
1867		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1868				       list);
1869		list_del(&rwi->list);
1870	} else {
1871		rwi = NULL;
1872	}
1873
1874	mutex_unlock(&adapter->rwi_lock);
1875	return rwi;
1876}
1877
1878static void free_all_rwi(struct ibmvnic_adapter *adapter)
1879{
1880	struct ibmvnic_rwi *rwi;
1881
1882	rwi = get_next_rwi(adapter);
1883	while (rwi) {
1884		kfree(rwi);
1885		rwi = get_next_rwi(adapter);
1886	}
1887}
1888
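/* Reset worker: drains the rwi list, invoking do_reset() for each queued
 * event.  A fatal do_reset() failure stops processing and discards any
 * remaining work items.
 */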
1889static void __ibmvnic_reset(struct work_struct *work)
1890{
1891	struct ibmvnic_rwi *rwi;
1892	struct ibmvnic_adapter *adapter;
1893	struct net_device *netdev;
1894	u32 reset_state;
1895	int rc = 0;
1896
1897	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1898	netdev = adapter->netdev;
1899
1900	mutex_lock(&adapter->reset_lock);
1901	adapter->resetting = true;
1902	reset_state = adapter->state;
1903
1904	rwi = get_next_rwi(adapter);
1905	while (rwi) {
1906		rc = do_reset(adapter, rwi, reset_state);
1907		kfree(rwi);
1908		if (rc && rc != IBMVNIC_INIT_FAILED)
1909			break;
1910
1911		rwi = get_next_rwi(adapter);
1912	}
1913
1914	if (adapter->wait_for_reset) {
1915		adapter->wait_for_reset = false;
1916		adapter->reset_done_rc = rc;
1917		complete(&adapter->reset_done);
1918	}
1919
1920	if (rc) {
1921		netdev_dbg(adapter->netdev, "Reset failed\n");
1922		free_all_rwi(adapter);
1923		mutex_unlock(&adapter->reset_lock);
1924		return;
1925	}
1926
1927	adapter->resetting = false;
1928	mutex_unlock(&adapter->reset_lock);
1929}
1930
1931static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
1932			 enum ibmvnic_reset_reason reason)
1933{
1934	struct ibmvnic_rwi *rwi, *tmp;
1935	struct net_device *netdev = adapter->netdev;
1936	struct list_head *entry;
1937	int ret;
1938
1939	if (adapter->state == VNIC_REMOVING ||
1940	    adapter->state == VNIC_REMOVED ||
1941	    adapter->failover_pending) {
1942		ret = EBUSY;
1943		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
1944		goto err;
1945	}
1946
1947	if (adapter->state == VNIC_PROBING) {
1948		netdev_warn(netdev, "Adapter reset during probe\n");
1949		ret = adapter->init_done_rc = EAGAIN;
1950		goto err;
1951	}
1952
1953	mutex_lock(&adapter->rwi_lock);
1954
1955	list_for_each(entry, &adapter->rwi_list) {
1956		tmp = list_entry(entry, struct ibmvnic_rwi, list);
1957		if (tmp->reset_reason == reason) {
1958			netdev_dbg(netdev, "Skipping matching reset\n");
1959			mutex_unlock(&adapter->rwi_lock);
1960			ret = EBUSY;
1961			goto err;
1962		}
1963	}
1964
1965	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1966	if (!rwi) {
1967		mutex_unlock(&adapter->rwi_lock);
1968		ibmvnic_close(netdev);
1969		ret = ENOMEM;
1970		goto err;
1971	}
1972
1973	rwi->reset_reason = reason;
1974	list_add_tail(&rwi->list, &adapter->rwi_list);
1975	mutex_unlock(&adapter->rwi_lock);
1976
1977	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
1978	schedule_work(&adapter->ibmvnic_reset);
1979
1980	return 0;
1981err:
1982	if (adapter->wait_for_reset)
1983		adapter->wait_for_reset = false;
1984	return -ret;
1985}
1986
1987static void ibmvnic_tx_timeout(struct net_device *dev)
1988{
1989	struct ibmvnic_adapter *adapter = netdev_priv(dev);
1990
1991	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
1992}
1993
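/* Return an rx buffer's slot to its pool's free map and decrement the
 * count of buffers currently available to the hardware.
 */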
1994static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1995				  struct ibmvnic_rx_buff *rx_buff)
1996{
1997	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1998
1999	rx_buff->skb = NULL;
2000
2001	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2002	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2003
2004	atomic_dec(&pool->available);
2005}
2006
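/* NAPI poll routine: harvests up to @budget rx completions from this
 * queue's sub-CRQ, restores VLAN tags stripped by firmware, hands frames
 * to GRO, and replenishes the rx pool before re-enabling the interrupt.
 */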
2007static int ibmvnic_poll(struct napi_struct *napi, int budget)
2008{
2009	struct net_device *netdev = napi->dev;
2010	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2011	int scrq_num = (int)(napi - adapter->napi);
2012	int frames_processed = 0;
2013
2014restart_poll:
2015	while (frames_processed < budget) {
2016		struct sk_buff *skb;
2017		struct ibmvnic_rx_buff *rx_buff;
2018		union sub_crq *next;
2019		u32 length;
2020		u16 offset;
2021		u8 flags = 0;
2022
2023		if (unlikely(adapter->resetting &&
2024			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2025			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2026			napi_complete_done(napi, frames_processed);
2027			return frames_processed;
2028		}
2029
2030		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2031			break;
2032		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2033		rx_buff =
2034		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2035							  rx_comp.correlator);
2036		/* do error checking */
2037		if (next->rx_comp.rc) {
2038			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2039				   be16_to_cpu(next->rx_comp.rc));
2040			/* free the entry */
2041			next->rx_comp.first = 0;
2042			dev_kfree_skb_any(rx_buff->skb);
2043			remove_buff_from_pool(adapter, rx_buff);
2044			continue;
2045		} else if (!rx_buff->skb) {
2046			/* free the entry */
2047			next->rx_comp.first = 0;
2048			remove_buff_from_pool(adapter, rx_buff);
2049			continue;
2050		}
2051
2052		length = be32_to_cpu(next->rx_comp.len);
2053		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2054		flags = next->rx_comp.flags;
2055		skb = rx_buff->skb;
2056		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2057					length);
2058
2059		/* VLAN Header has been stripped by the system firmware and
2060		 * needs to be inserted by the driver
2061		 */
2062		if (adapter->rx_vlan_header_insertion &&
2063		    (flags & IBMVNIC_VLAN_STRIPPED))
2064			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2065					       ntohs(next->rx_comp.vlan_tci));
2066
2067		/* free the entry */
2068		next->rx_comp.first = 0;
2069		remove_buff_from_pool(adapter, rx_buff);
2070
2071		skb_put(skb, length);
2072		skb->protocol = eth_type_trans(skb, netdev);
2073		skb_record_rx_queue(skb, scrq_num);
2074
2075		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2076		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2077			skb->ip_summed = CHECKSUM_UNNECESSARY;
2078		}
2079
2080		length = skb->len;
2081		napi_gro_receive(napi, skb); /* send it up */
2082		netdev->stats.rx_packets++;
2083		netdev->stats.rx_bytes += length;
2084		adapter->rx_stats_buffers[scrq_num].packets++;
2085		adapter->rx_stats_buffers[scrq_num].bytes += length;
2086		frames_processed++;
2087	}
2088
2089	if (adapter->state != VNIC_CLOSING)
2090		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2091
2092	if (frames_processed < budget) {
2093		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2094		napi_complete_done(napi, frames_processed);
2095		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2096		    napi_reschedule(napi)) {
2097			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2098			goto restart_poll;
2099		}
2100	}
2101	return frames_processed;
2102}
2103
2104#ifdef CONFIG_NET_POLL_CONTROLLER
2105static void ibmvnic_netpoll_controller(struct net_device *dev)
2106{
2107	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2108	int i;
2109
2110	replenish_pools(netdev_priv(dev));
2111	for (i = 0; i < adapter->req_rx_queues; i++)
2112		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
2113				     adapter->rx_scrq[i]);
2114}
2115#endif
2116
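/* Request a CHANGE_PARAM reset and block until it completes.  The current
 * settings are snapshotted first so that, if the requested values fail, a
 * second reset can fall back to the previous configuration.
 */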
2117static int wait_for_reset(struct ibmvnic_adapter *adapter)
2118{
2119	int rc, ret;
2120
2121	adapter->fallback.mtu = adapter->req_mtu;
2122	adapter->fallback.rx_queues = adapter->req_rx_queues;
2123	adapter->fallback.tx_queues = adapter->req_tx_queues;
2124	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2125	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2126
2127	init_completion(&adapter->reset_done);
2128	adapter->wait_for_reset = true;
2129	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2130	if (rc)
2131		return rc;
2132	wait_for_completion(&adapter->reset_done);
2133
2134	ret = 0;
2135	if (adapter->reset_done_rc) {
2136		ret = -EIO;
2137		adapter->desired.mtu = adapter->fallback.mtu;
2138		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2139		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2140		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2141		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2142
2143		init_completion(&adapter->reset_done);
2144		adapter->wait_for_reset = true;
2145		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2146		if (rc)
2147			return ret;
2148		wait_for_completion(&adapter->reset_done);
2149	}
2150	adapter->wait_for_reset = false;
2151
2152	return ret;
2153}
2154
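/* ndo_change_mtu handler.  For example (hypothetical device name),
 * "ip link set ibmvnic0 mtu 9000" lands here; the new size is stored as a
 * desired capability and applied through a full CHANGE_PARAM reset.
 */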
2155static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2156{
2157	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2158
2159	adapter->desired.mtu = new_mtu + ETH_HLEN;
2160
2161	return wait_for_reset(adapter);
2162}
2163
2164static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2165						struct net_device *dev,
2166						netdev_features_t features)
2167{
2168	/* Some backing hardware adapters cannot
2169	 * handle packets with an MSS less than 224
2170	 * or with only one segment.
2171	 */
2172	if (skb_is_gso(skb)) {
2173		if (skb_shinfo(skb)->gso_size < 224 ||
2174		    skb_shinfo(skb)->gso_segs == 1)
2175			features &= ~NETIF_F_GSO_MASK;
2176	}
2177
2178	return features;
2179}
2180
2181static const struct net_device_ops ibmvnic_netdev_ops = {
2182	.ndo_open		= ibmvnic_open,
2183	.ndo_stop		= ibmvnic_close,
2184	.ndo_start_xmit		= ibmvnic_xmit,
2185	.ndo_set_rx_mode	= ibmvnic_set_multi,
2186	.ndo_set_mac_address	= ibmvnic_set_mac,
2187	.ndo_validate_addr	= eth_validate_addr,
2188	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2189#ifdef CONFIG_NET_POLL_CONTROLLER
2190	.ndo_poll_controller	= ibmvnic_netpoll_controller,
2191#endif
2192	.ndo_change_mtu		= ibmvnic_change_mtu,
2193	.ndo_features_check     = ibmvnic_features_check,
2194};
2195
2196/* ethtool functions */
2197
2198static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2199				      struct ethtool_link_ksettings *cmd)
2200{
2201	u32 supported, advertising;
2202
2203	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2204			  SUPPORTED_FIBRE);
2205	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2206			    ADVERTISED_FIBRE);
2207	cmd->base.speed = SPEED_1000;
2208	cmd->base.duplex = DUPLEX_FULL;
2209	cmd->base.port = PORT_FIBRE;
2210	cmd->base.phy_address = 0;
2211	cmd->base.autoneg = AUTONEG_ENABLE;
2212
2213	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2214						supported);
2215	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2216						advertising);
2217
2218	return 0;
2219}
2220
2221static void ibmvnic_get_drvinfo(struct net_device *netdev,
2222				struct ethtool_drvinfo *info)
2223{
2224	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2225
2226	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2227	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2228	strlcpy(info->fw_version, adapter->fw_version,
2229		sizeof(info->fw_version));
2230}
2231
2232static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2233{
2234	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2235
2236	return adapter->msg_enable;
2237}
2238
2239static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2240{
2241	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2242
2243	adapter->msg_enable = data;
2244}
2245
2246static u32 ibmvnic_get_link(struct net_device *netdev)
2247{
2248	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2249
2250	/* Don't need to send a query because we request a logical link up at
2251	 * init and then we wait for link state indications
2252	 */
2253	return adapter->logical_link_state;
2254}
2255
2256static void ibmvnic_get_ringparam(struct net_device *netdev,
2257				  struct ethtool_ringparam *ring)
2258{
2259	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2260
2261	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2262	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2263	ring->rx_mini_max_pending = 0;
2264	ring->rx_jumbo_max_pending = 0;
2265	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2266	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2267	ring->rx_mini_pending = 0;
2268	ring->rx_jumbo_pending = 0;
2269}
2270
2271static int ibmvnic_set_ringparam(struct net_device *netdev,
2272				 struct ethtool_ringparam *ring)
2273{
2274	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2275
2276	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2277	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2278		netdev_err(netdev, "Invalid request.\n");
2279		netdev_err(netdev, "Max tx buffers = %llu\n",
2280			   adapter->max_tx_entries_per_subcrq);
2281		netdev_err(netdev, "Max rx buffers = %llu\n",
2282			   adapter->max_rx_add_entries_per_subcrq);
2283		return -EINVAL;
2284	}
2285
2286	adapter->desired.rx_entries = ring->rx_pending;
2287	adapter->desired.tx_entries = ring->tx_pending;
2288
2289	return wait_for_reset(adapter);
2290}
2291
2292static void ibmvnic_get_channels(struct net_device *netdev,
2293				 struct ethtool_channels *channels)
2294{
2295	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2296
2297	channels->max_rx = adapter->max_rx_queues;
2298	channels->max_tx = adapter->max_tx_queues;
2299	channels->max_other = 0;
2300	channels->max_combined = 0;
2301	channels->rx_count = adapter->req_rx_queues;
2302	channels->tx_count = adapter->req_tx_queues;
2303	channels->other_count = 0;
2304	channels->combined_count = 0;
2305}
2306
2307static int ibmvnic_set_channels(struct net_device *netdev,
2308				struct ethtool_channels *channels)
2309{
2310	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2311
2312	adapter->desired.rx_queues = channels->rx_count;
2313	adapter->desired.tx_queues = channels->tx_count;
2314
2315	return wait_for_reset(adapter);
2316}
2317
2318static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2319{
2320	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2321	int i;
2322
2323	if (stringset != ETH_SS_STATS)
2324		return;
2325
2326	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2327		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2328
2329	for (i = 0; i < adapter->req_tx_queues; i++) {
2330		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2331		data += ETH_GSTRING_LEN;
2332
2333		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2334		data += ETH_GSTRING_LEN;
2335
2336		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2337		data += ETH_GSTRING_LEN;
2338	}
2339
2340	for (i = 0; i < adapter->req_rx_queues; i++) {
2341		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2342		data += ETH_GSTRING_LEN;
2343
2344		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2345		data += ETH_GSTRING_LEN;
2346
2347		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2348		data += ETH_GSTRING_LEN;
2349	}
2350}
2351
2352static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2353{
2354	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2355
2356	switch (sset) {
2357	case ETH_SS_STATS:
2358		return ARRAY_SIZE(ibmvnic_stats) +
2359		       adapter->req_tx_queues * NUM_TX_STATS +
2360		       adapter->req_rx_queues * NUM_RX_STATS;
2361	default:
2362		return -EOPNOTSUPP;
2363	}
2364}
2365
2366static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2367				      struct ethtool_stats *stats, u64 *data)
2368{
2369	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2370	union ibmvnic_crq crq;
2371	int i, j;
2372
2373	memset(&crq, 0, sizeof(crq));
2374	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2375	crq.request_statistics.cmd = REQUEST_STATISTICS;
2376	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2377	crq.request_statistics.len =
2378	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2379
2380	/* Wait for data to be written */
2381	init_completion(&adapter->stats_done);
2382	ibmvnic_send_crq(adapter, &crq);
2383	wait_for_completion(&adapter->stats_done);
2384
2385	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2386		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2387						ibmvnic_stats[i].offset));
2388
2389	for (j = 0; j < adapter->req_tx_queues; j++) {
2390		data[i] = adapter->tx_stats_buffers[j].packets;
2391		i++;
2392		data[i] = adapter->tx_stats_buffers[j].bytes;
2393		i++;
2394		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2395		i++;
2396	}
2397
2398	for (j = 0; j < adapter->req_rx_queues; j++) {
2399		data[i] = adapter->rx_stats_buffers[j].packets;
2400		i++;
2401		data[i] = adapter->rx_stats_buffers[j].bytes;
2402		i++;
2403		data[i] = adapter->rx_stats_buffers[j].interrupts;
2404		i++;
2405	}
2406}
2407
2408static const struct ethtool_ops ibmvnic_ethtool_ops = {
2409	.get_drvinfo		= ibmvnic_get_drvinfo,
2410	.get_msglevel		= ibmvnic_get_msglevel,
2411	.set_msglevel		= ibmvnic_set_msglevel,
2412	.get_link		= ibmvnic_get_link,
2413	.get_ringparam		= ibmvnic_get_ringparam,
2414	.set_ringparam		= ibmvnic_set_ringparam,
2415	.get_channels		= ibmvnic_get_channels,
2416	.set_channels		= ibmvnic_set_channels,
2417	.get_strings            = ibmvnic_get_strings,
2418	.get_sset_count         = ibmvnic_get_sset_count,
2419	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2420	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2421};
2422
2423/* Routines for managing CRQs/sCRQs */
2424
2425static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2426				   struct ibmvnic_sub_crq_queue *scrq)
2427{
2428	int rc;
2429
2430	if (scrq->irq) {
2431		free_irq(scrq->irq, scrq);
2432		irq_dispose_mapping(scrq->irq);
2433		scrq->irq = 0;
2434	}
2435
2436	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2437	atomic_set(&scrq->used, 0);
2438	scrq->cur = 0;
2439
2440	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2441			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2442	return rc;
2443}
2444
2445static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2446{
2447	int i, rc;
2448
2449	for (i = 0; i < adapter->req_tx_queues; i++) {
2450		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2451		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2452		if (rc)
2453			return rc;
2454	}
2455
2456	for (i = 0; i < adapter->req_rx_queues; i++) {
2457		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2458		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2459		if (rc)
2460			return rc;
2461	}
2462
2463	return rc;
2464}
2465
2466static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2467				  struct ibmvnic_sub_crq_queue *scrq,
2468				  bool do_h_free)
2469{
2470	struct device *dev = &adapter->vdev->dev;
2471	long rc;
2472
2473	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2474
2475	if (do_h_free) {
2476		/* Close the sub-crqs */
2477		do {
2478			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2479						adapter->vdev->unit_address,
2480						scrq->crq_num);
2481		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2482
2483		if (rc) {
2484			netdev_err(adapter->netdev,
2485				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2486				   scrq->crq_num, rc);
2487		}
2488	}
2489
2490	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2491			 DMA_BIDIRECTIONAL);
2492	free_pages((unsigned long)scrq->msgs, 2);
2493	kfree(scrq);
2494}
2495
2496static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2497							*adapter)
2498{
2499	struct device *dev = &adapter->vdev->dev;
2500	struct ibmvnic_sub_crq_queue *scrq;
2501	int rc;
2502
2503	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2504	if (!scrq)
2505		return NULL;
2506
2507	scrq->msgs =
2508		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2509	if (!scrq->msgs) {
2510		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2511		goto zero_page_failed;
2512	}
2513
2514	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2515					 DMA_BIDIRECTIONAL);
2516	if (dma_mapping_error(dev, scrq->msg_token)) {
2517		dev_warn(dev, "Couldn't map crq queue messages page\n");
2518		goto map_failed;
2519	}
2520
2521	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2522			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2523
2524	if (rc == H_RESOURCE)
2525		rc = ibmvnic_reset_crq(adapter);
2526
2527	if (rc == H_CLOSED) {
2528		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2529	} else if (rc) {
2530		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2531		goto reg_failed;
2532	}
2533
2534	scrq->adapter = adapter;
2535	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2536	spin_lock_init(&scrq->lock);
2537
2538	netdev_dbg(adapter->netdev,
2539		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2540		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2541
2542	return scrq;
2543
2544reg_failed:
2545	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2546			 DMA_BIDIRECTIONAL);
2547map_failed:
2548	free_pages((unsigned long)scrq->msgs, 2);
2549zero_page_failed:
2550	kfree(scrq);
2551
2552	return NULL;
2553}
2554
2555static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2556{
2557	int i;
2558
2559	if (adapter->tx_scrq) {
2560		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2561			if (!adapter->tx_scrq[i])
2562				continue;
2563
2564			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2565				   i);
2566			if (adapter->tx_scrq[i]->irq) {
2567				free_irq(adapter->tx_scrq[i]->irq,
2568					 adapter->tx_scrq[i]);
2569				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2570				adapter->tx_scrq[i]->irq = 0;
2571			}
2572
2573			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2574					      do_h_free);
2575		}
2576
2577		kfree(adapter->tx_scrq);
2578		adapter->tx_scrq = NULL;
2579		adapter->num_active_tx_scrqs = 0;
2580	}
2581
2582	if (adapter->rx_scrq) {
2583		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2584			if (!adapter->rx_scrq[i])
2585				continue;
2586
2587			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2588				   i);
2589			if (adapter->rx_scrq[i]->irq) {
2590				free_irq(adapter->rx_scrq[i]->irq,
2591					 adapter->rx_scrq[i]);
2592				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2593				adapter->rx_scrq[i]->irq = 0;
2594			}
2595
2596			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2597					      do_h_free);
2598		}
2599
2600		kfree(adapter->rx_scrq);
2601		adapter->rx_scrq = NULL;
2602		adapter->num_active_rx_scrqs = 0;
2603	}
2604}
2605
2606static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2607			    struct ibmvnic_sub_crq_queue *scrq)
2608{
2609	struct device *dev = &adapter->vdev->dev;
2610	unsigned long rc;
2611
2612	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2613				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2614	if (rc)
2615		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2616			scrq->hw_irq, rc);
2617	return rc;
2618}
2619
2620static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2621			   struct ibmvnic_sub_crq_queue *scrq)
2622{
2623	struct device *dev = &adapter->vdev->dev;
2624	unsigned long rc;
2625
2626	if (scrq->hw_irq > 0x100000000ULL) {
2627		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2628		return 1;
2629	}
2630
2631	if (adapter->resetting &&
2632	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
2633		u64 val = (0xff000000) | scrq->hw_irq;
2634
2635		rc = plpar_hcall_norets(H_EOI, val);
2636		if (rc)
2637			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2638				val, rc);
2639	}
2640
2641	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2642				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2643	if (rc)
2644		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2645			scrq->hw_irq, rc);
2646	return rc;
2647}
2648
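/* Process tx completions on one sub-CRQ: unmap any indirect descriptor
 * array, free completed skbs, return buffers to the tx pool's free map,
 * and wake the matching subqueue once the number of in-flight entries
 * falls to half the ring or less.
 */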
2649static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2650			       struct ibmvnic_sub_crq_queue *scrq)
2651{
2652	struct device *dev = &adapter->vdev->dev;
2653	struct ibmvnic_tx_pool *tx_pool;
2654	struct ibmvnic_tx_buff *txbuff;
2655	union sub_crq *next;
2656	int index;
2657	int i, j;
2658	u8 *first;
2659
2660restart_loop:
2661	while (pending_scrq(adapter, scrq)) {
2662		unsigned int pool = scrq->pool_index;
2663		int num_entries = 0;
2664
2665		next = ibmvnic_next_scrq(adapter, scrq);
2666		for (i = 0; i < next->tx_comp.num_comps; i++) {
2667			if (next->tx_comp.rcs[i]) {
2668				dev_err(dev, "tx error %x\n",
2669					next->tx_comp.rcs[i]);
2670				continue;
2671			}
2672			index = be32_to_cpu(next->tx_comp.correlators[i]);
2673			if (index & IBMVNIC_TSO_POOL_MASK) {
2674				tx_pool = &adapter->tso_pool[pool];
2675				index &= ~IBMVNIC_TSO_POOL_MASK;
2676			} else {
2677				tx_pool = &adapter->tx_pool[pool];
2678			}
2679
2680			txbuff = &tx_pool->tx_buff[index];
2681
2682			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2683				if (!txbuff->data_dma[j])
2684					continue;
2685
2686				txbuff->data_dma[j] = 0;
2687			}
2688			/* if sub_crq was sent indirectly */
2689			first = &txbuff->indir_arr[0].generic.first;
2690			if (*first == IBMVNIC_CRQ_CMD) {
2691				dma_unmap_single(dev, txbuff->indir_dma,
2692						 sizeof(txbuff->indir_arr),
2693						 DMA_TO_DEVICE);
2694				*first = 0;
2695			}
2696
2697			if (txbuff->last_frag) {
2698				dev_kfree_skb_any(txbuff->skb);
2699				txbuff->skb = NULL;
2700			}
2701
2702			num_entries += txbuff->num_entries;
2703
2704			tx_pool->free_map[tx_pool->producer_index] = index;
2705			tx_pool->producer_index =
2706				(tx_pool->producer_index + 1) %
2707					tx_pool->num_buffers;
2708		}
2709		/* remove tx_comp scrq */
2710		next->tx_comp.first = 0;
2711
2712		if (atomic_sub_return(num_entries, &scrq->used) <=
2713		    (adapter->req_tx_entries_per_subcrq / 2) &&
2714		    __netif_subqueue_stopped(adapter->netdev,
2715					     scrq->pool_index)) {
2716			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2717			netdev_dbg(adapter->netdev, "Started queue %d\n",
2718				   scrq->pool_index);
2719		}
2720	}
2721
2722	enable_scrq_irq(adapter, scrq);
2723
2724	if (pending_scrq(adapter, scrq)) {
2725		disable_scrq_irq(adapter, scrq);
2726		goto restart_loop;
2727	}
2728
2729	return 0;
2730}
2731
2732static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2733{
2734	struct ibmvnic_sub_crq_queue *scrq = instance;
2735	struct ibmvnic_adapter *adapter = scrq->adapter;
2736
2737	disable_scrq_irq(adapter, scrq);
2738	ibmvnic_complete_tx(adapter, scrq);
2739
2740	return IRQ_HANDLED;
2741}
2742
2743static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2744{
2745	struct ibmvnic_sub_crq_queue *scrq = instance;
2746	struct ibmvnic_adapter *adapter = scrq->adapter;
2747
2748	/* When booting a kdump kernel we can hit pending interrupts
2749	 * prior to completing driver initialization.
2750	 */
2751	if (unlikely(adapter->state != VNIC_OPEN))
2752		return IRQ_NONE;
2753
2754	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2755
2756	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2757		disable_scrq_irq(adapter, scrq);
2758		__napi_schedule(&adapter->napi[scrq->scrq_num]);
2759	}
2760
2761	return IRQ_HANDLED;
2762}
2763
2764static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2765{
2766	struct device *dev = &adapter->vdev->dev;
2767	struct ibmvnic_sub_crq_queue *scrq;
2768	int i = 0, j = 0;
2769	int rc = 0;
2770
2771	for (i = 0; i < adapter->req_tx_queues; i++) {
2772		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2773			   i);
2774		scrq = adapter->tx_scrq[i];
2775		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2776
2777		if (!scrq->irq) {
2778			rc = -EINVAL;
2779			dev_err(dev, "Error mapping irq\n");
2780			goto req_tx_irq_failed;
2781		}
2782
2783		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2784				 0, "ibmvnic_tx", scrq);
2785
2786		if (rc) {
2787			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2788				scrq->irq, rc);
2789			irq_dispose_mapping(scrq->irq);
2790			goto req_tx_irq_failed;
2791		}
2792	}
2793
2794	for (i = 0; i < adapter->req_rx_queues; i++) {
2795		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2796			   i);
2797		scrq = adapter->rx_scrq[i];
2798		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2799		if (!scrq->irq) {
2800			rc = -EINVAL;
2801			dev_err(dev, "Error mapping irq\n");
2802			goto req_rx_irq_failed;
2803		}
2804		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2805				 0, "ibmvnic_rx", scrq);
2806		if (rc) {
2807			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2808				scrq->irq, rc);
2809			irq_dispose_mapping(scrq->irq);
2810			goto req_rx_irq_failed;
2811		}
2812	}
2813	return rc;
2814
2815req_rx_irq_failed:
2816	for (j = 0; j < i; j++) {
2817		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2818		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2819	}
2820	i = adapter->req_tx_queues;
2821req_tx_irq_failed:
2822	for (j = 0; j < i; j++) {
2823		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2824		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2825	}
2826	release_sub_crqs(adapter, 1);
2827	return rc;
2828}
2829
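/* Allocate all tx and rx sub-CRQs in a single pass.  If fewer queues
 * register than requested, the shortfall is spread between rx and tx
 * (never dropping below the negotiated minimums) before the per-type
 * arrays are populated.
 */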
2830static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2831{
2832	struct device *dev = &adapter->vdev->dev;
2833	struct ibmvnic_sub_crq_queue **allqueues;
2834	int registered_queues = 0;
2835	int total_queues;
2836	int more = 0;
2837	int i;
2838
2839	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2840
2841	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2842	if (!allqueues)
2843		return -1;
2844
2845	for (i = 0; i < total_queues; i++) {
2846		allqueues[i] = init_sub_crq_queue(adapter);
2847		if (!allqueues[i]) {
2848			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2849			break;
2850		}
2851		registered_queues++;
2852	}
2853
2854	/* Make sure we were able to register the minimum number of queues */
2855	if (registered_queues <
2856	    adapter->min_tx_queues + adapter->min_rx_queues) {
2857		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2858		goto tx_failed;
2859	}
2860
2861	/* Distribute the allocation shortfall across the rx and tx queues */
2862	for (i = 0; i < total_queues - registered_queues + more ; i++) {
2863		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2864		switch (i % 3) {
2865		case 0:
2866			if (adapter->req_rx_queues > adapter->min_rx_queues)
2867				adapter->req_rx_queues--;
2868			else
2869				more++;
2870			break;
2871		case 1:
2872			if (adapter->req_tx_queues > adapter->min_tx_queues)
2873				adapter->req_tx_queues--;
2874			else
2875				more++;
2876			break;
2877		}
2878	}
2879
2880	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2881				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
2882	if (!adapter->tx_scrq)
2883		goto tx_failed;
2884
2885	for (i = 0; i < adapter->req_tx_queues; i++) {
2886		adapter->tx_scrq[i] = allqueues[i];
2887		adapter->tx_scrq[i]->pool_index = i;
2888		adapter->num_active_tx_scrqs++;
2889	}
2890
2891	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2892				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
2893	if (!adapter->rx_scrq)
2894		goto rx_failed;
2895
2896	for (i = 0; i < adapter->req_rx_queues; i++) {
2897		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2898		adapter->rx_scrq[i]->scrq_num = i;
2899		adapter->num_active_rx_scrqs++;
2900	}
2901
2902	kfree(allqueues);
2903	return 0;
2904
2905rx_failed:
2906	kfree(adapter->tx_scrq);
2907	adapter->tx_scrq = NULL;
2908tx_failed:
2909	for (i = 0; i < registered_queues; i++)
2910		release_sub_crq_queue(adapter, allqueues[i], 1);
2911	kfree(allqueues);
2912	return -1;
2913}
2914
2915static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2916{
2917	struct device *dev = &adapter->vdev->dev;
2918	union ibmvnic_crq crq;
2919	int max_entries;
2920
2921	if (!retry) {
2922		/* Sub-CRQ entries are 32 bytes long */
2923		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2924
2925		if (adapter->min_tx_entries_per_subcrq > entries_page ||
2926		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
2927			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2928			return;
2929		}
2930
2931		if (adapter->desired.mtu)
2932			adapter->req_mtu = adapter->desired.mtu;
2933		else
2934			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2935
2936		if (!adapter->desired.tx_entries)
2937			adapter->desired.tx_entries =
2938					adapter->max_tx_entries_per_subcrq;
2939		if (!adapter->desired.rx_entries)
2940			adapter->desired.rx_entries =
2941					adapter->max_rx_add_entries_per_subcrq;
2942
2943		max_entries = IBMVNIC_MAX_LTB_SIZE /
2944			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2945
2946		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2947			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2948			adapter->desired.tx_entries = max_entries;
2949		}
2950
2951		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2952			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2953			adapter->desired.rx_entries = max_entries;
2954		}
2955
2956		if (adapter->desired.tx_entries)
2957			adapter->req_tx_entries_per_subcrq =
2958					adapter->desired.tx_entries;
2959		else
2960			adapter->req_tx_entries_per_subcrq =
2961					adapter->max_tx_entries_per_subcrq;
2962
2963		if (adapter->desired.rx_entries)
2964			adapter->req_rx_add_entries_per_subcrq =
2965					adapter->desired.rx_entries;
2966		else
2967			adapter->req_rx_add_entries_per_subcrq =
2968					adapter->max_rx_add_entries_per_subcrq;
2969
2970		if (adapter->desired.tx_queues)
2971			adapter->req_tx_queues =
2972					adapter->desired.tx_queues;
2973		else
2974			adapter->req_tx_queues =
2975					adapter->opt_tx_comp_sub_queues;
2976
2977		if (adapter->desired.rx_queues)
2978			adapter->req_rx_queues =
2979					adapter->desired.rx_queues;
2980		else
2981			adapter->req_rx_queues =
2982					adapter->opt_rx_comp_queues;
2983
2984		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2985	}
2986
2987	memset(&crq, 0, sizeof(crq));
2988	crq.request_capability.first = IBMVNIC_CRQ_CMD;
2989	crq.request_capability.cmd = REQUEST_CAPABILITY;
2990
2991	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2992	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2993	atomic_inc(&adapter->running_cap_crqs);
2994	ibmvnic_send_crq(adapter, &crq);
2995
2996	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2997	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2998	atomic_inc(&adapter->running_cap_crqs);
2999	ibmvnic_send_crq(adapter, &crq);
3000
3001	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3002	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3003	atomic_inc(&adapter->running_cap_crqs);
3004	ibmvnic_send_crq(adapter, &crq);
3005
3006	crq.request_capability.capability =
3007	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3008	crq.request_capability.number =
3009	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3010	atomic_inc(&adapter->running_cap_crqs);
3011	ibmvnic_send_crq(adapter, &crq);
3012
3013	crq.request_capability.capability =
3014	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3015	crq.request_capability.number =
3016	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3017	atomic_inc(&adapter->running_cap_crqs);
3018	ibmvnic_send_crq(adapter, &crq);
3019
3020	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3021	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3022	atomic_inc(&adapter->running_cap_crqs);
3023	ibmvnic_send_crq(adapter, &crq);
3024
3025	if (adapter->netdev->flags & IFF_PROMISC) {
3026		if (adapter->promisc_supported) {
3027			crq.request_capability.capability =
3028			    cpu_to_be16(PROMISC_REQUESTED);
3029			crq.request_capability.number = cpu_to_be64(1);
3030			atomic_inc(&adapter->running_cap_crqs);
3031			ibmvnic_send_crq(adapter, &crq);
3032		}
3033	} else {
3034		crq.request_capability.capability =
3035		    cpu_to_be16(PROMISC_REQUESTED);
3036		crq.request_capability.number = cpu_to_be64(0);
3037		atomic_inc(&adapter->running_cap_crqs);
3038		ibmvnic_send_crq(adapter, &crq);
3039	}
3040}
3041
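/* A sub-CRQ slot is valid once the server sets the response bit in its
 * first byte; this peeks at the current slot without consuming it.
 */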
3042static int pending_scrq(struct ibmvnic_adapter *adapter,
3043			struct ibmvnic_sub_crq_queue *scrq)
3044{
3045	union sub_crq *entry = &scrq->msgs[scrq->cur];
3046
3047	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3048		return 1;
3049	else
3050		return 0;
3051}
3052
3053static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3054					struct ibmvnic_sub_crq_queue *scrq)
3055{
3056	union sub_crq *entry;
3057	unsigned long flags;
3058
3059	spin_lock_irqsave(&scrq->lock, flags);
3060	entry = &scrq->msgs[scrq->cur];
3061	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3062		if (++scrq->cur == scrq->size)
3063			scrq->cur = 0;
3064	} else {
3065		entry = NULL;
3066	}
3067	spin_unlock_irqrestore(&scrq->lock, flags);
3068
3069	return entry;
3070}
3071
3072static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3073{
3074	struct ibmvnic_crq_queue *queue = &adapter->crq;
3075	union ibmvnic_crq *crq;
3076
3077	crq = &queue->msgs[queue->cur];
3078	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3079		if (++queue->cur == queue->size)
3080			queue->cur = 0;
3081	} else {
3082		crq = NULL;
3083	}
3084
3085	return crq;
3086}
3087
3088static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3089		       union sub_crq *sub_crq)
3090{
3091	unsigned int ua = adapter->vdev->unit_address;
3092	struct device *dev = &adapter->vdev->dev;
3093	u64 *u64_crq = (u64 *)sub_crq;
3094	int rc;
3095
3096	netdev_dbg(adapter->netdev,
3097		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3098		   (unsigned long int)cpu_to_be64(remote_handle),
3099		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3100		   (unsigned long int)cpu_to_be64(u64_crq[1]),
3101		   (unsigned long int)cpu_to_be64(u64_crq[2]),
3102		   (unsigned long int)cpu_to_be64(u64_crq[3]));
3103
3104	/* Make sure the hypervisor sees the complete request */
3105	mb();
3106
3107	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3108				cpu_to_be64(remote_handle),
3109				cpu_to_be64(u64_crq[0]),
3110				cpu_to_be64(u64_crq[1]),
3111				cpu_to_be64(u64_crq[2]),
3112				cpu_to_be64(u64_crq[3]));
3113
3114	if (rc) {
3115		if (rc == H_CLOSED)
3116			dev_warn(dev, "CRQ Queue closed\n");
3117		dev_err(dev, "Send error (rc=%d)\n", rc);
3118	}
3119
3120	return rc;
3121}
3122
3123static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3124				u64 remote_handle, u64 ioba, u64 num_entries)
3125{
3126	unsigned int ua = adapter->vdev->unit_address;
3127	struct device *dev = &adapter->vdev->dev;
3128	int rc;
3129
3130	/* Make sure the hypervisor sees the complete request */
3131	mb();
3132	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3133				cpu_to_be64(remote_handle),
3134				ioba, num_entries);
3135
3136	if (rc) {
3137		if (rc == H_CLOSED)
3138			dev_warn(dev, "CRQ Queue closed\n");
3139		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
3140	}
3141
3142	return rc;
3143}
3144
3145static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3146			    union ibmvnic_crq *crq)
3147{
3148	unsigned int ua = adapter->vdev->unit_address;
3149	struct device *dev = &adapter->vdev->dev;
3150	u64 *u64_crq = (u64 *)crq;
3151	int rc;
3152
3153	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3154		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3155		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3156
3157	/* Make sure the hypervisor sees the complete request */
3158	mb();
3159
3160	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3161				cpu_to_be64(u64_crq[0]),
3162				cpu_to_be64(u64_crq[1]));
3163
3164	if (rc) {
3165		if (rc == H_CLOSED) {
3166			dev_warn(dev, "CRQ Queue closed\n");
3167			if (adapter->resetting)
3168				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3169		}
3170
3171		dev_warn(dev, "Send error (rc=%d)\n", rc);
3172	}
3173
3174	return rc;
3175}
3176
3177static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3178{
3179	union ibmvnic_crq crq;
3180
3181	memset(&crq, 0, sizeof(crq));
3182	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3183	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3184	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3185
3186	return ibmvnic_send_crq(adapter, &crq);
3187}
3188
3189static int send_version_xchg(struct ibmvnic_adapter *adapter)
3190{
3191	union ibmvnic_crq crq;
3192
3193	memset(&crq, 0, sizeof(crq));
3194	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3195	crq.version_exchange.cmd = VERSION_EXCHANGE;
3196	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3197
3198	return ibmvnic_send_crq(adapter, &crq);
3199}
3200
3201struct vnic_login_client_data {
3202	u8	type;
3203	__be16	len;
3204	char	name[];
3205} __packed;
3206
3207static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3208{
3209	int len;
3210
3211	/* Calculate the amount of buffer space needed for the
3212	 * vnic client data in the login buffer. There are four entries,
3213	 * OS name, LPAR name, device name, and a null last entry.
3214	 */
3215	len = 4 * sizeof(struct vnic_login_client_data);
3216	len += 6; /* "Linux" plus NULL */
3217	len += strlen(utsname()->nodename) + 1;
3218	len += strlen(adapter->netdev->name) + 1;
3219
3220	return len;
3221}
3222
3223static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3224				 struct vnic_login_client_data *vlcd)
3225{
3226	const char *os_name = "Linux";
3227	int len;
3228
3229	/* Type 1 - LPAR OS */
3230	vlcd->type = 1;
3231	len = strlen(os_name) + 1;
3232	vlcd->len = cpu_to_be16(len);
3233	strncpy(vlcd->name, os_name, len);
3234	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3235
3236	/* Type 2 - LPAR name */
3237	vlcd->type = 2;
3238	len = strlen(utsname()->nodename) + 1;
3239	vlcd->len = cpu_to_be16(len);
3240	strncpy(vlcd->name, utsname()->nodename, len);
3241	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3242
3243	/* Type 3 - device name */
3244	vlcd->type = 3;
3245	len = strlen(adapter->netdev->name) + 1;
3246	vlcd->len = cpu_to_be16(len);
3247	strncpy(vlcd->name, adapter->netdev->name, len);
3248}
3249
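/* Build and send the LOGIN CRQ: a DMA-mapped request buffer carrying the
 * tx/rx sub-CRQ handles plus client data (OS, LPAR, and device names),
 * and a response buffer that the server fills in asynchronously.
 */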
3250static int send_login(struct ibmvnic_adapter *adapter)
3251{
3252	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3253	struct ibmvnic_login_buffer *login_buffer;
3254	struct device *dev = &adapter->vdev->dev;
3255	dma_addr_t rsp_buffer_token;
3256	dma_addr_t buffer_token;
3257	size_t rsp_buffer_size;
3258	union ibmvnic_crq crq;
3259	size_t buffer_size;
3260	__be64 *tx_list_p;
3261	__be64 *rx_list_p;
3262	int client_data_len;
3263	struct vnic_login_client_data *vlcd;
3264	int i;
3265
3266	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3267		netdev_err(adapter->netdev,
3268			   "RX or TX queues are not allocated, device login failed\n");
3269		return -1;
3270	}
3271
3272	release_login_rsp_buffer(adapter);
3273	client_data_len = vnic_client_data_len(adapter);
3274
3275	buffer_size =
3276	    sizeof(struct ibmvnic_login_buffer) +
3277	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3278	    client_data_len;
3279
3280	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3281	if (!login_buffer)
3282		goto buf_alloc_failed;
3283
3284	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3285				      DMA_TO_DEVICE);
3286	if (dma_mapping_error(dev, buffer_token)) {
3287		dev_err(dev, "Couldn't map login buffer\n");
3288		goto buf_map_failed;
3289	}
3290
3291	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3292			  sizeof(u64) * adapter->req_tx_queues +
3293			  sizeof(u64) * adapter->req_rx_queues +
3294			  sizeof(u64) * adapter->req_rx_queues +
3295			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3296
3297	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3298	if (!login_rsp_buffer)
3299		goto buf_rsp_alloc_failed;
3300
3301	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3302					  rsp_buffer_size, DMA_FROM_DEVICE);
3303	if (dma_mapping_error(dev, rsp_buffer_token)) {
3304		dev_err(dev, "Couldn't map login rsp buffer\n");
3305		goto buf_rsp_map_failed;
3306	}
3307
3308	adapter->login_buf = login_buffer;
3309	adapter->login_buf_token = buffer_token;
3310	adapter->login_buf_sz = buffer_size;
3311	adapter->login_rsp_buf = login_rsp_buffer;
3312	adapter->login_rsp_buf_token = rsp_buffer_token;
3313	adapter->login_rsp_buf_sz = rsp_buffer_size;
3314
3315	login_buffer->len = cpu_to_be32(buffer_size);
3316	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3317	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3318	login_buffer->off_txcomp_subcrqs =
3319	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3320	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3321	login_buffer->off_rxcomp_subcrqs =
3322	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3323			sizeof(u64) * adapter->req_tx_queues);
3324	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3325	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3326
3327	tx_list_p = (__be64 *)((char *)login_buffer +
3328				      sizeof(struct ibmvnic_login_buffer));
3329	rx_list_p = (__be64 *)((char *)login_buffer +
3330				      sizeof(struct ibmvnic_login_buffer) +
3331				      sizeof(u64) * adapter->req_tx_queues);
3332
3333	for (i = 0; i < adapter->req_tx_queues; i++) {
3334		if (adapter->tx_scrq[i]) {
3335			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3336						   crq_num);
3337		}
3338	}
3339
3340	for (i = 0; i < adapter->req_rx_queues; i++) {
3341		if (adapter->rx_scrq[i]) {
3342			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3343						   crq_num);
3344		}
3345	}
3346
3347	/* Insert vNIC login client data */
3348	vlcd = (struct vnic_login_client_data *)
3349		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3350	login_buffer->client_data_offset =
3351			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3352	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3353
3354	vnic_add_client_data(adapter, vlcd);
3355
3356	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3357	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3358		netdev_dbg(adapter->netdev, "%016lx\n",
3359			   ((unsigned long int *)(adapter->login_buf))[i]);
3360	}
3361
3362	memset(&crq, 0, sizeof(crq));
3363	crq.login.first = IBMVNIC_CRQ_CMD;
3364	crq.login.cmd = LOGIN;
3365	crq.login.ioba = cpu_to_be32(buffer_token);
3366	crq.login.len = cpu_to_be32(buffer_size);
3367	ibmvnic_send_crq(adapter, &crq);
3368
3369	return 0;
3370
3371buf_rsp_map_failed:
3372	kfree(login_rsp_buffer);
3373buf_rsp_alloc_failed:
3374	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3375buf_map_failed:
3376	kfree(login_buffer);
3377buf_alloc_failed:
3378	return -1;
3379}
3380
3381static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3382			     u32 len, u8 map_id)
3383{
3384	union ibmvnic_crq crq;
3385
3386	memset(&crq, 0, sizeof(crq));
3387	crq.request_map.first = IBMVNIC_CRQ_CMD;
3388	crq.request_map.cmd = REQUEST_MAP;
3389	crq.request_map.map_id = map_id;
3390	crq.request_map.ioba = cpu_to_be32(addr);
3391	crq.request_map.len = cpu_to_be32(len);
3392	ibmvnic_send_crq(adapter, &crq);
3393}
3394
3395static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3396{
3397	union ibmvnic_crq crq;
3398
3399	memset(&crq, 0, sizeof(crq));
3400	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3401	crq.request_unmap.cmd = REQUEST_UNMAP;
3402	crq.request_unmap.map_id = map_id;
3403	ibmvnic_send_crq(adapter, &crq);
3404}
3405
3406static void send_map_query(struct ibmvnic_adapter *adapter)
3407{
3408	union ibmvnic_crq crq;
3409
3410	memset(&crq, 0, sizeof(crq));
3411	crq.query_map.first = IBMVNIC_CRQ_CMD;
3412	crq.query_map.cmd = QUERY_MAP;
3413	ibmvnic_send_crq(adapter, &crq);
3414}
3415
3416/* Send a series of CRQs requesting various capabilities of the VNIC server */
3417static void send_cap_queries(struct ibmvnic_adapter *adapter)
3418{
3419	union ibmvnic_crq crq;
3420
3421	atomic_set(&adapter->running_cap_crqs, 0);
3422	memset(&crq, 0, sizeof(crq));
3423	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3424	crq.query_capability.cmd = QUERY_CAPABILITY;
3425
3426	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3427	atomic_inc(&adapter->running_cap_crqs);
3428	ibmvnic_send_crq(adapter, &crq);
3429
3430	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3431	atomic_inc(&adapter->running_cap_crqs);
3432	ibmvnic_send_crq(adapter, &crq);
3433
3434	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3435	atomic_inc(&adapter->running_cap_crqs);
3436	ibmvnic_send_crq(adapter, &crq);
3437
3438	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3439	atomic_inc(&adapter->running_cap_crqs);
3440	ibmvnic_send_crq(adapter, &crq);
3441
3442	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3443	atomic_inc(&adapter->running_cap_crqs);
3444	ibmvnic_send_crq(adapter, &crq);
3445
3446	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3447	atomic_inc(&adapter->running_cap_crqs);
3448	ibmvnic_send_crq(adapter, &crq);
3449
3450	crq.query_capability.capability =
3451	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3452	atomic_inc(&adapter->running_cap_crqs);
3453	ibmvnic_send_crq(adapter, &crq);
3454
3455	crq.query_capability.capability =
3456	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3457	atomic_inc(&adapter->running_cap_crqs);
3458	ibmvnic_send_crq(adapter, &crq);
3459
3460	crq.query_capability.capability =
3461	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3462	atomic_inc(&adapter->running_cap_crqs);
3463	ibmvnic_send_crq(adapter, &crq);
3464
3465	crq.query_capability.capability =
3466	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3467	atomic_inc(&adapter->running_cap_crqs);
3468	ibmvnic_send_crq(adapter, &crq);
3469
3470	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3471	atomic_inc(&adapter->running_cap_crqs);
3472	ibmvnic_send_crq(adapter, &crq);
3473
3474	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3475	atomic_inc(&adapter->running_cap_crqs);
3476	ibmvnic_send_crq(adapter, &crq);
3477
3478	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3479	atomic_inc(&adapter->running_cap_crqs);
3480	ibmvnic_send_crq(adapter, &crq);
3481
3482	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3483	atomic_inc(&adapter->running_cap_crqs);
3484	ibmvnic_send_crq(adapter, &crq);
3485
3486	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3487	atomic_inc(&adapter->running_cap_crqs);
3488	ibmvnic_send_crq(adapter, &crq);
3489
3490	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3491	atomic_inc(&adapter->running_cap_crqs);
3492	ibmvnic_send_crq(adapter, &crq);
3493
3494	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3495	atomic_inc(&adapter->running_cap_crqs);
3496	ibmvnic_send_crq(adapter, &crq);
3497
3498	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3499	atomic_inc(&adapter->running_cap_crqs);
3500	ibmvnic_send_crq(adapter, &crq);
3501
3502	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3503	atomic_inc(&adapter->running_cap_crqs);
3504	ibmvnic_send_crq(adapter, &crq);
3505
3506	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3507	atomic_inc(&adapter->running_cap_crqs);
3508	ibmvnic_send_crq(adapter, &crq);
3509
3510	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3511	atomic_inc(&adapter->running_cap_crqs);
3512	ibmvnic_send_crq(adapter, &crq);
3513
3514	crq.query_capability.capability =
3515			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3516	atomic_inc(&adapter->running_cap_crqs);
3517	ibmvnic_send_crq(adapter, &crq);
3518
3519	crq.query_capability.capability =
3520			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3521	atomic_inc(&adapter->running_cap_crqs);
3522	ibmvnic_send_crq(adapter, &crq);
3523
3524	crq.query_capability.capability =
3525			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3526	atomic_inc(&adapter->running_cap_crqs);
3527	ibmvnic_send_crq(adapter, &crq);
3528
3529	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3530	atomic_inc(&adapter->running_cap_crqs);
3531	ibmvnic_send_crq(adapter, &crq);
3532}
3533
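/* GET_VPD_SIZE_RSP handler: record the VPD length advertised by the
 * server (or just log the failure) and complete fw_done so the waiter
 * in the VPD fetch path can continue.
 */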
3534static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3535				struct ibmvnic_adapter *adapter)
3536{
3537	struct device *dev = &adapter->vdev->dev;
3538
3539	if (crq->get_vpd_size_rsp.rc.code) {
3540		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3541			crq->get_vpd_size_rsp.rc.code);
3542		complete(&adapter->fw_done);
3543		return;
3544	}
3545
3546	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3547	complete(&adapter->fw_done);
3548}
3549
3550static void handle_vpd_rsp(union ibmvnic_crq *crq,
3551			   struct ibmvnic_adapter *adapter)
3552{
3553	struct device *dev = &adapter->vdev->dev;
3554	unsigned char *substr = NULL;
3555	u8 fw_level_len = 0;
3556
3557	memset(adapter->fw_version, 0, 32);
3558
3559	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3560			 DMA_FROM_DEVICE);
3561
3562	if (crq->get_vpd_rsp.rc.code) {
3563		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3564			crq->get_vpd_rsp.rc.code);
3565		goto complete;
3566	}
3567
3568	/* get the position of the firmware version info
3569	 * located after the ASCII 'RM' substring in the buffer
3570	 */
3571	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3572	if (!substr) {
3573		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3574		goto complete;
3575	}
3576
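	/* The bytes following the "RM" keyword are a one-byte length and
	 * then the ASCII firmware level itself, i.e. 'R' 'M' <len>
	 * <fw level>; substr points at the 'R'.
	 */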
3577	/* get length of firmware level ASCII substring */
3578	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3579		fw_level_len = *(substr + 2);
3580	} else {
3581		dev_info(dev, "Length of FW substr extends beyond VPD buff\n");
3582		goto complete;
3583	}
3584
3585	/* copy firmware version string from vpd into adapter */
3586	if ((substr + 3 + fw_level_len) <
3587	    (adapter->vpd->buff + adapter->vpd->len)) {
3588		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3589	} else {
3590		dev_info(dev, "FW substr extends beyond VPD buff\n");
3591	}
3592
3593complete:
3594	if (adapter->fw_version[0] == '\0')
3595		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3596	complete(&adapter->fw_done);
3597}
3598
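/* QUERY_IP_OFFLOAD_RSP handler: unmap the buffer the server filled in,
 * mirror the advertised checksum and TSO capabilities into
 * netdev->features, then hand the resulting control buffer back to the
 * server with a CONTROL_IP_OFFLOAD request.
 */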
3599static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3600{
3601	struct device *dev = &adapter->vdev->dev;
3602	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3603	union ibmvnic_crq crq;
3604	int i;
3605
3606	dma_unmap_single(dev, adapter->ip_offload_tok,
3607			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3608
3609	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3610	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3611		netdev_dbg(adapter->netdev, "%016lx\n",
3612			   ((unsigned long int *)(buf))[i]);
3613
3614	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3615	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3616	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3617		   buf->tcp_ipv4_chksum);
3618	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3619		   buf->tcp_ipv6_chksum);
3620	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3621		   buf->udp_ipv4_chksum);
3622	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3623		   buf->udp_ipv6_chksum);
3624	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3625		   buf->large_tx_ipv4);
3626	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3627		   buf->large_tx_ipv6);
3628	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3629		   buf->large_rx_ipv4);
3630	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3631		   buf->large_rx_ipv6);
3632	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3633		   buf->max_ipv4_header_size);
3634	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3635		   buf->max_ipv6_header_size);
3636	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3637		   buf->max_tcp_header_size);
3638	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3639		   buf->max_udp_header_size);
3640	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3641		   buf->max_large_tx_size);
3642	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3643		   buf->max_large_rx_size);
3644	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3645		   buf->ipv6_extension_header);
3646	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3647		   buf->tcp_pseudosum_req);
3648	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3649		   buf->num_ipv6_ext_headers);
3650	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3651		   buf->off_ipv6_ext_headers);
3652
3653	adapter->ip_offload_ctrl_tok =
3654	    dma_map_single(dev, &adapter->ip_offload_ctrl,
3655			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3656
3657	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3658		dev_err(dev, "Couldn't map ip offload control buffer\n");
3659		return;
3660	}
3661
3662	adapter->ip_offload_ctrl.len =
3663	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3664	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3665	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3666	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3667	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3668	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3669	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3670	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3671	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3672	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3673
3674	/* large_rx disabled for now, additional features needed */
3675	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3676	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3677
3678	adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3679
3680	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3681		adapter->netdev->features |= NETIF_F_IP_CSUM;
3682
3683	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3684		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3685
3686	if ((adapter->netdev->features &
3687	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3688		adapter->netdev->features |= NETIF_F_RXCSUM;
3689
3690	if (buf->large_tx_ipv4)
3691		adapter->netdev->features |= NETIF_F_TSO;
3692	if (buf->large_tx_ipv6)
3693		adapter->netdev->features |= NETIF_F_TSO6;
3694
3695	adapter->netdev->hw_features |= adapter->netdev->features;
3696
3697	memset(&crq, 0, sizeof(crq));
3698	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3699	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3700	crq.control_ip_offload.len =
3701	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3702	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3703	ibmvnic_send_crq(adapter, &crq);
3704}
3705
3706static void handle_error_info_rsp(union ibmvnic_crq *crq,
3707				  struct ibmvnic_adapter *adapter)
3708{
3709	struct device *dev = &adapter->vdev->dev;
3710	struct ibmvnic_error_buff *error_buff, *tmp;
3711	unsigned long flags;
3712	bool found = false;
3713	int i;
3714
3715	if (crq->request_error_rsp.rc.code) {
3716		dev_err(dev, "Request Error Rsp returned with rc=%x\n",
3717			crq->request_error_rsp.rc.code);
3718		return;
3719	}
3720
3721	spin_lock_irqsave(&adapter->error_list_lock, flags);
3722	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3723		if (error_buff->error_id == crq->request_error_rsp.error_id) {
3724			found = true;
3725			list_del(&error_buff->list);
3726			break;
3727		}
3728	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3729
3730	if (!found) {
3731		dev_err(dev, "Couldn't find error id %x\n",
3732			be32_to_cpu(crq->request_error_rsp.error_id));
3733		return;
3734	}
3735
3736	dev_err(dev, "Detailed info for error id %x:",
3737		be32_to_cpu(crq->request_error_rsp.error_id));
3738
3739	for (i = 0; i < error_buff->len; i++) {
3740		pr_cont("%02x", (int)error_buff->buff[i]);
3741		if (i % 8 == 7)
3742			pr_cont(" ");
3743	}
3744	pr_cont("\n");
3745
3746	dma_unmap_single(dev, error_buff->dma, error_buff->len,
3747			 DMA_FROM_DEVICE);
3748	kfree(error_buff->buff);
3749	kfree(error_buff);
3750}
3751
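/* Fetch the detailed error data advertised by an ERROR_INDICATION:
 * allocate and DMA-map a buffer of the advertised size, queue it on
 * adapter->errors and send REQUEST_ERROR_INFO. The matching
 * REQUEST_ERROR_RSP is consumed by handle_error_info_rsp(), which dumps
 * and frees the buffer.
 */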
3752static void request_error_information(struct ibmvnic_adapter *adapter,
3753				      union ibmvnic_crq *err_crq)
3754{
3755	struct device *dev = &adapter->vdev->dev;
3756	struct net_device *netdev = adapter->netdev;
3757	struct ibmvnic_error_buff *error_buff;
3758	unsigned long timeout = msecs_to_jiffies(30000);
3759	union ibmvnic_crq crq;
3760	unsigned long flags;
3761	int rc, detail_len;
3762
3763	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3764	if (!error_buff)
3765		return;
3766
3767	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3768	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3769	if (!error_buff->buff) {
3770		kfree(error_buff);
3771		return;
3772	}
3773
3774	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3775					 DMA_FROM_DEVICE);
3776	if (dma_mapping_error(dev, error_buff->dma)) {
3777		netdev_err(netdev, "Couldn't map error buffer\n");
3778		kfree(error_buff->buff);
3779		kfree(error_buff);
3780		return;
3781	}
3782
3783	error_buff->len = detail_len;
3784	error_buff->error_id = err_crq->error_indication.error_id;
3785
3786	spin_lock_irqsave(&adapter->error_list_lock, flags);
3787	list_add_tail(&error_buff->list, &adapter->errors);
3788	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3789
3790	memset(&crq, 0, sizeof(crq));
3791	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3792	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3793	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3794	crq.request_error_info.len = cpu_to_be32(detail_len);
3795	crq.request_error_info.error_id = err_crq->error_indication.error_id;
3796
3797	rc = ibmvnic_send_crq(adapter, &crq);
3798	if (rc) {
3799		netdev_err(netdev, "failed to request error information\n");
3800		goto err_info_fail;
3801	}
3802
3803	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3804		netdev_err(netdev, "timeout waiting for error information\n");
3805		goto err_info_fail;
3806	}
3807
3808	return;
3809
3810err_info_fail:
3811	spin_lock_irqsave(&adapter->error_list_lock, flags);
3812	list_del(&error_buff->list);
3813	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3814
3815	kfree(error_buff->buff);
3816	kfree(error_buff);
3817}
3818
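/* Log the firmware error, request the detailed error data when an error
 * id was supplied, and schedule a fatal or non-fatal adapter reset
 * according to the error flags.
 */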
3819static void handle_error_indication(union ibmvnic_crq *crq,
3820				    struct ibmvnic_adapter *adapter)
3821{
3822	struct device *dev = &adapter->vdev->dev;
3823
3824	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3825		crq->error_indication.flags
3826			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3827		be32_to_cpu(crq->error_indication.error_id),
3828		be16_to_cpu(crq->error_indication.error_cause));
3829
3830	if (be32_to_cpu(crq->error_indication.error_id))
3831		request_error_information(adapter, crq);
3832
3833	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3834		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3835	else
3836		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3837}
3838
3839static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3840				 struct ibmvnic_adapter *adapter)
3841{
3842	struct net_device *netdev = adapter->netdev;
3843	struct device *dev = &adapter->vdev->dev;
3844	long rc;
3845
3846	rc = crq->change_mac_addr_rsp.rc.code;
3847	if (rc) {
3848		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3849		goto out;
3850	}
3851	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3852	       ETH_ALEN);
3853out:
3854	complete(&adapter->fw_done);
3855	return rc;
3856}
3857
3858static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3859				   struct ibmvnic_adapter *adapter)
3860{
3861	struct device *dev = &adapter->vdev->dev;
3862	u64 *req_value;
3863	char *name;
3864
3865	atomic_dec(&adapter->running_cap_crqs);
3866	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3867	case REQ_TX_QUEUES:
3868		req_value = &adapter->req_tx_queues;
3869		name = "tx";
3870		break;
3871	case REQ_RX_QUEUES:
3872		req_value = &adapter->req_rx_queues;
3873		name = "rx";
3874		break;
3875	case REQ_RX_ADD_QUEUES:
3876		req_value = &adapter->req_rx_add_queues;
3877		name = "rx_add";
3878		break;
3879	case REQ_TX_ENTRIES_PER_SUBCRQ:
3880		req_value = &adapter->req_tx_entries_per_subcrq;
3881		name = "tx_entries_per_subcrq";
3882		break;
3883	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3884		req_value = &adapter->req_rx_add_entries_per_subcrq;
3885		name = "rx_add_entries_per_subcrq";
3886		break;
3887	case REQ_MTU:
3888		req_value = &adapter->req_mtu;
3889		name = "mtu";
3890		break;
3891	case PROMISC_REQUESTED:
3892		req_value = &adapter->promisc;
3893		name = "promisc";
3894		break;
3895	default:
3896		dev_err(dev, "Got invalid cap request rsp %d\n",
3897			be16_to_cpu(crq->request_capability.capability));
3898		return;
3899	}
3900
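	/* PARTIALSUCCESS means the server granted a different value than
	 * requested: adopt the server's number (or revert to the fallback
	 * MTU) and resend the capability requests with the retry flag set.
	 */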
3901	switch (crq->request_capability_rsp.rc.code) {
3902	case SUCCESS:
3903		break;
3904	case PARTIALSUCCESS:
3905		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3906			 *req_value,
3907			 (long int)be64_to_cpu(crq->request_capability_rsp.
3908					       number), name);
3909
3910		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3911		    REQ_MTU) {
3912			pr_err("mtu of %llu is not supported. Reverting.\n",
3913			       *req_value);
3914			*req_value = adapter->fallback.mtu;
3915		} else {
3916			*req_value =
3917				be64_to_cpu(crq->request_capability_rsp.number);
3918		}
3919
3920		ibmvnic_send_req_caps(adapter, 1);
3921		return;
3922	default:
3923		dev_err(dev, "Error %d in request cap rsp\n",
3924			crq->request_capability_rsp.rc.code);
3925		return;
3926	}
3927
3928	/* Done receiving requested capabilities, query IP offload support */
3929	if (atomic_read(&adapter->running_cap_crqs) == 0) {
3930		union ibmvnic_crq newcrq;
3931		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3932		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3933		    &adapter->ip_offload_buf;
3934
3935		adapter->wait_capability = false;
3936		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3937							 buf_sz,
3938							 DMA_FROM_DEVICE);
3939
3940		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3941			if (!firmware_has_feature(FW_FEATURE_CMO))
3942				dev_err(dev, "Couldn't map offload buffer\n");
3943			return;
3944		}
3945
3946		memset(&newcrq, 0, sizeof(newcrq));
3947		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3948		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3949		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3950		newcrq.query_ip_offload.ioba =
3951		    cpu_to_be32(adapter->ip_offload_tok);
3952
3953		ibmvnic_send_crq(adapter, &newcrq);
3954	}
3955}
3956
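/* LOGIN_RSP handler: unmap the login buffers, let the login path retry
 * with fewer queues if the server rejected the request, and cross-check
 * the response against what was asked for before completing init_done.
 */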
3957static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3958			    struct ibmvnic_adapter *adapter)
3959{
3960	struct device *dev = &adapter->vdev->dev;
3961	struct net_device *netdev = adapter->netdev;
3962	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3963	struct ibmvnic_login_buffer *login = adapter->login_buf;
3964	int i;
3965
3966	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3967			 DMA_TO_DEVICE);
3968	dma_unmap_single(dev, adapter->login_rsp_buf_token,
3969			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
3970
3971	/* If the number of queues requested can't be allocated by the
3972	 * server, the login response will return with code 1. We will need
3973	 * to resend the login buffer with fewer queues requested.
3974	 */
3975	if (login_rsp_crq->generic.rc.code) {
3976		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
3977		complete(&adapter->init_done);
3978		return 0;
3979	}
3980
3981	netdev->mtu = adapter->req_mtu - ETH_HLEN;
3982
3983	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3984	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3985		netdev_dbg(adapter->netdev, "%016lx\n",
3986			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3987	}
3988
3989	/* Sanity checks */
3990	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3991	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
3992	     adapter->req_rx_add_queues !=
3993	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3994		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3995		ibmvnic_remove(adapter->vdev);
3996		return -EIO;
3997	}
3998	release_login_buffer(adapter);
3999	complete(&adapter->init_done);
4000
4001	return 0;
4002}
4003
4004static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4005				     struct ibmvnic_adapter *adapter)
4006{
4007	struct device *dev = &adapter->vdev->dev;
4008	long rc;
4009
4010	rc = crq->request_unmap_rsp.rc.code;
4011	if (rc)
4012		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4013}
4014
4015static void handle_query_map_rsp(union ibmvnic_crq *crq,
4016				 struct ibmvnic_adapter *adapter)
4017{
4018	struct net_device *netdev = adapter->netdev;
4019	struct device *dev = &adapter->vdev->dev;
4020	long rc;
4021
4022	rc = crq->query_map_rsp.rc.code;
4023	if (rc) {
4024		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4025		return;
4026	}
4027	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4028		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4029		   crq->query_map_rsp.free_pages);
4030}
4031
4032static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4033				 struct ibmvnic_adapter *adapter)
4034{
4035	struct net_device *netdev = adapter->netdev;
4036	struct device *dev = &adapter->vdev->dev;
4037	long rc;
4038
4039	atomic_dec(&adapter->running_cap_crqs);
4040	netdev_dbg(netdev, "Outstanding queries: %d\n",
4041		   atomic_read(&adapter->running_cap_crqs));
4042	rc = crq->query_capability.rc.code;
4043	if (rc) {
4044		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4045		goto out;
4046	}
4047
4048	switch (be16_to_cpu(crq->query_capability.capability)) {
4049	case MIN_TX_QUEUES:
4050		adapter->min_tx_queues =
4051		    be64_to_cpu(crq->query_capability.number);
4052		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4053			   adapter->min_tx_queues);
4054		break;
4055	case MIN_RX_QUEUES:
4056		adapter->min_rx_queues =
4057		    be64_to_cpu(crq->query_capability.number);
4058		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4059			   adapter->min_rx_queues);
4060		break;
4061	case MIN_RX_ADD_QUEUES:
4062		adapter->min_rx_add_queues =
4063		    be64_to_cpu(crq->query_capability.number);
4064		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4065			   adapter->min_rx_add_queues);
4066		break;
4067	case MAX_TX_QUEUES:
4068		adapter->max_tx_queues =
4069		    be64_to_cpu(crq->query_capability.number);
4070		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4071			   adapter->max_tx_queues);
4072		break;
4073	case MAX_RX_QUEUES:
4074		adapter->max_rx_queues =
4075		    be64_to_cpu(crq->query_capability.number);
4076		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4077			   adapter->max_rx_queues);
4078		break;
4079	case MAX_RX_ADD_QUEUES:
4080		adapter->max_rx_add_queues =
4081		    be64_to_cpu(crq->query_capability.number);
4082		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4083			   adapter->max_rx_add_queues);
4084		break;
4085	case MIN_TX_ENTRIES_PER_SUBCRQ:
4086		adapter->min_tx_entries_per_subcrq =
4087		    be64_to_cpu(crq->query_capability.number);
4088		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4089			   adapter->min_tx_entries_per_subcrq);
4090		break;
4091	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4092		adapter->min_rx_add_entries_per_subcrq =
4093		    be64_to_cpu(crq->query_capability.number);
4094		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4095			   adapter->min_rx_add_entries_per_subcrq);
4096		break;
4097	case MAX_TX_ENTRIES_PER_SUBCRQ:
4098		adapter->max_tx_entries_per_subcrq =
4099		    be64_to_cpu(crq->query_capability.number);
4100		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4101			   adapter->max_tx_entries_per_subcrq);
4102		break;
4103	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4104		adapter->max_rx_add_entries_per_subcrq =
4105		    be64_to_cpu(crq->query_capability.number);
4106		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4107			   adapter->max_rx_add_entries_per_subcrq);
4108		break;
4109	case TCP_IP_OFFLOAD:
4110		adapter->tcp_ip_offload =
4111		    be64_to_cpu(crq->query_capability.number);
4112		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4113			   adapter->tcp_ip_offload);
4114		break;
4115	case PROMISC_SUPPORTED:
4116		adapter->promisc_supported =
4117		    be64_to_cpu(crq->query_capability.number);
4118		netdev_dbg(netdev, "promisc_supported = %lld\n",
4119			   adapter->promisc_supported);
4120		break;
4121	case MIN_MTU:
4122		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4123		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4124		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4125		break;
4126	case MAX_MTU:
4127		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4128		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4129		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4130		break;
4131	case MAX_MULTICAST_FILTERS:
4132		adapter->max_multicast_filters =
4133		    be64_to_cpu(crq->query_capability.number);
4134		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4135			   adapter->max_multicast_filters);
4136		break;
4137	case VLAN_HEADER_INSERTION:
4138		adapter->vlan_header_insertion =
4139		    be64_to_cpu(crq->query_capability.number);
4140		if (adapter->vlan_header_insertion)
4141			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4142		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4143			   adapter->vlan_header_insertion);
4144		break;
4145	case RX_VLAN_HEADER_INSERTION:
4146		adapter->rx_vlan_header_insertion =
4147		    be64_to_cpu(crq->query_capability.number);
4148		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4149			   adapter->rx_vlan_header_insertion);
4150		break;
4151	case MAX_TX_SG_ENTRIES:
4152		adapter->max_tx_sg_entries =
4153		    be64_to_cpu(crq->query_capability.number);
4154		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4155			   adapter->max_tx_sg_entries);
4156		break;
4157	case RX_SG_SUPPORTED:
4158		adapter->rx_sg_supported =
4159		    be64_to_cpu(crq->query_capability.number);
4160		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4161			   adapter->rx_sg_supported);
4162		break;
4163	case OPT_TX_COMP_SUB_QUEUES:
4164		adapter->opt_tx_comp_sub_queues =
4165		    be64_to_cpu(crq->query_capability.number);
4166		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4167			   adapter->opt_tx_comp_sub_queues);
4168		break;
4169	case OPT_RX_COMP_QUEUES:
4170		adapter->opt_rx_comp_queues =
4171		    be64_to_cpu(crq->query_capability.number);
4172		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4173			   adapter->opt_rx_comp_queues);
4174		break;
4175	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4176		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4177		    be64_to_cpu(crq->query_capability.number);
4178		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4179			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4180		break;
4181	case OPT_TX_ENTRIES_PER_SUBCRQ:
4182		adapter->opt_tx_entries_per_subcrq =
4183		    be64_to_cpu(crq->query_capability.number);
4184		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4185			   adapter->opt_tx_entries_per_subcrq);
4186		break;
4187	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4188		adapter->opt_rxba_entries_per_subcrq =
4189		    be64_to_cpu(crq->query_capability.number);
4190		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4191			   adapter->opt_rxba_entries_per_subcrq);
4192		break;
4193	case TX_RX_DESC_REQ:
4194		adapter->tx_rx_desc_req = crq->query_capability.number;
4195		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4196			   adapter->tx_rx_desc_req);
4197		break;
4198
4199	default:
4200		netdev_err(netdev, "Got invalid cap rsp %d\n",
4201			   be16_to_cpu(crq->query_capability.capability));
4202	}
4203
4204out:
4205	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4206		adapter->wait_capability = false;
4207		ibmvnic_send_req_caps(adapter, 0);
4208	}
4209}
4210
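/* Top-level CRQ dispatcher: the "first" byte selects the message class
 * (init, transport event or command response) and, for command
 * responses, the "cmd" byte selects one of the handlers above.
 */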
4211static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4212			       struct ibmvnic_adapter *adapter)
4213{
4214	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4215	struct net_device *netdev = adapter->netdev;
4216	struct device *dev = &adapter->vdev->dev;
4217	u64 *u64_crq = (u64 *)crq;
4218	long rc;
4219
4220	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4221		   (unsigned long int)cpu_to_be64(u64_crq[0]),
4222		   (unsigned long int)cpu_to_be64(u64_crq[1]));
4223	switch (gen_crq->first) {
4224	case IBMVNIC_CRQ_INIT_RSP:
4225		switch (gen_crq->cmd) {
4226		case IBMVNIC_CRQ_INIT:
4227			dev_info(dev, "Partner initialized\n");
4228			adapter->from_passive_init = true;
4229			adapter->failover_pending = false;
4230			complete(&adapter->init_done);
4231			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4232			break;
4233		case IBMVNIC_CRQ_INIT_COMPLETE:
4234			dev_info(dev, "Partner initialization complete\n");
4235			send_version_xchg(adapter);
4236			break;
4237		default:
4238			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4239		}
4240		return;
4241	case IBMVNIC_CRQ_XPORT_EVENT:
4242		netif_carrier_off(netdev);
4243		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4244			dev_info(dev, "Migrated, re-enabling adapter\n");
4245			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4246		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4247			dev_info(dev, "Backing device failover detected\n");
4248			adapter->failover_pending = true;
4249		} else {
4250			/* The adapter lost the connection */
4251			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4252				gen_crq->cmd);
4253			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4254		}
4255		return;
4256	case IBMVNIC_CRQ_CMD_RSP:
4257		break;
4258	default:
4259		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4260			gen_crq->first);
4261		return;
4262	}
4263
4264	switch (gen_crq->cmd) {
4265	case VERSION_EXCHANGE_RSP:
4266		rc = crq->version_exchange_rsp.rc.code;
4267		if (rc) {
4268			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4269			break;
4270		}
4271		dev_info(dev, "Partner protocol version is %d\n",
4272			 crq->version_exchange_rsp.version);
4273		if (be16_to_cpu(crq->version_exchange_rsp.version) <
4274		    ibmvnic_version)
4275			ibmvnic_version =
4276			    be16_to_cpu(crq->version_exchange_rsp.version);
4277		send_cap_queries(adapter);
4278		break;
4279	case QUERY_CAPABILITY_RSP:
4280		handle_query_cap_rsp(crq, adapter);
4281		break;
4282	case QUERY_MAP_RSP:
4283		handle_query_map_rsp(crq, adapter);
4284		break;
4285	case REQUEST_MAP_RSP:
4286		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4287		complete(&adapter->fw_done);
4288		break;
4289	case REQUEST_UNMAP_RSP:
4290		handle_request_unmap_rsp(crq, adapter);
4291		break;
4292	case REQUEST_CAPABILITY_RSP:
4293		handle_request_cap_rsp(crq, adapter);
4294		break;
4295	case LOGIN_RSP:
4296		netdev_dbg(netdev, "Got Login Response\n");
4297		handle_login_rsp(crq, adapter);
4298		break;
4299	case LOGICAL_LINK_STATE_RSP:
4300		netdev_dbg(netdev,
4301			   "Got Logical Link State Response, state: %d rc: %d\n",
4302			   crq->logical_link_state_rsp.link_state,
4303			   crq->logical_link_state_rsp.rc.code);
4304		adapter->logical_link_state =
4305		    crq->logical_link_state_rsp.link_state;
4306		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4307		complete(&adapter->init_done);
4308		break;
4309	case LINK_STATE_INDICATION:
4310		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4311		adapter->phys_link_state =
4312		    crq->link_state_indication.phys_link_state;
4313		adapter->logical_link_state =
4314		    crq->link_state_indication.logical_link_state;
4315		break;
4316	case CHANGE_MAC_ADDR_RSP:
4317		netdev_dbg(netdev, "Got MAC address change Response\n");
4318		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4319		break;
4320	case ERROR_INDICATION:
4321		netdev_dbg(netdev, "Got Error Indication\n");
4322		handle_error_indication(crq, adapter);
4323		break;
4324	case REQUEST_ERROR_RSP:
4325		netdev_dbg(netdev, "Got Error Detail Response\n");
4326		handle_error_info_rsp(crq, adapter);
4327		break;
4328	case REQUEST_STATISTICS_RSP:
4329		netdev_dbg(netdev, "Got Statistics Response\n");
4330		complete(&adapter->stats_done);
4331		break;
4332	case QUERY_IP_OFFLOAD_RSP:
4333		netdev_dbg(netdev, "Got Query IP offload Response\n");
4334		handle_query_ip_offload_rsp(adapter);
4335		break;
4336	case MULTICAST_CTRL_RSP:
4337		netdev_dbg(netdev, "Got multicast control Response\n");
4338		break;
4339	case CONTROL_IP_OFFLOAD_RSP:
4340		netdev_dbg(netdev, "Got Control IP offload Response\n");
4341		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4342				 sizeof(adapter->ip_offload_ctrl),
4343				 DMA_TO_DEVICE);
4344		complete(&adapter->init_done);
4345		break;
4346	case COLLECT_FW_TRACE_RSP:
4347		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4348		complete(&adapter->fw_done);
4349		break;
4350	case GET_VPD_SIZE_RSP:
4351		handle_vpd_size_rsp(crq, adapter);
4352		break;
4353	case GET_VPD_RSP:
4354		handle_vpd_rsp(crq, adapter);
4355		break;
4356	default:
4357		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4358			   gen_crq->cmd);
4359	}
4360}
4361
4362static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4363{
4364	struct ibmvnic_adapter *adapter = instance;
4365
4366	tasklet_schedule(&adapter->tasklet);
4367	return IRQ_HANDLED;
4368}
4369
4370static void ibmvnic_tasklet(void *data)
4371{
4372	struct ibmvnic_adapter *adapter = data;
4373	struct ibmvnic_crq_queue *queue = &adapter->crq;
4374	union ibmvnic_crq *crq;
4375	unsigned long flags;
4376	bool done = false;
4377
4378	spin_lock_irqsave(&queue->lock, flags);
4379	while (!done) {
4380		/* Pull all the valid messages off the CRQ */
4381		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4382			ibmvnic_handle_crq(crq, adapter);
4383			crq->generic.first = 0;
4384		}
4385
4386		/* remain in tasklet until all
4387		 * capability responses are received
4388		 */
4389		if (!adapter->wait_capability)
4390			done = true;
4391	}
4392	/* if capability CRQs were sent in this tasklet, the next tasklet
4393	 * run must wait until all responses are received
4394	 */
4395	if (atomic_read(&adapter->running_cap_crqs) != 0)
4396		adapter->wait_capability = true;
4397	spin_unlock_irqrestore(&queue->lock, flags);
4398}
4399
4400static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4401{
4402	struct vio_dev *vdev = adapter->vdev;
4403	int rc;
4404
4405	do {
4406		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4407	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4408
4409	if (rc)
4410		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4411
4412	return rc;
4413}
4414
4415static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4416{
4417	struct ibmvnic_crq_queue *crq = &adapter->crq;
4418	struct device *dev = &adapter->vdev->dev;
4419	struct vio_dev *vdev = adapter->vdev;
4420	int rc;
4421
4422	/* Close the CRQ */
4423	do {
4424		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4425	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4426
4427	/* Clean out the queue */
4428	memset(crq->msgs, 0, PAGE_SIZE);
4429	crq->cur = 0;
4430
4431	/* And re-open it again */
4432	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4433				crq->msg_token, PAGE_SIZE);
4434
4435	if (rc == H_CLOSED)
4436		/* Adapter is good, but other end is not ready */
4437		dev_warn(dev, "Partner adapter not ready\n");
4438	else if (rc != 0)
4439		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4440
4441	return rc;
4442}
4443
4444static void release_crq_queue(struct ibmvnic_adapter *adapter)
4445{
4446	struct ibmvnic_crq_queue *crq = &adapter->crq;
4447	struct vio_dev *vdev = adapter->vdev;
4448	long rc;
4449
4450	if (!crq->msgs)
4451		return;
4452
4453	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4454	free_irq(vdev->irq, adapter);
4455	tasklet_kill(&adapter->tasklet);
4456	do {
4457		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4458	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4459
4460	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4461			 DMA_BIDIRECTIONAL);
4462	free_page((unsigned long)crq->msgs);
4463	crq->msgs = NULL;
4464}
4465
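/* Allocate the one-page CRQ, DMA-map it and register it with the
 * hypervisor, falling back to a CRQ reset if H_RESOURCE hints at a stale
 * registration (e.g. across kexec), then wire up the tasklet and the CRQ
 * interrupt.
 */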
4466static int init_crq_queue(struct ibmvnic_adapter *adapter)
4467{
4468	struct ibmvnic_crq_queue *crq = &adapter->crq;
4469	struct device *dev = &adapter->vdev->dev;
4470	struct vio_dev *vdev = adapter->vdev;
4471	int rc, retrc = -ENOMEM;
4472
4473	if (crq->msgs)
4474		return 0;
4475
4476	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4477	/* Should we allocate more than one page? */
4478
4479	if (!crq->msgs)
4480		return -ENOMEM;
4481
4482	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4483	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4484					DMA_BIDIRECTIONAL);
4485	if (dma_mapping_error(dev, crq->msg_token))
4486		goto map_failed;
4487
4488	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4489				crq->msg_token, PAGE_SIZE);
4490
4491	if (rc == H_RESOURCE)
4492		/* maybe kexecing and resource is busy. try a reset */
4493		rc = ibmvnic_reset_crq(adapter);
4494	retrc = rc;
4495
4496	if (rc == H_CLOSED) {
4497		dev_warn(dev, "Partner adapter not ready\n");
4498	} else if (rc) {
4499		dev_warn(dev, "Error %d opening adapter\n", rc);
4500		goto reg_crq_failed;
4501	}
4502
4503	retrc = 0;
4504
4505	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4506		     (unsigned long)adapter);
4507
4508	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4509	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4510			 adapter);
4511	if (rc) {
4512		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4513			vdev->irq, rc);
4514		goto req_irq_failed;
4515	}
4516
4517	rc = vio_enable_interrupts(vdev);
4518	if (rc) {
4519		dev_err(dev, "Error %d enabling interrupts\n", rc);
4520		goto req_irq_failed;
4521	}
4522
4523	crq->cur = 0;
4524	spin_lock_init(&crq->lock);
4525
4526	return retrc;
4527
4528req_irq_failed:
4529	tasklet_kill(&adapter->tasklet);
4530	do {
4531		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4532	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4533reg_crq_failed:
4534	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4535map_failed:
4536	free_page((unsigned long)crq->msgs);
4537	crq->msgs = NULL;
4538	return retrc;
4539}
4540
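/* Drive the CRQ init handshake, waiting up to 30 seconds for the server,
 * then build the sub-CRQs. During a non-mobility reset the existing
 * sub-CRQs are reset in place unless the requested queue counts changed,
 * in which case they are released and reallocated.
 */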
4541static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4542{
4543	struct device *dev = &adapter->vdev->dev;
4544	unsigned long timeout = msecs_to_jiffies(30000);
4545	u64 old_num_rx_queues, old_num_tx_queues;
4546	int rc;
4547
4548	adapter->from_passive_init = false;
4549
4550	old_num_rx_queues = adapter->req_rx_queues;
4551	old_num_tx_queues = adapter->req_tx_queues;
4552
4553	init_completion(&adapter->init_done);
4554	adapter->init_done_rc = 0;
4555	ibmvnic_send_crq_init(adapter);
4556	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4557		dev_err(dev, "Initialization sequence timed out\n");
4558		return -1;
4559	}
4560
4561	if (adapter->init_done_rc) {
4562		release_crq_queue(adapter);
4563		return adapter->init_done_rc;
4564	}
4565
4566	if (adapter->from_passive_init) {
4567		adapter->state = VNIC_OPEN;
4568		adapter->from_passive_init = false;
4569		return -1;
4570	}
4571
4572	if (adapter->resetting && !adapter->wait_for_reset &&
4573	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
4574		if (adapter->req_rx_queues != old_num_rx_queues ||
4575		    adapter->req_tx_queues != old_num_tx_queues) {
4576			release_sub_crqs(adapter, 0);
4577			rc = init_sub_crqs(adapter);
4578		} else {
4579			rc = reset_sub_crq_queues(adapter);
4580		}
4581	} else {
4582		rc = init_sub_crqs(adapter);
4583	}
4584
4585	if (rc) {
4586		dev_err(dev, "Initialization of sub crqs failed\n");
4587		release_crq_queue(adapter);
4588		return rc;
4589	}
4590
4591	rc = init_sub_crq_irqs(adapter);
4592	if (rc) {
4593		dev_err(dev, "Failed to initialize sub crq irqs\n");
4594		release_crq_queue(adapter);
4595	}
4596
4597	return rc;
4598}
4599
4600static struct device_attribute dev_attr_failover;
4601
4602static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4603{
4604	struct ibmvnic_adapter *adapter;
4605	struct net_device *netdev;
4606	unsigned char *mac_addr_p;
4607	int rc;
4608
4609	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4610		dev->unit_address);
4611
4612	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4613							VETH_MAC_ADDR, NULL);
4614	if (!mac_addr_p) {
4615		dev_err(&dev->dev,
4616			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4617			__FILE__, __LINE__);
4618		return 0;
4619	}
4620
4621	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4622				   IBMVNIC_MAX_QUEUES);
4623	if (!netdev)
4624		return -ENOMEM;
4625
4626	adapter = netdev_priv(netdev);
4627	adapter->state = VNIC_PROBING;
4628	dev_set_drvdata(&dev->dev, netdev);
4629	adapter->vdev = dev;
4630	adapter->netdev = netdev;
4631
4632	ether_addr_copy(adapter->mac_addr, mac_addr_p);
4633	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4634	netdev->irq = dev->irq;
4635	netdev->netdev_ops = &ibmvnic_netdev_ops;
4636	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4637	SET_NETDEV_DEV(netdev, &dev->dev);
4638
4639	spin_lock_init(&adapter->stats_lock);
4640
4641	INIT_LIST_HEAD(&adapter->errors);
4642	spin_lock_init(&adapter->error_list_lock);
4643
4644	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4645	INIT_LIST_HEAD(&adapter->rwi_list);
4646	mutex_init(&adapter->reset_lock);
4647	mutex_init(&adapter->rwi_lock);
4648	adapter->resetting = false;
4649
4650	adapter->mac_change_pending = false;
4651
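	/* Retry CRQ setup and the init handshake for as long as the init
	 * sequence comes back with EAGAIN.
	 */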
4652	do {
4653		rc = init_crq_queue(adapter);
4654		if (rc) {
4655			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4656				rc);
4657			goto ibmvnic_init_fail;
4658		}
4659
4660		rc = ibmvnic_init(adapter);
4661		if (rc && rc != EAGAIN)
4662			goto ibmvnic_init_fail;
4663	} while (rc == EAGAIN);
4664
4665	rc = init_stats_buffers(adapter);
4666	if (rc)
4667		goto ibmvnic_init_fail;
4668
4669	rc = init_stats_token(adapter);
4670	if (rc)
4671		goto ibmvnic_stats_fail;
4672
4673	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4674	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4675	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4676
4677	rc = device_create_file(&dev->dev, &dev_attr_failover);
4678	if (rc)
4679		goto ibmvnic_dev_file_err;
4680
4681	netif_carrier_off(netdev);
4682	rc = register_netdev(netdev);
4683	if (rc) {
4684		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4685		goto ibmvnic_register_fail;
4686	}
4687	dev_info(&dev->dev, "ibmvnic registered\n");
4688
4689	adapter->state = VNIC_PROBED;
4690
4691	adapter->wait_for_reset = false;
4692
4693	return 0;
4694
4695ibmvnic_register_fail:
4696	device_remove_file(&dev->dev, &dev_attr_failover);
4697
4698ibmvnic_dev_file_err:
4699	release_stats_token(adapter);
4700
4701ibmvnic_stats_fail:
4702	release_stats_buffers(adapter);
4703
4704ibmvnic_init_fail:
4705	release_sub_crqs(adapter, 1);
4706	release_crq_queue(adapter);
4707	free_netdev(netdev);
4708
4709	return rc;
4710}
4711
4712static int ibmvnic_remove(struct vio_dev *dev)
4713{
4714	struct net_device *netdev = dev_get_drvdata(&dev->dev);
4715	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4716
4717	adapter->state = VNIC_REMOVING;
4718	unregister_netdev(netdev);
4719	mutex_lock(&adapter->reset_lock);
4720
4721	release_resources(adapter);
4722	release_sub_crqs(adapter, 1);
4723	release_crq_queue(adapter);
4724
4725	release_stats_token(adapter);
4726	release_stats_buffers(adapter);
4727
4728	adapter->state = VNIC_REMOVED;
4729
4730	mutex_unlock(&adapter->reset_lock);
4731	device_remove_file(&dev->dev, &dev_attr_failover);
4732	free_netdev(netdev);
4733	dev_set_drvdata(&dev->dev, NULL);
4734
4735	return 0;
4736}
4737
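/* sysfs "failover" attribute: writing "1" fetches the session token via
 * H_VIOCTL/H_GET_SESSION_TOKEN and then signals H_SESSION_ERR_DETECTED,
 * asking the platform to fail the client over to the backup device.
 * Illustrative usage (device path is hypothetical):
 *   echo 1 > /sys/devices/vio/30000003/failover
 */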
4738static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4739			      const char *buf, size_t count)
4740{
4741	struct net_device *netdev = dev_get_drvdata(dev);
4742	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4743	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4744	__be64 session_token;
4745	long rc;
4746
4747	if (!sysfs_streq(buf, "1"))
4748		return -EINVAL;
4749
4750	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4751			 H_GET_SESSION_TOKEN, 0, 0, 0);
4752	if (rc) {
4753		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4754			   rc);
4755		return -EINVAL;
4756	}
4757
4758	session_token = (__be64)retbuf[0];
4759	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4760		   be64_to_cpu(session_token));
4761	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4762				H_SESSION_ERR_DETECTED, session_token, 0, 0);
4763	if (rc) {
4764		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4765			   rc);
4766		return -EINVAL;
4767	}
4768
4769	return count;
4770}
4771
4772static DEVICE_ATTR_WO(failover);
4773
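/* Report the desired IO entitlement to the VIO core: one page for the
 * CRQ, the statistics buffer, four pages per sub-CRQ, plus every
 * long-term-mapped rx buffer.
 */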
4774static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4775{
4776	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4777	struct ibmvnic_adapter *adapter;
4778	struct iommu_table *tbl;
4779	unsigned long ret = 0;
4780	int i;
4781
4782	tbl = get_iommu_table_base(&vdev->dev);
4783
4784	/* netdev inits at probe time along with the structures we need below*/
4785	if (!netdev)
4786		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4787
4788	adapter = netdev_priv(netdev);
4789
4790	ret += PAGE_SIZE; /* the crq message queue */
4791	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4792
4793	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4794		ret += 4 * PAGE_SIZE; /* the scrq message queue */
4795
4796	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4797	     i++)
4798		ret += adapter->rx_pool[i].size *
4799		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4800
4801	return ret;
4802}
4803
4804static int ibmvnic_resume(struct device *dev)
4805{
4806	struct net_device *netdev = dev_get_drvdata(dev);
4807	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4808
4809	if (adapter->state != VNIC_OPEN)
4810		return 0;
4811
4812	tasklet_schedule(&adapter->tasklet);
4813
4814	return 0;
4815}
4816
4817static const struct vio_device_id ibmvnic_device_table[] = {
4818	{"network", "IBM,vnic"},
4819	{"", "" }
4820};
4821MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
4822
4823static const struct dev_pm_ops ibmvnic_pm_ops = {
4824	.resume = ibmvnic_resume
4825};
4826
4827static struct vio_driver ibmvnic_driver = {
4828	.id_table       = ibmvnic_device_table,
4829	.probe          = ibmvnic_probe,
4830	.remove         = ibmvnic_remove,
4831	.get_desired_dma = ibmvnic_get_desired_dma,
4832	.name		= ibmvnic_driver_name,
4833	.pm		= &ibmvnic_pm_ops,
4834};
4835
4836/* module functions */
4837static int __init ibmvnic_module_init(void)
4838{
4839	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4840		IBMVNIC_DRIVER_VERSION);
4841
4842	return vio_register_driver(&ibmvnic_driver);
4843}
4844
4845static void __exit ibmvnic_module_exit(void)
4846{
4847	vio_unregister_driver(&ibmvnic_driver);
4848}
4849
4850module_init(ibmvnic_module_init);
4851module_exit(ibmvnic_module_exit);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
 
 
 
 
 
 
 
 
 
 
 
 
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  13/* option of the RS/6000 Platform Architecture to interface with virtual  */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
  15/*									   */
  16/* Messages are passed between the VNIC driver and the VNIC server using  */
  17/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  18/* issue and receive commands that initiate communication with the server */
  19/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  20/* are used by the driver to notify the server that a packet is           */
  21/* ready for transmission or that a buffer has been added to receive a    */
  22/* packet. Subsequently, sCRQs are used by the server to notify the       */
  23/* driver that a packet transmission has been completed or that a packet  */
  24/* has been received and placed in a waiting buffer.                      */
  25/*                                                                        */
  26/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  27/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  28/* or receive has been completed, the VNIC driver is required to use      */
  29/* "long term mapping". This entails that large, continuous DMA mapped    */
  30/* buffers are allocated on driver initialization and these buffers are   */
  31/* then continuously reused to pass skbs to and from the VNIC server.     */
  32/*                                                                        */
  33/**************************************************************************/
  34
  35#include <linux/module.h>
  36#include <linux/moduleparam.h>
  37#include <linux/types.h>
  38#include <linux/errno.h>
  39#include <linux/completion.h>
  40#include <linux/ioport.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/kernel.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/skbuff.h>
  46#include <linux/init.h>
  47#include <linux/delay.h>
  48#include <linux/mm.h>
  49#include <linux/ethtool.h>
  50#include <linux/proc_fs.h>
  51#include <linux/if_arp.h>
  52#include <linux/in.h>
  53#include <linux/ip.h>
  54#include <linux/ipv6.h>
  55#include <linux/irq.h>
  56#include <linux/irqdomain.h>
  57#include <linux/kthread.h>
  58#include <linux/seq_file.h>
  59#include <linux/interrupt.h>
  60#include <net/net_namespace.h>
  61#include <asm/hvcall.h>
  62#include <linux/atomic.h>
  63#include <asm/vio.h>
  64#include <asm/xive.h>
  65#include <asm/iommu.h>
  66#include <linux/uaccess.h>
  67#include <asm/firmware.h>
  68#include <linux/workqueue.h>
  69#include <linux/if_vlan.h>
  70#include <linux/utsname.h>
  71#include <linux/cpu.h>
  72
  73#include "ibmvnic.h"
  74
  75static const char ibmvnic_driver_name[] = "ibmvnic";
  76static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  77
  78MODULE_AUTHOR("Santiago Leon");
  79MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  80MODULE_LICENSE("GPL");
  81MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  82
  83static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 
  84static void release_sub_crqs(struct ibmvnic_adapter *, bool);
  85static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  86static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  87static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  88static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 
 
  89static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
  90static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
  91static int enable_scrq_irq(struct ibmvnic_adapter *,
  92			   struct ibmvnic_sub_crq_queue *);
  93static int disable_scrq_irq(struct ibmvnic_adapter *,
  94			    struct ibmvnic_sub_crq_queue *);
  95static int pending_scrq(struct ibmvnic_adapter *,
  96			struct ibmvnic_sub_crq_queue *);
  97static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
  98					struct ibmvnic_sub_crq_queue *);
  99static int ibmvnic_poll(struct napi_struct *napi, int data);
 100static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
 101static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
 102static void send_query_map(struct ibmvnic_adapter *adapter);
 103static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
 104static int send_request_unmap(struct ibmvnic_adapter *, u8);
 105static int send_login(struct ibmvnic_adapter *adapter);
 106static void send_query_cap(struct ibmvnic_adapter *adapter);
 107static int init_sub_crqs(struct ibmvnic_adapter *);
 108static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 109static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
 110static void release_crq_queue(struct ibmvnic_adapter *);
 111static int __ibmvnic_set_mac(struct net_device *, u8 *);
 112static int init_crq_queue(struct ibmvnic_adapter *adapter);
 113static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 114static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 115					 struct ibmvnic_sub_crq_queue *tx_scrq);
 116static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 117				struct ibmvnic_long_term_buff *ltb);
 118static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
 119static void flush_reset_queue(struct ibmvnic_adapter *adapter);
 120
 121struct ibmvnic_stat {
 122	char name[ETH_GSTRING_LEN];
 123	int offset;
 124};
 125
 126#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 127			     offsetof(struct ibmvnic_statistics, stat))
 128#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
 129
 130static const struct ibmvnic_stat ibmvnic_stats[] = {
 131	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 132	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 133	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 134	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 135	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 136	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 137	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 138	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 139	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 140	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 141	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 142	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 143	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 144	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 145	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 146	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 147	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 148	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 149	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 150	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 151	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 152	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 153};
 154
 155static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
 156{
 157	union ibmvnic_crq crq;
 158
 159	memset(&crq, 0, sizeof(crq));
 160	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
 161	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
 162
 163	return ibmvnic_send_crq(adapter, &crq);
 164}
 165
 166static int send_version_xchg(struct ibmvnic_adapter *adapter)
 167{
 168	union ibmvnic_crq crq;
 169
 170	memset(&crq, 0, sizeof(crq));
 171	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
 172	crq.version_exchange.cmd = VERSION_EXCHANGE;
 173	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
 174
 175	return ibmvnic_send_crq(adapter, &crq);
 176}
 177
 178static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
 179					 struct ibmvnic_sub_crq_queue *queue)
 180{
 181	if (!(queue && queue->irq))
 182		return;
 183
 184	cpumask_clear(queue->affinity_mask);
 185
 186	if (irq_set_affinity_and_hint(queue->irq, NULL))
 187		netdev_warn(adapter->netdev,
 188			    "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
 189			    __func__, queue, queue->irq);
 190}
 191
 192static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
 193{
 194	struct ibmvnic_sub_crq_queue **rxqs;
 195	struct ibmvnic_sub_crq_queue **txqs;
 196	int num_rxqs, num_txqs;
 197	int i;
 198
 199	rxqs = adapter->rx_scrq;
 200	txqs = adapter->tx_scrq;
 201	num_txqs = adapter->num_active_tx_scrqs;
 202	num_rxqs = adapter->num_active_rx_scrqs;
 203
 204	netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
 205	if (txqs) {
 206		for (i = 0; i < num_txqs; i++)
 207			ibmvnic_clean_queue_affinity(adapter, txqs[i]);
 208	}
 209	if (rxqs) {
 210		for (i = 0; i < num_rxqs; i++)
 211			ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
 212	}
 213}
 214
 215static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
 216				      unsigned int *cpu, int *stragglers,
 217				      int stride)
 218{
 219	cpumask_var_t mask;
 220	int i;
 221	int rc = 0;
 222
 223	if (!(queue && queue->irq))
 224		return rc;
 225
 226	/* cpumask_var_t is either a pointer or array, allocation works here */
 227	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 228		return -ENOMEM;
 229
 230	/* while we have extra cpu give one extra to this irq */
 231	if (*stragglers) {
 232		stride++;
 233		(*stragglers)--;
 234	}
 235	/* atomic write is safer than writing bit by bit directly */
 236	for (i = 0; i < stride; i++) {
 237		cpumask_set_cpu(*cpu, mask);
 238		*cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
 239					 nr_cpu_ids, false);
 240	}
 241	/* set queue affinity mask */
 242	cpumask_copy(queue->affinity_mask, mask);
 243	rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
 244	free_cpumask_var(mask);
 245
 246	return rc;
 247}
 248
 249/* assumes cpu read lock is held */
 250static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
 251{
 252	struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
 253	struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
 254	struct ibmvnic_sub_crq_queue *queue;
 255	int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
 256	int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
 257	int total_queues, stride, stragglers, i;
 258	unsigned int num_cpu, cpu;
 259	bool is_rx_queue;
 260	int rc = 0;
 261
 262	netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
 263	if (!(adapter->rx_scrq && adapter->tx_scrq)) {
 264		netdev_warn(adapter->netdev,
 265			    "%s: Set affinity failed, queues not allocated\n",
 266			    __func__);
 267		return;
 268	}
 269
 270	total_queues = num_rxqs + num_txqs;
 271	num_cpu = num_online_cpus();
 272	/* number of cpu's assigned per irq */
 273	stride = max_t(int, num_cpu / total_queues, 1);
 274	/* number of leftover cpu's */
 275	stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
 276	/* next available cpu to assign irq to */
 277	cpu = cpumask_next(-1, cpu_online_mask);
 278
 279	for (i = 0; i < total_queues; i++) {
 280		is_rx_queue = false;
 281		/* balance core load by alternating rx and tx assignments
 282		 * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
 283		 */
 284		if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
 285			queue = rxqs[i_rxqs++];
 286			is_rx_queue = true;
 287		} else {
 288			queue = txqs[i_txqs++];
 289		}
 290
 291		rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
 292						stride);
 293		if (rc)
 294			goto out;
 295
 296		if (!queue || is_rx_queue)
 297			continue;
 298
 299		rc = __netif_set_xps_queue(adapter->netdev,
 300					   cpumask_bits(queue->affinity_mask),
 301					   i_txqs - 1, XPS_CPUS);
 302		if (rc)
 303			netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
 304				    __func__, i_txqs - 1, rc);
 305	}
 306
 307out:
 308	if (rc) {
 309		netdev_warn(adapter->netdev,
 310			    "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
 311			    __func__, queue, queue->irq, rc);
 312		ibmvnic_clean_affinity(adapter);
 313	}
 314}
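
    /* A worked example of the distribution above (illustrative, assuming
     * CPUs 0-5 are online): with 6 CPUs and 2 TX + 2 RX queues,
     * stride = max(6 / 4, 1) = 1 and stragglers = 6 % 4 = 2. The loop
     * visits TX0, RX0, TX1, RX1 in that order; the first two queues each
     * absorb one straggler and get CPUs {0,1} and {2,3}, while TX1 and
     * RX1 get CPUs {4} and {5} respectively.
     */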
 315
 316static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
 317{
 318	struct ibmvnic_adapter *adapter;
 319
 320	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
 321	ibmvnic_set_affinity(adapter);
 322	return 0;
 323}
 324
 325static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
 326{
 327	struct ibmvnic_adapter *adapter;
 328
 329	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
 330	ibmvnic_set_affinity(adapter);
 331	return 0;
 332}
 333
 334static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
 335{
 336	struct ibmvnic_adapter *adapter;
 337
 338	adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
 339	ibmvnic_clean_affinity(adapter);
 340	return 0;
 341}
 342
 343static enum cpuhp_state ibmvnic_online;
 344
 345static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
 346{
 347	int ret;
 348
 349	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
 350	if (ret)
 351		return ret;
 352	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
 353					       &adapter->node_dead);
 354	if (!ret)
 355		return ret;
 356	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
 357	return ret;
 358}
 359
 360static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
 361{
 362	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
 363	cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
 364					    &adapter->node_dead);
 365}
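
    /* A sketch of how these callbacks would be wired up at module init,
     * assuming the usual cpuhp multi-instance API (the actual call site
     * is outside this excerpt):
     *
     *	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
     *				     "net/ibmvnic:online",
     *				     ibmvnic_cpu_online,
     *				     ibmvnic_cpu_down_prep);
     *	if (rc < 0)
     *		return rc;
     *	ibmvnic_online = rc;
     *	rc = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD,
     *				     "net/ibmvnic:dead", NULL,
     *				     ibmvnic_cpu_dead);
     */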
 366
 367static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 368			  unsigned long length, unsigned long *number,
 369			  unsigned long *irq)
 370{
 371	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 372	long rc;
 373
 374	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 375	*number = retbuf[0];
 376	*irq = retbuf[1];
 377
 378	return rc;
 379}
 380
 381/**
 382 * ibmvnic_wait_for_completion - Check device state and wait for completion
 383 * @adapter: private device data
 384 * @comp_done: completion structure to wait for
 385 * @timeout: time to wait in milliseconds
 386 *
 387 * Wait for a completion signal or until the timeout limit is reached
 388 * while checking that the device is still active.
 389 */
 390static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
 391				       struct completion *comp_done,
 392				       unsigned long timeout)
 393{
 394	struct net_device *netdev;
 395	unsigned long div_timeout;
 396	u8 retry;
 397
 398	netdev = adapter->netdev;
 399	retry = 5;
 400	div_timeout = msecs_to_jiffies(timeout / retry);
 401	while (true) {
 402		if (!adapter->crq.active) {
 403			netdev_err(netdev, "Device down!\n");
 404			return -ENODEV;
 405		}
 406		if (!retry--)
 407			break;
 408		if (wait_for_completion_timeout(comp_done, div_timeout))
 409			return 0;
 410	}
 411	netdev_err(netdev, "Operation timed out.\n");
 412	return -ETIMEDOUT;
 413}
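
    /* For example, a caller passing timeout = 10000 gets five ~2000 ms
     * wait slices; because adapter->crq.active is re-checked between
     * slices, a dead CRQ is reported within roughly one slice rather
     * than only after the full 10 seconds.
     */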
 414
 415/**
 416 * reuse_ltb() - Check if a long term buffer can be reused
 417 * @ltb:  The long term buffer to be checked
 418 * @size: The size of the long term buffer.
 419 *
 420 * An LTB can be reused unless its size has changed.
 421 *
 422 * Return: Return true if the LTB can be reused, false otherwise.
 423 */
 424static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
 425{
 426	return (ltb->buff && ltb->size == size);
 427}
 428
 429/**
 430 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 431 *
 432 * @adapter: ibmvnic adapter associated to the LTB
 433 * @ltb:     container object for the LTB
 434 * @size:    size of the LTB
 435 *
 436 * Allocate an LTB of the specified size and notify VIOS.
 437 *
 438 * If the given @ltb already has the correct size, reuse it. Otherwise, if
 439 * it's non-NULL, free it. Then allocate a new one of the correct size.
 440 * Notify the VIOS either way since we may now be working with a new VIOS.
 441 *
 442 * Allocating larger chunks of memory during resets, especially during LPM
 443 * or in low-memory situations, can cause resets to fail/timeout and the
 444 * LPAR to lose connectivity. So hold onto the LTB even if we fail to talk
 445 * to the VIOS, and reuse it on the next open. Free the LTB when the adapter is closed.
 446 *
 447 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 448 *	   a negative value otherwise.
 449 */
 450static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 451				struct ibmvnic_long_term_buff *ltb, int size)
 452{
 453	struct device *dev = &adapter->vdev->dev;
 454	u64 prev = 0;
 455	int rc;
 456
 457	if (!reuse_ltb(ltb, size)) {
 458		dev_dbg(dev,
 459			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
 460			 ltb->size, size);
 461		prev = ltb->size;
 462		free_long_term_buff(adapter, ltb);
 463	}
 464
 465	if (ltb->buff) {
 466		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
 467			ltb->map_id, ltb->size);
 468	} else {
 469		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
 470					       GFP_KERNEL);
 471		if (!ltb->buff) {
 472			dev_err(dev, "Couldn't alloc long term buffer\n");
 473			return -ENOMEM;
 474		}
 475		ltb->size = size;
 476
 477		ltb->map_id = find_first_zero_bit(adapter->map_ids,
 478						  MAX_MAP_ID);
 479		bitmap_set(adapter->map_ids, ltb->map_id, 1);
 480
 481		dev_dbg(dev,
 482			"Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
 483			 ltb->map_id, ltb->size, prev);
 484	}
 485
 486	/* Ensure ltb is zeroed - especially when reusing it. */
 487	memset(ltb->buff, 0, ltb->size);
 488
 489	mutex_lock(&adapter->fw_lock);
 490	adapter->fw_done_rc = 0;
 491	reinit_completion(&adapter->fw_done);
 492
 493	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 494	if (rc) {
 495		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
 496		goto out;
 497	}
 498
 499	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 500	if (rc) {
 501		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
 502			rc);
 503		goto out;
 504	}
 505
 506	if (adapter->fw_done_rc) {
 507		dev_err(dev, "Couldn't map LTB, rc = %d\n",
 508			adapter->fw_done_rc);
 509		rc = -EIO;
 510		goto out;
 511	}
 512	rc = 0;
 513out:
 514	/* don't free LTB on communication error - see function header */
 515	mutex_unlock(&adapter->fw_lock);
 516	return rc;
 517}
 518
 519static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 520				struct ibmvnic_long_term_buff *ltb)
 521{
 522	struct device *dev = &adapter->vdev->dev;
 523
 524	if (!ltb->buff)
 525		return;
 526
 527	/* VIOS automatically unmaps the long term buffer at remote
 528	 * end for the following resets:
 529	 * FAILOVER, MOBILITY, TIMEOUT.
 530	 */
 531	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 532	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
 533	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 534		send_request_unmap(adapter, ltb->map_id);
 535
 536	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 537
 538	ltb->buff = NULL;
 539	/* mark this map_id free */
 540	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
 541	ltb->map_id = 0;
 542}
 543
 544/**
 545 * free_ltb_set - free the given set of long term buffers (LTBs)
 546 * @adapter: The ibmvnic adapter containing this ltb set
 547 * @ltb_set: The ltb_set to be freed
 548 *
 549 * Free each LTB in the given set and release the set container.
 550 */
 552static void free_ltb_set(struct ibmvnic_adapter *adapter,
 553			 struct ibmvnic_ltb_set *ltb_set)
 554{
 555	int i;
 556
 557	for (i = 0; i < ltb_set->num_ltbs; i++)
 558		free_long_term_buff(adapter, &ltb_set->ltbs[i]);
 559
 560	kfree(ltb_set->ltbs);
 561	ltb_set->ltbs = NULL;
 562	ltb_set->num_ltbs = 0;
 563}
 564
 565/**
 566 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
 567 *
 568 * @adapter: ibmvnic adapter associated to the LTB
 569 * @ltb_set: container object for the set of LTBs
 570 * @num_buffs: Number of buffers in the LTB
 571 * @buff_size: Size of each buffer in the LTB
 572 *
 573 * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
 574 * each. We currently cap the size of each LTB at IBMVNIC_ONE_LTB_SIZE. If
 575 * the new set needs fewer LTBs than the old set, free the excess LTBs.
 576 * If the new set needs more than the old set had, allocate the remaining
 577 * ones. Try to reuse as many LTBs as possible and avoid reallocation.
 578 *
 579 * Any changes to this allocation strategy must be reflected in
 580 * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
 581 */
 582static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
 583			 struct ibmvnic_ltb_set *ltb_set, int num_buffs,
 584			 int buff_size)
 585{
 586	struct device *dev = &adapter->vdev->dev;
 587	struct ibmvnic_ltb_set old_set;
 588	struct ibmvnic_ltb_set new_set;
 589	int rem_size;
 590	int tot_size;		/* size of all ltbs */
 591	int ltb_size;		/* size of one ltb */
 592	int nltbs;
 593	int rc;
 594	int n;
 595	int i;
 596
 597	dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
 598		buff_size);
 599
 600	ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
 601	tot_size = num_buffs * buff_size;
 602
 603	if (ltb_size > tot_size)
 604		ltb_size = tot_size;
 605
 606	nltbs = tot_size / ltb_size;
 607	if (tot_size % ltb_size)
 608		nltbs++;
 609
 610	old_set = *ltb_set;
 611
 612	if (old_set.num_ltbs == nltbs) {
 613		new_set = old_set;
 614	} else {
 615		int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
 616
 617		new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
 618		if (!new_set.ltbs)
 619			return -ENOMEM;
 620
 621		new_set.num_ltbs = nltbs;
 622
 623		/* Free any excess ltbs in old set */
 624		for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
 625			free_long_term_buff(adapter, &old_set.ltbs[i]);
 626
 627		/* Copy remaining ltbs to new set. All LTBs except the
 628		 * last one are of the same size. alloc_long_term_buff()
 629		 * will realloc if the size changes.
 630		 */
 631		n = min(old_set.num_ltbs, new_set.num_ltbs);
 632		for (i = 0; i < n; i++)
 633			new_set.ltbs[i] = old_set.ltbs[i];
 634
 635		/* Any additional ltbs in the new set are still zeroed (NULL
 636		 * buff) for now and will be allocated in alloc_long_term_buff().
 637		 */
 638
 639		/* We no longer need the old_set so free it. Note that we
 640		 * may have reused some ltbs from old set and freed excess
 641		 * ltbs above. So we only need to free the container now,
 642		 * not the LTBs themselves (i.e. don't call free_ltb_set()!).
 643		 */
 644		kfree(old_set.ltbs);
 645		old_set.ltbs = NULL;
 646		old_set.num_ltbs = 0;
 647
 648		/* Install the new set. If allocations fail below, we will
 649		 * retry later and know what size LTBs we need.
 650		 */
 651		*ltb_set = new_set;
 652	}
 653
 654	i = 0;
 655	rem_size = tot_size;
 656	while (rem_size) {
 657		if (ltb_size > rem_size)
 658			ltb_size = rem_size;
 659
 660		rem_size -= ltb_size;
 661
 662		rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
 663		if (rc)
 664			goto out;
 665		i++;
 666	}
 667
 668	WARN_ON(i != new_set.num_ltbs);
 669
 670	return 0;
 671out:
 672	/* We may have allocated one/more LTBs before failing and we
 673	 * want to try and reuse on next reset. So don't free ltb set.
 674	 */
 675	return rc;
 676}
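
    /* A worked example of the sizing above, assuming for illustration an
     * IBMVNIC_ONE_LTB_SIZE of 8 MB: for num_buffs = 1200 and
     * buff_size = 16 KB, tot_size = 19,660,800 bytes and ltb_size rounds
     * down to 8,388,608 bytes (512 buffers per LTB). That gives
     * nltbs = 3: two full 8 MB LTBs of 512 buffers each, plus a final
     * 2,883,584-byte LTB holding the remaining 176 buffers.
     */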
 677
 678/**
 679 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
 680 * @rxpool: The receive buffer pool containing buffer
 681 * @bufidx: Index of buffer in rxpool
 682 * @ltbp: (Output) pointer to the long term buffer containing the buffer
 683 * @offset: (Output) offset of buffer in the LTB from @ltbp
 684 *
 685 * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
 686 * pool and its corresponding offset. Assume for now that each LTB may be
 687 * a different size; the lookup could be optimized based on the allocation
 688 * strategy in alloc_ltb_set().
 689 */
 690static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
 691				  unsigned int bufidx,
 692				  struct ibmvnic_long_term_buff **ltbp,
 693				  unsigned int *offset)
 694{
 695	struct ibmvnic_long_term_buff *ltb;
 696	int nbufs;	/* # of buffers in one ltb */
 697	int i;
 698
 699	WARN_ON(bufidx >= rxpool->size);
 700
 701	for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
 702		ltb = &rxpool->ltb_set.ltbs[i];
 703		nbufs = ltb->size / rxpool->buff_size;
 704		if (bufidx < nbufs)
 705			break;
 706		bufidx -= nbufs;
 707	}
 708
 709	*ltbp = ltb;
 710	*offset = bufidx * rxpool->buff_size;
 711}
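
    /* Continuing the example from alloc_ltb_set(): with LTBs holding 512,
     * 512 and 176 buffers of 16 KB each, bufidx = 1030 walks past the
     * first two LTBs (1030 - 512 - 512 = 6) and resolves to the third
     * LTB at offset = 6 * 16384 = 98304.
     */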
 712
 713/**
 714 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
 715 * @txpool: The transmit buffer pool containing buffer
 716 * @bufidx: Index of buffer in txpool
 717 * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
 718 * @offset: (Output) offset of buffer in the LTB from @ltbp
 719 *
 720 * Map the given buffer identified by [txpool, bufidx] to an LTB in the
 721 * pool and its corresponding offset.
 722 */
 723static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
 724				  unsigned int bufidx,
 725				  struct ibmvnic_long_term_buff **ltbp,
 726				  unsigned int *offset)
 727{
 728	struct ibmvnic_long_term_buff *ltb;
 729	int nbufs;	/* # of buffers in one ltb */
 730	int i;
 731
 732	WARN_ON_ONCE(bufidx >= txpool->num_buffers);
 733
 734	for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
 735		ltb = &txpool->ltb_set.ltbs[i];
 736		nbufs = ltb->size / txpool->buf_size;
 737		if (bufidx < nbufs)
 738			break;
 739		bufidx -= nbufs;
 740	}
 741
 742	*ltbp = ltb;
 743	*offset = bufidx * txpool->buf_size;
 744}
 745
 746static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 747{
 748	int i;
 749
 750	for (i = 0; i < adapter->num_active_rx_pools; i++)
 751		adapter->rx_pool[i].active = 0;
 752}
 753
 754static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 755			      struct ibmvnic_rx_pool *pool)
 756{
 757	int count = pool->size - atomic_read(&pool->available);
 758	u64 handle = adapter->rx_scrq[pool->index]->handle;
 759	struct device *dev = &adapter->vdev->dev;
 760	struct ibmvnic_ind_xmit_queue *ind_bufp;
 761	struct ibmvnic_sub_crq_queue *rx_scrq;
 762	struct ibmvnic_long_term_buff *ltb;
 763	union sub_crq *sub_crq;
 764	int buffers_added = 0;
 765	unsigned long lpar_rc;
 766	struct sk_buff *skb;
 767	unsigned int offset;
 768	dma_addr_t dma_addr;
 769	unsigned char *dst;
 770	int shift = 0;
 771	int bufidx;
 772	int i;
 773
 774	if (!pool->active)
 775		return;
 776
 777	rx_scrq = adapter->rx_scrq[pool->index];
 778	ind_bufp = &rx_scrq->ind_buf;
 779
 780	/* netdev_alloc_skb() could have failed after we saved a few skbs
 781	 * in the indir_buf and we would not have sent them to VIOS yet.
 782	 * To account for them, start the loop at ind_bufp->index rather
 783	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
 784	 * be 0.
 785	 */
 786	for (i = ind_bufp->index; i < count; ++i) {
 787		bufidx = pool->free_map[pool->next_free];
 788
 789		/* We may be reusing the skb from earlier resets. Allocate
 790		 * only if necessary. But since the LTB may have changed
 791		 * during reset (see init_rx_pools()), update LTB below
 792		 * even if reusing skb.
 793		 */
 794		skb = pool->rx_buff[bufidx].skb;
 795		if (!skb) {
 796			skb = netdev_alloc_skb(adapter->netdev,
 797					       pool->buff_size);
 798			if (!skb) {
 799				dev_err(dev, "Couldn't replenish rx buff\n");
 800				adapter->replenish_no_mem++;
 801				break;
 802			}
 803		}
 804
 805		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 806		pool->next_free = (pool->next_free + 1) % pool->size;
 807
 808		/* Copy the skb to the long term mapped DMA buffer */
 809		map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
 810		dst = ltb->buff + offset;
 811		memset(dst, 0, pool->buff_size);
 812		dma_addr = ltb->addr + offset;
 813
 814		/* add the skb to an rx_buff in the pool */
 815		pool->rx_buff[bufidx].data = dst;
 816		pool->rx_buff[bufidx].dma = dma_addr;
 817		pool->rx_buff[bufidx].skb = skb;
 818		pool->rx_buff[bufidx].pool_index = pool->index;
 819		pool->rx_buff[bufidx].size = pool->buff_size;
 820
 821		/* queue the rx_buff for the next send_subcrq_indirect */
 822		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 823		memset(sub_crq, 0, sizeof(*sub_crq));
 824		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 825		sub_crq->rx_add.correlator =
 826		    cpu_to_be64((u64)&pool->rx_buff[bufidx]);
 827		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 828		sub_crq->rx_add.map_id = ltb->map_id;
 829
 830		/* The length field of the sCRQ is defined to be 24 bits so the
 831		 * buffer size needs to be left shifted by a byte before it is
 832		 * converted to big endian to prevent the last byte from being
 833		 * truncated.
 834		 */
 835#ifdef __LITTLE_ENDIAN__
 836		shift = 8;
 837#endif
 838		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
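    		/* For example, a 9216-byte (0x2400) buffer becomes
    		 * 0x240000 after the shift; cpu_to_be32() lays it out
    		 * as bytes 00 24 00 00, so the 24-bit length field
    		 * carries 0x002400 as intended.
    		 */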
 839
 840		/* if send_subcrq_indirect queue is full, flush to VIOS */
 841		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
 842		    i == count - 1) {
 843			lpar_rc =
 844				send_subcrq_indirect(adapter, handle,
 845						     (u64)ind_bufp->indir_dma,
 846						     (u64)ind_bufp->index);
 847			if (lpar_rc != H_SUCCESS)
 848				goto failure;
 849			buffers_added += ind_bufp->index;
 850			adapter->replenish_add_buff_success += ind_bufp->index;
 851			ind_bufp->index = 0;
 852		}
 853	}
 854	atomic_add(buffers_added, &pool->available);
 855	return;
 856
 857failure:
 858	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 859		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
 860	for (i = ind_bufp->index - 1; i >= 0; --i) {
 861		struct ibmvnic_rx_buff *rx_buff;
 862
 863		pool->next_free = pool->next_free == 0 ?
 864				  pool->size - 1 : pool->next_free - 1;
 865		sub_crq = &ind_bufp->indir_arr[i];
 866		rx_buff = (struct ibmvnic_rx_buff *)
 867				be64_to_cpu(sub_crq->rx_add.correlator);
 868		bufidx = (int)(rx_buff - pool->rx_buff);
 869		pool->free_map[pool->next_free] = bufidx;
 870		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
 871		pool->rx_buff[bufidx].skb = NULL;
 872	}
 873	adapter->replenish_add_buff_failure += ind_bufp->index;
 874	atomic_add(buffers_added, &pool->available);
 875	ind_bufp->index = 0;
 876	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 877		/* Disable buffer pool replenishment and report carrier off if
 878		 * queue is closed or pending failover.
 879		 * Firmware guarantees that a signal will be sent to the
 880		 * driver, triggering a reset.
 881		 */
 882		deactivate_rx_pools(adapter);
 883		netif_carrier_off(adapter->netdev);
 884	}
 885}
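
    /* The indirect buffer above batches descriptors before each
     * send_subcrq_indirect() call. For example, assuming
     * IBMVNIC_MAX_IND_DESCS is 128, a replenish with count = 300
     * flushes to the VIOS at 128 and 256 descriptors and once more
     * with the final 44 (the i == count - 1 case).
     */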
 886
 887static void replenish_pools(struct ibmvnic_adapter *adapter)
 888{
 889	int i;
 890
 891	adapter->replenish_task_cycles++;
 892	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 893		if (adapter->rx_pool[i].active)
 894			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 895	}
 896
 897	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
 898}
 899
 900static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 901{
 902	kfree(adapter->tx_stats_buffers);
 903	kfree(adapter->rx_stats_buffers);
 904	adapter->tx_stats_buffers = NULL;
 905	adapter->rx_stats_buffers = NULL;
 906}
 907
 908static int init_stats_buffers(struct ibmvnic_adapter *adapter)
 909{
 910	adapter->tx_stats_buffers =
 911				kcalloc(IBMVNIC_MAX_QUEUES,
 912					sizeof(struct ibmvnic_tx_queue_stats),
 913					GFP_KERNEL);
 914	if (!adapter->tx_stats_buffers)
 915		return -ENOMEM;
 916
 917	adapter->rx_stats_buffers =
 918				kcalloc(IBMVNIC_MAX_QUEUES,
 919					sizeof(struct ibmvnic_rx_queue_stats),
 920					GFP_KERNEL);
 921	if (!adapter->rx_stats_buffers)
 922		return -ENOMEM;
 923
 924	return 0;
 925}
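
    /* Note that both arrays are sized for IBMVNIC_MAX_QUEUES rather than
     * the currently requested queue counts, so a reset that renegotiates
     * the number of queues does not need to reallocate these buffers.
     */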
 926
 927static void release_stats_token(struct ibmvnic_adapter *adapter)
 928{
 929	struct device *dev = &adapter->vdev->dev;
 930
 931	if (!adapter->stats_token)
 932		return;
 933
 934	dma_unmap_single(dev, adapter->stats_token,
 935			 sizeof(struct ibmvnic_statistics),
 936			 DMA_FROM_DEVICE);
 937	adapter->stats_token = 0;
 938}
 939
 940static int init_stats_token(struct ibmvnic_adapter *adapter)
 941{
 942	struct device *dev = &adapter->vdev->dev;
 943	dma_addr_t stok;
 944	int rc;
 945
 946	stok = dma_map_single(dev, &adapter->stats,
 947			      sizeof(struct ibmvnic_statistics),
 948			      DMA_FROM_DEVICE);
 949	rc = dma_mapping_error(dev, stok);
 950	if (rc) {
 951		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
 952		return rc;
 953	}
 954
 955	adapter->stats_token = stok;
 956	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
 957	return 0;
 958}
 959
 960/**
 961 * release_rx_pools() - Release any rx pools attached to @adapter.
 962 * @adapter: ibmvnic adapter
 963 *
 964 * Safe to call this multiple times - even if no pools are attached.
 965 */
 966static void release_rx_pools(struct ibmvnic_adapter *adapter)
 967{
 968	struct ibmvnic_rx_pool *rx_pool;
 969	int i, j;
 970
 971	if (!adapter->rx_pool)
 972		return;
 973
 974	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 975		rx_pool = &adapter->rx_pool[i];
 976
 977		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 978
 979		kfree(rx_pool->free_map);
 980
 981		free_ltb_set(adapter, &rx_pool->ltb_set);
 982
 983		if (!rx_pool->rx_buff)
 984			continue;
 985
 986		for (j = 0; j < rx_pool->size; j++) {
 987			if (rx_pool->rx_buff[j].skb) {
 988				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
 989				rx_pool->rx_buff[j].skb = NULL;
 990			}
 991		}
 992
 993		kfree(rx_pool->rx_buff);
 994	}
 995
 996	kfree(adapter->rx_pool);
 997	adapter->rx_pool = NULL;
 998	adapter->num_active_rx_pools = 0;
 999	adapter->prev_rx_pool_size = 0;
1000}
1001
1002/**
1003 * reuse_rx_pools() - Check if the existing rx pools can be reused.
1004 * @adapter: ibmvnic adapter
1005 *
1006 * Check if the existing rx pools in the adapter can be reused. The
1007 * pools can be reused if the pool parameters (number of pools,
1008 * number of buffers in the pool and size of each buffer) have not
1009 * changed.
1010 *
1011 * NOTE: This assumes that all pools have the same number of buffers
1012 *       which is the case currently. If that changes, we must fix this.
1013 *
1014 * Return: true if the rx pools can be reused, false otherwise.
1015 */
1016static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
1017{
1018	u64 old_num_pools, new_num_pools;
1019	u64 old_pool_size, new_pool_size;
1020	u64 old_buff_size, new_buff_size;
1021
1022	if (!adapter->rx_pool)
1023		return false;
1024
1025	old_num_pools = adapter->num_active_rx_pools;
1026	new_num_pools = adapter->req_rx_queues;
1027
1028	old_pool_size = adapter->prev_rx_pool_size;
1029	new_pool_size = adapter->req_rx_add_entries_per_subcrq;
1030
1031	old_buff_size = adapter->prev_rx_buf_sz;
1032	new_buff_size = adapter->cur_rx_buf_sz;
1033
1034	if (old_buff_size != new_buff_size ||
1035	    old_num_pools != new_num_pools ||
1036	    old_pool_size != new_pool_size)
1037		return false;
1038
1039	return true;
1040}
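
    /* For example, a failover reset that renegotiates the same number of
     * queues, pool size and buffer size lets init_rx_pools() skip
     * reallocation entirely, while an MTU change would typically alter
     * cur_rx_buf_sz, making this return false so the pools are rebuilt.
     */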
1041
1042/**
1043 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
1044 * @netdev: net device associated with the vnic interface
1045 *
1046 * Initialize the set of receiver pools in the ibmvnic adapter associated
1047 * with the net_device @netdev. If possible, reuse the existing rx pools.
1048 * Otherwise free any existing pools and allocate a new set of pools
1049 * before initializing them.
1050 *
1051 * Return: 0 on success and negative value on error.
1052 */
1053static int init_rx_pools(struct net_device *netdev)
1054{
1055	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1056	struct device *dev = &adapter->vdev->dev;
1057	struct ibmvnic_rx_pool *rx_pool;
1058	u64 num_pools;
1059	u64 pool_size;		/* # of buffers in one pool */
1060	u64 buff_size;
1061	int i, j, rc;
1062
1063	pool_size = adapter->req_rx_add_entries_per_subcrq;
1064	num_pools = adapter->req_rx_queues;
1065	buff_size = adapter->cur_rx_buf_sz;
1066
1067	if (reuse_rx_pools(adapter)) {
1068		dev_dbg(dev, "Reusing rx pools\n");
1069		goto update_ltb;
1070	}
1071
1072	/* Allocate/populate the pools. */
1073	release_rx_pools(adapter);
1074
1075	adapter->rx_pool = kcalloc(num_pools,
1076				   sizeof(struct ibmvnic_rx_pool),
1077				   GFP_KERNEL);
1078	if (!adapter->rx_pool) {
1079		dev_err(dev, "Failed to allocate rx pools\n");
1080		return -ENOMEM;
1081	}
1082
1083	/* Set num_active_rx_pools early. If we fail below after partial
1084	 * allocation, release_rx_pools() will know how many to look for.
1085	 */
1086	adapter->num_active_rx_pools = num_pools;
1087
1088	for (i = 0; i < num_pools; i++) {
1089		rx_pool = &adapter->rx_pool[i];
1090
1091		netdev_dbg(adapter->netdev,
1092			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
1093			   i, pool_size, buff_size);
1094
1095		rx_pool->size = pool_size;
1096		rx_pool->index = i;
1097		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1098
1099		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
1100					    GFP_KERNEL);
1101		if (!rx_pool->free_map) {
1102			dev_err(dev, "Couldn't alloc free_map %d\n", i);
1103			rc = -ENOMEM;
1104			goto out_release;
1105		}
1106
1107		rx_pool->rx_buff = kcalloc(rx_pool->size,
1108					   sizeof(struct ibmvnic_rx_buff),
1109					   GFP_KERNEL);
1110		if (!rx_pool->rx_buff) {
1111			dev_err(dev, "Couldn't alloc rx buffers\n");
1112			rc = -ENOMEM;
1113			goto out_release;
1114		}
1115	}
1116
1117	adapter->prev_rx_pool_size = pool_size;
1118	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
1119
1120update_ltb:
1121	for (i = 0; i < num_pools; i++) {
1122		rx_pool = &adapter->rx_pool[i];
1123		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
1124			i, rx_pool->size, rx_pool->buff_size);
1125
1126		rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
1127				   rx_pool->size, rx_pool->buff_size);
1128		if (rc)
1129			goto out;
1130
1131		for (j = 0; j < rx_pool->size; ++j) {
1132			struct ibmvnic_rx_buff *rx_buff;
1133
1134			rx_pool->free_map[j] = j;
1135
1136			/* NOTE: Don't clear rx_buff->skb here - will leak
1137			 * memory! replenish_rx_pool() will reuse skbs or
1138			 * allocate as necessary.
1139			 */
1140			rx_buff = &rx_pool->rx_buff[j];
1141			rx_buff->dma = 0;
1142			rx_buff->data = 0;
1143			rx_buff->size = 0;
1144			rx_buff->pool_index = 0;
1145		}
1146
1147		/* Mark pool "empty" so replenish_rx_pool() will
1148		 * update the LTB info for each buffer
1149		 */
1150		atomic_set(&rx_pool->available, 0);
1151		rx_pool->next_alloc = 0;
1152		rx_pool->next_free = 0;
1153		/* replenish_rx_pool() may have called deactivate_rx_pools()
1154		 * on failover. Ensure pool is active now.
1155		 */
1156		rx_pool->active = 1;
1157	}
1158	return 0;
1159out_release:
1160	release_rx_pools(adapter);
1161out:
1162	/* We failed to allocate one or more LTBs or map them on the VIOS.
1163	 * Hold onto the pools and any LTBs that we did allocate/map.
1164	 */
1165	return rc;
1166}
1167
1168static void release_vpd_data(struct ibmvnic_adapter *adapter)
1169{
1170	if (!adapter->vpd)
1171		return;
1172
1173	kfree(adapter->vpd->buff);
1174	kfree(adapter->vpd);
1175
1176	adapter->vpd = NULL;
1177}
1178
1179static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
1180				struct ibmvnic_tx_pool *tx_pool)
1181{
1182	kfree(tx_pool->tx_buff);
1183	kfree(tx_pool->free_map);
1184	free_ltb_set(adapter, &tx_pool->ltb_set);
1185}
1186
1187/**
1188 * release_tx_pools() - Release any tx pools attached to @adapter.
1189 * @adapter: ibmvnic adapter
1190 *
1191 * Safe to call this multiple times - even if no pools are attached.
1192 */
1193static void release_tx_pools(struct ibmvnic_adapter *adapter)
1194{
1195	int i;
1196
1197	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
1198	 * both NULL or both non-NULL. So we only need to check one.
1199	 */
1200	if (!adapter->tx_pool)
1201		return;
1202
1203	for (i = 0; i < adapter->num_active_tx_pools; i++) {
1204		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
1205		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
1206	}
1207
1208	kfree(adapter->tx_pool);
1209	adapter->tx_pool = NULL;
1210	kfree(adapter->tso_pool);
1211	adapter->tso_pool = NULL;
1212	adapter->num_active_tx_pools = 0;
1213	adapter->prev_tx_pool_size = 0;
1214}
1215
1216static int init_one_tx_pool(struct net_device *netdev,
1217			    struct ibmvnic_tx_pool *tx_pool,
1218			    int pool_size, int buf_size)
1219{
1220	int i;
1221
1222	tx_pool->tx_buff = kcalloc(pool_size,
1223				   sizeof(struct ibmvnic_tx_buff),
1224				   GFP_KERNEL);
1225	if (!tx_pool->tx_buff)
1226		return -ENOMEM;
1227
1228	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
1229	if (!tx_pool->free_map) {
1230		kfree(tx_pool->tx_buff);
1231		tx_pool->tx_buff = NULL;
1232		return -ENOMEM;
1233	}
1234
1235	for (i = 0; i < pool_size; i++)
1236		tx_pool->free_map[i] = i;
1237
1238	tx_pool->consumer_index = 0;
1239	tx_pool->producer_index = 0;
1240	tx_pool->num_buffers = pool_size;
1241	tx_pool->buf_size = buf_size;
1242
1243	return 0;
1244}
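
    /* free_map starts out as the identity mapping (with pool_size = 4 it
     * is [0, 1, 2, 3]) and both ring indices start at slot 0. The
     * transmit path pulls buffer indices from free_map and returns them
     * as completions arrive.
     */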
1245
1246/**
1247 * reuse_tx_pools() - Check if the existing tx pools can be reused.
1248 * @adapter: ibmvnic adapter
1249 *
1250 * Check if the existing tx pools in the adapter can be reused. The
1251 * pools can be reused if the pool parameters (number of pools,
1252 * number of buffers in the pool and mtu) have not changed.
1253 *
1254 * NOTE: This assumes that all pools have the same number of buffers
1255 *       which is the case currently. If that changes, we must fix this.
1256 *
1257 * Return: true if the tx pools can be reused, false otherwise.
1258 */
1259static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
1260{
1261	u64 old_num_pools, new_num_pools;
1262	u64 old_pool_size, new_pool_size;
1263	u64 old_mtu, new_mtu;
1264
1265	if (!adapter->tx_pool)
1266		return false;
1267
1268	old_num_pools = adapter->num_active_tx_pools;
1269	new_num_pools = adapter->num_active_tx_scrqs;
1270	old_pool_size = adapter->prev_tx_pool_size;
1271	new_pool_size = adapter->req_tx_entries_per_subcrq;
1272	old_mtu = adapter->prev_mtu;
1273	new_mtu = adapter->req_mtu;
1274
1275	if (old_mtu != new_mtu ||
1276	    old_num_pools != new_num_pools ||
1277	    old_pool_size != new_pool_size)
1278		return false;
1279
1280	return true;
1281}
1282
1283/**
1284 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
1285 * @netdev: net device associated with the vnic interface
1286 *
1287 * Initialize the set of transmit pools in the ibmvnic adapter associated
1288 * with the net_device @netdev. If possible, reuse the existing tx pools.
1289 * Otherwise free any existing pools and allocate a new set of pools
1290 * before initializing them.
1291 *
1292 * Return: 0 on success and negative value on error.
1293 */
1294static int init_tx_pools(struct net_device *netdev)
1295{
1296	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1297	struct device *dev = &adapter->vdev->dev;
1298	int num_pools;
1299	u64 pool_size;		/* # of buffers in pool */
1300	u64 buff_size;
1301	int i, j, rc;
1302
1303	num_pools = adapter->req_tx_queues;
1304
1305	/* We must notify the VIOS about the LTB on all resets - but we only
1306	 * need to alloc/populate pools if either the number of buffers or
1307	 * size of each buffer in the pool has changed.
1308	 */
1309	if (reuse_tx_pools(adapter)) {
1310		netdev_dbg(netdev, "Reusing tx pools\n");
1311		goto update_ltb;
1312	}
1313
1314	/* Allocate/populate the pools. */
1315	release_tx_pools(adapter);
1316
1317	pool_size = adapter->req_tx_entries_per_subcrq;
1318	num_pools = adapter->num_active_tx_scrqs;
1319
1320	adapter->tx_pool = kcalloc(num_pools,
1321				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1322	if (!adapter->tx_pool)
1323		return -ENOMEM;
1324
1325	adapter->tso_pool = kcalloc(num_pools,
1326				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
1327	/* To simplify release_tx_pools() ensure that ->tx_pool and
1328	 * ->tso_pool are either both NULL or both non-NULL.
1329	 */
1330	if (!adapter->tso_pool) {
1331		kfree(adapter->tx_pool);
1332		adapter->tx_pool = NULL;
1333		return -ENOMEM;
1334	}
1335
1336	/* Set num_active_tx_pools early. If we fail below after partial
1337	 * allocation, release_tx_pools() will know how many to look for.
1338	 */
1339	adapter->num_active_tx_pools = num_pools;
1340
1341	buff_size = adapter->req_mtu + VLAN_HLEN;
1342	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
1343
1344	for (i = 0; i < num_pools; i++) {
1345		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
1346			i, adapter->req_tx_entries_per_subcrq, buff_size);
1347
1348		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
1349				      pool_size, buff_size);
1350		if (rc)
1351			goto out_release;
1352
1353		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
1354				      IBMVNIC_TSO_BUFS,
1355				      IBMVNIC_TSO_BUF_SZ);
1356		if (rc)
1357			goto out_release;
1358	}
1359
1360	adapter->prev_tx_pool_size = pool_size;
1361	adapter->prev_mtu = adapter->req_mtu;
1362
1363update_ltb:
1364	/* NOTE: All tx_pools have the same number of buffers (which is
1365	 *       same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
1366	 *       buffers (see the calls to init_one_tx_pool() above).
1367	 *       For consistency, we use tx_pool->num_buffers and
1368	 *       tso_pool->num_buffers below.
1369	 */
1370	rc = -1;
1371	for (i = 0; i < num_pools; i++) {
1372		struct ibmvnic_tx_pool *tso_pool;
1373		struct ibmvnic_tx_pool *tx_pool;
1374
1375		tx_pool = &adapter->tx_pool[i];
1376
1377		dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
1378			i, tx_pool->num_buffers, tx_pool->buf_size);
1379
1380		rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
1381				   tx_pool->num_buffers, tx_pool->buf_size);
1382		if (rc)
1383			goto out;
1384
1385		tx_pool->consumer_index = 0;
1386		tx_pool->producer_index = 0;
1387
1388		for (j = 0; j < tx_pool->num_buffers; j++)
1389			tx_pool->free_map[j] = j;
1390
1391		tso_pool = &adapter->tso_pool[i];
1392
1393		dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
1394			i, tso_pool->num_buffers, tso_pool->buf_size);
1395
1396		rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
1397				   tso_pool->num_buffers, tso_pool->buf_size);
1398		if (rc)
1399			goto out;
1400
1401		tso_pool->consumer_index = 0;
1402		tso_pool->producer_index = 0;
1403
1404		for (j = 0; j < tso_pool->num_buffers; j++)
1405			tso_pool->free_map[j] = j;
1406	}
1407
1408	return 0;
1409out_release:
1410	release_tx_pools(adapter);
1411out:
1412	/* We failed to allocate one or more LTBs or map them on the VIOS.
1413	 * Hold onto the pools and any LTBs that we did allocate/map.
1414	 */
1415	return rc;
1416}
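
    /* Each TX queue therefore ends up with two pools: a regular pool of
     * pool_size MTU-sized buffers and a TSO pool of IBMVNIC_TSO_BUFS
     * buffers of IBMVNIC_TSO_BUF_SZ bytes each, every one backed by an
     * LTB set that is re-registered with the VIOS on each reset.
     */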
1417
1418static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
1419{
1420	int i;
1421
1422	if (adapter->napi_enabled)
1423		return;
1424
1425	for (i = 0; i < adapter->req_rx_queues; i++)
1426		napi_enable(&adapter->napi[i]);
1427
1428	adapter->napi_enabled = true;
1429}
1430
1431static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
1432{
1433	int i;
1434
1435	if (!adapter->napi_enabled)
1436		return;
1437
1438	for (i = 0; i < adapter->req_rx_queues; i++) {
1439		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
1440		napi_disable(&adapter->napi[i]);
1441	}
1442
1443	adapter->napi_enabled = false;
1444}
1445
1446static int init_napi(struct ibmvnic_adapter *adapter)
1447{
1448	int i;
1449
1450	adapter->napi = kcalloc(adapter->req_rx_queues,
1451				sizeof(struct napi_struct), GFP_KERNEL);
1452	if (!adapter->napi)
1453		return -ENOMEM;
1454
1455	for (i = 0; i < adapter->req_rx_queues; i++) {
1456		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
1457		netif_napi_add(adapter->netdev, &adapter->napi[i],
1458			       ibmvnic_poll);
1459	}
1460
1461	adapter->num_active_rx_napi = adapter->req_rx_queues;
1462	return 0;
1463}
1464
1465static void release_napi(struct ibmvnic_adapter *adapter)
1466{
1467	int i;
1468
1469	if (!adapter->napi)
1470		return;
1471
1472	for (i = 0; i < adapter->num_active_rx_napi; i++) {
1473		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
1474		netif_napi_del(&adapter->napi[i]);
1475	}
1476
1477	kfree(adapter->napi);
1478	adapter->napi = NULL;
1479	adapter->num_active_rx_napi = 0;
1480	adapter->napi_enabled = false;
1481}
1482
1483static const char *adapter_state_to_string(enum vnic_state state)
1484{
1485	switch (state) {
1486	case VNIC_PROBING:
1487		return "PROBING";
1488	case VNIC_PROBED:
1489		return "PROBED";
1490	case VNIC_OPENING:
1491		return "OPENING";
1492	case VNIC_OPEN:
1493		return "OPEN";
1494	case VNIC_CLOSING:
1495		return "CLOSING";
1496	case VNIC_CLOSED:
1497		return "CLOSED";
1498	case VNIC_REMOVING:
1499		return "REMOVING";
1500	case VNIC_REMOVED:
1501		return "REMOVED";
1502	case VNIC_DOWN:
1503		return "DOWN";
1504	}
1505	return "UNKNOWN";
1506}
1507
1508static int ibmvnic_login(struct net_device *netdev)
1509{
1510	unsigned long flags, timeout = msecs_to_jiffies(20000);
1511	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1512	int retry_count = 0;
1513	int retries = 10;
1514	bool retry;
1515	int rc;
1516
1517	do {
1518		retry = false;
1519		if (retry_count > retries) {
1520			netdev_warn(netdev, "Login attempts exceeded\n");
1521			return -EACCES;
1522		}
1523
1524		adapter->init_done_rc = 0;
1525		reinit_completion(&adapter->init_done);
1526		rc = send_login(adapter);
1527		if (rc)
1528			return rc;
1529
1530		if (!wait_for_completion_timeout(&adapter->init_done,
1531						 timeout)) {
1532			netdev_warn(netdev, "Login timed out\n");
1533			adapter->login_pending = false;
1534			goto partial_reset;
1535		}
1536
1537		if (adapter->init_done_rc == ABORTED) {
1538			netdev_warn(netdev, "Login aborted, retrying...\n");
1539			retry = true;
1540			adapter->init_done_rc = 0;
1541			retry_count++;
1542			/* FW or device may be busy, so
1543			 * wait a bit before retrying login
1544			 */
1545			msleep(500);
1546		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
1547			retry_count++;
1548			release_sub_crqs(adapter, 1);
1549
1550			retry = true;
1551			netdev_dbg(netdev,
1552				   "Received partial success, retrying...\n");
1553			adapter->init_done_rc = 0;
1554			reinit_completion(&adapter->init_done);
1555			send_query_cap(adapter);
1556			if (!wait_for_completion_timeout(&adapter->init_done,
1557							 timeout)) {
1558				netdev_warn(netdev,
1559					    "Capabilities query timed out\n");
1560				return -ETIMEDOUT;
1561			}
1562
1563			rc = init_sub_crqs(adapter);
1564			if (rc) {
1565				netdev_warn(netdev,
1566					    "SCRQ initialization failed\n");
1567				return rc;
1568			}
1569
1570			rc = init_sub_crq_irqs(adapter);
1571			if (rc) {
1572				netdev_warn(netdev,
1573					    "SCRQ irq initialization failed\n");
1574				return rc;
1575			}
1576		/* Default/timeout error handling, reset and start fresh */
1577		} else if (adapter->init_done_rc) {
1578			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
1579				    adapter->init_done_rc);
1580
1581partial_reset:
1582			/* adapter login failed, so free any CRQs or sub-CRQs
1583			 * and register again before attempting to login again.
1584			 * If we don't do this then the VIOS may think that
1585			 * we are already logged in and reject any subsequent
1586			 * attempts
1587			 */
1588			netdev_warn(netdev,
1589				    "Freeing and re-registering CRQs before attempting to login again\n");
1590			retry = true;
1591			adapter->init_done_rc = 0;
1592			release_sub_crqs(adapter, true);
1593			/* Much of this is similar to the logic in ibmvnic_probe();
1594			 * we are essentially re-initializing communication
1595			 * with the server. We really should not run any
1596			 * resets/failovers here because this is already a form
1597			 * of reset and we do not want parallel resets occurring.
1598			 */
1599			do {
1600				reinit_init_done(adapter);
1601				/* Clear any failovers we got in the previous
1602				 * pass since we are re-initializing the CRQ
1603				 */
1604				adapter->failover_pending = false;
1605				release_crq_queue(adapter);
1606				/* If we don't sleep here then we risk an
1607				 * unnecessary failover event from the VIOS.
1608				 * This is a known VIOS issue caused by a vnic
1609				 * device freeing and registering a CRQ too
1610				 * quickly.
1611				 */
1612				msleep(1500);
1613				/* Avoid any resets, since we are currently
1614				 * resetting.
1615				 */
1616				spin_lock_irqsave(&adapter->rwi_lock, flags);
1617				flush_reset_queue(adapter);
1618				spin_unlock_irqrestore(&adapter->rwi_lock,
1619						       flags);
1620
1621				rc = init_crq_queue(adapter);
1622				if (rc) {
1623					netdev_err(netdev, "login recovery: init CRQ failed %d\n",
1624						   rc);
1625					return -EIO;
1626				}
1627
1628				rc = ibmvnic_reset_init(adapter, false);
1629				if (rc)
1630					netdev_err(netdev, "login recovery: Reset init failed %d\n",
1631						   rc);
1632				/* IBMVNIC_CRQ_INIT will return EAGAIN if it
1633				 * fails. Since ibmvnic_reset_init() frees the
1634				 * irqs on failure, we won't be able to receive
1635				 * new CRQs, so we need to keep trying. probe()
1636				 * handles this similarly.
1637				 */
1638			} while (rc == -EAGAIN && retry_count++ < retries);
1639		}
1640	} while (retry);
1641
1642	__ibmvnic_set_mac(netdev, adapter->mac_addr);
1643
1644	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
1645	return 0;
1646}
1647
1648static void release_login_buffer(struct ibmvnic_adapter *adapter)
1649{
1650	if (!adapter->login_buf)
1651		return;
1652
1653	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
1654			 adapter->login_buf_sz, DMA_TO_DEVICE);
1655	kfree(adapter->login_buf);
1656	adapter->login_buf = NULL;
1657}
1658
1659static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
1660{
1661	if (!adapter->login_rsp_buf)
1662		return;
1663
1664	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
1665			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
1666	kfree(adapter->login_rsp_buf);
1667	adapter->login_rsp_buf = NULL;
1668}
1669
1670static void release_resources(struct ibmvnic_adapter *adapter)
1671{
1672	release_vpd_data(adapter);
1673
1674	release_napi(adapter);
1675	release_login_buffer(adapter);
1676	release_login_rsp_buffer(adapter);
1677}
1678
1679static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
1680{
1681	struct net_device *netdev = adapter->netdev;
1682	unsigned long timeout = msecs_to_jiffies(20000);
1683	union ibmvnic_crq crq;
1684	bool resend;
1685	int rc;
1686
1687	netdev_dbg(netdev, "setting link state %d\n", link_state);
1688
1689	memset(&crq, 0, sizeof(crq));
1690	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
1691	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
1692	crq.logical_link_state.link_state = link_state;
1693
1694	do {
1695		resend = false;
1696
1697		reinit_completion(&adapter->init_done);
1698		rc = ibmvnic_send_crq(adapter, &crq);
1699		if (rc) {
1700			netdev_err(netdev, "Failed to set link state\n");
1701			return rc;
1702		}
1703
1704		if (!wait_for_completion_timeout(&adapter->init_done,
1705						 timeout)) {
1706			netdev_err(netdev, "timeout setting link state\n");
1707			return -ETIMEDOUT;
1708		}
1709
1710		if (adapter->init_done_rc == PARTIALSUCCESS) {
1711			/* Partial success, delay and re-send */
1712			mdelay(1000);
1713			resend = true;
1714		} else if (adapter->init_done_rc) {
1715			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
1716				    adapter->init_done_rc);
1717			return adapter->init_done_rc;
1718		}
1719	} while (resend);
1720
1721	return 0;
1722}
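
    /* For example, if the VIOS answers LOGICAL_LINK_STATE with
     * PARTIALSUCCESS, the loop above waits one second (busy-waiting via
     * mdelay) and re-sends the same CRQ; any other nonzero init_done_rc
     * is returned to the caller as the final status.
     */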
1723
1724static int set_real_num_queues(struct net_device *netdev)
1725{
1726	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1727	int rc;
1728
1729	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
1730		   adapter->req_tx_queues, adapter->req_rx_queues);
1731
1732	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
1733	if (rc) {
1734		netdev_err(netdev, "failed to set the number of tx queues\n");
1735		return rc;
1736	}
1737
1738	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
1739	if (rc)
1740		netdev_err(netdev, "failed to set the number of rx queues\n");
1741
1742	return rc;
1743}
1744
1745static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
1746{
1747	struct device *dev = &adapter->vdev->dev;
1748	union ibmvnic_crq crq;
1749	int len = 0;
1750	int rc;
1751
1752	if (adapter->vpd->buff)
1753		len = adapter->vpd->len;
1754
1755	mutex_lock(&adapter->fw_lock);
1756	adapter->fw_done_rc = 0;
1757	reinit_completion(&adapter->fw_done);
1758
1759	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
1760	crq.get_vpd_size.cmd = GET_VPD_SIZE;
1761	rc = ibmvnic_send_crq(adapter, &crq);
1762	if (rc) {
1763		mutex_unlock(&adapter->fw_lock);
1764		return rc;
1765	}
1766
1767	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1768	if (rc) {
1769		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
1770		mutex_unlock(&adapter->fw_lock);
1771		return rc;
1772	}
1773	mutex_unlock(&adapter->fw_lock);
1774
1775	if (!adapter->vpd->len)
1776		return -ENODATA;
1777
    	/* Reallocate from scratch on a size change; assigning krealloc()'s
    	 * result directly here would leak the old buffer if it failed.
    	 */
1778	if (adapter->vpd->len != len) {
1779		kfree(adapter->vpd->buff);
1780		adapter->vpd->buff = NULL;
1781	}
1782	if (!adapter->vpd->buff)
1783		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
1784
1785	if (!adapter->vpd->buff) {
1786		dev_err(dev, "Could not allocate VPD buffer\n");
1787		return -ENOMEM;
1788	}
1789
1790	adapter->vpd->dma_addr =
1791		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
1792			       DMA_FROM_DEVICE);
1793	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
1794		dev_err(dev, "Could not map VPD buffer\n");
1795		kfree(adapter->vpd->buff);
1796		adapter->vpd->buff = NULL;
1797		return -ENOMEM;
1798	}
1799
1800	mutex_lock(&adapter->fw_lock);
1801	adapter->fw_done_rc = 0;
1802	reinit_completion(&adapter->fw_done);
1803
1804	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
1805	crq.get_vpd.cmd = GET_VPD;
1806	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
1807	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
1808	rc = ibmvnic_send_crq(adapter, &crq);
1809	if (rc) {
1810		kfree(adapter->vpd->buff);
1811		adapter->vpd->buff = NULL;
1812		mutex_unlock(&adapter->fw_lock);
1813		return rc;
1814	}
1815
1816	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
1817	if (rc) {
1818		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
1819		kfree(adapter->vpd->buff);
1820		adapter->vpd->buff = NULL;
1821		mutex_unlock(&adapter->fw_lock);
1822		return rc;
1823	}
1824
1825	mutex_unlock(&adapter->fw_lock);
1826	return 0;
1827}
1828
1829static int init_resources(struct ibmvnic_adapter *adapter)
1830{
1831	struct net_device *netdev = adapter->netdev;
1832	int rc;
1833
1834	rc = set_real_num_queues(netdev);
1835	if (rc)
1836		return rc;
1837
1838	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1839	if (!adapter->vpd)
1840		return -ENOMEM;
1841
1842	/* Vital Product Data (VPD) */
1843	rc = ibmvnic_get_vpd(adapter);
1844	if (rc) {
1845		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1846		return rc;
1847	}
1848
1849	rc = init_napi(adapter);
1850	if (rc)
1851		return rc;
1852
1853	send_query_map(adapter);
1854
1855	rc = init_rx_pools(netdev);
1856	if (rc)
1857		return rc;
1858
1859	rc = init_tx_pools(netdev);
1860	return rc;
1861}
1862
1863static int __ibmvnic_open(struct net_device *netdev)
1864{
1865	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1866	enum vnic_state prev_state = adapter->state;
1867	int i, rc;
1868
1869	adapter->state = VNIC_OPENING;
1870	replenish_pools(adapter);
1871	ibmvnic_napi_enable(adapter);
1872
1873	/* We're ready to receive frames, enable the sub-crq interrupts and
1874	 * set the logical link state to up
1875	 */
1876	for (i = 0; i < adapter->req_rx_queues; i++) {
1877		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1878		if (prev_state == VNIC_CLOSED)
1879			enable_irq(adapter->rx_scrq[i]->irq);
1880		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1881	}
1882
1883	for (i = 0; i < adapter->req_tx_queues; i++) {
1884		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1885		if (prev_state == VNIC_CLOSED)
1886			enable_irq(adapter->tx_scrq[i]->irq);
1887		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1888		/* netdev_tx_reset_queue will reset dql stats. During NON_FATAL
1889		 * resets, don't reset the stats because there could be batched
1890		 * skbs waiting to be sent. If we reset dql stats, we risk
1891		 * num_completed being greater than num_queued. This will cause
1892		 * a BUG_ON in dql_completed().
1893		 */
1894		if (adapter->reset_reason != VNIC_RESET_NON_FATAL)
1895			netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
1896	}
1897
1898	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1899	if (rc) {
1900		ibmvnic_napi_disable(adapter);
1901		ibmvnic_disable_irqs(adapter);
1902		return rc;
1903	}
1904
1905	adapter->tx_queues_active = true;
1906
1907	/* Since the queues were stopped until now, no one should be in
1908	 * ibmvnic_complete_tx() or ibmvnic_xmit(), so the synchronize_rcu()
1909	 * may not be strictly needed. Leave it in for consistency with
1910	 * setting ->tx_queues_active = false.
1911	 */
1912	synchronize_rcu();
1913
1914	netif_tx_start_all_queues(netdev);
1915
1916	if (prev_state == VNIC_CLOSED) {
1917		for (i = 0; i < adapter->req_rx_queues; i++)
1918			napi_schedule(&adapter->napi[i]);
1919	}
1920
1921	adapter->state = VNIC_OPEN;
1922	return rc;
1923}
1924
1925static int ibmvnic_open(struct net_device *netdev)
1926{
1927	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1928	int rc;
1929
1930	ASSERT_RTNL();
1931
1932	/* If device failover is pending or we are about to reset, just set
1933	 * device state and return. Device operation will be handled by reset
1934	 * routine.
1935	 *
1936	 * It should be safe to overwrite the adapter->state here. Since
1937	 * we hold the rtnl, either the reset has not actually started or
1938	 * the rtnl got dropped during the set_link_state() in do_reset().
1939	 * In the former case, no one else is changing the state (again we
1940	 * have the rtnl) and in the latter case, do_reset() will detect and
1941	 * honor our setting below.
1942	 */
1943	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
1944		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
1945			   adapter_state_to_string(adapter->state),
1946			   adapter->failover_pending);
1947		adapter->state = VNIC_OPEN;
1948		rc = 0;
1949		goto out;
1950	}
1951
1952	if (adapter->state != VNIC_CLOSED) {
1953		rc = ibmvnic_login(netdev);
1954		if (rc)
1955			goto out;
1956
1957		rc = init_resources(adapter);
1958		if (rc) {
1959			netdev_err(netdev, "failed to initialize resources\n");
1960			goto out;
1961		}
1962	}
1963
1964	rc = __ibmvnic_open(netdev);
1965
1966out:
1967	/* If open failed and there is a pending failover or in-progress reset,
1968	 * set device state and return. Device operation will be handled by
1969	 * reset routine. See also comments above regarding rtnl.
1970	 */
1971	if (rc &&
1972	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
1973		adapter->state = VNIC_OPEN;
1974		rc = 0;
1975	}
1976
1977	if (rc) {
1978		release_resources(adapter);
1979		release_rx_pools(adapter);
1980		release_tx_pools(adapter);
1981	}
1982
1983	return rc;
1984}
1985
1986static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1987{
1988	struct ibmvnic_rx_pool *rx_pool;
1989	struct ibmvnic_rx_buff *rx_buff;
1990	u64 rx_entries;
1991	int rx_scrqs;
1992	int i, j;
1993
1994	if (!adapter->rx_pool)
1995		return;
1996
1997	rx_scrqs = adapter->num_active_rx_pools;
1998	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1999
2000	/* Free any remaining skbs in the rx buffer pools */
2001	for (i = 0; i < rx_scrqs; i++) {
2002		rx_pool = &adapter->rx_pool[i];
2003		if (!rx_pool || !rx_pool->rx_buff)
2004			continue;
2005
2006		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
2007		for (j = 0; j < rx_entries; j++) {
2008			rx_buff = &rx_pool->rx_buff[j];
2009			if (rx_buff && rx_buff->skb) {
2010				dev_kfree_skb_any(rx_buff->skb);
2011				rx_buff->skb = NULL;
2012			}
2013		}
2014	}
2015}
2016
2017static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
2018			      struct ibmvnic_tx_pool *tx_pool)
2019{
2020	struct ibmvnic_tx_buff *tx_buff;
2021	u64 tx_entries;
2022	int i;
2023
2024	if (!tx_pool || !tx_pool->tx_buff)
2025		return;
2026
2027	tx_entries = tx_pool->num_buffers;
2028
2029	for (i = 0; i < tx_entries; i++) {
2030		tx_buff = &tx_pool->tx_buff[i];
2031		if (tx_buff && tx_buff->skb) {
2032			dev_kfree_skb_any(tx_buff->skb);
2033			tx_buff->skb = NULL;
2034		}
2035	}
2036}
2037
2038static void clean_tx_pools(struct ibmvnic_adapter *adapter)
2039{
2040	int tx_scrqs;
2041	int i;
2042
2043	if (!adapter->tx_pool || !adapter->tso_pool)
2044		return;
2045
2046	tx_scrqs = adapter->num_active_tx_pools;
2047
2048	/* Free any remaining skbs in the tx buffer pools */
2049	for (i = 0; i < tx_scrqs; i++) {
2050		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
2051		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
2052		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
2053	}
2054}
2055
2056static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
2057{
2058	struct net_device *netdev = adapter->netdev;
2059	int i;
2060
2061	if (adapter->tx_scrq) {
2062		for (i = 0; i < adapter->req_tx_queues; i++)
2063			if (adapter->tx_scrq[i]->irq) {
2064				netdev_dbg(netdev,
2065					   "Disabling tx_scrq[%d] irq\n", i);
2066				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
2067				disable_irq(adapter->tx_scrq[i]->irq);
2068			}
2069	}
2070
2071	if (adapter->rx_scrq) {
2072		for (i = 0; i < adapter->req_rx_queues; i++) {
2073			if (adapter->rx_scrq[i]->irq) {
2074				netdev_dbg(netdev,
2075					   "Disabling rx_scrq[%d] irq\n", i);
2076				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
2077				disable_irq(adapter->rx_scrq[i]->irq);
2078			}
2079		}
2080	}
2081}
2082
2083static void ibmvnic_cleanup(struct net_device *netdev)
2084{
2085	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2086
2087	/* ensure that transmissions are stopped if called by do_reset */
2088
2089	adapter->tx_queues_active = false;
2090
2091	/* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
2092	 * update so they don't restart a queue after we stop it below.
2093	 */
2094	synchronize_rcu();
2095
2096	if (test_bit(0, &adapter->resetting))
2097		netif_tx_disable(netdev);
2098	else
2099		netif_tx_stop_all_queues(netdev);
2100
2101	ibmvnic_napi_disable(adapter);
2102	ibmvnic_disable_irqs(adapter);
2103}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

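/* Unwind the descriptors queued in the indirect buffer after a failed
 * flush: return each tx buffer to its pool's free map, free the skbs
 * and roll back the tx statistics, then wake the subqueue if enough
 * entries have drained.
 */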
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}

	ind_bufp->index = 0;

	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		rcu_read_lock();

		if (adapter->tx_queues_active) {
			netif_wake_subqueue(adapter->netdev, queue_num);
			netdev_dbg(adapter->netdev, "Started queue %d\n",
				   queue_num);
		}

		rcu_read_unlock();
	}
}

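/* Push the descriptors accumulated in the indirect buffer to the VNIC
 * server with a single send_subcrq_indirect() call; on failure the
 * queued entries are unwound via ibmvnic_tx_scrq_clean_buffer() and
 * their skbs dropped.
 */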
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return 0;
}

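/* Main transmit handler. Because the device requires long term mapped
 * buffers, the skb is copied into a preallocated long term buffer and
 * described by a TX descriptor (plus optional header descriptors for
 * checksum offload and LSO), which is batched into the sub-CRQ
 * indirect buffer and flushed to the VNIC server.
 */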
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_long_term_buff *ltb;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int bufidx = 0;
	u8 proto = 0;

	/* If a reset is in progress, drop the packet since
	 * the sub-CRQs may get torn down. Otherwise hold the
	 * RCU read lock to ensure a reset waits for us to complete.
	 */
	rcu_read_lock();
	if (!adapter->tx_queues_active) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	bufidx = tx_pool->free_map[tx_pool->consumer_index];

	if (bufidx == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);

	dst = ltb->buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = ltb->addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[bufidx];
	tx_buff->skb = skb;
	tx_buff->index = bufidx;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(bufidx);
	tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq_trans_cond_update(txq);
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	rcu_read_unlock();
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

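/* Sync the rx mode with the VNIC server by issuing MULTICAST_CTRL CRQ
 * commands: enable-all for IFF_ALLMULTI, disable-all when the
 * multicast list is empty, or one enable command per list entry.
 */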
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

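/* Request a MAC address change via a CHANGE_MAC_ADDR CRQ command and
 * wait for the response; netdev->dev_addr itself is updated by the
 * response handler, so on failure the cached mac_addr is restored
 * from the current dev_addr.
 */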
static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

/*
 * Initialize the init_done completion and return code values. We
 * can get a transport event just after registering the CRQ and the
 * tasklet will use this to communicate the transport event. To ensure
 * we don't miss the notification/error, initialize these _before_
 * registering the CRQ.
 */
static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
{
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	int rc;

	netdev_dbg(adapter->netdev,
		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   reset_reason_to_string(rwi->reset_reason),
		   adapter_state_to_string(reset_state));

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
				netdev_dbg(netdev,
					   "Open changed state from %s, updating.\n",
					   adapter_state_to_string(reset_state));
				reset_state = VNIC_OPEN;
				adapter->state = VNIC_CLOSING;
			}

			if (adapter->state != VNIC_CLOSING) {
				/* If someone else changed the adapter state
				 * when we dropped the rtnl, fail the reset
				 */
				rc = -EAGAIN;
				goto out;
			}
			adapter->state = VNIC_CLOSED;
		}
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		reinit_init_done(adapter);

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc)
			goto out;

		/* If the adapter was in PROBE or DOWN state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = init_tx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = init_rx_pools(netdev);
			if (rc) {
				netdev_dbg(netdev,
					   "init rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}

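/* Hard reset: unlike do_reset(), tear down all resources including the
 * main CRQ and rebuild from scratch, as if coming from the probed
 * state. Typically used after a transport event has invalidated the
 * existing queues.
 */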
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
		   reset_reason_to_string(rwi->reset_reason));

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_init_done(adapter);

	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE or DOWN state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	__netdev_notify_peers(netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);
	return rc;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	return rwi;
}

/**
 * do_passive_init - complete probing when partner device is detected.
 * @adapter: ibmvnic_adapter struct
 *
 * If the ibmvnic device does not have a partner device to communicate with
 * at boot and that partner device comes online at a later time, this function
 * is called to complete the initialization process of the ibmvnic device.
 * Caller is expected to hold rtnl_lock().
 *
 * Returns non-zero if sub-CRQs are not initialized properly, leaving the
 * device in the down state.
 * Returns 0 upon success, with the device in the PROBED state.
 */

static int do_passive_init(struct ibmvnic_adapter *adapter)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	netdev_dbg(netdev, "Partner device found, probing.\n");

	adapter->state = VNIC_PROBING;
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	adapter->crq.active = true;

	rc = send_crq_init_complete(adapter);
	if (rc)
		goto out;

	rc = send_version_xchg(adapter);
	if (rc)
		netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		rc = -ETIMEDOUT;
		goto out;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
		goto out;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
		goto init_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	adapter->state = VNIC_PROBED;
	netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");

	return 0;

init_failed:
	release_sub_crqs(adapter, 1);
out:
	adapter->state = VNIC_DOWN;
	return rc;
}

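/* Reset worker: drain the rwi_list and apply each queued reset with
 * do_reset() or do_hard_reset(). Serialized against ibmvnic_open()
 * through the ->resetting bit, and against probe/remove through
 * probe_done and the state checks below.
 */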
static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;
	unsigned int timeout = 5000;
	struct ibmvnic_rwi *tmprwi;
	bool saved_state = false;
	struct ibmvnic_rwi *rwi;
	unsigned long flags;
	struct device *dev;
	bool need_reset;
	int num_fails = 0;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	dev = &adapter->vdev->dev;

	/* Wait for ibmvnic_probe() to complete. If probe is taking too long
	 * or if another reset is in progress, defer work for now. If probe
	 * eventually fails it will flush and terminate our work.
	 *
	 * Three possibilities here:
	 * 1. Adapter being removed - just return
	 * 2. Timed out on probe or another reset in progress - delay the work
	 * 3. Completed probe - perform any resets in queue
	 */
	if (adapter->state == VNIC_PROBING &&
	    !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
		dev_err(dev, "Reset thread timed out on probe");
		queue_delayed_work(system_long_wq,
				   &adapter->ibmvnic_delayed_reset,
				   IBMVNIC_RESET_DELAY);
		return;
	}

	/* adapter is done with probe (i.e state is never VNIC_PROBING now) */
	if (adapter->state == VNIC_REMOVING)
		return;

	/* ->rwi_list is stable now (no one else is removing entries) */

	/* ibmvnic_probe() may have purged the reset queue after we were
	 * scheduled to process a reset so there may be no resets to process.
	 * Before setting the ->resetting bit though, we have to make sure
	 * that there is in fact a reset to process. Otherwise we may race
	 * with ibmvnic_open() and end up leaving the vnic down:
	 *
	 *	__ibmvnic_reset()	    ibmvnic_open()
	 *	-----------------	    --------------
	 *
	 *  set ->resetting bit
	 *				find ->resetting bit is set
	 *				set ->state to IBMVNIC_OPEN (i.e
	 *				assume reset will open device)
	 *				return
	 *  find reset queue empty
	 *  return
	 *
	 *	Neither performed vnic login/open and vnic stays down
	 *
	 * If we hold the lock and conditionally set the bit, either we
	 * or ibmvnic_open() will complete the open.
	 */
	need_reset = false;
	spin_lock(&adapter->rwi_lock);
	if (!list_empty(&adapter->rwi_list)) {
		if (test_and_set_bit_lock(0, &adapter->resetting)) {
			queue_delayed_work(system_long_wq,
					   &adapter->ibmvnic_delayed_reset,
					   IBMVNIC_RESET_DELAY);
		} else {
			need_reset = true;
		}
	}
	spin_unlock(&adapter->rwi_lock);

	if (!need_reset)
		return;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		spin_lock_irqsave(&adapter->state_lock, flags);

		if (adapter->state == VNIC_REMOVING ||
		    adapter->state == VNIC_REMOVED) {
			spin_unlock_irqrestore(&adapter->state_lock, flags);
			kfree(rwi);
			rc = EBUSY;
			break;
		}

		if (!saved_state) {
			reset_state = adapter->state;
			saved_state = true;
		}
		spin_unlock_irqrestore(&adapter->state_lock, flags);

		if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
			rtnl_lock();
			rc = do_passive_init(adapter);
			rtnl_unlock();
			if (!rc)
				netif_carrier_on(adapter->netdev);
		} else if (adapter->force_reset_recovery) {
			/* Since we are doing a hard reset now, clear the
			 * failover_pending flag so we don't ignore any
			 * future MOBILITY or other resets.
			 */
			adapter->failover_pending = false;

			/* Transport event occurred during previous reset */
			if (adapter->wait_for_reset) {
				/* Previous was CHANGE_PARAM; caller locked */
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
			} else {
				rtnl_lock();
				adapter->force_reset_recovery = false;
				rc = do_hard_reset(adapter, rwi, reset_state);
				rtnl_unlock();
			}
			if (rc)
				num_fails++;
			else
				num_fails = 0;

			/* If auto-priority-failover is enabled we can get
			 * back to back failovers during resets, resulting
			 * in at least two failed resets (from high-priority
			 * backing device to low-priority one and then back)
			 * If resets continue to fail beyond that, give the
			 * adapter some time to settle down before retrying.
			 */
			if (num_fails >= 3) {
				netdev_dbg(adapter->netdev,
					   "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
					   adapter_state_to_string(adapter->state),
					   num_fails);
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(60 * HZ);
			}
		} else {
			rc = do_reset(adapter, rwi, reset_state);
		}
		tmprwi = rwi;
		adapter->last_reset_time = jiffies;

		if (rc)
			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);

		rwi = get_next_rwi(adapter);

		/*
		 * If there are no resets queued and the previous reset failed,
		 * the adapter would be in an undefined state. So retry the
		 * previous reset as a hard reset.
		 *
		 * Else, free the previous rwi and, if there is another reset
		 * queued, process the new reset even if previous reset failed
		 * (the previous reset could have failed because of a fail
		 * over for instance, so process the fail over).
		 */
		if (!rwi && rc)
			rwi = tmprwi;
		else
			kfree(tmprwi);

		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
			    rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
			adapter->force_reset_recovery = true;
	}

	if (adapter->wait_for_reset) {
		adapter->reset_done_rc = rc;
		complete(&adapter->reset_done);
	}

	clear_bit_unlock(0, &adapter->resetting);

	netdev_dbg(adapter->netdev,
		   "[S:%s FRR:%d WFR:%d] Done processing resets\n",
		   adapter_state_to_string(adapter->state),
		   adapter->force_reset_recovery,
		   adapter->wait_for_reset);
}

static void __ibmvnic_delayed_reset(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter;

	adapter = container_of(work, struct ibmvnic_adapter,
			       ibmvnic_delayed_reset.work);
	__ibmvnic_reset(&adapter->ibmvnic_reset);
}

static void flush_reset_queue(struct ibmvnic_adapter *adapter)
{
	struct list_head *entry, *tmp_entry;

	if (!list_empty(&adapter->rwi_list)) {
		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
			list_del(entry);
			kfree(list_entry(entry, struct ibmvnic_rwi, list));
		}
	}
}

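/* Queue a reset work item for the given reason, dropping duplicates
 * already on the list. Returns 0 on success or a negative errno if
 * the reset could not be queued.
 */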
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
			 enum ibmvnic_reset_reason reason)
{
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_rwi *rwi, *tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	/* If failover is pending don't schedule any other reset.
	 * Instead let the failover complete. If there is already a
	 * failover reset scheduled, we will detect and drop the
	 * duplicate reset when walking the ->rwi_list below.
	 */
	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED ||
	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
		ret = EBUSY;
		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
		goto err;
	}

	list_for_each_entry(tmp, &adapter->rwi_list, list) {
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
				   reset_reason_to_string(reason));
			ret = EBUSY;
			goto err;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
	if (!rwi) {
		ret = ENOMEM;
		goto err;
	}
	/* if we just received a transport event,
	 * flush reset queue and process this reset
	 */
	if (adapter->force_reset_recovery)
		flush_reset_queue(adapter);

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
		   reset_reason_to_string(reason));
	queue_work(system_long_wq, &adapter->ibmvnic_reset);

	ret = 0;
err:
	/* ibmvnic_close() below can block, so drop the lock first */
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);

	if (ret == ENOMEM)
		ibmvnic_close(netdev);

	return -ret;
}

static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	if (test_bit(0, &adapter->resetting)) {
		netdev_err(adapter->netdev,
			   "Adapter is resetting, skip timeout reset\n");
		return;
	}
	/* Do not queue a reset until at least 5 seconds (the default
	 * watchdog value) have passed since the last reset.
	 */
	if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
		netdev_dbg(dev, "Not yet time to tx timeout.\n");
		return;
	}
	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

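/* NAPI poll handler for one rx sub-CRQ: copy each completed frame out
 * of its long term buffer into the waiting skb, push it up the stack
 * with napi_gro_receive() and replenish the rx pool when it runs low.
 */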
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct ibmvnic_sub_crq_queue *rx_scrq;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	int frames_processed;
	int scrq_num;

	netdev = napi->dev;
	adapter = netdev_priv(netdev);
	scrq_num = (int)(napi - adapter->napi);
	frames_processed = 0;
	rx_scrq = adapter->rx_scrq[scrq_num];

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, rx_scrq);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, rx_scrq))
			break;
		next = ibmvnic_next_scrq(adapter, rx_scrq);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		/* load long_term_buff before copying to skb */
		dma_rmb();
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING &&
	    ((atomic_read(&adapter->rx_pool[scrq_num].available) <
	      adapter->req_rx_add_entries_per_subcrq / 2) ||
	      frames_processed < budget))
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
	if (frames_processed < budget) {
		if (napi_complete_done(napi, frames_processed)) {
			enable_scrq_irq(adapter, rx_scrq);
			if (pending_scrq(adapter, rx_scrq)) {
				if (napi_schedule(napi)) {
					disable_scrq_irq(adapter, rx_scrq);
					goto restart_poll;
				}
			}
		}
	}
	return frames_processed;
}

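/* Issue a CHANGE_PARAM reset to apply the values in adapter->desired
 * and block until it completes. If the reset fails, the previous
 * settings saved in adapter->fallback are restored with a second
 * CHANGE_PARAM reset.
 */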
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);

	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check     = ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strscpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
		data += ETH_GSTRING_LEN;

		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
		data += ETH_GSTRING_LEN;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT
				      (adapter, ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings            = ibmvnic_get_strings,
	.get_sset_count         = ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

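/* Re-register one sub-CRQ with the hypervisor after a reset: release
 * its IRQ mapping, zero the message queue state and re-register the
 * zeroed message pages via h_reg_sub_crq().
 */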
3858static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3859				   struct ibmvnic_sub_crq_queue *scrq)
3860{
3861	int rc;
3862
3863	if (!scrq) {
3864		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3865		return -EINVAL;
3866	}
3867
3868	if (scrq->irq) {
3869		free_irq(scrq->irq, scrq);
3870		irq_dispose_mapping(scrq->irq);
3871		scrq->irq = 0;
3872	}
3873
3874	if (scrq->msgs) {
3875		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3876		atomic_set(&scrq->used, 0);
3877		scrq->cur = 0;
3878		scrq->ind_buf.index = 0;
3879	} else {
3880		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3881		return -EINVAL;
3882	}
3883
3884	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3885			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3886	return rc;
3887}
3888
3889static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3890{
3891	int i, rc;
3892
3893	if (!adapter->tx_scrq || !adapter->rx_scrq)
3894		return -EINVAL;
3895
3896	ibmvnic_clean_affinity(adapter);
3897
3898	for (i = 0; i < adapter->req_tx_queues; i++) {
3899		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3900		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3901		if (rc)
3902			return rc;
3903	}
3904
3905	for (i = 0; i < adapter->req_rx_queues; i++) {
3906		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3907		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3908		if (rc)
3909			return rc;
3910	}
3911
3912	return rc;
3913}
3914
3915static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3916				  struct ibmvnic_sub_crq_queue *scrq,
3917				  bool do_h_free)
3918{
3919	struct device *dev = &adapter->vdev->dev;
3920	long rc;
3921
3922	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3923
3924	if (do_h_free) {
3925		/* Close the sub-crqs */
3926		do {
3927			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3928						adapter->vdev->unit_address,
3929						scrq->crq_num);
3930		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3931
3932		if (rc) {
3933			netdev_err(adapter->netdev,
3934				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
3935				   scrq->crq_num, rc);
3936		}
3937	}
3938
3939	dma_free_coherent(dev,
3940			  IBMVNIC_IND_ARR_SZ,
3941			  scrq->ind_buf.indir_arr,
3942			  scrq->ind_buf.indir_dma);
3943
3944	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3945			 DMA_BIDIRECTIONAL);
3946	free_pages((unsigned long)scrq->msgs, 2);
3947	free_cpumask_var(scrq->affinity_mask);
3948	kfree(scrq);
3949}
3950
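/* Allocate and register one sub-CRQ with the hypervisor. The message
 * queue is an order-2 (4 * PAGE_SIZE) allocation that is DMA mapped
 * once and reused for the life of the queue. H_RESOURCE from
 * H_REG_SUB_CRQ usually means stale state from a previous instance, in
 * which case the main CRQ is reset; H_CLOSED means the partner adapter
 * is not ready yet.
 */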
3951static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3952							*adapter)
3953{
3954	struct device *dev = &adapter->vdev->dev;
3955	struct ibmvnic_sub_crq_queue *scrq;
3956	int rc;
3957
3958	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3959	if (!scrq)
3960		return NULL;
3961
3962	scrq->msgs =
3963		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3964	if (!scrq->msgs) {
3965		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3966		goto zero_page_failed;
3967	}
3968	if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
3969		goto cpumask_alloc_failed;
3970
3971	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3972					 DMA_BIDIRECTIONAL);
3973	if (dma_mapping_error(dev, scrq->msg_token)) {
3974		dev_warn(dev, "Couldn't map crq queue messages page\n");
3975		goto map_failed;
3976	}
3977
3978	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3979			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3980
3981	if (rc == H_RESOURCE)
3982		rc = ibmvnic_reset_crq(adapter);
3983
3984	if (rc == H_CLOSED) {
3985		dev_warn(dev, "Partner adapter not ready, waiting.\n");
3986	} else if (rc) {
3987		dev_warn(dev, "Error %d registering sub-crq\n", rc);
3988		goto reg_failed;
3989	}
3990
3991	scrq->adapter = adapter;
3992	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3993	scrq->ind_buf.index = 0;
3994
3995	scrq->ind_buf.indir_arr =
3996		dma_alloc_coherent(dev,
3997				   IBMVNIC_IND_ARR_SZ,
3998				   &scrq->ind_buf.indir_dma,
3999				   GFP_KERNEL);
4000
4001	if (!scrq->ind_buf.indir_arr)
4002		goto indir_failed;
4003
4004	spin_lock_init(&scrq->lock);
4005
4006	netdev_dbg(adapter->netdev,
4007		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
4008		   scrq->crq_num, scrq->hw_irq, scrq->irq);
4009
4010	return scrq;
4011
4012indir_failed:
4013	do {
4014		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
4015					adapter->vdev->unit_address,
4016					scrq->crq_num);
4017	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4018reg_failed:
4019	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
4020			 DMA_BIDIRECTIONAL);
4021map_failed:
4022	free_cpumask_var(scrq->affinity_mask);
4023cpumask_alloc_failed:
4024	free_pages((unsigned long)scrq->msgs, 2);
4025zero_page_failed:
4026	kfree(scrq);
4027
4028	return NULL;
4029}
4030
4031static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
4032{
4033	int i;
4034
4035	ibmvnic_clean_affinity(adapter);
4036	if (adapter->tx_scrq) {
4037		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
4038			if (!adapter->tx_scrq[i])
4039				continue;
4040
4041			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
4042				   i);
4043			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
4044			if (adapter->tx_scrq[i]->irq) {
4045				free_irq(adapter->tx_scrq[i]->irq,
4046					 adapter->tx_scrq[i]);
4047				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
4048				adapter->tx_scrq[i]->irq = 0;
4049			}
4050
4051			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
4052					      do_h_free);
4053		}
4054
4055		kfree(adapter->tx_scrq);
4056		adapter->tx_scrq = NULL;
4057		adapter->num_active_tx_scrqs = 0;
4058	}
4059
4060	if (adapter->rx_scrq) {
4061		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
4062			if (!adapter->rx_scrq[i])
4063				continue;
4064
4065			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
4066				   i);
4067			if (adapter->rx_scrq[i]->irq) {
4068				free_irq(adapter->rx_scrq[i]->irq,
4069					 adapter->rx_scrq[i]);
4070				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
4071				adapter->rx_scrq[i]->irq = 0;
4072			}
4073
4074			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
4075					      do_h_free);
4076		}
4077
4078		kfree(adapter->rx_scrq);
4079		adapter->rx_scrq = NULL;
4080		adapter->num_active_rx_scrqs = 0;
4081	}
4082}
4083
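/* Sub-CRQ interrupts are masked and unmasked through the hypervisor
 * (H_VIOCTL with H_DISABLE_VIO_INTERRUPT/H_ENABLE_VIO_INTERRUPT)
 * rather than through the local interrupt controller.
 */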
4084static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
4085			    struct ibmvnic_sub_crq_queue *scrq)
4086{
4087	struct device *dev = &adapter->vdev->dev;
4088	unsigned long rc;
4089
4090	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4091				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4092	if (rc)
4093		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
4094			scrq->hw_irq, rc);
4095	return rc;
4096}
4097
4098/* We cannot use the IRQ chip EOI handler because that has the
4099 * unintended effect of changing the interrupt priority.
4100 */
4101static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
4102{
4103	u64 val = 0xff000000 | scrq->hw_irq;
4104	unsigned long rc;
4105
4106	rc = plpar_hcall_norets(H_EOI, val);
4107	if (rc)
4108		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
4109}
4110
4111/* Due to a firmware bug, the hypervisor can send an interrupt to a
4112 * transmit or receive queue just prior to a partition migration.
4113 * Force an EOI after migration.
4114 */
4115static void ibmvnic_clear_pending_interrupt(struct device *dev,
4116					    struct ibmvnic_sub_crq_queue *scrq)
4117{
4118	if (!xive_enabled())
4119		ibmvnic_xics_eoi(dev, scrq);
4120}
4121
4122static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
4123			   struct ibmvnic_sub_crq_queue *scrq)
4124{
4125	struct device *dev = &adapter->vdev->dev;
4126	unsigned long rc;
4127
4128	if (scrq->hw_irq > 0x100000000ULL) {
4129		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
4130		return 1;
4131	}
4132
4133	if (test_bit(0, &adapter->resetting) &&
4134	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
4135		ibmvnic_clear_pending_interrupt(dev, scrq);
4136	}
4137
4138	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4139				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
4140	if (rc)
4141		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
4142			scrq->hw_irq, rc);
4143	return rc;
4144}
4145
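/* Drain tx completions for one sub-CRQ. Each completion carries up to
 * num_comps correlators that index back into the tx (or TSO) buffer
 * pool, letting the skb be freed and the free_map slot recycled; BQL
 * accounting is updated via netdev_tx_completed_queue(). After the
 * queue drains, the IRQ is re-enabled and the queue re-checked so that
 * a completion racing with the enable is not lost.
 */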
4146static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
4147			       struct ibmvnic_sub_crq_queue *scrq)
4148{
4149	struct device *dev = &adapter->vdev->dev;
4150	struct ibmvnic_tx_pool *tx_pool;
4151	struct ibmvnic_tx_buff *txbuff;
4152	struct netdev_queue *txq;
4153	union sub_crq *next;
4154	int index;
4155	int i;
4156
4157restart_loop:
4158	while (pending_scrq(adapter, scrq)) {
4159		unsigned int pool = scrq->pool_index;
4160		int num_entries = 0;
4161		int total_bytes = 0;
4162		int num_packets = 0;
4163
4164		next = ibmvnic_next_scrq(adapter, scrq);
4165		for (i = 0; i < next->tx_comp.num_comps; i++) {
4166			index = be32_to_cpu(next->tx_comp.correlators[i]);
4167			if (index & IBMVNIC_TSO_POOL_MASK) {
4168				tx_pool = &adapter->tso_pool[pool];
4169				index &= ~IBMVNIC_TSO_POOL_MASK;
4170			} else {
4171				tx_pool = &adapter->tx_pool[pool];
4172			}
4173
4174			txbuff = &tx_pool->tx_buff[index];
4175			num_packets++;
4176			num_entries += txbuff->num_entries;
4177			if (txbuff->skb) {
4178				total_bytes += txbuff->skb->len;
4179				if (next->tx_comp.rcs[i]) {
4180					dev_err(dev, "tx error %x\n",
4181						next->tx_comp.rcs[i]);
4182					dev_kfree_skb_irq(txbuff->skb);
4183				} else {
4184					dev_consume_skb_irq(txbuff->skb);
4185				}
4186				txbuff->skb = NULL;
4187			} else {
4188				netdev_warn(adapter->netdev,
4189					    "TX completion received with NULL socket buffer\n");
4190			}
4191			tx_pool->free_map[tx_pool->producer_index] = index;
4192			tx_pool->producer_index =
4193				(tx_pool->producer_index + 1) %
4194					tx_pool->num_buffers;
4195		}
4196		/* remove tx_comp scrq */
4197		next->tx_comp.first = 0;
4198
4199		txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
4200		netdev_tx_completed_queue(txq, num_packets, total_bytes);
4201
4202		if (atomic_sub_return(num_entries, &scrq->used) <=
4203		    (adapter->req_tx_entries_per_subcrq / 2) &&
4204		    __netif_subqueue_stopped(adapter->netdev,
4205					     scrq->pool_index)) {
4206			rcu_read_lock();
4207			if (adapter->tx_queues_active) {
4208				netif_wake_subqueue(adapter->netdev,
4209						    scrq->pool_index);
4210				netdev_dbg(adapter->netdev,
4211					   "Started queue %d\n",
4212					   scrq->pool_index);
4213			}
4214			rcu_read_unlock();
4215		}
4216	}
4217
4218	enable_scrq_irq(adapter, scrq);
4219
4220	if (pending_scrq(adapter, scrq)) {
4221		disable_scrq_irq(adapter, scrq);
4222		goto restart_loop;
4223	}
4224
4225	return 0;
4226}
4227
4228static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
4229{
4230	struct ibmvnic_sub_crq_queue *scrq = instance;
4231	struct ibmvnic_adapter *adapter = scrq->adapter;
4232
4233	disable_scrq_irq(adapter, scrq);
4234	ibmvnic_complete_tx(adapter, scrq);
4235
4236	return IRQ_HANDLED;
4237}
4238
4239static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
4240{
4241	struct ibmvnic_sub_crq_queue *scrq = instance;
4242	struct ibmvnic_adapter *adapter = scrq->adapter;
4243
4244	/* When booting a kdump kernel we can hit pending interrupts
4245	 * prior to completing driver initialization.
4246	 */
4247	if (unlikely(adapter->state != VNIC_OPEN))
4248		return IRQ_NONE;
4249
4250	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
4251
4252	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
4253		disable_scrq_irq(adapter, scrq);
4254		__napi_schedule(&adapter->napi[scrq->scrq_num]);
4255	}
4256
4257	return IRQ_HANDLED;
4258}
4259
4260static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
4261{
4262	struct device *dev = &adapter->vdev->dev;
4263	struct ibmvnic_sub_crq_queue *scrq;
4264	int i = 0, j = 0;
4265	int rc = 0;
4266
4267	for (i = 0; i < adapter->req_tx_queues; i++) {
4268		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
4269			   i);
4270		scrq = adapter->tx_scrq[i];
4271		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4272
4273		if (!scrq->irq) {
4274			rc = -EINVAL;
4275			dev_err(dev, "Error mapping irq\n");
4276			goto req_tx_irq_failed;
4277		}
4278
4279		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
4280			 adapter->vdev->unit_address, i);
4281		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
4282				 0, scrq->name, scrq);
4283
4284		if (rc) {
4285			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
4286				scrq->irq, rc);
4287			irq_dispose_mapping(scrq->irq);
4288			goto req_tx_irq_failed;
4289		}
4290	}
4291
4292	for (i = 0; i < adapter->req_rx_queues; i++) {
4293		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
4294			   i);
4295		scrq = adapter->rx_scrq[i];
4296		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
4297		if (!scrq->irq) {
4298			rc = -EINVAL;
4299			dev_err(dev, "Error mapping irq\n");
4300			goto req_rx_irq_failed;
4301		}
4302		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
4303			 adapter->vdev->unit_address, i);
4304		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
4305				 0, scrq->name, scrq);
4306		if (rc) {
4307			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
4308				scrq->irq, rc);
4309			irq_dispose_mapping(scrq->irq);
4310			goto req_rx_irq_failed;
4311		}
4312	}
4313
4314	cpus_read_lock();
4315	ibmvnic_set_affinity(adapter);
4316	cpus_read_unlock();
4317
4318	return rc;
4319
4320req_rx_irq_failed:
4321	for (j = 0; j < i; j++) {
4322		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
4323		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
4324	}
4325	i = adapter->req_tx_queues;
4326req_tx_irq_failed:
4327	for (j = 0; j < i; j++) {
4328		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
4329		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
4330	}
4331	release_sub_crqs(adapter, 1);
4332	return rc;
4333}
4334
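/* Allocate the tx and rx sub-CRQs in one pass. If fewer queues could
 * be registered than requested (but no fewer than the negotiated
 * minimums), the shortfall is taken from the rx and tx requests in
 * turn before the surviving queues are split between the two arrays.
 */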
4335static int init_sub_crqs(struct ibmvnic_adapter *adapter)
4336{
4337	struct device *dev = &adapter->vdev->dev;
4338	struct ibmvnic_sub_crq_queue **allqueues;
4339	int registered_queues = 0;
4340	int total_queues;
4341	int more = 0;
4342	int i;
4343
4344	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
4345
4346	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
4347	if (!allqueues)
4348		return -ENOMEM;
4349
4350	for (i = 0; i < total_queues; i++) {
4351		allqueues[i] = init_sub_crq_queue(adapter);
4352		if (!allqueues[i]) {
4353			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
4354			break;
4355		}
4356		registered_queues++;
4357	}
4358
4359	/* Make sure we were able to register the minimum number of queues */
4360	if (registered_queues <
4361	    adapter->min_tx_queues + adapter->min_rx_queues) {
4362		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
4363		goto tx_failed;
4364	}
4365
4366	/* Distribute the allocation shortfall between the rx and tx requests */
4367	for (i = 0; i < total_queues - registered_queues + more ; i++) {
4368		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
4369		switch (i % 3) {
4370		case 0:
4371			if (adapter->req_rx_queues > adapter->min_rx_queues)
4372				adapter->req_rx_queues--;
4373			else
4374				more++;
4375			break;
4376		case 1:
4377			if (adapter->req_tx_queues > adapter->min_tx_queues)
4378				adapter->req_tx_queues--;
4379			else
4380				more++;
4381			break;
4382		}
4383	}
4384
4385	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
4386				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
4387	if (!adapter->tx_scrq)
4388		goto tx_failed;
4389
4390	for (i = 0; i < adapter->req_tx_queues; i++) {
4391		adapter->tx_scrq[i] = allqueues[i];
4392		adapter->tx_scrq[i]->pool_index = i;
4393		adapter->num_active_tx_scrqs++;
4394	}
4395
4396	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
4397				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
4398	if (!adapter->rx_scrq)
4399		goto rx_failed;
4400
4401	for (i = 0; i < adapter->req_rx_queues; i++) {
4402		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
4403		adapter->rx_scrq[i]->scrq_num = i;
4404		adapter->num_active_rx_scrqs++;
4405	}
4406
4407	kfree(allqueues);
4408	return 0;
4409
4410rx_failed:
4411	kfree(adapter->tx_scrq);
4412	adapter->tx_scrq = NULL;
4413tx_failed:
4414	for (i = 0; i < registered_queues; i++)
4415		release_sub_crq_queue(adapter, allqueues[i], 1);
4416	kfree(allqueues);
4417	return -ENOMEM;
4418}
4419
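/* Request the capability values the driver wants to use. On the first
 * pass (!retry), the requests are derived from the desired settings,
 * clamped so that a full ring of buffers still fits within one long
 * term buffer set (IBMVNIC_LTB_SET_SIZE). On retry, the values already
 * adjusted by handle_request_cap_rsp() are simply resent.
 */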
4420static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
4421{
4422	struct device *dev = &adapter->vdev->dev;
4423	union ibmvnic_crq crq;
4424	int max_entries;
4425	int cap_reqs;
4426
4427	/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
4428	 * the PROMISC flag). Initialize this count upfront. When the tasklet
4429	 * receives a response to all of these, it will send the next protocol
4430	 * message (QUERY_IP_OFFLOAD).
4431	 */
4432	if (!(adapter->netdev->flags & IFF_PROMISC) ||
4433	    adapter->promisc_supported)
4434		cap_reqs = 7;
4435	else
4436		cap_reqs = 6;
4437
4438	if (!retry) {
4439		/* Sub-CRQ entries are 32 byte long */
4440		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
4441
4442		atomic_set(&adapter->running_cap_crqs, cap_reqs);
4443
4444		if (adapter->min_tx_entries_per_subcrq > entries_page ||
4445		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
4446			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
4447			return;
4448		}
4449
4450		if (adapter->desired.mtu)
4451			adapter->req_mtu = adapter->desired.mtu;
4452		else
4453			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
4454
4455		if (!adapter->desired.tx_entries)
4456			adapter->desired.tx_entries =
4457					adapter->max_tx_entries_per_subcrq;
4458		if (!adapter->desired.rx_entries)
4459			adapter->desired.rx_entries =
4460					adapter->max_rx_add_entries_per_subcrq;
4461
4462		max_entries = IBMVNIC_LTB_SET_SIZE /
4463			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
4464
4465		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4466			adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
4467			adapter->desired.tx_entries = max_entries;
4468		}
4469
4470		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
4471			adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
4472			adapter->desired.rx_entries = max_entries;
4473		}
4474
4475		if (adapter->desired.tx_entries)
4476			adapter->req_tx_entries_per_subcrq =
4477					adapter->desired.tx_entries;
4478		else
4479			adapter->req_tx_entries_per_subcrq =
4480					adapter->max_tx_entries_per_subcrq;
4481
4482		if (adapter->desired.rx_entries)
4483			adapter->req_rx_add_entries_per_subcrq =
4484					adapter->desired.rx_entries;
4485		else
4486			adapter->req_rx_add_entries_per_subcrq =
4487					adapter->max_rx_add_entries_per_subcrq;
4488
4489		if (adapter->desired.tx_queues)
4490			adapter->req_tx_queues =
4491					adapter->desired.tx_queues;
4492		else
4493			adapter->req_tx_queues =
4494					adapter->opt_tx_comp_sub_queues;
4495
4496		if (adapter->desired.rx_queues)
4497			adapter->req_rx_queues =
4498					adapter->desired.rx_queues;
4499		else
4500			adapter->req_rx_queues =
4501					adapter->opt_rx_comp_queues;
4502
4503		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
4504	} else {
4505		atomic_add(cap_reqs, &adapter->running_cap_crqs);
4506	}
4507	memset(&crq, 0, sizeof(crq));
4508	crq.request_capability.first = IBMVNIC_CRQ_CMD;
4509	crq.request_capability.cmd = REQUEST_CAPABILITY;
4510
4511	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
4512	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
4513	cap_reqs--;
4514	ibmvnic_send_crq(adapter, &crq);
4515
4516	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
4517	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
4518	cap_reqs--;
4519	ibmvnic_send_crq(adapter, &crq);
4520
4521	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
4522	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
4523	cap_reqs--;
4524	ibmvnic_send_crq(adapter, &crq);
4525
4526	crq.request_capability.capability =
4527	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
4528	crq.request_capability.number =
4529	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
4530	cap_reqs--;
4531	ibmvnic_send_crq(adapter, &crq);
4532
4533	crq.request_capability.capability =
4534	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
4535	crq.request_capability.number =
4536	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
4537	cap_reqs--;
4538	ibmvnic_send_crq(adapter, &crq);
4539
4540	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
4541	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
4542	cap_reqs--;
4543	ibmvnic_send_crq(adapter, &crq);
4544
4545	if (adapter->netdev->flags & IFF_PROMISC) {
4546		if (adapter->promisc_supported) {
4547			crq.request_capability.capability =
4548			    cpu_to_be16(PROMISC_REQUESTED);
4549			crq.request_capability.number = cpu_to_be64(1);
4550			cap_reqs--;
4551			ibmvnic_send_crq(adapter, &crq);
4552		}
4553	} else {
4554		crq.request_capability.capability =
4555		    cpu_to_be16(PROMISC_REQUESTED);
4556		crq.request_capability.number = cpu_to_be64(0);
4557		cap_reqs--;
4558		ibmvnic_send_crq(adapter, &crq);
4559	}
4560
4561	/* Keep at end to catch any discrepancy between expected and actual
4562	 * CRQs sent.
4563	 */
4564	WARN_ON(cap_reqs != 0);
4565}
4566
4567static int pending_scrq(struct ibmvnic_adapter *adapter,
4568			struct ibmvnic_sub_crq_queue *scrq)
4569{
4570	union sub_crq *entry = &scrq->msgs[scrq->cur];
4571	int rc;
4572
4573	rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
4574
4575	/* Ensure that the SCRQ valid flag is loaded prior to loading the
4576	 * contents of the SCRQ descriptor
4577	 */
4578	dma_rmb();
4579
4580	return rc;
4581}
4582
4583static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
4584					struct ibmvnic_sub_crq_queue *scrq)
4585{
4586	union sub_crq *entry;
4587	unsigned long flags;
4588
4589	spin_lock_irqsave(&scrq->lock, flags);
4590	entry = &scrq->msgs[scrq->cur];
4591	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4592		if (++scrq->cur == scrq->size)
4593			scrq->cur = 0;
4594	} else {
4595		entry = NULL;
4596	}
4597	spin_unlock_irqrestore(&scrq->lock, flags);
4598
4599	/* Ensure that the SCRQ valid flag is loaded prior to loading the
4600	 * contents of the SCRQ descriptor
4601	 */
4602	dma_rmb();
4603
4604	return entry;
4605}
4606
4607static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4608{
4609	struct ibmvnic_crq_queue *queue = &adapter->crq;
4610	union ibmvnic_crq *crq;
4611
4612	crq = &queue->msgs[queue->cur];
4613	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4614		if (++queue->cur == queue->size)
4615			queue->cur = 0;
4616	} else {
4617		crq = NULL;
4618	}
4619
4620	return crq;
4621}
4622
4623static void print_subcrq_error(struct device *dev, int rc, const char *func)
4624{
4625	switch (rc) {
4626	case H_PARAMETER:
4627		dev_warn_ratelimited(dev,
4628				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4629				     func, rc);
4630		break;
4631	case H_CLOSED:
4632		dev_warn_ratelimited(dev,
4633				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4634				     func, rc);
4635		break;
4636	default:
4637		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4638		break;
4639	}
4640}
4641
4642static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4643				u64 remote_handle, u64 ioba, u64 num_entries)
4644{
4645	unsigned int ua = adapter->vdev->unit_address;
4646	struct device *dev = &adapter->vdev->dev;
4647	int rc;
4648
4649	/* Make sure the hypervisor sees the complete request */
4650	dma_wmb();
4651	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4652				cpu_to_be64(remote_handle),
4653				ioba, num_entries);
4654
4655	if (rc)
4656		print_subcrq_error(dev, rc, __func__);
4657
4658	return rc;
4659}
4660
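/* Send one 16-byte CRQ descriptor to the hypervisor as two big-endian
 * u64s via H_SEND_CRQ. While the CRQ is marked inactive (e.g. during a
 * reset), only an INIT command is allowed through; anything else is
 * rejected up front.
 */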
4661static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4662			    union ibmvnic_crq *crq)
4663{
4664	unsigned int ua = adapter->vdev->unit_address;
4665	struct device *dev = &adapter->vdev->dev;
4666	u64 *u64_crq = (u64 *)crq;
4667	int rc;
4668
4669	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4670		   (unsigned long)cpu_to_be64(u64_crq[0]),
4671		   (unsigned long)cpu_to_be64(u64_crq[1]));
4672
4673	if (!adapter->crq.active &&
4674	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4675		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4676		return -EINVAL;
4677	}
4678
4679	/* Make sure the hypervisor sees the complete request */
4680	dma_wmb();
4681
4682	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4683				cpu_to_be64(u64_crq[0]),
4684				cpu_to_be64(u64_crq[1]));
4685
4686	if (rc) {
4687		if (rc == H_CLOSED) {
4688			dev_warn(dev, "CRQ Queue closed\n");
4689			/* Do not reset; report the failure and wait for a passive init from the server */
4690		}
4691
4692		dev_warn(dev, "Send error (rc=%d)\n", rc);
4693	}
4694
4695	return rc;
4696}
4697
4698static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4699{
4700	struct device *dev = &adapter->vdev->dev;
4701	union ibmvnic_crq crq;
4702	int retries = 100;
4703	int rc;
4704
4705	memset(&crq, 0, sizeof(crq));
4706	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4707	crq.generic.cmd = IBMVNIC_CRQ_INIT;
4708	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4709
4710	do {
4711		rc = ibmvnic_send_crq(adapter, &crq);
4712		if (rc != H_CLOSED)
4713			break;
4714		retries--;
4715		msleep(50);
4716
4717	} while (retries > 0);
4718
4719	if (rc) {
4720		dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4721		return rc;
4722	}
4723
4724	return 0;
4725}
4726
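/* The login buffer sent to the server is laid out as:
 *
 *   struct ibmvnic_login_buffer      (header)
 *   __be64 tx crq_num[req_tx_queues] (at off_txcomp_subcrqs)
 *   __be64 rx crq_num[req_rx_queues] (at off_rxcomp_subcrqs)
 *   client data TLVs                 (at client_data_offset)
 *
 * Each client data entry is a type/length/name triple, built by
 * vnic_add_client_data() below.
 */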
4727struct vnic_login_client_data {
4728	u8	type;
4729	__be16	len;
4730	char	name[];
4731} __packed;
4732
4733static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4734{
4735	int len;
4736
4737	/* Calculate the amount of buffer space needed for the
4738	 * vnic client data in the login buffer. There are four entries,
4739	 * OS name, LPAR name, device name, and a null last entry.
4740	 */
4741	len = 4 * sizeof(struct vnic_login_client_data);
4742	len += 6; /* "Linux" plus NULL */
4743	len += strlen(utsname()->nodename) + 1;
4744	len += strlen(adapter->netdev->name) + 1;
4745
4746	return len;
4747}
4748
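/* Fill in the three client data TLVs (OS name, LPAR nodename, netdev
 * name). vnic_client_data_len() reserves space for a fourth, all-zero
 * terminating entry; it is never written here and relies on the login
 * buffer having been allocated with kzalloc().
 */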
4749static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4750				 struct vnic_login_client_data *vlcd)
4751{
4752	const char *os_name = "Linux";
4753	int len;
4754
4755	/* Type 1 - LPAR OS */
4756	vlcd->type = 1;
4757	len = strlen(os_name) + 1;
4758	vlcd->len = cpu_to_be16(len);
4759	strscpy(vlcd->name, os_name, len);
4760	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4761
4762	/* Type 2 - LPAR name */
4763	vlcd->type = 2;
4764	len = strlen(utsname()->nodename) + 1;
4765	vlcd->len = cpu_to_be16(len);
4766	strscpy(vlcd->name, utsname()->nodename, len);
4767	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4768
4769	/* Type 3 - device name */
4770	vlcd->type = 3;
4771	len = strlen(adapter->netdev->name) + 1;
4772	vlcd->len = cpu_to_be16(len);
4773	strscpy(vlcd->name, adapter->netdev->name, len);
4774}
4775
4776static int send_login(struct ibmvnic_adapter *adapter)
4777{
4778	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4779	struct ibmvnic_login_buffer *login_buffer;
4780	struct device *dev = &adapter->vdev->dev;
4781	struct vnic_login_client_data *vlcd;
4782	dma_addr_t rsp_buffer_token;
4783	dma_addr_t buffer_token;
4784	size_t rsp_buffer_size;
4785	union ibmvnic_crq crq;
4786	int client_data_len;
4787	size_t buffer_size;
4788	__be64 *tx_list_p;
4789	__be64 *rx_list_p;
4790	int rc;
4791	int i;
4792
4793	if (!adapter->tx_scrq || !adapter->rx_scrq) {
4794		netdev_err(adapter->netdev,
4795			   "RX or TX queues are not allocated, device login failed\n");
4796		return -ENOMEM;
4797	}
4798
4799	release_login_buffer(adapter);
4800	release_login_rsp_buffer(adapter);
4801
4802	client_data_len = vnic_client_data_len(adapter);
4803
4804	buffer_size =
4805	    sizeof(struct ibmvnic_login_buffer) +
4806	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4807	    client_data_len;
4808
4809	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4810	if (!login_buffer)
4811		goto buf_alloc_failed;
4812
4813	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4814				      DMA_TO_DEVICE);
4815	if (dma_mapping_error(dev, buffer_token)) {
4816		dev_err(dev, "Couldn't map login buffer\n");
4817		goto buf_map_failed;
4818	}
4819
4820	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4821			  sizeof(u64) * adapter->req_tx_queues +
4822			  sizeof(u64) * adapter->req_rx_queues +
4823			  sizeof(u64) * adapter->req_rx_queues +
4824			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4825
4826	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4827	if (!login_rsp_buffer)
4828		goto buf_rsp_alloc_failed;
4829
4830	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4831					  rsp_buffer_size, DMA_FROM_DEVICE);
4832	if (dma_mapping_error(dev, rsp_buffer_token)) {
4833		dev_err(dev, "Couldn't map login rsp buffer\n");
4834		goto buf_rsp_map_failed;
4835	}
4836
4837	adapter->login_buf = login_buffer;
4838	adapter->login_buf_token = buffer_token;
4839	adapter->login_buf_sz = buffer_size;
4840	adapter->login_rsp_buf = login_rsp_buffer;
4841	adapter->login_rsp_buf_token = rsp_buffer_token;
4842	adapter->login_rsp_buf_sz = rsp_buffer_size;
4843
4844	login_buffer->len = cpu_to_be32(buffer_size);
4845	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4846	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4847	login_buffer->off_txcomp_subcrqs =
4848	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4849	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4850	login_buffer->off_rxcomp_subcrqs =
4851	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4852			sizeof(u64) * adapter->req_tx_queues);
4853	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4854	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4855
4856	tx_list_p = (__be64 *)((char *)login_buffer +
4857				      sizeof(struct ibmvnic_login_buffer));
4858	rx_list_p = (__be64 *)((char *)login_buffer +
4859				      sizeof(struct ibmvnic_login_buffer) +
4860				      sizeof(u64) * adapter->req_tx_queues);
4861
4862	for (i = 0; i < adapter->req_tx_queues; i++) {
4863		if (adapter->tx_scrq[i]) {
4864			tx_list_p[i] =
4865				cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4866		}
4867	}
4868
4869	for (i = 0; i < adapter->req_rx_queues; i++) {
4870		if (adapter->rx_scrq[i]) {
4871			rx_list_p[i] =
4872				cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4873		}
4874	}
4875
4876	/* Insert vNIC login client data */
4877	vlcd = (struct vnic_login_client_data *)
4878		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4879	login_buffer->client_data_offset =
4880			cpu_to_be32((char *)vlcd - (char *)login_buffer);
4881	login_buffer->client_data_len = cpu_to_be32(client_data_len);
4882
4883	vnic_add_client_data(adapter, vlcd);
4884
4885	netdev_dbg(adapter->netdev, "Login Buffer:\n");
4886	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4887		netdev_dbg(adapter->netdev, "%016lx\n",
4888			   ((unsigned long *)(adapter->login_buf))[i]);
4889	}
4890
4891	memset(&crq, 0, sizeof(crq));
4892	crq.login.first = IBMVNIC_CRQ_CMD;
4893	crq.login.cmd = LOGIN;
4894	crq.login.ioba = cpu_to_be32(buffer_token);
4895	crq.login.len = cpu_to_be32(buffer_size);
4896
4897	adapter->login_pending = true;
4898	rc = ibmvnic_send_crq(adapter, &crq);
4899	if (rc) {
4900		adapter->login_pending = false;
4901		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4902		goto buf_send_failed;
4903	}
4904
4905	return 0;
4906
4907buf_send_failed:
4908	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
4909			 DMA_FROM_DEVICE);
4910buf_rsp_map_failed:
4911	kfree(login_rsp_buffer);
4912	adapter->login_rsp_buf = NULL;
4913buf_rsp_alloc_failed:
4914	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4915buf_map_failed:
4916	kfree(login_buffer);
4917	adapter->login_buf = NULL;
4918buf_alloc_failed:
4919	return -ENOMEM;
4920}
4921
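/* Register a long term DMA buffer with the VNIC server. map_id is the
 * driver-chosen handle used to refer to this buffer in later commands,
 * including the REQUEST_UNMAP below that releases it.
 */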
4922static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4923			    u32 len, u8 map_id)
4924{
4925	union ibmvnic_crq crq;
4926
4927	memset(&crq, 0, sizeof(crq));
4928	crq.request_map.first = IBMVNIC_CRQ_CMD;
4929	crq.request_map.cmd = REQUEST_MAP;
4930	crq.request_map.map_id = map_id;
4931	crq.request_map.ioba = cpu_to_be32(addr);
4932	crq.request_map.len = cpu_to_be32(len);
4933	return ibmvnic_send_crq(adapter, &crq);
4934}
4935
4936static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4937{
4938	union ibmvnic_crq crq;
4939
4940	memset(&crq, 0, sizeof(crq));
4941	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4942	crq.request_unmap.cmd = REQUEST_UNMAP;
4943	crq.request_unmap.map_id = map_id;
4944	return ibmvnic_send_crq(adapter, &crq);
4945}
4946
4947static void send_query_map(struct ibmvnic_adapter *adapter)
4948{
4949	union ibmvnic_crq crq;
4950
4951	memset(&crq, 0, sizeof(crq));
4952	crq.query_map.first = IBMVNIC_CRQ_CMD;
4953	crq.query_map.cmd = QUERY_MAP;
4954	ibmvnic_send_crq(adapter, &crq);
4955}
4956
4957/* Send a series of CRQs requesting various capabilities of the VNIC server */
4958static void send_query_cap(struct ibmvnic_adapter *adapter)
4959{
4960	union ibmvnic_crq crq;
4961	int cap_reqs;
4962
4963	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
4964	 * upfront. When the tasklet receives a response to all of these, it
4965	 * can send out the next protocol message (REQUEST_CAPABILITY).
4966	 */
4967	cap_reqs = 25;
4968
4969	atomic_set(&adapter->running_cap_crqs, cap_reqs);
4970
4971	memset(&crq, 0, sizeof(crq));
4972	crq.query_capability.first = IBMVNIC_CRQ_CMD;
4973	crq.query_capability.cmd = QUERY_CAPABILITY;
4974
4975	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4976	ibmvnic_send_crq(adapter, &crq);
4977	cap_reqs--;
4978
4979	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4980	ibmvnic_send_crq(adapter, &crq);
4981	cap_reqs--;
4982
4983	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4984	ibmvnic_send_crq(adapter, &crq);
4985	cap_reqs--;
4986
4987	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4988	ibmvnic_send_crq(adapter, &crq);
4989	cap_reqs--;
4990
4991	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4992	ibmvnic_send_crq(adapter, &crq);
4993	cap_reqs--;
4994
4995	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4996	ibmvnic_send_crq(adapter, &crq);
4997	cap_reqs--;
4998
4999	crq.query_capability.capability =
5000	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
5001	ibmvnic_send_crq(adapter, &crq);
5002	cap_reqs--;
5003
5004	crq.query_capability.capability =
5005	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
5006	ibmvnic_send_crq(adapter, &crq);
5007	cap_reqs--;
5008
5009	crq.query_capability.capability =
5010	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
5011	ibmvnic_send_crq(adapter, &crq);
5012	cap_reqs--;
5013
5014	crq.query_capability.capability =
5015	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
5016	ibmvnic_send_crq(adapter, &crq);
5017	cap_reqs--;
5018
5019	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
5020	ibmvnic_send_crq(adapter, &crq);
5021	cap_reqs--;
5022
5023	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
5024	ibmvnic_send_crq(adapter, &crq);
5025	cap_reqs--;
5026
5027	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
5028	ibmvnic_send_crq(adapter, &crq);
5029	cap_reqs--;
5030
5031	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
5032	ibmvnic_send_crq(adapter, &crq);
5033	cap_reqs--;
5034
5035	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
5036	ibmvnic_send_crq(adapter, &crq);
5037	cap_reqs--;
5038
5039	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
5040	ibmvnic_send_crq(adapter, &crq);
5041	cap_reqs--;
5042
5043	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
5044	ibmvnic_send_crq(adapter, &crq);
5045	cap_reqs--;
5046
5047	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
5048	ibmvnic_send_crq(adapter, &crq);
5049	cap_reqs--;
5050
5051	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
5052	ibmvnic_send_crq(adapter, &crq);
5053	cap_reqs--;
5054
5055	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
5056	ibmvnic_send_crq(adapter, &crq);
5057	cap_reqs--;
5058
5059	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
5060	ibmvnic_send_crq(adapter, &crq);
5061	cap_reqs--;
5062
5063	crq.query_capability.capability =
5064			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
5065	ibmvnic_send_crq(adapter, &crq);
5066	cap_reqs--;
5067
5068	crq.query_capability.capability =
5069			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
5070	ibmvnic_send_crq(adapter, &crq);
5071	cap_reqs--;
5072
5073	crq.query_capability.capability =
5074			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
5075	ibmvnic_send_crq(adapter, &crq);
5076	cap_reqs--;
5077
5078	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
5079
5080	ibmvnic_send_crq(adapter, &crq);
5081	cap_reqs--;
5082
5083	/* Keep at end to catch any discrepancy between expected and actual
5084	 * CRQs sent.
5085	 */
5086	WARN_ON(cap_reqs != 0);
5087}
5088
5089static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
5090{
5091	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
5092	struct device *dev = &adapter->vdev->dev;
5093	union ibmvnic_crq crq;
5094
5095	adapter->ip_offload_tok =
5096		dma_map_single(dev,
5097			       &adapter->ip_offload_buf,
5098			       buf_sz,
5099			       DMA_FROM_DEVICE);
5100
5101	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
5102		if (!firmware_has_feature(FW_FEATURE_CMO))
5103			dev_err(dev, "Couldn't map offload buffer\n");
5104		return;
5105	}
5106
5107	memset(&crq, 0, sizeof(crq));
5108	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
5109	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
5110	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
5111	crq.query_ip_offload.ioba =
5112	    cpu_to_be32(adapter->ip_offload_tok);
5113
5114	ibmvnic_send_crq(adapter, &crq);
5115}
5116
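/* Tell the server which of the queried offloads to enable, and derive
 * the netdev feature flags from the same answers. Large receive stays
 * disabled for now, and outside of initial probing any change to
 * hw_features is reconciled against the user's wanted_features.
 */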
5117static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
5118{
5119	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
5120	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5121	struct device *dev = &adapter->vdev->dev;
5122	netdev_features_t old_hw_features = 0;
5123	union ibmvnic_crq crq;
5124
5125	adapter->ip_offload_ctrl_tok =
5126		dma_map_single(dev,
5127			       ctrl_buf,
5128			       sizeof(adapter->ip_offload_ctrl),
5129			       DMA_TO_DEVICE);
5130
5131	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
5132		dev_err(dev, "Couldn't map ip offload control buffer\n");
5133		return;
5134	}
5135
5136	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5137	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
5138	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
5139	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
5140	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
5141	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
5142	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
5143	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
5144	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
5145	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
5146
5147	/* large_rx disabled for now, additional features needed */
5148	ctrl_buf->large_rx_ipv4 = 0;
5149	ctrl_buf->large_rx_ipv6 = 0;
5150
5151	if (adapter->state != VNIC_PROBING) {
5152		old_hw_features = adapter->netdev->hw_features;
5153		adapter->netdev->hw_features = 0;
5154	}
5155
5156	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
5157
5158	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
5159		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
5160
5161	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
5162		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
5163
5164	if ((adapter->netdev->features &
5165	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
5166		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
5167
5168	if (buf->large_tx_ipv4)
5169		adapter->netdev->hw_features |= NETIF_F_TSO;
5170	if (buf->large_tx_ipv6)
5171		adapter->netdev->hw_features |= NETIF_F_TSO6;
5172
5173	if (adapter->state == VNIC_PROBING) {
5174		adapter->netdev->features |= adapter->netdev->hw_features;
5175	} else if (old_hw_features != adapter->netdev->hw_features) {
5176		netdev_features_t tmp = 0;
5177
5178		/* disable features no longer supported */
5179		adapter->netdev->features &= adapter->netdev->hw_features;
5180		/* turn on features now supported if previously enabled */
5181		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
5182			adapter->netdev->hw_features;
5183		adapter->netdev->features |=
5184				tmp & adapter->netdev->wanted_features;
5185	}
5186
5187	memset(&crq, 0, sizeof(crq));
5188	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
5189	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
5190	crq.control_ip_offload.len =
5191	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
5192	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
5193	ibmvnic_send_crq(adapter, &crq);
5194}
5195
5196static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
5197				struct ibmvnic_adapter *adapter)
5198{
5199	struct device *dev = &adapter->vdev->dev;
5200
5201	if (crq->get_vpd_size_rsp.rc.code) {
5202		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
5203			crq->get_vpd_size_rsp.rc.code);
5204		complete(&adapter->fw_done);
5205		return;
5206	}
5207
5208	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
5209	complete(&adapter->fw_done);
5210}
5211
5212static void handle_vpd_rsp(union ibmvnic_crq *crq,
5213			   struct ibmvnic_adapter *adapter)
5214{
5215	struct device *dev = &adapter->vdev->dev;
5216	unsigned char *substr = NULL;
5217	u8 fw_level_len = 0;
5218
5219	memset(adapter->fw_version, 0, 32);
5220
5221	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
5222			 DMA_FROM_DEVICE);
5223
5224	if (crq->get_vpd_rsp.rc.code) {
5225		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
5226			crq->get_vpd_rsp.rc.code);
5227		goto complete;
5228	}
5229
5230	/* get the position of the firmware version info
5231	 * located after the ASCII 'RM' substring in the buffer
5232	 */
5233	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
5234	if (!substr) {
5235		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
5236		goto complete;
5237	}
5238
5239	/* get length of firmware level ASCII substring */
5240	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
5241		fw_level_len = *(substr + 2);
5242	} else {
5243		dev_info(dev, "Length of FW substr extends past end of VPD buff\n");
5244		goto complete;
5245	}
5246
5247	/* copy firmware version string from vpd into adapter */
5248	if ((substr + 3 + fw_level_len) <
5249	    (adapter->vpd->buff + adapter->vpd->len)) {
5250		strscpy(adapter->fw_version, substr + 3,
5251			sizeof(adapter->fw_version));
5252	} else {
5253		dev_info(dev, "FW substr extends past end of VPD buff\n");
5254	}
5255
5256complete:
5257	if (adapter->fw_version[0] == '\0')
5258		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
5259	complete(&adapter->fw_done);
5260}
5261
5262static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
5263{
5264	struct device *dev = &adapter->vdev->dev;
5265	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
5266	int i;
5267
5268	dma_unmap_single(dev, adapter->ip_offload_tok,
5269			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
5270
5271	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
5272	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
5273		netdev_dbg(adapter->netdev, "%016lx\n",
5274			   ((unsigned long *)(buf))[i]);
5275
5276	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
5277	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
5278	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
5279		   buf->tcp_ipv4_chksum);
5280	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
5281		   buf->tcp_ipv6_chksum);
5282	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
5283		   buf->udp_ipv4_chksum);
5284	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
5285		   buf->udp_ipv6_chksum);
5286	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
5287		   buf->large_tx_ipv4);
5288	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
5289		   buf->large_tx_ipv6);
5290	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
5291		   buf->large_rx_ipv4);
5292	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
5293		   buf->large_rx_ipv6);
5294	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
5295		   buf->max_ipv4_header_size);
5296	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
5297		   buf->max_ipv6_header_size);
5298	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
5299		   buf->max_tcp_header_size);
5300	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
5301		   buf->max_udp_header_size);
5302	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
5303		   buf->max_large_tx_size);
5304	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
5305		   buf->max_large_rx_size);
5306	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
5307		   buf->ipv6_extension_header);
5308	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
5309		   buf->tcp_pseudosum_req);
5310	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
5311		   buf->num_ipv6_ext_headers);
5312	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
5313		   buf->off_ipv6_ext_headers);
5314
5315	send_control_ip_offload(adapter);
5316}
5317
5318static const char *ibmvnic_fw_err_cause(u16 cause)
5319{
5320	switch (cause) {
5321	case ADAPTER_PROBLEM:
5322		return "adapter problem";
5323	case BUS_PROBLEM:
5324		return "bus problem";
5325	case FW_PROBLEM:
5326		return "firmware problem";
5327	case DD_PROBLEM:
5328		return "device driver problem";
5329	case EEH_RECOVERY:
5330		return "EEH recovery";
5331	case FW_UPDATED:
5332		return "firmware updated";
5333	case LOW_MEMORY:
5334		return "low Memory";
5335	default:
5336		return "unknown";
5337	}
5338}
5339
5340static void handle_error_indication(union ibmvnic_crq *crq,
5341				    struct ibmvnic_adapter *adapter)
5342{
5343	struct device *dev = &adapter->vdev->dev;
5344	u16 cause;
5345
5346	cause = be16_to_cpu(crq->error_indication.error_cause);
5347
5348	dev_warn_ratelimited(dev,
5349			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
5350			     crq->error_indication.flags
5351				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
5352			     ibmvnic_fw_err_cause(cause));
5353
5354	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
5355		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5356	else
5357		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
5358}
5359
5360static int handle_change_mac_rsp(union ibmvnic_crq *crq,
5361				 struct ibmvnic_adapter *adapter)
5362{
5363	struct net_device *netdev = adapter->netdev;
5364	struct device *dev = &adapter->vdev->dev;
5365	long rc;
5366
5367	rc = crq->change_mac_addr_rsp.rc.code;
5368	if (rc) {
5369		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
5370		goto out;
5371	}
5372	/* crq->change_mac_addr.mac_addr is the requested one
5373	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
5374	 */
5375	eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
5376	ether_addr_copy(adapter->mac_addr,
5377			&crq->change_mac_addr_rsp.mac_addr[0]);
5378out:
5379	complete(&adapter->fw_done);
5380	return rc;
5381}
5382
5383static void handle_request_cap_rsp(union ibmvnic_crq *crq,
5384				   struct ibmvnic_adapter *adapter)
5385{
5386	struct device *dev = &adapter->vdev->dev;
5387	u64 *req_value;
5388	char *name;
5389
5390	atomic_dec(&adapter->running_cap_crqs);
5391	netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
5392		   atomic_read(&adapter->running_cap_crqs));
5393	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
5394	case REQ_TX_QUEUES:
5395		req_value = &adapter->req_tx_queues;
5396		name = "tx";
5397		break;
5398	case REQ_RX_QUEUES:
5399		req_value = &adapter->req_rx_queues;
5400		name = "rx";
5401		break;
5402	case REQ_RX_ADD_QUEUES:
5403		req_value = &adapter->req_rx_add_queues;
5404		name = "rx_add";
5405		break;
5406	case REQ_TX_ENTRIES_PER_SUBCRQ:
5407		req_value = &adapter->req_tx_entries_per_subcrq;
5408		name = "tx_entries_per_subcrq";
5409		break;
5410	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
5411		req_value = &adapter->req_rx_add_entries_per_subcrq;
5412		name = "rx_add_entries_per_subcrq";
5413		break;
5414	case REQ_MTU:
5415		req_value = &adapter->req_mtu;
5416		name = "mtu";
5417		break;
5418	case PROMISC_REQUESTED:
5419		req_value = &adapter->promisc;
5420		name = "promisc";
5421		break;
5422	default:
5423		dev_err(dev, "Got invalid cap request rsp %d\n",
5424			crq->request_capability.capability);
5425		return;
5426	}
5427
5428	switch (crq->request_capability_rsp.rc.code) {
5429	case SUCCESS:
5430		break;
5431	case PARTIALSUCCESS:
5432		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
5433			 *req_value,
5434			 (long)be64_to_cpu(crq->request_capability_rsp.number),
5435			 name);
5436
5437		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
5438		    REQ_MTU) {
5439			pr_err("mtu of %llu is not supported. Reverting.\n",
5440			       *req_value);
5441			*req_value = adapter->fallback.mtu;
5442		} else {
5443			*req_value =
5444				be64_to_cpu(crq->request_capability_rsp.number);
5445		}
5446
5447		send_request_cap(adapter, 1);
5448		return;
5449	default:
5450		dev_err(dev, "Error %d in request cap rsp\n",
5451			crq->request_capability_rsp.rc.code);
5452		return;
5453	}
5454
5455	/* Done receiving requested capabilities, query IP offload support */
5456	if (atomic_read(&adapter->running_cap_crqs) == 0)
5457		send_query_ip_offload(adapter);
5458}
5459
5460static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
5461			    struct ibmvnic_adapter *adapter)
5462{
5463	struct device *dev = &adapter->vdev->dev;
5464	struct net_device *netdev = adapter->netdev;
5465	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
5466	struct ibmvnic_login_buffer *login = adapter->login_buf;
5467	u64 *tx_handle_array;
5468	u64 *rx_handle_array;
5469	int num_tx_pools;
5470	int num_rx_pools;
5471	u64 *size_array;
5472	u32 rsp_len;
5473	int i;
5474
5475	/* CHECK: Test/set of login_pending does not need to be atomic
5476	 * because only ibmvnic_tasklet tests/clears this.
5477	 */
5478	if (!adapter->login_pending) {
5479		netdev_warn(netdev, "Ignoring unexpected login response\n");
5480		return 0;
5481	}
5482	adapter->login_pending = false;
5483
5484	/* If the number of queues requested can't be allocated by the
5485	 * server, the login response will return with code 1. We will need
5486	 * to resend the login buffer with fewer queues requested.
5487	 */
5488	if (login_rsp_crq->generic.rc.code) {
5489		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
5490		complete(&adapter->init_done);
5491		return 0;
5492	}
5493
5494	if (adapter->failover_pending) {
5495		adapter->init_done_rc = -EAGAIN;
5496		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
5497		complete(&adapter->init_done);
5498		/* login response buffer will be released on reset */
5499		return 0;
5500	}
5501
5502	netdev->mtu = adapter->req_mtu - ETH_HLEN;
5503
5504	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
5505	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
5506		netdev_dbg(adapter->netdev, "%016lx\n",
5507			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
5508	}
5509
5510	/* Sanity checks */
5511	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
5512	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
5513	     adapter->req_rx_add_queues !=
5514	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
5515		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
5516		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5517		return -EIO;
5518	}
5519
5520	rsp_len = be32_to_cpu(login_rsp->len);
5521	if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
5522	    rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
5523	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
5524	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
5525	    rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
5526		/* This can happen if a login request times out and there are
5527		 * 2 outstanding login requests sent, the LOGIN_RSP crq
5528		 * could have been for the older login request. So we are
5529		 * parsing the newer response buffer which may be incomplete
5530		 */
5531		dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
5532		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5533		return -EIO;
5534	}
5535
5536	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5537		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
5538	/* variable buffer sizes are not supported, so just read the
5539	 * first entry.
5540	 */
5541	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
5542
5543	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
5544	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5545
5546	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5547				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
5548	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
5549				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
5550
5551	for (i = 0; i < num_tx_pools; i++)
5552		adapter->tx_scrq[i]->handle = tx_handle_array[i];
5553
5554	for (i = 0; i < num_rx_pools; i++)
5555		adapter->rx_scrq[i]->handle = rx_handle_array[i];
5556
5557	adapter->num_active_tx_scrqs = num_tx_pools;
5558	adapter->num_active_rx_scrqs = num_rx_pools;
5559	release_login_rsp_buffer(adapter);
5560	release_login_buffer(adapter);
5561	complete(&adapter->init_done);
5562
5563	return 0;
5564}
5565
5566static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
5567				     struct ibmvnic_adapter *adapter)
5568{
5569	struct device *dev = &adapter->vdev->dev;
5570	long rc;
5571
5572	rc = crq->request_unmap_rsp.rc.code;
5573	if (rc)
5574		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
5575}
5576
5577static void handle_query_map_rsp(union ibmvnic_crq *crq,
5578				 struct ibmvnic_adapter *adapter)
5579{
5580	struct net_device *netdev = adapter->netdev;
5581	struct device *dev = &adapter->vdev->dev;
5582	long rc;
5583
5584	rc = crq->query_map_rsp.rc.code;
5585	if (rc) {
5586		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
5587		return;
5588	}
5589	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
5590		   crq->query_map_rsp.page_size,
5591		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
5592		   __be32_to_cpu(crq->query_map_rsp.free_pages));
5593}
5594
5595static void handle_query_cap_rsp(union ibmvnic_crq *crq,
5596				 struct ibmvnic_adapter *adapter)
5597{
5598	struct net_device *netdev = adapter->netdev;
5599	struct device *dev = &adapter->vdev->dev;
5600	long rc;
5601
5602	atomic_dec(&adapter->running_cap_crqs);
5603	netdev_dbg(netdev, "Outstanding queries: %d\n",
5604		   atomic_read(&adapter->running_cap_crqs));
5605	rc = crq->query_capability.rc.code;
5606	if (rc) {
5607		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
5608		goto out;
5609	}
5610
5611	switch (be16_to_cpu(crq->query_capability.capability)) {
5612	case MIN_TX_QUEUES:
5613		adapter->min_tx_queues =
5614		    be64_to_cpu(crq->query_capability.number);
5615		netdev_dbg(netdev, "min_tx_queues = %lld\n",
5616			   adapter->min_tx_queues);
5617		break;
5618	case MIN_RX_QUEUES:
5619		adapter->min_rx_queues =
5620		    be64_to_cpu(crq->query_capability.number);
5621		netdev_dbg(netdev, "min_rx_queues = %lld\n",
5622			   adapter->min_rx_queues);
5623		break;
5624	case MIN_RX_ADD_QUEUES:
5625		adapter->min_rx_add_queues =
5626		    be64_to_cpu(crq->query_capability.number);
5627		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
5628			   adapter->min_rx_add_queues);
5629		break;
5630	case MAX_TX_QUEUES:
5631		adapter->max_tx_queues =
5632		    be64_to_cpu(crq->query_capability.number);
5633		netdev_dbg(netdev, "max_tx_queues = %lld\n",
5634			   adapter->max_tx_queues);
5635		break;
5636	case MAX_RX_QUEUES:
5637		adapter->max_rx_queues =
5638		    be64_to_cpu(crq->query_capability.number);
5639		netdev_dbg(netdev, "max_rx_queues = %lld\n",
5640			   adapter->max_rx_queues);
5641		break;
5642	case MAX_RX_ADD_QUEUES:
5643		adapter->max_rx_add_queues =
5644		    be64_to_cpu(crq->query_capability.number);
5645		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
5646			   adapter->max_rx_add_queues);
5647		break;
5648	case MIN_TX_ENTRIES_PER_SUBCRQ:
5649		adapter->min_tx_entries_per_subcrq =
5650		    be64_to_cpu(crq->query_capability.number);
5651		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
5652			   adapter->min_tx_entries_per_subcrq);
5653		break;
5654	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
5655		adapter->min_rx_add_entries_per_subcrq =
5656		    be64_to_cpu(crq->query_capability.number);
5657		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
5658			   adapter->min_rx_add_entries_per_subcrq);
5659		break;
5660	case MAX_TX_ENTRIES_PER_SUBCRQ:
5661		adapter->max_tx_entries_per_subcrq =
5662		    be64_to_cpu(crq->query_capability.number);
5663		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
5664			   adapter->max_tx_entries_per_subcrq);
5665		break;
5666	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
5667		adapter->max_rx_add_entries_per_subcrq =
5668		    be64_to_cpu(crq->query_capability.number);
5669		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
5670			   adapter->max_rx_add_entries_per_subcrq);
5671		break;
5672	case TCP_IP_OFFLOAD:
5673		adapter->tcp_ip_offload =
5674		    be64_to_cpu(crq->query_capability.number);
5675		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
5676			   adapter->tcp_ip_offload);
5677		break;
5678	case PROMISC_SUPPORTED:
5679		adapter->promisc_supported =
5680		    be64_to_cpu(crq->query_capability.number);
5681		netdev_dbg(netdev, "promisc_supported = %lld\n",
5682			   adapter->promisc_supported);
5683		break;
5684	case MIN_MTU:
5685		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
5686		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
5687		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
5688		break;
5689	case MAX_MTU:
5690		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
5691		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
5692		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
5693		break;
5694	case MAX_MULTICAST_FILTERS:
5695		adapter->max_multicast_filters =
5696		    be64_to_cpu(crq->query_capability.number);
5697		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
5698			   adapter->max_multicast_filters);
5699		break;
5700	case VLAN_HEADER_INSERTION:
5701		adapter->vlan_header_insertion =
5702		    be64_to_cpu(crq->query_capability.number);
5703		if (adapter->vlan_header_insertion)
5704			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
5705		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
5706			   adapter->vlan_header_insertion);
5707		break;
5708	case RX_VLAN_HEADER_INSERTION:
5709		adapter->rx_vlan_header_insertion =
5710		    be64_to_cpu(crq->query_capability.number);
5711		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
5712			   adapter->rx_vlan_header_insertion);
5713		break;
5714	case MAX_TX_SG_ENTRIES:
5715		adapter->max_tx_sg_entries =
5716		    be64_to_cpu(crq->query_capability.number);
5717		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
5718			   adapter->max_tx_sg_entries);
5719		break;
5720	case RX_SG_SUPPORTED:
5721		adapter->rx_sg_supported =
5722		    be64_to_cpu(crq->query_capability.number);
5723		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
5724			   adapter->rx_sg_supported);
5725		break;
5726	case OPT_TX_COMP_SUB_QUEUES:
5727		adapter->opt_tx_comp_sub_queues =
5728		    be64_to_cpu(crq->query_capability.number);
5729		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
5730			   adapter->opt_tx_comp_sub_queues);
5731		break;
5732	case OPT_RX_COMP_QUEUES:
5733		adapter->opt_rx_comp_queues =
5734		    be64_to_cpu(crq->query_capability.number);
5735		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
5736			   adapter->opt_rx_comp_queues);
5737		break;
5738	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
5739		adapter->opt_rx_bufadd_q_per_rx_comp_q =
5740		    be64_to_cpu(crq->query_capability.number);
5741		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
5742			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
5743		break;
5744	case OPT_TX_ENTRIES_PER_SUBCRQ:
5745		adapter->opt_tx_entries_per_subcrq =
5746		    be64_to_cpu(crq->query_capability.number);
5747		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
5748			   adapter->opt_tx_entries_per_subcrq);
5749		break;
5750	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
5751		adapter->opt_rxba_entries_per_subcrq =
5752		    be64_to_cpu(crq->query_capability.number);
5753		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
5754			   adapter->opt_rxba_entries_per_subcrq);
5755		break;
5756	case TX_RX_DESC_REQ:
5757		adapter->tx_rx_desc_req = crq->query_capability.number;
5758		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
5759			   adapter->tx_rx_desc_req);
5760		break;
5761
5762	default:
5763		netdev_err(netdev, "Got invalid cap rsp %d\n",
5764			   be16_to_cpu(crq->query_capability.capability));
5765	}
5766
5767out:
5768	if (atomic_read(&adapter->running_cap_crqs) == 0)
5769		send_request_cap(adapter, 0);
5770}
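
/*
 * Illustrative sketch only (not a new code path; the names reuse helpers
 * already in this file): each capability above is queried with its own
 * CRQ descriptor, and running_cap_crqs counts the outstanding
 * request/response pairs. A single query looks roughly like:
 *
 *	union ibmvnic_crq crq;
 *
 *	memset(&crq, 0, sizeof(crq));
 *	crq.query_capability.first = IBMVNIC_CRQ_CMD;
 *	crq.query_capability.cmd = QUERY_CAPABILITY;
 *	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
 *	atomic_inc(&adapter->running_cap_crqs);
 *	ibmvnic_send_crq(adapter, &crq);
 *
 * The response is decoded above with be16_to_cpu()/be64_to_cpu() since
 * CRQ payloads are big-endian on the wire; once the counter drops to
 * zero, send_request_cap() starts the request phase of the negotiation.
 */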
5771
5772static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
5773{
5774	union ibmvnic_crq crq;
5775	int rc;
5776
5777	memset(&crq, 0, sizeof(crq));
5778	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
5779	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
5780
5781	mutex_lock(&adapter->fw_lock);
5782	adapter->fw_done_rc = 0;
5783	reinit_completion(&adapter->fw_done);
5784
5785	rc = ibmvnic_send_crq(adapter, &crq);
5786	if (rc) {
5787		mutex_unlock(&adapter->fw_lock);
5788		return rc;
5789	}
5790
5791	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
5792	if (rc) {
5793		mutex_unlock(&adapter->fw_lock);
5794		return rc;
5795	}
5796
5797	mutex_unlock(&adapter->fw_lock);
5798	return adapter->fw_done_rc ? -EIO : 0;
5799}
5800
5801static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
5802				       struct ibmvnic_adapter *adapter)
5803{
5804	struct net_device *netdev = adapter->netdev;
5805	int rc;
5806	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
5807
5808	rc = crq->query_phys_parms_rsp.rc.code;
5809	if (rc) {
5810		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
5811		return rc;
5812	}
5813	switch (rspeed) {
5814	case IBMVNIC_10MBPS:
5815		adapter->speed = SPEED_10;
5816		break;
5817	case IBMVNIC_100MBPS:
5818		adapter->speed = SPEED_100;
5819		break;
5820	case IBMVNIC_1GBPS:
5821		adapter->speed = SPEED_1000;
5822		break;
5823	case IBMVNIC_10GBPS:
5824		adapter->speed = SPEED_10000;
5825		break;
5826	case IBMVNIC_25GBPS:
5827		adapter->speed = SPEED_25000;
5828		break;
5829	case IBMVNIC_40GBPS:
5830		adapter->speed = SPEED_40000;
5831		break;
5832	case IBMVNIC_50GBPS:
5833		adapter->speed = SPEED_50000;
5834		break;
5835	case IBMVNIC_100GBPS:
5836		adapter->speed = SPEED_100000;
5837		break;
5838	case IBMVNIC_200GBPS:
5839		adapter->speed = SPEED_200000;
5840		break;
5841	default:
5842		if (netif_carrier_ok(netdev))
5843			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5844		adapter->speed = SPEED_UNKNOWN;
5845	}
5846	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5847		adapter->duplex = DUPLEX_FULL;
5848	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5849		adapter->duplex = DUPLEX_HALF;
5850	else
5851		adapter->duplex = DUPLEX_UNKNOWN;
5852
5853	return rc;
5854}
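
/*
 * A hedged sketch of how the cached speed/duplex are consumed, assuming
 * an ethtool get_link_ksettings handler along the lines of the one
 * earlier in this driver:
 *
 *	if (send_query_phys_parms(adapter)) {
 *		adapter->speed = SPEED_UNKNOWN;
 *		adapter->duplex = DUPLEX_UNKNOWN;
 *	}
 *	cmd->base.speed = adapter->speed;
 *	cmd->base.duplex = adapter->duplex;
 *
 * i.e. a failed query degrades to "unknown" rather than reporting a
 * stale link rate.
 */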
5855
5856static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5857			       struct ibmvnic_adapter *adapter)
5858{
5859	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5860	struct net_device *netdev = adapter->netdev;
5861	struct device *dev = &adapter->vdev->dev;
5862	u64 *u64_crq = (u64 *)crq;
5863	long rc;
5864
5865	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5866		   (unsigned long)cpu_to_be64(u64_crq[0]),
5867		   (unsigned long)cpu_to_be64(u64_crq[1]));
5868	switch (gen_crq->first) {
5869	case IBMVNIC_CRQ_INIT_RSP:
5870		switch (gen_crq->cmd) {
5871		case IBMVNIC_CRQ_INIT:
5872			dev_info(dev, "Partner initialized\n");
5873			adapter->from_passive_init = true;
5874			/* Discard any stale login responses from the previous reset.
5875			 * CHECK: should we clear this even on INIT_COMPLETE?
5876			 */
5877			adapter->login_pending = false;
5878
5879			if (adapter->state == VNIC_DOWN)
5880				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
5881			else
5882				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5883
5884			if (rc && rc != -EBUSY) {
5885				/* We were unable to schedule the failover
5886				 * reset either because the adapter was still
5887				 * probing (e.g., during kexec) or we could not
5888				 * allocate memory. Clear the failover_pending
5889				 * flag since no one else will. We ignore
5890				 * EBUSY because it means either FAILOVER reset
5891				 * is already scheduled or the adapter is
5892				 * being removed.
5893				 */
5894				netdev_err(netdev,
5895					   "Error %ld scheduling failover reset\n",
5896					   rc);
5897				adapter->failover_pending = false;
5898			}
5899
5900			if (!completion_done(&adapter->init_done)) {
5901				if (!adapter->init_done_rc)
5902					adapter->init_done_rc = -EAGAIN;
5903				complete(&adapter->init_done);
5904			}
5905
5906			break;
5907		case IBMVNIC_CRQ_INIT_COMPLETE:
5908			dev_info(dev, "Partner initialization complete\n");
5909			adapter->crq.active = true;
5910			send_version_xchg(adapter);
5911			break;
5912		default:
5913			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5914		}
5915		return;
5916	case IBMVNIC_CRQ_XPORT_EVENT:
5917		netif_carrier_off(netdev);
5918		adapter->crq.active = false;
5919		/* terminate any thread waiting for a response
5920		 * from the device
5921		 */
5922		if (!completion_done(&adapter->fw_done)) {
5923			adapter->fw_done_rc = -EIO;
5924			complete(&adapter->fw_done);
5925		}
5926
5927		/* if we got here during crq-init, retry crq-init */
5928		if (!completion_done(&adapter->init_done)) {
5929			adapter->init_done_rc = -EAGAIN;
5930			complete(&adapter->init_done);
5931		}
5932
5933		if (!completion_done(&adapter->stats_done))
5934			complete(&adapter->stats_done);
5935		if (test_bit(0, &adapter->resetting))
5936			adapter->force_reset_recovery = true;
5937		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5938			dev_info(dev, "Migrated, re-enabling adapter\n");
5939			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5940		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5941			dev_info(dev, "Backing device failover detected\n");
5942			adapter->failover_pending = true;
5943		} else {
5944			/* The adapter lost the connection */
5945			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
5946				gen_crq->cmd);
5947			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5948		}
5949		return;
5950	case IBMVNIC_CRQ_CMD_RSP:
5951		break;
5952	default:
5953		dev_err(dev, "Got an invalid msg type 0x%02x\n",
5954			gen_crq->first);
5955		return;
5956	}
5957
5958	switch (gen_crq->cmd) {
5959	case VERSION_EXCHANGE_RSP:
5960		rc = crq->version_exchange_rsp.rc.code;
5961		if (rc) {
5962			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
5963			break;
5964		}
5965		ibmvnic_version =
5966			    be16_to_cpu(crq->version_exchange_rsp.version);
5967		dev_info(dev, "Partner protocol version is %d\n",
5968			 ibmvnic_version);
5969		send_query_cap(adapter);
5970		break;
5971	case QUERY_CAPABILITY_RSP:
5972		handle_query_cap_rsp(crq, adapter);
5973		break;
5974	case QUERY_MAP_RSP:
5975		handle_query_map_rsp(crq, adapter);
5976		break;
5977	case REQUEST_MAP_RSP:
5978		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5979		complete(&adapter->fw_done);
5980		break;
5981	case REQUEST_UNMAP_RSP:
5982		handle_request_unmap_rsp(crq, adapter);
5983		break;
5984	case REQUEST_CAPABILITY_RSP:
5985		handle_request_cap_rsp(crq, adapter);
5986		break;
5987	case LOGIN_RSP:
5988		netdev_dbg(netdev, "Got Login Response\n");
5989		handle_login_rsp(crq, adapter);
5990		break;
5991	case LOGICAL_LINK_STATE_RSP:
5992		netdev_dbg(netdev,
5993			   "Got Logical Link State Response, state: %d rc: %d\n",
5994			   crq->logical_link_state_rsp.link_state,
5995			   crq->logical_link_state_rsp.rc.code);
5996		adapter->logical_link_state =
5997		    crq->logical_link_state_rsp.link_state;
5998		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5999		complete(&adapter->init_done);
6000		break;
6001	case LINK_STATE_INDICATION:
6002		netdev_dbg(netdev, "Got Logical Link State Indication\n");
6003		adapter->phys_link_state =
6004		    crq->link_state_indication.phys_link_state;
6005		adapter->logical_link_state =
6006		    crq->link_state_indication.logical_link_state;
6007		if (adapter->phys_link_state && adapter->logical_link_state)
6008			netif_carrier_on(netdev);
6009		else
6010			netif_carrier_off(netdev);
6011		break;
6012	case CHANGE_MAC_ADDR_RSP:
6013		netdev_dbg(netdev, "Got MAC address change Response\n");
6014		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
6015		break;
6016	case ERROR_INDICATION:
6017		netdev_dbg(netdev, "Got Error Indication\n");
6018		handle_error_indication(crq, adapter);
6019		break;
6020	case REQUEST_STATISTICS_RSP:
6021		netdev_dbg(netdev, "Got Statistics Response\n");
6022		complete(&adapter->stats_done);
6023		break;
6024	case QUERY_IP_OFFLOAD_RSP:
6025		netdev_dbg(netdev, "Got Query IP offload Response\n");
6026		handle_query_ip_offload_rsp(adapter);
6027		break;
6028	case MULTICAST_CTRL_RSP:
6029		netdev_dbg(netdev, "Got multicast control Response\n");
6030		break;
6031	case CONTROL_IP_OFFLOAD_RSP:
6032		netdev_dbg(netdev, "Got Control IP offload Response\n");
6033		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
6034				 sizeof(adapter->ip_offload_ctrl),
6035				 DMA_TO_DEVICE);
6036		complete(&adapter->init_done);
6037		break;
6038	case COLLECT_FW_TRACE_RSP:
6039		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
6040		complete(&adapter->fw_done);
6041		break;
6042	case GET_VPD_SIZE_RSP:
6043		handle_vpd_size_rsp(crq, adapter);
6044		break;
6045	case GET_VPD_RSP:
6046		handle_vpd_rsp(crq, adapter);
6047		break;
6048	case QUERY_PHYS_PARMS_RSP:
6049		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
6050		complete(&adapter->fw_done);
6051		break;
6052	default:
6053		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
6054			   gen_crq->cmd);
6055	}
6056}
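
/*
 * Dispatch summary for ibmvnic_handle_crq() above: generic.first selects
 * the message class (initialization handshake, transport event, or
 * command response) and generic.cmd the specific command or event within
 * that class; all multi-byte payload fields arrive big-endian and are
 * converted with be16_to_cpu()/be32_to_cpu()/be64_to_cpu() at the point
 * of use.
 */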
6057
6058static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
6059{
6060	struct ibmvnic_adapter *adapter = instance;
6061
6062	tasklet_schedule(&adapter->tasklet);
6063	return IRQ_HANDLED;
6064}
6065
6066static void ibmvnic_tasklet(struct tasklet_struct *t)
6067{
6068	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
6069	struct ibmvnic_crq_queue *queue = &adapter->crq;
6070	union ibmvnic_crq *crq;
6071	unsigned long flags;
6072
6073	spin_lock_irqsave(&queue->lock, flags);
6074
6075	/* Pull all the valid messages off the CRQ */
6076	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
6077		/* This barrier makes sure ibmvnic_next_crq()'s
6078		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
6079		 * before ibmvnic_handle_crq()'s
6080		 * switch(gen_crq->first) and switch(gen_crq->cmd).
6081		 */
6082		dma_rmb();
6083		ibmvnic_handle_crq(crq, adapter);
6084		crq->generic.first = 0;
6085	}
6086
6087	spin_unlock_irqrestore(&queue->lock, flags);
6088}
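
/*
 * The ordering contract above, in miniature: the device publishes a CRQ
 * entry payload-first and marks generic.first valid last, so the reader
 * must order its loads the other way around. A sketch of what
 * ibmvnic_next_crq() plus the loop above amount to per slot:
 *
 *	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
 *		dma_rmb();			// payload loads after the flag
 *		ibmvnic_handle_crq(crq, adapter);
 *		crq->generic.first = 0;		// hand the slot back
 *	}
 */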
6089
6090static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
6091{
6092	struct vio_dev *vdev = adapter->vdev;
6093	int rc;
6094
6095	do {
6096		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
6097	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
6098
6099	if (rc)
6100		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
6101
6102	return rc;
6103}
6104
6105static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
6106{
6107	struct ibmvnic_crq_queue *crq = &adapter->crq;
6108	struct device *dev = &adapter->vdev->dev;
6109	struct vio_dev *vdev = adapter->vdev;
6110	int rc;
6111
6112	/* Close the CRQ */
6113	do {
6114		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6115	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6116
6117	/* Clean out the queue */
6118	if (!crq->msgs)
6119		return -EINVAL;
6120
6121	memset(crq->msgs, 0, PAGE_SIZE);
6122	crq->cur = 0;
6123	crq->active = false;
6124
6125	/* And re-open it again */
6126	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6127				crq->msg_token, PAGE_SIZE);
6128
6129	if (rc == H_CLOSED)
6130		/* Adapter is good, but other end is not ready */
6131		dev_warn(dev, "Partner adapter not ready\n");
6132	else if (rc != 0)
6133		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
6134
6135	return rc;
6136}
6137
6138static void release_crq_queue(struct ibmvnic_adapter *adapter)
6139{
6140	struct ibmvnic_crq_queue *crq = &adapter->crq;
6141	struct vio_dev *vdev = adapter->vdev;
6142	long rc;
6143
6144	if (!crq->msgs)
6145		return;
6146
6147	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
6148	free_irq(vdev->irq, adapter);
6149	tasklet_kill(&adapter->tasklet);
6150	do {
6151		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6152	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6153
6154	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
6155			 DMA_BIDIRECTIONAL);
6156	free_page((unsigned long)crq->msgs);
6157	crq->msgs = NULL;
6158	crq->active = false;
6159}
6160
6161static int init_crq_queue(struct ibmvnic_adapter *adapter)
6162{
6163	struct ibmvnic_crq_queue *crq = &adapter->crq;
6164	struct device *dev = &adapter->vdev->dev;
6165	struct vio_dev *vdev = adapter->vdev;
6166	int rc, retrc = -ENOMEM;
6167
6168	if (crq->msgs)
6169		return 0;
6170
6171	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
6172	/* Should we allocate more than one page? */
6173
6174	if (!crq->msgs)
6175		return -ENOMEM;
6176
6177	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
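	/* Worked example, assuming 4 KiB pages: union ibmvnic_crq is two
	 * u64s (16 bytes), so crq->size = 4096 / 16 = 256 entries.
	 */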
6178	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
6179					DMA_BIDIRECTIONAL);
6180	if (dma_mapping_error(dev, crq->msg_token))
6181		goto map_failed;
6182
6183	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
6184				crq->msg_token, PAGE_SIZE);
6185
6186	if (rc == H_RESOURCE)
6187		/* maybe we are kexecing and the resource is busy; try a reset */
6188		rc = ibmvnic_reset_crq(adapter);
6189	retrc = rc;
6190
6191	if (rc == H_CLOSED) {
6192		dev_warn(dev, "Partner adapter not ready\n");
6193	} else if (rc) {
6194		dev_warn(dev, "Error %d opening adapter\n", rc);
6195		goto reg_crq_failed;
6196	}
6197
6198	retrc = 0;
6199
6200	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
6201
6202	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
6203	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
6204		 adapter->vdev->unit_address);
6205	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
6206	if (rc) {
6207		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
6208			vdev->irq, rc);
6209		goto req_irq_failed;
6210	}
6211
6212	rc = vio_enable_interrupts(vdev);
6213	if (rc) {
6214		dev_err(dev, "Error %d enabling interrupts\n", rc);
6215		goto req_irq_failed;
6216	}
6217
6218	crq->cur = 0;
6219	spin_lock_init(&crq->lock);
6220
6221	/* process any CRQs that were queued before we enabled interrupts */
6222	tasklet_schedule(&adapter->tasklet);
6223
6224	return retrc;
6225
6226req_irq_failed:
6227	tasklet_kill(&adapter->tasklet);
6228	do {
6229		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
6230	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6231reg_crq_failed:
6232	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
6233map_failed:
6234	free_page((unsigned long)crq->msgs);
6235	crq->msgs = NULL;
6236	return retrc;
6237}
6238
6239static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
6240{
6241	struct device *dev = &adapter->vdev->dev;
6242	unsigned long timeout = msecs_to_jiffies(20000);
6243	u64 old_num_rx_queues = adapter->req_rx_queues;
6244	u64 old_num_tx_queues = adapter->req_tx_queues;
6245	int rc;
6246
6247	adapter->from_passive_init = false;
6248
6249	rc = ibmvnic_send_crq_init(adapter);
6250	if (rc) {
6251		dev_err(dev, "Send crq init failed with error %d\n", rc);
6252		return rc;
6253	}
6254
6255	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
6256		dev_err(dev, "Initialization sequence timed out\n");
6257		return -ETIMEDOUT;
6258	}
6259
6260	if (adapter->init_done_rc) {
6261		release_crq_queue(adapter);
6262		dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
6263		return adapter->init_done_rc;
6264	}
6265
6266	if (adapter->from_passive_init) {
6267		adapter->state = VNIC_OPEN;
6268		adapter->from_passive_init = false;
6269		dev_err(dev, "CRQ-init failed, passive-init\n");
6270		return -EINVAL;
6271	}
6272
6273	if (reset &&
6274	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
6275	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
6276		if (adapter->req_rx_queues != old_num_rx_queues ||
6277		    adapter->req_tx_queues != old_num_tx_queues) {
6278			release_sub_crqs(adapter, 0);
6279			rc = init_sub_crqs(adapter);
6280		} else {
6281			/* no need to reinitialize completely, but we do
6282			 * need to clean up transmits that were in flight
6283			 * when we processed the reset.  Failure to do so
6284			 * will confound the upper layer, usually TCP, by
6285			 * creating the illusion of transmits that are
6286			 * awaiting completion.
6287			 */
6288			clean_tx_pools(adapter);
6289
6290			rc = reset_sub_crq_queues(adapter);
6291		}
6292	} else {
6293		rc = init_sub_crqs(adapter);
6294	}
6295
6296	if (rc) {
6297		dev_err(dev, "Initialization of sub crqs failed\n");
6298		release_crq_queue(adapter);
6299		return rc;
6300	}
6301
6302	rc = init_sub_crq_irqs(adapter);
6303	if (rc) {
6304		dev_err(dev, "Failed to initialize sub crq irqs\n");
6305		release_crq_queue(adapter);
6306	}
6307
6308	return rc;
6309}
6310
6311static struct device_attribute dev_attr_failover;
6312
6313static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
6314{
6315	struct ibmvnic_adapter *adapter;
6316	struct net_device *netdev;
6317	unsigned char *mac_addr_p;
6318	unsigned long flags;
6319	bool init_success;
6320	int rc;
6321
6322	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
6323		dev->unit_address);
6324
6325	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
6326							VETH_MAC_ADDR, NULL);
6327	if (!mac_addr_p) {
6328		dev_err(&dev->dev,
6329			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
6330			__FILE__, __LINE__);
6331		return 0;
6332	}
6333
6334	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
6335				   IBMVNIC_MAX_QUEUES);
6336	if (!netdev)
6337		return -ENOMEM;
6338
6339	adapter = netdev_priv(netdev);
6340	adapter->state = VNIC_PROBING;
6341	dev_set_drvdata(&dev->dev, netdev);
6342	adapter->vdev = dev;
6343	adapter->netdev = netdev;
6344	adapter->login_pending = false;
6345	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
6346	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
6347	bitmap_set(adapter->map_ids, 0, 1);
6348
6349	ether_addr_copy(adapter->mac_addr, mac_addr_p);
6350	eth_hw_addr_set(netdev, adapter->mac_addr);
6351	netdev->irq = dev->irq;
6352	netdev->netdev_ops = &ibmvnic_netdev_ops;
6353	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
6354	SET_NETDEV_DEV(netdev, &dev->dev);
6355
6356	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
6357	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
6358			  __ibmvnic_delayed_reset);
6359	INIT_LIST_HEAD(&adapter->rwi_list);
6360	spin_lock_init(&adapter->rwi_lock);
6361	spin_lock_init(&adapter->state_lock);
6362	mutex_init(&adapter->fw_lock);
6363	init_completion(&adapter->probe_done);
6364	init_completion(&adapter->init_done);
6365	init_completion(&adapter->fw_done);
6366	init_completion(&adapter->reset_done);
6367	init_completion(&adapter->stats_done);
6368	clear_bit(0, &adapter->resetting);
6369	adapter->prev_rx_buf_sz = 0;
6370	adapter->prev_mtu = 0;
6371
6372	init_success = false;
6373	do {
6374		reinit_init_done(adapter);
6375
6376		/* clear any failovers we got in the previous pass
6377		 * since we are reinitializing the CRQ
6378		 */
6379		adapter->failover_pending = false;
6380
6381		/* If we had already initialized the CRQ, one or more
6382		 * resets may be queued. Discard those and release the
6383		 * CRQ before initializing the CRQ again.
6384		 */
6385		release_crq_queue(adapter);
6386
6387		/* Since we are still in PROBING state, __ibmvnic_reset()
6388		 * will not access the ->rwi_list, and since we released the
6389		 * CRQ, we won't get _new_ transport events. But there may be
6390		 * an ongoing ibmvnic_reset() call, so serialize access to
6391		 * rwi_list. If we win the race, ibmvnic_reset() could add
6392		 * a reset after we purged, but that's OK - we just may end
6393		 * up with an extra reset (i.e., similar to having two or
6394		 * more resets in the queue at once).
6395		 * CHECK.
6396		 */
6397		spin_lock_irqsave(&adapter->rwi_lock, flags);
6398		flush_reset_queue(adapter);
6399		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
6400
6401		rc = init_crq_queue(adapter);
6402		if (rc) {
6403			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
6404				rc);
6405			goto ibmvnic_init_fail;
6406		}
6407
6408		rc = ibmvnic_reset_init(adapter, false);
6409	} while (rc == -EAGAIN);
6410
6411	/* We ignore the error from ibmvnic_reset_init() on the assumption
6412	 * that the partner is not ready and the CRQ is not active. When the
6413	 * partner becomes ready, we will do the passive-init reset.
6414	 */
6415
6416	if (!rc)
6417		init_success = true;
6418
6419	rc = init_stats_buffers(adapter);
6420	if (rc)
6421		goto ibmvnic_init_fail;
6422
6423	rc = init_stats_token(adapter);
6424	if (rc)
6425		goto ibmvnic_stats_fail;
6426
6427	rc = device_create_file(&dev->dev, &dev_attr_failover);
6428	if (rc)
6429		goto ibmvnic_dev_file_err;
6430
6431	netif_carrier_off(netdev);
6432
6433	if (init_success) {
6434		adapter->state = VNIC_PROBED;
6435		netdev->mtu = adapter->req_mtu - ETH_HLEN;
6436		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
6437		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
6438	} else {
6439		adapter->state = VNIC_DOWN;
6440	}
6441
6442	adapter->wait_for_reset = false;
6443	adapter->last_reset_time = jiffies;
6444
6445	rc = register_netdev(netdev);
6446	if (rc) {
6447		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
6448		goto ibmvnic_register_fail;
6449	}
6450	dev_info(&dev->dev, "ibmvnic registered\n");
6451
6452	rc = ibmvnic_cpu_notif_add(adapter);
6453	if (rc) {
6454		netdev_err(netdev, "Registering cpu notifier failed\n");
6455		goto cpu_notif_add_failed;
6456	}
6457
6458	complete(&adapter->probe_done);
6459
6460	return 0;
6461
6462cpu_notif_add_failed:
6463	unregister_netdev(netdev);
6464
6465ibmvnic_register_fail:
6466	device_remove_file(&dev->dev, &dev_attr_failover);
6467
6468ibmvnic_dev_file_err:
6469	release_stats_token(adapter);
6470
6471ibmvnic_stats_fail:
6472	release_stats_buffers(adapter);
6473
6474ibmvnic_init_fail:
6475	release_sub_crqs(adapter, 1);
6476	release_crq_queue(adapter);
6477
6478	/* Clean up the worker thread after releasing the CRQ so we don't
6479	 * get transport events (i.e., new work items for the worker thread).
6480	 */
6481	adapter->state = VNIC_REMOVING;
6482	complete(&adapter->probe_done);
6483	flush_work(&adapter->ibmvnic_reset);
6484	flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6485
6486	flush_reset_queue(adapter);
6487
6488	mutex_destroy(&adapter->fw_lock);
6489	free_netdev(netdev);
6490
6491	return rc;
6492}
6493
6494static void ibmvnic_remove(struct vio_dev *dev)
6495{
6496	struct net_device *netdev = dev_get_drvdata(&dev->dev);
6497	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6498	unsigned long flags;
6499
6500	spin_lock_irqsave(&adapter->state_lock, flags);
6501
6502	/* If ibmvnic_reset() is scheduling a reset, wait for it to
6503	 * finish. Then, set the state to REMOVING to prevent it from
6504	 * scheduling any more work and to have reset functions ignore
6505	 * any resets that have already been scheduled. Drop the lock
6506	 * after setting state, so __ibmvnic_reset() which is called
6507	 * from the flush_work() below, can make progress.
6508	 */
6509	spin_lock(&adapter->rwi_lock);
6510	adapter->state = VNIC_REMOVING;
6511	spin_unlock(&adapter->rwi_lock);
6512
6513	spin_unlock_irqrestore(&adapter->state_lock, flags);
6514
6515	ibmvnic_cpu_notif_remove(adapter);
6516
6517	flush_work(&adapter->ibmvnic_reset);
6518	flush_delayed_work(&adapter->ibmvnic_delayed_reset);
6519
6520	rtnl_lock();
6521	unregister_netdevice(netdev);
6522
6523	release_resources(adapter);
6524	release_rx_pools(adapter);
6525	release_tx_pools(adapter);
6526	release_sub_crqs(adapter, 1);
6527	release_crq_queue(adapter);
6528
6529	release_stats_token(adapter);
6530	release_stats_buffers(adapter);
6531
6532	adapter->state = VNIC_REMOVED;
6533
6534	rtnl_unlock();
6535	mutex_destroy(&adapter->fw_lock);
6536	device_remove_file(&dev->dev, &dev_attr_failover);
6537	free_netdev(netdev);
6538	dev_set_drvdata(&dev->dev, NULL);
6539}
6540
6541static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
6542			      const char *buf, size_t count)
6543{
6544	struct net_device *netdev = dev_get_drvdata(dev);
6545	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6546	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
6547	__be64 session_token;
6548	long rc;
6549
6550	if (!sysfs_streq(buf, "1"))
6551		return -EINVAL;
6552
6553	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
6554			 H_GET_SESSION_TOKEN, 0, 0, 0);
6555	if (rc) {
6556		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
6557			   rc);
6558		goto last_resort;
6559	}
6560
6561	session_token = (__be64)retbuf[0];
6562	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
6563		   be64_to_cpu(session_token));
6564	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
6565				H_SESSION_ERR_DETECTED, session_token, 0, 0);
6566	if (rc) {
6567		netdev_err(netdev,
6568			   "H_VIOCTL initiated failover failed, rc %ld\n",
6569			   rc);
6570		goto last_resort;
6571	}
6572
6573	return count;
6574
6575last_resort:
6576	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
6577	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
6578
6579	return count;
6580}
6581static DEVICE_ATTR_WO(failover);
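
/*
 * Userspace trigger for the write-only attribute above; the vio unit
 * address in the path is illustrative and varies per partition:
 *
 *	# echo 1 > /sys/devices/vio/30000003/failover
 *
 * Any value other than "1" is rejected with -EINVAL. A successful write
 * first attempts the H_VIOCTL session-token handshake and only falls
 * back to scheduling a full FAILOVER reset if the hcalls fail.
 */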
6582
6583static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
6584{
6585	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
6586	struct ibmvnic_adapter *adapter;
6587	struct iommu_table *tbl;
6588	unsigned long ret = 0;
6589	int i;
6590
6591	tbl = get_iommu_table_base(&vdev->dev);
6592
6593	/* netdev is initialized at probe time along with the structures we need below */
6594	if (!netdev)
6595		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
6596
6597	adapter = netdev_priv(netdev);
6598
6599	ret += PAGE_SIZE; /* the crq message queue */
6600	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
6601
6602	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
6603		ret += 4 * PAGE_SIZE; /* the scrq message queue */
6604
6605	for (i = 0; i < adapter->num_active_rx_pools; i++)
6606		ret += adapter->rx_pool[i].size *
6607		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
6608
6609	return ret;
6610}
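
/*
 * Worked example of the estimate above, with illustrative numbers only:
 * assuming 4 KiB IOMMU pages, 4 TX + 4 RX queues, and 4 active RX pools
 * of 256 buffers of 8 KiB each, the requested entitlement is roughly
 *
 *	4096			(the CRQ page)
 *	+ align(stats)		(one aligned statistics buffer)
 *	+ 8 * 4 * 4096		(four pages per sub-CRQ message queue)
 *	+ 4 * 256 * 8192	(the RX pool buffers)
 *
 * so the RX buffer pools dominate the total.
 */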
6611
6612static int ibmvnic_resume(struct device *dev)
6613{
6614	struct net_device *netdev = dev_get_drvdata(dev);
6615	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
6616
6617	if (adapter->state != VNIC_OPEN)
6618		return 0;
6619
6620	tasklet_schedule(&adapter->tasklet);
6621
6622	return 0;
6623}
6624
6625static const struct vio_device_id ibmvnic_device_table[] = {
6626	{"network", "IBM,vnic"},
6627	{"", "" }
6628};
6629MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
6630
6631static const struct dev_pm_ops ibmvnic_pm_ops = {
6632	.resume = ibmvnic_resume
6633};
6634
6635static struct vio_driver ibmvnic_driver = {
6636	.id_table       = ibmvnic_device_table,
6637	.probe          = ibmvnic_probe,
6638	.remove         = ibmvnic_remove,
6639	.get_desired_dma = ibmvnic_get_desired_dma,
6640	.name		= ibmvnic_driver_name,
6641	.pm		= &ibmvnic_pm_ops,
6642};
6643
6644/* module functions */
6645static int __init ibmvnic_module_init(void)
6646{
6647	int ret;
6648
6649	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
6650				      ibmvnic_cpu_online,
6651				      ibmvnic_cpu_down_prep);
6652	if (ret < 0)
6653		goto out;
6654	ibmvnic_online = ret;
6655	ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
6656				      NULL, ibmvnic_cpu_dead);
6657	if (ret)
6658		goto err_dead;
6659
6660	ret = vio_register_driver(&ibmvnic_driver);
6661	if (ret)
6662		goto err_vio_register;
6663
6664	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
6665		IBMVNIC_DRIVER_VERSION);
6666
6667	return 0;
6668err_vio_register:
6669	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6670err_dead:
6671	cpuhp_remove_multi_state(ibmvnic_online);
6672out:
6673	return ret;
6674}
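
/*
 * Pairing note (a hedged sketch; it assumes ibmvnic_cpu_notif_add() and
 * ibmvnic_cpu_notif_remove() wrap the cpuhp instance helpers): the two
 * multi-states registered above get one instance per adapter, roughly
 *
 *	cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
 *	...
 *	cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
 *
 * which is why ibmvnic_module_exit() below unregisters the driver before
 * removing the states.
 */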
6675
6676static void __exit ibmvnic_module_exit(void)
6677{
6678	vio_unregister_driver(&ibmvnic_driver);
6679	cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
6680	cpuhp_remove_multi_state(ibmvnic_online);
6681}
6682
6683module_init(ibmvnic_module_init);
6684module_exit(ibmvnic_module_exit);