   1/**************************************************************************/
   2/*                                                                        */
   3/*  IBM System i and System p Virtual NIC Device Driver                   */
   4/*  Copyright (C) 2014 IBM Corp.                                          */
   5/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   6/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   7/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   8/*                                                                        */
   9/*  This program is free software; you can redistribute it and/or modify  */
  10/*  it under the terms of the GNU General Public License as published by  */
  11/*  the Free Software Foundation; either version 2 of the License, or     */
  12/*  (at your option) any later version.                                   */
  13/*                                                                        */
  14/*  This program is distributed in the hope that it will be useful,       */
  15/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
  16/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
  17/*  GNU General Public License for more details.                          */
  18/*                                                                        */
  19/*  You should have received a copy of the GNU General Public License     */
  20/*  along with this program.                                              */
  21/*                                                                        */
  22/* This module contains the implementation of a virtual ethernet device   */
  23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  24/* option of the RS/6000 Platform Architecture to interface with virtual  */
  25/* ethernet NICs that are presented to the partition by the hypervisor.   */
   26/*                                                                        */
  27/* Messages are passed between the VNIC driver and the VNIC server using  */
  28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  29/* issue and receive commands that initiate communication with the server */
  30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  31/* are used by the driver to notify the server that a packet is           */
  32/* ready for transmission or that a buffer has been added to receive a    */
  33/* packet. Subsequently, sCRQs are used by the server to notify the       */
  34/* driver that a packet transmission has been completed or that a packet  */
  35/* has been received and placed in a waiting buffer.                      */
  36/*                                                                        */
  37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  38/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  39/* or receive has been completed, the VNIC driver is required to use      */
   40/*  "long term mapping". This entails that large, contiguous DMA mapped   */
  41/* buffers are allocated on driver initialization and these buffers are   */
  42/* then continuously reused to pass skbs to and from the VNIC server.     */
  43/*                                                                        */
  44/**************************************************************************/
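     /* Illustrative sketch (not part of the driver): with the helpers
      * defined later in this file, the long-term mapping lifecycle
      * described above amounts to one allocation and registration up
      * front, followed by indefinite reuse:
      *
      *	struct ibmvnic_long_term_buff ltb;
      *
      *	alloc_long_term_buff(adapter, &ltb, size);  // DMA alloc + map request
      *	// ... reuse ltb.buff to stage skbs for the life of the device ...
      *	free_long_term_buff(adapter, &ltb);         // unmap request + free
      */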
  45
  46#include <linux/module.h>
  47#include <linux/moduleparam.h>
  48#include <linux/types.h>
  49#include <linux/errno.h>
  50#include <linux/completion.h>
  51#include <linux/ioport.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/kernel.h>
  54#include <linux/netdevice.h>
  55#include <linux/etherdevice.h>
  56#include <linux/skbuff.h>
  57#include <linux/init.h>
  58#include <linux/delay.h>
  59#include <linux/mm.h>
  60#include <linux/ethtool.h>
  61#include <linux/proc_fs.h>
  62#include <linux/in.h>
  63#include <linux/ip.h>
  64#include <linux/irq.h>
  65#include <linux/kthread.h>
  66#include <linux/seq_file.h>
  67#include <linux/debugfs.h>
  68#include <linux/interrupt.h>
  69#include <net/net_namespace.h>
  70#include <asm/hvcall.h>
  71#include <linux/atomic.h>
  72#include <asm/vio.h>
  73#include <asm/iommu.h>
  74#include <linux/uaccess.h>
  75#include <asm/firmware.h>
   76
  77
  78#include "ibmvnic.h"
  79
  80static const char ibmvnic_driver_name[] = "ibmvnic";
  81static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  82
  83MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
  84MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  85MODULE_LICENSE("GPL");
  86MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  87
  88static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
  89static int ibmvnic_remove(struct vio_dev *);
  90static void release_sub_crqs(struct ibmvnic_adapter *);
  91static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  92static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  93static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  94static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
  95static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
  96		       union sub_crq *sub_crq);
  97static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
  98static int enable_scrq_irq(struct ibmvnic_adapter *,
  99			   struct ibmvnic_sub_crq_queue *);
 100static int disable_scrq_irq(struct ibmvnic_adapter *,
 101			    struct ibmvnic_sub_crq_queue *);
 102static int pending_scrq(struct ibmvnic_adapter *,
 103			struct ibmvnic_sub_crq_queue *);
 104static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
 105					struct ibmvnic_sub_crq_queue *);
  106static int ibmvnic_poll(struct napi_struct *napi, int budget);
 107static void send_map_query(struct ibmvnic_adapter *adapter);
 108static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 109static void send_request_unmap(struct ibmvnic_adapter *, u8);
 110
 111struct ibmvnic_stat {
 112	char name[ETH_GSTRING_LEN];
 113	int offset;
 114};
 115
 116#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 117			     offsetof(struct ibmvnic_statistics, stat))
 118#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
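     /* Example (illustrative): the two macros above compose to read a
      * counter out of the adapter's statistics block, e.g.
      *
      *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
      *
      * IBMVNIC_STAT_OFF() yields the byte offset of adapter->stats.rx_packets
      * and IBMVNIC_GET_STAT() dereferences that offset as a u64.
      */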
 119
 120static const struct ibmvnic_stat ibmvnic_stats[] = {
 121	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 122	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 123	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 124	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 125	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 126	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 127	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 128	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 129	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 130	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 131	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 132	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 133	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 134	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 135	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 136	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 137	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 138	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 139	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 140	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 141	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 142	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 143};
 144
 145static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 146			  unsigned long length, unsigned long *number,
 147			  unsigned long *irq)
 148{
 149	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 150	long rc;
 151
 152	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 153	*number = retbuf[0];
 154	*irq = retbuf[1];
 155
 156	return rc;
 157}
 158
 159/* net_device_ops functions */
 160
 161static void init_rx_pool(struct ibmvnic_adapter *adapter,
 162			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
 163			 int buff_size, int active)
 164{
 165	netdev_dbg(adapter->netdev,
 166		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
 167		   index, num, buff_size);
 168	rx_pool->size = num;
 169	rx_pool->index = index;
 170	rx_pool->buff_size = buff_size;
 171	rx_pool->active = active;
 172}
 173
 174static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 175				struct ibmvnic_long_term_buff *ltb, int size)
 176{
 177	struct device *dev = &adapter->vdev->dev;
 178
 179	ltb->size = size;
 180	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
 181				       GFP_KERNEL);
 182
 183	if (!ltb->buff) {
 184		dev_err(dev, "Couldn't alloc long term buffer\n");
 185		return -ENOMEM;
 186	}
 187	ltb->map_id = adapter->map_id;
 188	adapter->map_id++;
  189	init_completion(&adapter->fw_done);
  190	send_request_map(adapter, ltb->addr,
  191			 ltb->size, ltb->map_id);
  192	wait_for_completion(&adapter->fw_done);
 193	return 0;
 194}
 195
 196static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 197				struct ibmvnic_long_term_buff *ltb)
 198{
 199	struct device *dev = &adapter->vdev->dev;
 200
 201	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 202	send_request_unmap(adapter, ltb->map_id);
 203}
 204
 205static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
 206			 struct ibmvnic_rx_pool *pool)
 207{
 208	struct device *dev = &adapter->vdev->dev;
 209	int i;
 210
 211	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
 212	if (!pool->free_map)
 213		return -ENOMEM;
 214
 215	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
 216				GFP_KERNEL);
 217
 218	if (!pool->rx_buff) {
 219		dev_err(dev, "Couldn't alloc rx buffers\n");
 220		kfree(pool->free_map);
 221		return -ENOMEM;
 222	}
 223
 224	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
 225				 pool->size * pool->buff_size)) {
 226		kfree(pool->free_map);
 227		kfree(pool->rx_buff);
 228		return -ENOMEM;
 229	}
 230
 231	for (i = 0; i < pool->size; ++i)
 232		pool->free_map[i] = i;
 233
 234	atomic_set(&pool->available, 0);
 235	pool->next_alloc = 0;
 236	pool->next_free = 0;
 237
 238	return 0;
 239}
 240
 241static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 242			      struct ibmvnic_rx_pool *pool)
 243{
 244	int count = pool->size - atomic_read(&pool->available);
 245	struct device *dev = &adapter->vdev->dev;
 246	int buffers_added = 0;
 247	unsigned long lpar_rc;
 248	union sub_crq sub_crq;
 249	struct sk_buff *skb;
 250	unsigned int offset;
 251	dma_addr_t dma_addr;
 252	unsigned char *dst;
 253	u64 *handle_array;
 254	int shift = 0;
 255	int index;
 256	int i;
 257
 258	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 259				      be32_to_cpu(adapter->login_rsp_buf->
 260				      off_rxadd_subcrqs));
 261
 262	for (i = 0; i < count; ++i) {
 263		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 264		if (!skb) {
 265			dev_err(dev, "Couldn't replenish rx buff\n");
 266			adapter->replenish_no_mem++;
 267			break;
 268		}
 269
 270		index = pool->free_map[pool->next_free];
 271
 272		if (pool->rx_buff[index].skb)
 273			dev_err(dev, "Inconsistent free_map!\n");
 274
 275		/* Copy the skb to the long term mapped DMA buffer */
 276		offset = index * pool->buff_size;
 277		dst = pool->long_term_buff.buff + offset;
 278		memset(dst, 0, pool->buff_size);
 279		dma_addr = pool->long_term_buff.addr + offset;
 280		pool->rx_buff[index].data = dst;
 281
 282		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 283		pool->rx_buff[index].dma = dma_addr;
 284		pool->rx_buff[index].skb = skb;
 285		pool->rx_buff[index].pool_index = pool->index;
 286		pool->rx_buff[index].size = pool->buff_size;
 287
 288		memset(&sub_crq, 0, sizeof(sub_crq));
 289		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 290		sub_crq.rx_add.correlator =
 291		    cpu_to_be64((u64)&pool->rx_buff[index]);
 292		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 293		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
 294
 295		/* The length field of the sCRQ is defined to be 24 bits so the
 296		 * buffer size needs to be left shifted by a byte before it is
 297		 * converted to big endian to prevent the last byte from being
 298		 * truncated.
 299		 */
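     		/* Worked example (illustrative): with buff_size = 0x1000 on a
     		 * little-endian host, 0x1000 << 8 = 0x100000; cpu_to_be32()
     		 * lays out the bytes 00 10 00 00, so the first three bytes
     		 * (the 24-bit length field) carry 0x001000 (4096) as intended.
     		 */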
 300#ifdef __LITTLE_ENDIAN__
 301		shift = 8;
 302#endif
 303		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 304
 305		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
 306				      &sub_crq);
 307		if (lpar_rc != H_SUCCESS)
 308			goto failure;
 309
 310		buffers_added++;
 311		adapter->replenish_add_buff_success++;
 312		pool->next_free = (pool->next_free + 1) % pool->size;
 313	}
 314	atomic_add(buffers_added, &pool->available);
 315	return;
 316
 317failure:
 318	dev_info(dev, "replenish pools failure\n");
 319	pool->free_map[pool->next_free] = index;
 320	pool->rx_buff[index].skb = NULL;
 321	if (!dma_mapping_error(dev, dma_addr))
 322		dma_unmap_single(dev, dma_addr, pool->buff_size,
 323				 DMA_FROM_DEVICE);
 324
 325	dev_kfree_skb_any(skb);
 326	adapter->replenish_add_buff_failure++;
 327	atomic_add(buffers_added, &pool->available);
 328}
 329
 330static void replenish_pools(struct ibmvnic_adapter *adapter)
 331{
 332	int i;
 333
 334	if (adapter->migrated)
 335		return;
 336
 337	adapter->replenish_task_cycles++;
 338	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 339	     i++) {
 340		if (adapter->rx_pool[i].active)
 341			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 342	}
 343}
 344
 345static void free_rx_pool(struct ibmvnic_adapter *adapter,
 346			 struct ibmvnic_rx_pool *pool)
 347{
 348	int i;
 349
 350	kfree(pool->free_map);
 351	pool->free_map = NULL;
 352
 353	if (!pool->rx_buff)
 354		return;
 355
 356	for (i = 0; i < pool->size; i++) {
 357		if (pool->rx_buff[i].skb) {
 358			dev_kfree_skb_any(pool->rx_buff[i].skb);
 359			pool->rx_buff[i].skb = NULL;
 360		}
 361	}
 362	kfree(pool->rx_buff);
 363	pool->rx_buff = NULL;
 364}
 365
 366static int ibmvnic_open(struct net_device *netdev)
 367{
 368	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 369	struct device *dev = &adapter->vdev->dev;
 370	struct ibmvnic_tx_pool *tx_pool;
 371	union ibmvnic_crq crq;
 372	int rxadd_subcrqs;
 373	u64 *size_array;
 374	int tx_subcrqs;
 375	int i, j;
 376
 377	rxadd_subcrqs =
 378	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 379	tx_subcrqs =
 380	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 381	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 382				  be32_to_cpu(adapter->login_rsp_buf->
 383					      off_rxadd_buff_size));
 384	adapter->map_id = 1;
 385	adapter->napi = kcalloc(adapter->req_rx_queues,
 386				sizeof(struct napi_struct), GFP_KERNEL);
 387	if (!adapter->napi)
 388		goto alloc_napi_failed;
 389	for (i = 0; i < adapter->req_rx_queues; i++) {
 390		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
 391			       NAPI_POLL_WEIGHT);
 392		napi_enable(&adapter->napi[i]);
 393	}
 394	adapter->rx_pool =
 395	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
 396
 397	if (!adapter->rx_pool)
 398		goto rx_pool_arr_alloc_failed;
 399	send_map_query(adapter);
 400	for (i = 0; i < rxadd_subcrqs; i++) {
 401		init_rx_pool(adapter, &adapter->rx_pool[i],
 402			     IBMVNIC_BUFFS_PER_POOL, i,
 403			     be64_to_cpu(size_array[i]), 1);
 404		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
 405			dev_err(dev, "Couldn't alloc rx pool\n");
 406			goto rx_pool_alloc_failed;
 407		}
 408	}
 409	adapter->tx_pool =
 410	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 411
 412	if (!adapter->tx_pool)
 413		goto tx_pool_arr_alloc_failed;
 414	for (i = 0; i < tx_subcrqs; i++) {
 415		tx_pool = &adapter->tx_pool[i];
 416		tx_pool->tx_buff =
 417		    kcalloc(adapter->max_tx_entries_per_subcrq,
 418			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
 419		if (!tx_pool->tx_buff)
 420			goto tx_pool_alloc_failed;
 421
 422		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
 423					 adapter->max_tx_entries_per_subcrq *
 424					 adapter->req_mtu))
 425			goto tx_ltb_alloc_failed;
 426
 427		tx_pool->free_map =
 428		    kcalloc(adapter->max_tx_entries_per_subcrq,
 429			    sizeof(int), GFP_KERNEL);
 430		if (!tx_pool->free_map)
 431			goto tx_fm_alloc_failed;
 432
 433		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
 434			tx_pool->free_map[j] = j;
 435
 436		tx_pool->consumer_index = 0;
 437		tx_pool->producer_index = 0;
 438	}
 439	adapter->bounce_buffer_size =
 440	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
 441	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
 442					 GFP_KERNEL);
 443	if (!adapter->bounce_buffer)
 444		goto bounce_alloc_failed;
 445
 446	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
 447						    adapter->bounce_buffer_size,
 448						    DMA_TO_DEVICE);
 449	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 450		dev_err(dev, "Couldn't map tx bounce buffer\n");
 451		goto bounce_map_failed;
 452	}
 453	replenish_pools(adapter);
 454
 455	/* We're ready to receive frames, enable the sub-crq interrupts and
 456	 * set the logical link state to up
 457	 */
 458	for (i = 0; i < adapter->req_rx_queues; i++)
 459		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
 460
 461	for (i = 0; i < adapter->req_tx_queues; i++)
 462		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
 463
 464	memset(&crq, 0, sizeof(crq));
 465	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 466	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 467	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
 468	ibmvnic_send_crq(adapter, &crq);
 469
 470	netif_start_queue(netdev);
 471	return 0;
 472
 473bounce_map_failed:
 474	kfree(adapter->bounce_buffer);
 475bounce_alloc_failed:
 476	i = tx_subcrqs - 1;
 477	kfree(adapter->tx_pool[i].free_map);
 478tx_fm_alloc_failed:
 479	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
 480tx_ltb_alloc_failed:
 481	kfree(adapter->tx_pool[i].tx_buff);
 482tx_pool_alloc_failed:
 483	for (j = 0; j < i; j++) {
 484		kfree(adapter->tx_pool[j].tx_buff);
 485		free_long_term_buff(adapter,
 486				    &adapter->tx_pool[j].long_term_buff);
 487		kfree(adapter->tx_pool[j].free_map);
 488	}
 489	kfree(adapter->tx_pool);
 490	adapter->tx_pool = NULL;
 491tx_pool_arr_alloc_failed:
 492	i = rxadd_subcrqs;
 493rx_pool_alloc_failed:
 494	for (j = 0; j < i; j++) {
 495		free_rx_pool(adapter, &adapter->rx_pool[j]);
 496		free_long_term_buff(adapter,
 497				    &adapter->rx_pool[j].long_term_buff);
 498	}
 499	kfree(adapter->rx_pool);
 500	adapter->rx_pool = NULL;
 501rx_pool_arr_alloc_failed:
 502	for (i = 0; i < adapter->req_rx_queues; i++)
  503		napi_disable(&adapter->napi[i]);
 504alloc_napi_failed:
 505	return -ENOMEM;
 506}
 507
 508static int ibmvnic_close(struct net_device *netdev)
 509{
 510	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 511	struct device *dev = &adapter->vdev->dev;
 512	union ibmvnic_crq crq;
 513	int i;
 514
 515	adapter->closing = true;
 516
 517	for (i = 0; i < adapter->req_rx_queues; i++)
 518		napi_disable(&adapter->napi[i]);
 519
 520	netif_stop_queue(netdev);
 521
 522	if (adapter->bounce_buffer) {
 523		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 524			dma_unmap_single(&adapter->vdev->dev,
 525					 adapter->bounce_buffer_dma,
 526					 adapter->bounce_buffer_size,
  527					 DMA_TO_DEVICE);
 528			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
 529		}
 530		kfree(adapter->bounce_buffer);
 531		adapter->bounce_buffer = NULL;
 532	}
 533
 534	memset(&crq, 0, sizeof(crq));
 535	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 536	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 537	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
 538	ibmvnic_send_crq(adapter, &crq);
 539
 540	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 541	     i++) {
 542		kfree(adapter->tx_pool[i].tx_buff);
 543		free_long_term_buff(adapter,
 544				    &adapter->tx_pool[i].long_term_buff);
 545		kfree(adapter->tx_pool[i].free_map);
 546	}
 547	kfree(adapter->tx_pool);
 548	adapter->tx_pool = NULL;
 549
 550	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 551	     i++) {
 552		free_rx_pool(adapter, &adapter->rx_pool[i]);
 553		free_long_term_buff(adapter,
 554				    &adapter->rx_pool[i].long_term_buff);
 555	}
 556	kfree(adapter->rx_pool);
 557	adapter->rx_pool = NULL;
 558
 559	adapter->closing = false;
 560
 561	return 0;
 562}
 563
 564static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 565{
 566	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 567	int queue_num = skb_get_queue_mapping(skb);
 568	struct device *dev = &adapter->vdev->dev;
 569	struct ibmvnic_tx_buff *tx_buff = NULL;
 570	struct ibmvnic_tx_pool *tx_pool;
 571	unsigned int tx_send_failed = 0;
 572	unsigned int tx_map_failed = 0;
 573	unsigned int tx_dropped = 0;
 574	unsigned int tx_packets = 0;
 575	unsigned int tx_bytes = 0;
 576	dma_addr_t data_dma_addr;
 577	struct netdev_queue *txq;
 578	bool used_bounce = false;
 579	unsigned long lpar_rc;
 580	union sub_crq tx_crq;
 581	unsigned int offset;
 582	unsigned char *dst;
 583	u64 *handle_array;
 584	int index = 0;
 585	int ret = 0;
 586
 587	tx_pool = &adapter->tx_pool[queue_num];
 588	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 589	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 590				   be32_to_cpu(adapter->login_rsp_buf->
 591					       off_txsubm_subcrqs));
 592	if (adapter->migrated) {
 593		tx_send_failed++;
 594		tx_dropped++;
 595		ret = NETDEV_TX_BUSY;
 596		goto out;
 597	}
 598
 599	index = tx_pool->free_map[tx_pool->consumer_index];
 600	offset = index * adapter->req_mtu;
 601	dst = tx_pool->long_term_buff.buff + offset;
 602	memset(dst, 0, adapter->req_mtu);
 603	skb_copy_from_linear_data(skb, dst, skb->len);
 604	data_dma_addr = tx_pool->long_term_buff.addr + offset;
 605
 606	tx_pool->consumer_index =
 607	    (tx_pool->consumer_index + 1) %
 608		adapter->max_tx_entries_per_subcrq;
 609
 610	tx_buff = &tx_pool->tx_buff[index];
 611	tx_buff->skb = skb;
 612	tx_buff->data_dma[0] = data_dma_addr;
 613	tx_buff->data_len[0] = skb->len;
 614	tx_buff->index = index;
 615	tx_buff->pool_index = queue_num;
 616	tx_buff->last_frag = true;
 617	tx_buff->used_bounce = used_bounce;
 618
 619	memset(&tx_crq, 0, sizeof(tx_crq));
 620	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
 621	tx_crq.v1.type = IBMVNIC_TX_DESC;
 622	tx_crq.v1.n_crq_elem = 1;
 623	tx_crq.v1.n_sge = 1;
 624	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
 625	tx_crq.v1.correlator = cpu_to_be32(index);
 626	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 627	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 628	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 629
 630	if (adapter->vlan_header_insertion) {
 631		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
 632		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
 633	}
 634
 635	if (skb->protocol == htons(ETH_P_IP)) {
 636		if (ip_hdr(skb)->version == 4)
 637			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
 638		else if (ip_hdr(skb)->version == 6)
 639			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
 640
 641		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 642			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
 643		else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
 644			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
 645	}
 646
 647	if (skb->ip_summed == CHECKSUM_PARTIAL)
 648		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
 649
 650	lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
 651
 652	if (lpar_rc != H_SUCCESS) {
 653		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
 654
 655		if (tx_pool->consumer_index == 0)
 656			tx_pool->consumer_index =
 657				adapter->max_tx_entries_per_subcrq - 1;
 658		else
 659			tx_pool->consumer_index--;
 660
 661		tx_send_failed++;
 662		tx_dropped++;
 663		ret = NETDEV_TX_BUSY;
 664		goto out;
 665	}
 666	tx_packets++;
 667	tx_bytes += skb->len;
 668	txq->trans_start = jiffies;
 669	ret = NETDEV_TX_OK;
 670
 671out:
 672	netdev->stats.tx_dropped += tx_dropped;
 673	netdev->stats.tx_bytes += tx_bytes;
 674	netdev->stats.tx_packets += tx_packets;
 675	adapter->tx_send_failed += tx_send_failed;
 676	adapter->tx_map_failed += tx_map_failed;
 677
 678	return ret;
 679}
 680
 681static void ibmvnic_set_multi(struct net_device *netdev)
 682{
 683	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 684	struct netdev_hw_addr *ha;
 685	union ibmvnic_crq crq;
 686
 687	memset(&crq, 0, sizeof(crq));
 688	crq.request_capability.first = IBMVNIC_CRQ_CMD;
 689	crq.request_capability.cmd = REQUEST_CAPABILITY;
 690
 691	if (netdev->flags & IFF_PROMISC) {
 692		if (!adapter->promisc_supported)
 693			return;
 694	} else {
 695		if (netdev->flags & IFF_ALLMULTI) {
 696			/* Accept all multicast */
 697			memset(&crq, 0, sizeof(crq));
 698			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 699			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 700			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
 701			ibmvnic_send_crq(adapter, &crq);
 702		} else if (netdev_mc_empty(netdev)) {
 703			/* Reject all multicast */
 704			memset(&crq, 0, sizeof(crq));
 705			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 706			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 707			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
 708			ibmvnic_send_crq(adapter, &crq);
 709		} else {
 710			/* Accept one or more multicast(s) */
 711			netdev_for_each_mc_addr(ha, netdev) {
 712				memset(&crq, 0, sizeof(crq));
 713				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 714				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 715				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
 716				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
 717						ha->addr);
 718				ibmvnic_send_crq(adapter, &crq);
 719			}
 720		}
 721	}
 722}
 723
 724static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 725{
 726	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 727	struct sockaddr *addr = p;
 728	union ibmvnic_crq crq;
 729
 730	if (!is_valid_ether_addr(addr->sa_data))
 731		return -EADDRNOTAVAIL;
 732
 733	memset(&crq, 0, sizeof(crq));
 734	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
 735	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
 736	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
 737	ibmvnic_send_crq(adapter, &crq);
 738	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
 739	return 0;
 740}
 741
 742static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
 743{
 744	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 745
 746	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
 747		return -EINVAL;
 748
 749	netdev->mtu = new_mtu;
 750	return 0;
 751}
 752
 753static void ibmvnic_tx_timeout(struct net_device *dev)
 754{
 755	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 756	int rc;
 757
 758	/* Adapter timed out, resetting it */
 759	release_sub_crqs(adapter);
 760	rc = ibmvnic_reset_crq(adapter);
 761	if (rc)
 762		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
 763	else
 764		ibmvnic_send_crq_init(adapter);
 765}
 766
 767static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
 768				  struct ibmvnic_rx_buff *rx_buff)
 769{
 770	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
 771
 772	rx_buff->skb = NULL;
 773
 774	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
 775	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
 776
 777	atomic_dec(&pool->available);
 778}
 779
 780static int ibmvnic_poll(struct napi_struct *napi, int budget)
 781{
 782	struct net_device *netdev = napi->dev;
 783	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 784	int scrq_num = (int)(napi - adapter->napi);
 785	int frames_processed = 0;
 786restart_poll:
 787	while (frames_processed < budget) {
 788		struct sk_buff *skb;
 789		struct ibmvnic_rx_buff *rx_buff;
 790		union sub_crq *next;
 791		u32 length;
 792		u16 offset;
 793		u8 flags = 0;
 794
 795		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 796			break;
 797		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 798		rx_buff =
 799		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
 800							  rx_comp.correlator);
 801		/* do error checking */
 802		if (next->rx_comp.rc) {
 803			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
 804			/* free the entry */
 805			next->rx_comp.first = 0;
 806			remove_buff_from_pool(adapter, rx_buff);
 807			break;
 808		}
 809
 810		length = be32_to_cpu(next->rx_comp.len);
 811		offset = be16_to_cpu(next->rx_comp.off_frame_data);
 812		flags = next->rx_comp.flags;
 813		skb = rx_buff->skb;
 814		skb_copy_to_linear_data(skb, rx_buff->data + offset,
 815					length);
 816		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
 817		/* free the entry */
 818		next->rx_comp.first = 0;
 819		remove_buff_from_pool(adapter, rx_buff);
 820
 821		skb_put(skb, length);
 822		skb->protocol = eth_type_trans(skb, netdev);
 823
 824		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
 825		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
 826			skb->ip_summed = CHECKSUM_UNNECESSARY;
 827		}
 828
 829		length = skb->len;
 830		napi_gro_receive(napi, skb); /* send it up */
 831		netdev->stats.rx_packets++;
 832		netdev->stats.rx_bytes += length;
 833		frames_processed++;
 834	}
 835	replenish_pools(adapter);
 836
 837	if (frames_processed < budget) {
 838		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 839		napi_complete(napi);
 840		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
 841		    napi_reschedule(napi)) {
 842			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 843			goto restart_poll;
 844		}
 845	}
 846	return frames_processed;
 847}
 848
 849#ifdef CONFIG_NET_POLL_CONTROLLER
 850static void ibmvnic_netpoll_controller(struct net_device *dev)
 851{
 852	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 853	int i;
 854
 855	replenish_pools(netdev_priv(dev));
 856	for (i = 0; i < adapter->req_rx_queues; i++)
 857		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
 858				     adapter->rx_scrq[i]);
 859}
 860#endif
 861
 862static const struct net_device_ops ibmvnic_netdev_ops = {
 863	.ndo_open		= ibmvnic_open,
 864	.ndo_stop		= ibmvnic_close,
 865	.ndo_start_xmit		= ibmvnic_xmit,
 866	.ndo_set_rx_mode	= ibmvnic_set_multi,
 867	.ndo_set_mac_address	= ibmvnic_set_mac,
 868	.ndo_validate_addr	= eth_validate_addr,
 869	.ndo_change_mtu		= ibmvnic_change_mtu,
 870	.ndo_tx_timeout		= ibmvnic_tx_timeout,
 871#ifdef CONFIG_NET_POLL_CONTROLLER
 872	.ndo_poll_controller	= ibmvnic_netpoll_controller,
 873#endif
 874};
 875
 876/* ethtool functions */
 877
 878static int ibmvnic_get_settings(struct net_device *netdev,
 879				struct ethtool_cmd *cmd)
 880{
 881	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
 882			  SUPPORTED_FIBRE);
 883	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
 884			    ADVERTISED_FIBRE);
 885	ethtool_cmd_speed_set(cmd, SPEED_1000);
 886	cmd->duplex = DUPLEX_FULL;
 887	cmd->port = PORT_FIBRE;
 888	cmd->phy_address = 0;
 889	cmd->transceiver = XCVR_INTERNAL;
 890	cmd->autoneg = AUTONEG_ENABLE;
 891	cmd->maxtxpkt = 0;
 892	cmd->maxrxpkt = 1;
 893	return 0;
 894}
 895
 896static void ibmvnic_get_drvinfo(struct net_device *dev,
 897				struct ethtool_drvinfo *info)
 898{
 899	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
 900	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
 901}
 902
 903static u32 ibmvnic_get_msglevel(struct net_device *netdev)
 904{
 905	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 906
 907	return adapter->msg_enable;
 908}
 909
 910static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
 911{
 912	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 913
 914	adapter->msg_enable = data;
 915}
 916
 917static u32 ibmvnic_get_link(struct net_device *netdev)
 918{
 919	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 920
 921	/* Don't need to send a query because we request a logical link up at
 922	 * init and then we wait for link state indications
 923	 */
 924	return adapter->logical_link_state;
 925}
 926
 927static void ibmvnic_get_ringparam(struct net_device *netdev,
 928				  struct ethtool_ringparam *ring)
 929{
 930	ring->rx_max_pending = 0;
 931	ring->tx_max_pending = 0;
 932	ring->rx_mini_max_pending = 0;
 933	ring->rx_jumbo_max_pending = 0;
 934	ring->rx_pending = 0;
 935	ring->tx_pending = 0;
 936	ring->rx_mini_pending = 0;
 937	ring->rx_jumbo_pending = 0;
 938}
 939
 940static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 941{
 942	int i;
 943
 944	if (stringset != ETH_SS_STATS)
 945		return;
 946
 947	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
 948		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 949}
 950
 951static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
 952{
 953	switch (sset) {
 954	case ETH_SS_STATS:
 955		return ARRAY_SIZE(ibmvnic_stats);
 956	default:
 957		return -EOPNOTSUPP;
 958	}
 959}
 960
 961static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 962				      struct ethtool_stats *stats, u64 *data)
 963{
 964	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 965	union ibmvnic_crq crq;
 966	int i;
 967
 968	memset(&crq, 0, sizeof(crq));
 969	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
 970	crq.request_statistics.cmd = REQUEST_STATISTICS;
 971	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
 972	crq.request_statistics.len =
 973	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
  974	init_completion(&adapter->stats_done);
  975	ibmvnic_send_crq(adapter, &crq);
  976
  977	/* Wait for data to be written */
  978	wait_for_completion(&adapter->stats_done);
 979
 980	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 981		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
 982}
 983
 984static const struct ethtool_ops ibmvnic_ethtool_ops = {
 985	.get_settings		= ibmvnic_get_settings,
 986	.get_drvinfo		= ibmvnic_get_drvinfo,
 987	.get_msglevel		= ibmvnic_get_msglevel,
 988	.set_msglevel		= ibmvnic_set_msglevel,
 989	.get_link		= ibmvnic_get_link,
 990	.get_ringparam		= ibmvnic_get_ringparam,
 991	.get_strings            = ibmvnic_get_strings,
 992	.get_sset_count         = ibmvnic_get_sset_count,
 993	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
 994};
 995
 996/* Routines for managing CRQs/sCRQs  */
 997
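     /* Reading aid (summary, not new behavior): H_REG_SUB_CRQ returns a
      * queue number plus a hardware interrupt source; irq_create_mapping()
      * turns that source into a Linux IRQ for request_irq(). Teardown
      * mirrors this: free_irq(), then H_FREE_SUB_CRQ (retried while the
      * hypervisor reports busy), then DMA unmap and freeing the queue pages.
      */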
 998static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 999				  struct ibmvnic_sub_crq_queue *scrq)
1000{
1001	struct device *dev = &adapter->vdev->dev;
1002	long rc;
1003
1004	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1005
1006	/* Close the sub-crqs */
1007	do {
1008		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1009					adapter->vdev->unit_address,
1010					scrq->crq_num);
1011	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1012
1013	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1014			 DMA_BIDIRECTIONAL);
1015	free_pages((unsigned long)scrq->msgs, 2);
1016	kfree(scrq);
1017}
1018
1019static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1020							*adapter)
1021{
1022	struct device *dev = &adapter->vdev->dev;
1023	struct ibmvnic_sub_crq_queue *scrq;
1024	int rc;
1025
1026	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1027	if (!scrq)
1028		return NULL;
1029
 1030	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
 1031	if (!scrq->msgs) {
 1032		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
 1033		goto zero_page_failed;
 1034	}
 1035	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1036
1037	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1038					 DMA_BIDIRECTIONAL);
1039	if (dma_mapping_error(dev, scrq->msg_token)) {
1040		dev_warn(dev, "Couldn't map crq queue messages page\n");
1041		goto map_failed;
1042	}
1043
1044	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1045			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1046
1047	if (rc == H_RESOURCE)
1048		rc = ibmvnic_reset_crq(adapter);
1049
1050	if (rc == H_CLOSED) {
1051		dev_warn(dev, "Partner adapter not ready, waiting.\n");
1052	} else if (rc) {
1053		dev_warn(dev, "Error %d registering sub-crq\n", rc);
1054		goto reg_failed;
1055	}
1056
1057	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1058	if (scrq->irq == NO_IRQ) {
1059		dev_err(dev, "Error mapping irq\n");
1060		goto map_irq_failed;
1061	}
1062
1063	scrq->adapter = adapter;
1064	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1065	scrq->cur = 0;
1066	scrq->rx_skb_top = NULL;
1067	spin_lock_init(&scrq->lock);
1068
1069	netdev_dbg(adapter->netdev,
1070		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1071		   scrq->crq_num, scrq->hw_irq, scrq->irq);
1072
1073	return scrq;
1074
1075map_irq_failed:
1076	do {
1077		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1078					adapter->vdev->unit_address,
1079					scrq->crq_num);
1080	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1081reg_failed:
1082	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1083			 DMA_BIDIRECTIONAL);
1084map_failed:
1085	free_pages((unsigned long)scrq->msgs, 2);
1086zero_page_failed:
1087	kfree(scrq);
1088
1089	return NULL;
1090}
1091
1092static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1093{
1094	int i;
1095
1096	if (adapter->tx_scrq) {
1097		for (i = 0; i < adapter->req_tx_queues; i++)
1098			if (adapter->tx_scrq[i]) {
1099				free_irq(adapter->tx_scrq[i]->irq,
1100					 adapter->tx_scrq[i]);
1101				release_sub_crq_queue(adapter,
1102						      adapter->tx_scrq[i]);
1103			}
1104		adapter->tx_scrq = NULL;
1105	}
1106
1107	if (adapter->rx_scrq) {
1108		for (i = 0; i < adapter->req_rx_queues; i++)
1109			if (adapter->rx_scrq[i]) {
1110				free_irq(adapter->rx_scrq[i]->irq,
1111					 adapter->rx_scrq[i]);
1112				release_sub_crq_queue(adapter,
1113						      adapter->rx_scrq[i]);
1114			}
1115		adapter->rx_scrq = NULL;
1116	}
1117
1118	adapter->requested_caps = 0;
1119}
1120
1121static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1122			    struct ibmvnic_sub_crq_queue *scrq)
1123{
1124	struct device *dev = &adapter->vdev->dev;
1125	unsigned long rc;
1126
1127	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1128				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1129	if (rc)
1130		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1131			scrq->hw_irq, rc);
1132	return rc;
1133}
1134
1135static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1136			   struct ibmvnic_sub_crq_queue *scrq)
1137{
1138	struct device *dev = &adapter->vdev->dev;
1139	unsigned long rc;
1140
1141	if (scrq->hw_irq > 0x100000000ULL) {
1142		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1143		return 1;
1144	}
1145
1146	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1147				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1148	if (rc)
1149		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1150			scrq->hw_irq, rc);
1151	return rc;
1152}
1153
1154static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1155			       struct ibmvnic_sub_crq_queue *scrq)
1156{
1157	struct device *dev = &adapter->vdev->dev;
1158	struct ibmvnic_tx_buff *txbuff;
1159	union sub_crq *next;
1160	int index;
1161	int i, j;
1162
1163restart_loop:
1164	while (pending_scrq(adapter, scrq)) {
1165		unsigned int pool = scrq->pool_index;
1166
1167		next = ibmvnic_next_scrq(adapter, scrq);
1168		for (i = 0; i < next->tx_comp.num_comps; i++) {
1169			if (next->tx_comp.rcs[i]) {
1170				dev_err(dev, "tx error %x\n",
1171					next->tx_comp.rcs[i]);
1172				continue;
1173			}
1174			index = be32_to_cpu(next->tx_comp.correlators[i]);
1175			txbuff = &adapter->tx_pool[pool].tx_buff[index];
1176
1177			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1178				if (!txbuff->data_dma[j])
1179					continue;
1180
1181				txbuff->data_dma[j] = 0;
1182				txbuff->used_bounce = false;
1183			}
1184
1185			if (txbuff->last_frag)
1186				dev_kfree_skb_any(txbuff->skb);
1187
1188			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1189						     producer_index] = index;
1190			adapter->tx_pool[pool].producer_index =
1191			    (adapter->tx_pool[pool].producer_index + 1) %
1192			    adapter->max_tx_entries_per_subcrq;
1193		}
 1194		/* remove tx_comp scrq */
1195		next->tx_comp.first = 0;
1196	}
1197
1198	enable_scrq_irq(adapter, scrq);
1199
1200	if (pending_scrq(adapter, scrq)) {
1201		disable_scrq_irq(adapter, scrq);
1202		goto restart_loop;
1203	}
1204
1205	return 0;
1206}
1207
1208static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1209{
1210	struct ibmvnic_sub_crq_queue *scrq = instance;
1211	struct ibmvnic_adapter *adapter = scrq->adapter;
1212
1213	disable_scrq_irq(adapter, scrq);
1214	ibmvnic_complete_tx(adapter, scrq);
1215
1216	return IRQ_HANDLED;
1217}
1218
1219static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1220{
1221	struct ibmvnic_sub_crq_queue *scrq = instance;
1222	struct ibmvnic_adapter *adapter = scrq->adapter;
1223
1224	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1225		disable_scrq_irq(adapter, scrq);
1226		__napi_schedule(&adapter->napi[scrq->scrq_num]);
1227	}
1228
1229	return IRQ_HANDLED;
1230}
1231
1232static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1233{
1234	struct device *dev = &adapter->vdev->dev;
1235	struct ibmvnic_sub_crq_queue **allqueues;
1236	int registered_queues = 0;
1237	union ibmvnic_crq crq;
1238	int total_queues;
1239	int more = 0;
1240	int i, j;
1241	int rc;
1242
1243	if (!retry) {
 1244		/* Sub-CRQ entries are 32 bytes long */
1245		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
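     		/* e.g. with 4 KiB pages: (4 * 4096) / 32 = 512 entries */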
1246
1247		if (adapter->min_tx_entries_per_subcrq > entries_page ||
1248		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
1249			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1250			goto allqueues_failed;
1251		}
1252
1253		/* Get the minimum between the queried max and the entries
1254		 * that fit in our PAGE_SIZE
1255		 */
1256		adapter->req_tx_entries_per_subcrq =
1257		    adapter->max_tx_entries_per_subcrq > entries_page ?
1258		    entries_page : adapter->max_tx_entries_per_subcrq;
1259		adapter->req_rx_add_entries_per_subcrq =
1260		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
1261		    entries_page : adapter->max_rx_add_entries_per_subcrq;
1262
 1263		/* Start from the minimum number of queues supported by firmware */
1264		adapter->req_tx_queues = adapter->min_tx_queues;
1265		adapter->req_rx_queues = adapter->min_rx_queues;
1266		adapter->req_rx_add_queues = adapter->min_rx_add_queues;
1267
1268		adapter->req_mtu = adapter->max_mtu;
1269	}
1270
1271	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1272
1273	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1274	if (!allqueues)
1275		goto allqueues_failed;
1276
1277	for (i = 0; i < total_queues; i++) {
1278		allqueues[i] = init_sub_crq_queue(adapter);
1279		if (!allqueues[i]) {
1280			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1281			break;
1282		}
1283		registered_queues++;
1284	}
1285
1286	/* Make sure we were able to register the minimum number of queues */
1287	if (registered_queues <
1288	    adapter->min_tx_queues + adapter->min_rx_queues) {
 1289		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1290		goto tx_failed;
1291	}
1292
 1293	/* Distribute the shortfall of allocated queues between rx and tx */
1294	for (i = 0; i < total_queues - registered_queues + more ; i++) {
1295		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1296		switch (i % 3) {
1297		case 0:
1298			if (adapter->req_rx_queues > adapter->min_rx_queues)
1299				adapter->req_rx_queues--;
1300			else
1301				more++;
1302			break;
1303		case 1:
1304			if (adapter->req_tx_queues > adapter->min_tx_queues)
1305				adapter->req_tx_queues--;
1306			else
1307				more++;
1308			break;
1309		}
1310	}
1311
1312	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1313				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1314	if (!adapter->tx_scrq)
1315		goto tx_failed;
1316
1317	for (i = 0; i < adapter->req_tx_queues; i++) {
1318		adapter->tx_scrq[i] = allqueues[i];
1319		adapter->tx_scrq[i]->pool_index = i;
1320		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
1321				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
1322		if (rc) {
1323			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1324				adapter->tx_scrq[i]->irq, rc);
1325			goto req_tx_irq_failed;
1326		}
1327	}
1328
1329	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1330				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1331	if (!adapter->rx_scrq)
1332		goto rx_failed;
1333
1334	for (i = 0; i < adapter->req_rx_queues; i++) {
1335		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1336		adapter->rx_scrq[i]->scrq_num = i;
1337		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
1338				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
1339		if (rc) {
1340			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1341				adapter->rx_scrq[i]->irq, rc);
1342			goto req_rx_irq_failed;
1343		}
1344	}
1345
1346	memset(&crq, 0, sizeof(crq));
1347	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1348	crq.request_capability.cmd = REQUEST_CAPABILITY;
1349
1350	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1351	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1352	ibmvnic_send_crq(adapter, &crq);
1353
1354	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1355	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1356	ibmvnic_send_crq(adapter, &crq);
1357
1358	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1359	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1360	ibmvnic_send_crq(adapter, &crq);
1361
1362	crq.request_capability.capability =
1363	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1364	crq.request_capability.number =
1365	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1366	ibmvnic_send_crq(adapter, &crq);
1367
1368	crq.request_capability.capability =
1369	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1370	crq.request_capability.number =
1371	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1372	ibmvnic_send_crq(adapter, &crq);
1373
1374	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1375	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1376	ibmvnic_send_crq(adapter, &crq);
1377
1378	if (adapter->netdev->flags & IFF_PROMISC) {
1379		if (adapter->promisc_supported) {
1380			crq.request_capability.capability =
1381			    cpu_to_be16(PROMISC_REQUESTED);
1382			crq.request_capability.number = cpu_to_be64(1);
1383			ibmvnic_send_crq(adapter, &crq);
1384		}
1385	} else {
1386		crq.request_capability.capability =
1387		    cpu_to_be16(PROMISC_REQUESTED);
1388		crq.request_capability.number = cpu_to_be64(0);
1389		ibmvnic_send_crq(adapter, &crq);
1390	}
1391
1392	kfree(allqueues);
1393
1394	return;
1395
1396req_rx_irq_failed:
1397	for (j = 0; j < i; j++)
1398		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1399	i = adapter->req_tx_queues;
1400req_tx_irq_failed:
1401	for (j = 0; j < i; j++)
1402		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1403	kfree(adapter->rx_scrq);
1404	adapter->rx_scrq = NULL;
1405rx_failed:
1406	kfree(adapter->tx_scrq);
1407	adapter->tx_scrq = NULL;
1408tx_failed:
1409	for (i = 0; i < registered_queues; i++)
1410		release_sub_crq_queue(adapter, allqueues[i]);
1411	kfree(allqueues);
1412allqueues_failed:
1413	ibmvnic_remove(adapter->vdev);
1414}
1415
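     /* Reading aid: the queue walkers below key off the valid bit in an
      * entry's first byte. The producer sets IBMVNIC_CRQ_CMD_RSP when an
      * entry is ready, and consumers zero the first byte after handling an
      * entry (see ibmvnic_poll and ibmvnic_complete_tx), which keeps the
      * circular buffers self-synchronizing.
      */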
1416static int pending_scrq(struct ibmvnic_adapter *adapter,
1417			struct ibmvnic_sub_crq_queue *scrq)
1418{
1419	union sub_crq *entry = &scrq->msgs[scrq->cur];
1420
1421	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1422		return 1;
1423	else
1424		return 0;
1425}
1426
1427static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1428					struct ibmvnic_sub_crq_queue *scrq)
1429{
1430	union sub_crq *entry;
1431	unsigned long flags;
1432
1433	spin_lock_irqsave(&scrq->lock, flags);
1434	entry = &scrq->msgs[scrq->cur];
1435	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1436		if (++scrq->cur == scrq->size)
1437			scrq->cur = 0;
1438	} else {
1439		entry = NULL;
1440	}
1441	spin_unlock_irqrestore(&scrq->lock, flags);
1442
1443	return entry;
1444}
1445
1446static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1447{
1448	struct ibmvnic_crq_queue *queue = &adapter->crq;
1449	union ibmvnic_crq *crq;
1450
1451	crq = &queue->msgs[queue->cur];
1452	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1453		if (++queue->cur == queue->size)
1454			queue->cur = 0;
1455	} else {
1456		crq = NULL;
1457	}
1458
1459	return crq;
1460}
1461
1462static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1463		       union sub_crq *sub_crq)
1464{
1465	unsigned int ua = adapter->vdev->unit_address;
1466	struct device *dev = &adapter->vdev->dev;
1467	u64 *u64_crq = (u64 *)sub_crq;
1468	int rc;
1469
1470	netdev_dbg(adapter->netdev,
1471		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1472		   (unsigned long int)cpu_to_be64(remote_handle),
1473		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1474		   (unsigned long int)cpu_to_be64(u64_crq[1]),
1475		   (unsigned long int)cpu_to_be64(u64_crq[2]),
1476		   (unsigned long int)cpu_to_be64(u64_crq[3]));
1477
1478	/* Make sure the hypervisor sees the complete request */
1479	mb();
1480
1481	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1482				cpu_to_be64(remote_handle),
1483				cpu_to_be64(u64_crq[0]),
1484				cpu_to_be64(u64_crq[1]),
1485				cpu_to_be64(u64_crq[2]),
1486				cpu_to_be64(u64_crq[3]));
1487
1488	if (rc) {
1489		if (rc == H_CLOSED)
1490			dev_warn(dev, "CRQ Queue closed\n");
1491		dev_err(dev, "Send error (rc=%d)\n", rc);
1492	}
1493
1494	return rc;
1495}
1496
1497static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1498			    union ibmvnic_crq *crq)
1499{
1500	unsigned int ua = adapter->vdev->unit_address;
1501	struct device *dev = &adapter->vdev->dev;
1502	u64 *u64_crq = (u64 *)crq;
1503	int rc;
1504
1505	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1506		   (unsigned long int)cpu_to_be64(u64_crq[0]),
1507		   (unsigned long int)cpu_to_be64(u64_crq[1]));
1508
1509	/* Make sure the hypervisor sees the complete request */
1510	mb();
1511
1512	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1513				cpu_to_be64(u64_crq[0]),
1514				cpu_to_be64(u64_crq[1]));
1515
1516	if (rc) {
1517		if (rc == H_CLOSED)
1518			dev_warn(dev, "CRQ Queue closed\n");
1519		dev_warn(dev, "Send error (rc=%d)\n", rc);
1520	}
1521
1522	return rc;
1523}
1524
1525static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1526{
1527	union ibmvnic_crq crq;
1528
1529	memset(&crq, 0, sizeof(crq));
1530	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1531	crq.generic.cmd = IBMVNIC_CRQ_INIT;
1532	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1533
1534	return ibmvnic_send_crq(adapter, &crq);
1535}
1536
1537static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1538{
1539	union ibmvnic_crq crq;
1540
1541	memset(&crq, 0, sizeof(crq));
1542	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1543	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1544	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1545
1546	return ibmvnic_send_crq(adapter, &crq);
1547}
1548
1549static int send_version_xchg(struct ibmvnic_adapter *adapter)
1550{
1551	union ibmvnic_crq crq;
1552
1553	memset(&crq, 0, sizeof(crq));
1554	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1555	crq.version_exchange.cmd = VERSION_EXCHANGE;
1556	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1557
1558	return ibmvnic_send_crq(adapter, &crq);
1559}
1560
1561static void send_login(struct ibmvnic_adapter *adapter)
1562{
1563	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1564	struct ibmvnic_login_buffer *login_buffer;
1565	struct ibmvnic_inflight_cmd *inflight_cmd;
1566	struct device *dev = &adapter->vdev->dev;
1567	dma_addr_t rsp_buffer_token;
1568	dma_addr_t buffer_token;
1569	size_t rsp_buffer_size;
1570	union ibmvnic_crq crq;
1571	unsigned long flags;
1572	size_t buffer_size;
1573	__be64 *tx_list_p;
1574	__be64 *rx_list_p;
1575	int i;
1576
1577	buffer_size =
1578	    sizeof(struct ibmvnic_login_buffer) +
1579	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1580
1581	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1582	if (!login_buffer)
1583		goto buf_alloc_failed;
1584
1585	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1586				      DMA_TO_DEVICE);
1587	if (dma_mapping_error(dev, buffer_token)) {
1588		dev_err(dev, "Couldn't map login buffer\n");
1589		goto buf_map_failed;
1590	}
1591
1592	rsp_buffer_size =
1593	    sizeof(struct ibmvnic_login_rsp_buffer) +
1594	    sizeof(u64) * (adapter->req_tx_queues +
1595			   adapter->req_rx_queues *
1596			   adapter->req_rx_add_queues +
1597			   adapter->req_rx_add_queues) +
1598	    sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
1599
1600	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1601	if (!login_rsp_buffer)
1602		goto buf_rsp_alloc_failed;
1603
1604	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1605					  rsp_buffer_size, DMA_FROM_DEVICE);
1606	if (dma_mapping_error(dev, rsp_buffer_token)) {
1607		dev_err(dev, "Couldn't map login rsp buffer\n");
1608		goto buf_rsp_map_failed;
1609	}
1610	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1611	if (!inflight_cmd) {
1612		dev_err(dev, "Couldn't allocate inflight_cmd\n");
1613		goto inflight_alloc_failed;
1614	}
1615	adapter->login_buf = login_buffer;
1616	adapter->login_buf_token = buffer_token;
1617	adapter->login_buf_sz = buffer_size;
1618	adapter->login_rsp_buf = login_rsp_buffer;
1619	adapter->login_rsp_buf_token = rsp_buffer_token;
1620	adapter->login_rsp_buf_sz = rsp_buffer_size;
1621
1622	login_buffer->len = cpu_to_be32(buffer_size);
1623	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1624	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1625	login_buffer->off_txcomp_subcrqs =
1626	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1627	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1628	login_buffer->off_rxcomp_subcrqs =
1629	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1630			sizeof(u64) * adapter->req_tx_queues);
1631	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1632	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1633
1634	tx_list_p = (__be64 *)((char *)login_buffer +
1635				      sizeof(struct ibmvnic_login_buffer));
1636	rx_list_p = (__be64 *)((char *)login_buffer +
1637				      sizeof(struct ibmvnic_login_buffer) +
1638				      sizeof(u64) * adapter->req_tx_queues);
1639
1640	for (i = 0; i < adapter->req_tx_queues; i++) {
1641		if (adapter->tx_scrq[i]) {
1642			tx_list_p[i] =
1643			    cpu_to_be64(adapter->tx_scrq[i]->crq_num);
1644		}
1645	}
1646
1647	for (i = 0; i < adapter->req_rx_queues; i++) {
1648		if (adapter->rx_scrq[i]) {
1649			rx_list_p[i] =
1650			    cpu_to_be64(adapter->rx_scrq[i]->crq_num);
1651		}
1652	}
1653
1654	netdev_dbg(adapter->netdev, "Login Buffer:\n");
1655	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1656		netdev_dbg(adapter->netdev, "%016lx\n",
1657			   ((unsigned long int *)(adapter->login_buf))[i]);
1658	}
1659
1660	memset(&crq, 0, sizeof(crq));
1661	crq.login.first = IBMVNIC_CRQ_CMD;
1662	crq.login.cmd = LOGIN;
1663	crq.login.ioba = cpu_to_be32(buffer_token);
1664	crq.login.len = cpu_to_be32(buffer_size);
1665
1666	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1667
1668	spin_lock_irqsave(&adapter->inflight_lock, flags);
1669	list_add_tail(&inflight_cmd->list, &adapter->inflight);
1670	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1671
1672	ibmvnic_send_crq(adapter, &crq);
1673
1674	return;
1675
1676inflight_alloc_failed:
1677	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1678			 DMA_FROM_DEVICE);
1679buf_rsp_map_failed:
1680	kfree(login_rsp_buffer);
1681buf_rsp_alloc_failed:
1682	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1683buf_map_failed:
1684	kfree(login_buffer);
1685buf_alloc_failed:
1686	return;
1687}
1688
1689static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1690			     u32 len, u8 map_id)
1691{
1692	union ibmvnic_crq crq;
1693
1694	memset(&crq, 0, sizeof(crq));
1695	crq.request_map.first = IBMVNIC_CRQ_CMD;
1696	crq.request_map.cmd = REQUEST_MAP;
1697	crq.request_map.map_id = map_id;
1698	crq.request_map.ioba = cpu_to_be32(addr);
1699	crq.request_map.len = cpu_to_be32(len);
1700	ibmvnic_send_crq(adapter, &crq);
1701}
1702
1703static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1704{
1705	union ibmvnic_crq crq;
1706
1707	memset(&crq, 0, sizeof(crq));
1708	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1709	crq.request_unmap.cmd = REQUEST_UNMAP;
1710	crq.request_unmap.map_id = map_id;
1711	ibmvnic_send_crq(adapter, &crq);
1712}
1713
1714static void send_map_query(struct ibmvnic_adapter *adapter)
1715{
1716	union ibmvnic_crq crq;
1717
1718	memset(&crq, 0, sizeof(crq));
1719	crq.query_map.first = IBMVNIC_CRQ_CMD;
1720	crq.query_map.cmd = QUERY_MAP;
1721	ibmvnic_send_crq(adapter, &crq);
1722}
1723
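/* The capability handshake is reference-counted: each query sent below
 * bumps running_cap_queries and each QUERY_CAPABILITY_RSP decrements it,
 * so handle_query_cap_rsp() can complete init_done once the counter
 * drains back to zero.
 */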
1724/* Send a series of CRQs requesting various capabilities of the VNIC server */
1725static void send_cap_queries(struct ibmvnic_adapter *adapter)
1726{
1727	union ibmvnic_crq crq;
1728
1729	atomic_set(&adapter->running_cap_queries, 0);
1730	memset(&crq, 0, sizeof(crq));
1731	crq.query_capability.first = IBMVNIC_CRQ_CMD;
1732	crq.query_capability.cmd = QUERY_CAPABILITY;
1733
1734	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1735	atomic_inc(&adapter->running_cap_queries);
1736	ibmvnic_send_crq(adapter, &crq);
1737
1738	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1739	atomic_inc(&adapter->running_cap_queries);
1740	ibmvnic_send_crq(adapter, &crq);
1741
1742	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1743	atomic_inc(&adapter->running_cap_queries);
1744	ibmvnic_send_crq(adapter, &crq);
1745
1746	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1747	atomic_inc(&adapter->running_cap_queries);
1748	ibmvnic_send_crq(adapter, &crq);
1749
1750	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1751	atomic_inc(&adapter->running_cap_queries);
1752	ibmvnic_send_crq(adapter, &crq);
1753
1754	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1755	atomic_inc(&adapter->running_cap_queries);
1756	ibmvnic_send_crq(adapter, &crq);
1757
1758	crq.query_capability.capability =
1759	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1760	atomic_inc(&adapter->running_cap_queries);
1761	ibmvnic_send_crq(adapter, &crq);
1762
1763	crq.query_capability.capability =
1764	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1765	atomic_inc(&adapter->running_cap_queries);
1766	ibmvnic_send_crq(adapter, &crq);
1767
1768	crq.query_capability.capability =
1769	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
1770	atomic_inc(&adapter->running_cap_queries);
1771	ibmvnic_send_crq(adapter, &crq);
1772
1773	crq.query_capability.capability =
1774	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
1775	atomic_inc(&adapter->running_cap_queries);
1776	ibmvnic_send_crq(adapter, &crq);
1777
1778	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
1779	atomic_inc(&adapter->running_cap_queries);
1780	ibmvnic_send_crq(adapter, &crq);
1781
1782	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
1783	atomic_inc(&adapter->running_cap_queries);
1784	ibmvnic_send_crq(adapter, &crq);
1785
1786	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
1787	atomic_inc(&adapter->running_cap_queries);
1788	ibmvnic_send_crq(adapter, &crq);
1789
1790	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
1791	atomic_inc(&adapter->running_cap_queries);
1792	ibmvnic_send_crq(adapter, &crq);
1793
1794	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
1795	atomic_inc(&adapter->running_cap_queries);
1796	ibmvnic_send_crq(adapter, &crq);
1797
1798	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
1799	atomic_inc(&adapter->running_cap_queries);
1800	ibmvnic_send_crq(adapter, &crq);
1801
1802	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
1803	atomic_inc(&adapter->running_cap_queries);
1804	ibmvnic_send_crq(adapter, &crq);
1805
1806	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
1807	atomic_inc(&adapter->running_cap_queries);
1808	ibmvnic_send_crq(adapter, &crq);
1809
1810	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
1811	atomic_inc(&adapter->running_cap_queries);
1812	ibmvnic_send_crq(adapter, &crq);
1813
1814	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
1815	atomic_inc(&adapter->running_cap_queries);
1816	ibmvnic_send_crq(adapter, &crq);
1817
1818	crq.query_capability.capability =
1819			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
1820	atomic_inc(&adapter->running_cap_queries);
1821	ibmvnic_send_crq(adapter, &crq);
1822
1823	crq.query_capability.capability =
1824			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
1825	atomic_inc(&adapter->running_cap_queries);
1826	ibmvnic_send_crq(adapter, &crq);
1827
1828	crq.query_capability.capability =
1829			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
1830	atomic_inc(&adapter->running_cap_queries);
1831	ibmvnic_send_crq(adapter, &crq);
1832
1833	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
1834	atomic_inc(&adapter->running_cap_queries);
1835	ibmvnic_send_crq(adapter, &crq);
1836}
1837
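/* QUERY_IP_OFFLOAD_RSP handling: the server has DMA'd its offload
 * capabilities into ip_offload_buf.  Parse them, advertise the matching
 * NETIF_F_* features, and reply with a CONTROL_IP_OFFLOAD request that
 * enables only the checksum offloads; large send/receive is left
 * disabled for now, per the comment below.
 */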
1838static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
1839{
1840	struct device *dev = &adapter->vdev->dev;
1841	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
1842	union ibmvnic_crq crq;
1843	int i;
1844
1845	dma_unmap_single(dev, adapter->ip_offload_tok,
1846			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
1847
1848	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
1849	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
1850		netdev_dbg(adapter->netdev, "%016lx\n",
1851			   ((unsigned long int *)(buf))[i]);
1852
1853	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
1854	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
1855	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
1856		   buf->tcp_ipv4_chksum);
1857	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
1858		   buf->tcp_ipv6_chksum);
1859	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
1860		   buf->udp_ipv4_chksum);
1861	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
1862		   buf->udp_ipv6_chksum);
1863	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
1864		   buf->large_tx_ipv4);
1865	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
1866		   buf->large_tx_ipv6);
1867	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
1868		   buf->large_rx_ipv4);
1869	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
1870		   buf->large_rx_ipv6);
1871	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
1872		   buf->max_ipv4_header_size);
1873	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
1874		   buf->max_ipv6_header_size);
1875	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
1876		   buf->max_tcp_header_size);
1877	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
1878		   buf->max_udp_header_size);
1879	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
1880		   buf->max_large_tx_size);
1881	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
1882		   buf->max_large_rx_size);
1883	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
1884		   buf->ipv6_extension_header);
1885	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
1886		   buf->tcp_pseudosum_req);
1887	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
1888		   buf->num_ipv6_ext_headers);
1889	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
1890		   buf->off_ipv6_ext_headers);
1891
1892	adapter->ip_offload_ctrl_tok =
1893	    dma_map_single(dev, &adapter->ip_offload_ctrl,
1894			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
1895
1896	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
1897		dev_err(dev, "Couldn't map ip offload control buffer\n");
1898		return;
1899	}
1900
1901	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
1902	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
1903	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
1904	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
1905	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
1906
1907	/* large_tx/rx disabled for now, additional features needed */
1908	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
1909	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
1910	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
1911	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
1912
1913	adapter->netdev->features = NETIF_F_GSO;
1914
1915	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
1916		adapter->netdev->features |= NETIF_F_IP_CSUM;
1917
1918	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
1919		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
1920
1921	memset(&crq, 0, sizeof(crq));
1922	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
1923	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
1924	crq.control_ip_offload.len =
1925	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
1926	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
1927	ibmvnic_send_crq(adapter, &crq);
1928}
1929
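/* Firmware error reporting is a three-step exchange: the server raises
 * ERROR_INDICATION, the driver maps a buffer and requests the detail
 * via REQUEST_ERROR_INFO (tracking the buffer on the error list), and
 * the detail arrives back here in REQUEST_ERROR_RSP, keyed by error_id.
 */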
1930static void handle_error_info_rsp(union ibmvnic_crq *crq,
1931				  struct ibmvnic_adapter *adapter)
1932{
1933	struct device *dev = &adapter->vdev->dev;
1934	struct ibmvnic_error_buff *error_buff;
1935	unsigned long flags;
1936	bool found = false;
1937	int i;
1938
1939	if (crq->request_error_rsp.rc.code) {
1940		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
1941			 crq->request_error_rsp.rc.code);
1942		return;
1943	}
1944
1945	spin_lock_irqsave(&adapter->error_list_lock, flags);
1946	list_for_each_entry(error_buff, &adapter->errors, list)
1947		if (error_buff->error_id == crq->request_error_rsp.error_id) {
1948			found = true;
1949			list_del(&error_buff->list);
1950			break;
1951		}
1952	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
1953
1954	if (!found) {
1955		dev_err(dev, "Couldn't find error id %x\n",
1956			crq->request_error_rsp.error_id);
1957		return;
1958	}
1959
1960	dev_err(dev, "Detailed info for error id %x:",
1961		crq->request_error_rsp.error_id);
1962
1963	for (i = 0; i < error_buff->len; i++) {
1964		pr_cont("%02x", (int)error_buff->buff[i]);
1965		if (i % 8 == 7)
1966			pr_cont(" ");
1967	}
1968	pr_cont("\n");
1969
1970	dma_unmap_single(dev, error_buff->dma, error_buff->len,
1971			 DMA_FROM_DEVICE);
1972	kfree(error_buff->buff);
1973	kfree(error_buff);
1974}
1975
1976static void handle_dump_size_rsp(union ibmvnic_crq *crq,
1977				 struct ibmvnic_adapter *adapter)
1978{
1979	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
1980	struct ibmvnic_inflight_cmd *inflight_cmd;
1981	struct device *dev = &adapter->vdev->dev;
1982	union ibmvnic_crq newcrq;
1983	unsigned long flags;
1984
1985	/* allocate and map buffer */
1986	adapter->dump_data = kmalloc(len, GFP_KERNEL);
1987	if (!adapter->dump_data) {
1988		complete(&adapter->fw_done);
1989		return;
1990	}
1991
1992	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
1993						  DMA_FROM_DEVICE);
1994
1995	if (dma_mapping_error(dev, adapter->dump_data_token)) {
1996		if (!firmware_has_feature(FW_FEATURE_CMO))
1997			dev_err(dev, "Couldn't map dump data\n");
1998		kfree(adapter->dump_data);
1999		complete(&adapter->fw_done);
2000		return;
2001	}
2002
2003	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2004	if (!inflight_cmd) {
2005		dma_unmap_single(dev, adapter->dump_data_token, len,
2006				 DMA_FROM_DEVICE);
2007		kfree(adapter->dump_data);
2008		complete(&adapter->fw_done);
2009		return;
2010	}
2011
2012	memset(&newcrq, 0, sizeof(newcrq));
2013	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2014	newcrq.request_dump.cmd = REQUEST_DUMP;
2015	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2016	newcrq.request_dump.len = cpu_to_be32(len); /* size actually mapped above */
2017
2018	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2019
2020	spin_lock_irqsave(&adapter->inflight_lock, flags);
2021	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2022	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2023
2024	ibmvnic_send_crq(adapter, &newcrq);
2025}
2026
2027static void handle_error_indication(union ibmvnic_crq *crq,
2028				    struct ibmvnic_adapter *adapter)
2029{
2030	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2031	struct ibmvnic_inflight_cmd *inflight_cmd;
2032	struct device *dev = &adapter->vdev->dev;
2033	struct ibmvnic_error_buff *error_buff;
2034	union ibmvnic_crq new_crq;
2035	unsigned long flags;
2036
2037	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2038		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
2039		"FATAL " : "",
2040		crq->error_indication.error_id,
2041		crq->error_indication.error_cause);
2042
2043	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2044	if (!error_buff)
2045		return;
2046
2047	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2048	if (!error_buff->buff) {
2049		kfree(error_buff);
2050		return;
2051	}
2052
2053	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2054					 DMA_FROM_DEVICE);
2055	if (dma_mapping_error(dev, error_buff->dma)) {
2056		if (!firmware_has_feature(FW_FEATURE_CMO))
2057			dev_err(dev, "Couldn't map error buffer\n");
2058		kfree(error_buff->buff);
2059		kfree(error_buff);
2060		return;
2061	}
2062
2063	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2064	if (!inflight_cmd) {
2065		dma_unmap_single(dev, error_buff->dma, detail_len,
2066				 DMA_FROM_DEVICE);
2067		kfree(error_buff->buff);
2068		kfree(error_buff);
2069		return;
2070	}
2071
2072	error_buff->len = detail_len;
2073	error_buff->error_id = crq->error_indication.error_id;
2074
2075	spin_lock_irqsave(&adapter->error_list_lock, flags);
2076	list_add_tail(&error_buff->list, &adapter->errors);
2077	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2078
2079	memset(&new_crq, 0, sizeof(new_crq));
2080	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2081	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2082	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2083	new_crq.request_error_info.len = cpu_to_be32(detail_len);
2084	new_crq.request_error_info.error_id = crq->error_indication.error_id;
2085
2086	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2087
2088	spin_lock_irqsave(&adapter->inflight_lock, flags);
2089	list_add_tail(&inflight_cmd->list, &adapter->inflight);
2090	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2091
2092	ibmvnic_send_crq(adapter, &new_crq);
2093}
2094
2095static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2096				  struct ibmvnic_adapter *adapter)
2097{
2098	struct net_device *netdev = adapter->netdev;
2099	struct device *dev = &adapter->vdev->dev;
2100	long rc;
2101
2102	rc = crq->change_mac_addr_rsp.rc.code;
2103	if (rc) {
2104		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2105		return;
2106	}
2107	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2108	       ETH_ALEN);
2109}
2110
2111static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2112				   struct ibmvnic_adapter *adapter)
2113{
2114	struct device *dev = &adapter->vdev->dev;
2115	u64 *req_value;
2116	char *name;
2117
2118	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2119	case REQ_TX_QUEUES:
2120		req_value = &adapter->req_tx_queues;
2121		name = "tx";
2122		break;
2123	case REQ_RX_QUEUES:
2124		req_value = &adapter->req_rx_queues;
2125		name = "rx";
2126		break;
2127	case REQ_RX_ADD_QUEUES:
2128		req_value = &adapter->req_rx_add_queues;
2129		name = "rx_add";
2130		break;
2131	case REQ_TX_ENTRIES_PER_SUBCRQ:
2132		req_value = &adapter->req_tx_entries_per_subcrq;
2133		name = "tx_entries_per_subcrq";
2134		break;
2135	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2136		req_value = &adapter->req_rx_add_entries_per_subcrq;
2137		name = "rx_add_entries_per_subcrq";
2138		break;
2139	case REQ_MTU:
2140		req_value = &adapter->req_mtu;
2141		name = "mtu";
2142		break;
2143	case PROMISC_REQUESTED:
2144		req_value = &adapter->promisc;
2145		name = "promisc";
2146		break;
2147	default:
2148		dev_err(dev, "Got invalid cap request rsp %d\n",
2149			be16_to_cpu(crq->request_capability.capability));
2150		return;
2151	}
2152
2153	switch (crq->request_capability_rsp.rc.code) {
2154	case SUCCESS:
2155		break;
2156	case PARTIALSUCCESS:
2157		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2158			 *req_value,
2159			 (long int)be64_to_cpu(crq->request_capability_rsp.
2160					       number), name);
2161		release_sub_crqs(adapter);
2162		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
2163		complete(&adapter->init_done);
2164		return;
2165	default:
2166		dev_err(dev, "Error %d in request cap rsp\n",
2167			crq->request_capability_rsp.rc.code);
2168		return;
2169	}
2170
2171	/* Done receiving requested capabilities, query IP offload support */
2172	if (++adapter->requested_caps == 7) {
2173		union ibmvnic_crq newcrq;
2174		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2175		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2176		    &adapter->ip_offload_buf;
2177
2178		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2179							 buf_sz,
2180							 DMA_FROM_DEVICE);
2181
2182		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2183			if (!firmware_has_feature(FW_FEATURE_CMO))
2184				dev_err(dev, "Couldn't map offload buffer\n");
2185			return;
2186		}
2187
2188		memset(&newcrq, 0, sizeof(newcrq));
2189		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2190		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2191		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2192		newcrq.query_ip_offload.ioba =
2193		    cpu_to_be32(adapter->ip_offload_tok);
2194
2195		ibmvnic_send_crq(adapter, &newcrq);
2196	}
2197}
2198
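/* LOGIN_RSP: the response buffer was DMA'd in place by the server.
 * The queue counts echoed back are cross-checked against what was
 * requested; a mismatch means driver and server disagree about the
 * queue layout, which is treated as unrecoverable and the device is
 * removed rather than limped along.
 */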
2199static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2200			    struct ibmvnic_adapter *adapter)
2201{
2202	struct device *dev = &adapter->vdev->dev;
2203	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2204	struct ibmvnic_login_buffer *login = adapter->login_buf;
2205	union ibmvnic_crq crq;
2206	int i;
2207
2208	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2209			 DMA_BIDIRECTIONAL);
2210	dma_unmap_single(dev, adapter->login_rsp_buf_token,
2211			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2212
2213	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2214	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2215		netdev_dbg(adapter->netdev, "%016lx\n",
2216			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2217	}
2218
2219	/* Sanity checks */
2220	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2221	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
2222	     adapter->req_rx_add_queues !=
2223	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2224		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2225		ibmvnic_remove(adapter->vdev);
2226		return -EIO;
2227	}
2228	complete(&adapter->init_done);
2229
2230	memset(&crq, 0, sizeof(crq));
2231	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2232	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2233	ibmvnic_send_crq(adapter, &crq);
2234
2235	return 0;
2236}
2237
2238static void handle_request_map_rsp(union ibmvnic_crq *crq,
2239				   struct ibmvnic_adapter *adapter)
2240{
2241	struct device *dev = &adapter->vdev->dev;
2242	u8 map_id = crq->request_map_rsp.map_id;
2243	int tx_subcrqs;
2244	int rx_subcrqs;
2245	long rc;
2246	int i;
2247
2248	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2249	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2250
2251	rc = crq->request_map_rsp.rc.code;
2252	if (rc) {
2253		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2254		adapter->map_id--;
2255		/* need to find and zero tx/rx_pool map_id */
2256		for (i = 0; i < tx_subcrqs; i++) {
2257			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2258				adapter->tx_pool[i].long_term_buff.map_id = 0;
2259		}
2260		for (i = 0; i < rx_subcrqs; i++) {
2261			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2262				adapter->rx_pool[i].long_term_buff.map_id = 0;
2263		}
2264	}
2265	complete(&adapter->fw_done);
2266}
2267
2268static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2269				     struct ibmvnic_adapter *adapter)
2270{
2271	struct device *dev = &adapter->vdev->dev;
2272	long rc;
2273
2274	rc = crq->request_unmap_rsp.rc.code;
2275	if (rc)
2276		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2277}
2278
2279static void handle_query_map_rsp(union ibmvnic_crq *crq,
2280				 struct ibmvnic_adapter *adapter)
2281{
2282	struct net_device *netdev = adapter->netdev;
2283	struct device *dev = &adapter->vdev->dev;
2284	long rc;
2285
2286	rc = crq->query_map_rsp.rc.code;
2287	if (rc) {
2288		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2289		return;
2290	}
2291	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2292		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2293		   crq->query_map_rsp.free_pages);
2294}
2295
2296static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2297				 struct ibmvnic_adapter *adapter)
2298{
2299	struct net_device *netdev = adapter->netdev;
2300	struct device *dev = &adapter->vdev->dev;
2301	long rc;
2302
2303	atomic_dec(&adapter->running_cap_queries);
2304	netdev_dbg(netdev, "Outstanding queries: %d\n",
2305		   atomic_read(&adapter->running_cap_queries));
2306	rc = crq->query_capability.rc.code;
2307	if (rc) {
2308		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2309		goto out;
2310	}
2311
2312	switch (be16_to_cpu(crq->query_capability.capability)) {
2313	case MIN_TX_QUEUES:
2314		adapter->min_tx_queues =
2315		    be64_to_cpu(crq->query_capability.number);
2316		netdev_dbg(netdev, "min_tx_queues = %lld\n",
2317			   adapter->min_tx_queues);
2318		break;
2319	case MIN_RX_QUEUES:
2320		adapter->min_rx_queues =
2321		    be64_to_cpu(crq->query_capability.number);
2322		netdev_dbg(netdev, "min_rx_queues = %lld\n",
2323			   adapter->min_rx_queues);
2324		break;
2325	case MIN_RX_ADD_QUEUES:
2326		adapter->min_rx_add_queues =
2327		    be64_to_cpu(crq->query_capability.number);
2328		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2329			   adapter->min_rx_add_queues);
2330		break;
2331	case MAX_TX_QUEUES:
2332		adapter->max_tx_queues =
2333		    be64_to_cpu(crq->query_capability.number);
2334		netdev_dbg(netdev, "max_tx_queues = %lld\n",
2335			   adapter->max_tx_queues);
2336		break;
2337	case MAX_RX_QUEUES:
2338		adapter->max_rx_queues =
2339		    be64_to_cpu(crq->query_capability.number);
2340		netdev_dbg(netdev, "max_rx_queues = %lld\n",
2341			   adapter->max_rx_queues);
2342		break;
2343	case MAX_RX_ADD_QUEUES:
2344		adapter->max_rx_add_queues =
2345		    be64_to_cpu(crq->query_capability.number);
2346		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2347			   adapter->max_rx_add_queues);
2348		break;
2349	case MIN_TX_ENTRIES_PER_SUBCRQ:
2350		adapter->min_tx_entries_per_subcrq =
2351		    be64_to_cpu(crq->query_capability.number);
2352		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2353			   adapter->min_tx_entries_per_subcrq);
2354		break;
2355	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2356		adapter->min_rx_add_entries_per_subcrq =
2357		    be64_to_cpu(crq->query_capability.number);
2358		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2359			   adapter->min_rx_add_entries_per_subcrq);
2360		break;
2361	case MAX_TX_ENTRIES_PER_SUBCRQ:
2362		adapter->max_tx_entries_per_subcrq =
2363		    be64_to_cpu(crq->query_capability.number);
2364		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2365			   adapter->max_tx_entries_per_subcrq);
2366		break;
2367	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2368		adapter->max_rx_add_entries_per_subcrq =
2369		    be64_to_cpu(crq->query_capability.number);
2370		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2371			   adapter->max_rx_add_entries_per_subcrq);
2372		break;
2373	case TCP_IP_OFFLOAD:
2374		adapter->tcp_ip_offload =
2375		    be64_to_cpu(crq->query_capability.number);
2376		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2377			   adapter->tcp_ip_offload);
2378		break;
2379	case PROMISC_SUPPORTED:
2380		adapter->promisc_supported =
2381		    be64_to_cpu(crq->query_capability.number);
2382		netdev_dbg(netdev, "promisc_supported = %lld\n",
2383			   adapter->promisc_supported);
2384		break;
2385	case MIN_MTU:
2386		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2387		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2388		break;
2389	case MAX_MTU:
2390		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2391		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2392		break;
2393	case MAX_MULTICAST_FILTERS:
2394		adapter->max_multicast_filters =
2395		    be64_to_cpu(crq->query_capability.number);
2396		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2397			   adapter->max_multicast_filters);
2398		break;
2399	case VLAN_HEADER_INSERTION:
2400		adapter->vlan_header_insertion =
2401		    be64_to_cpu(crq->query_capability.number);
2402		if (adapter->vlan_header_insertion)
2403			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2404		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2405			   adapter->vlan_header_insertion);
2406		break;
2407	case MAX_TX_SG_ENTRIES:
2408		adapter->max_tx_sg_entries =
2409		    be64_to_cpu(crq->query_capability.number);
2410		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2411			   adapter->max_tx_sg_entries);
2412		break;
2413	case RX_SG_SUPPORTED:
2414		adapter->rx_sg_supported =
2415		    be64_to_cpu(crq->query_capability.number);
2416		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2417			   adapter->rx_sg_supported);
2418		break;
2419	case OPT_TX_COMP_SUB_QUEUES:
2420		adapter->opt_tx_comp_sub_queues =
2421		    be64_to_cpu(crq->query_capability.number);
2422		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2423			   adapter->opt_tx_comp_sub_queues);
2424		break;
2425	case OPT_RX_COMP_QUEUES:
2426		adapter->opt_rx_comp_queues =
2427		    be64_to_cpu(crq->query_capability.number);
2428		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2429			   adapter->opt_rx_comp_queues);
2430		break;
2431	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2432		adapter->opt_rx_bufadd_q_per_rx_comp_q =
2433		    be64_to_cpu(crq->query_capability.number);
2434		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2435			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
2436		break;
2437	case OPT_TX_ENTRIES_PER_SUBCRQ:
2438		adapter->opt_tx_entries_per_subcrq =
2439		    be64_to_cpu(crq->query_capability.number);
2440		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2441			   adapter->opt_tx_entries_per_subcrq);
2442		break;
2443	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2444		adapter->opt_rxba_entries_per_subcrq =
2445		    be64_to_cpu(crq->query_capability.number);
2446		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2447			   adapter->opt_rxba_entries_per_subcrq);
2448		break;
2449	case TX_RX_DESC_REQ:
2450		adapter->tx_rx_desc_req = crq->query_capability.number;
2451		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2452			   adapter->tx_rx_desc_req);
2453		break;
2454
2455	default:
2456		netdev_err(netdev, "Got invalid cap rsp %d\n",
2457			   be16_to_cpu(crq->query_capability.capability));
2458	}
2459
2460out:
2461	/* We're done querying the capabilities, initialize sub-crqs */
2462	if (atomic_read(&adapter->running_cap_queries) == 0)
2463		complete(&adapter->init_done);
2464}
2465
2466static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2467				   struct ibmvnic_adapter *adapter)
2468{
2469	u8 correlator = crq->control_ras_rsp.correlator;
2470	struct device *dev = &adapter->vdev->dev;
2471	bool found = false;
2472	int i;
2473
2474	if (crq->control_ras_rsp.rc.code) {
2475		dev_warn(dev, "Control ras failed rc=%d\n",
2476			 crq->control_ras_rsp.rc.code);
2477		return;
2478	}
2479
2480	for (i = 0; i < adapter->ras_comp_num; i++) {
2481		if (adapter->ras_comps[i].correlator == correlator) {
2482			found = true;
2483			break;
2484		}
2485	}
2486
2487	if (!found) {
2488		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2489		return;
2490	}
2491
2492	switch (crq->control_ras_rsp.op) {
2493	case IBMVNIC_TRACE_LEVEL:
2494		adapter->ras_comps[i].trace_level = crq->control_ras.level;
2495		break;
2496	case IBMVNIC_ERROR_LEVEL:
2497		adapter->ras_comps[i].error_check_level =
2498		    crq->control_ras.level;
2499		break;
2500	case IBMVNIC_TRACE_PAUSE:
2501		adapter->ras_comp_int[i].paused = 1;
2502		break;
2503	case IBMVNIC_TRACE_RESUME:
2504		adapter->ras_comp_int[i].paused = 0;
2505		break;
2506	case IBMVNIC_TRACE_ON:
2507		adapter->ras_comps[i].trace_on = 1;
2508		break;
2509	case IBMVNIC_TRACE_OFF:
2510		adapter->ras_comps[i].trace_on = 0;
2511		break;
2512	case IBMVNIC_CHG_TRACE_BUFF_SZ:
2513		/* trace_buff_sz is 3 bytes, stuff it into an int */
2514		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2515		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2516		    crq->control_ras_rsp.trace_buff_sz[0];
2517		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2518		    crq->control_ras_rsp.trace_buff_sz[1];
2519		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2520		    crq->control_ras_rsp.trace_buff_sz[2];
2521		break;
2522	default:
2523		dev_err(dev, "invalid op %d on control_ras_rsp",
2524			crq->control_ras_rsp.op);
2525	}
2526}
2527
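/* The file_operations below implement the per-component RAS controls
 * exposed under debugfs (created in handle_request_ras_comps_rsp()
 * further down): reads report cached component state, and writes are
 * translated into CONTROL_RAS CRQs aimed at the matching correlator.
 */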
2528static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
2529{
2530	file->private_data = inode->i_private;
2531	return 0;
2532}
2533
2534static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2535			  loff_t *ppos)
2536{
2537	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2538	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2539	struct device *dev = &adapter->vdev->dev;
2540	struct ibmvnic_fw_trace_entry *trace;
2541	int num = ras_comp_int->num;
2542	union ibmvnic_crq crq;
2543	dma_addr_t trace_tok;
2544
2545	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2546		return 0;
2547
2548	trace =
2549	    dma_alloc_coherent(dev,
2550			       be32_to_cpu(adapter->ras_comps[num].
2551					   trace_buff_size), &trace_tok,
2552			       GFP_KERNEL);
2553	if (!trace) {
2554		dev_err(dev, "Couldn't alloc trace buffer\n");
2555		return 0;
2556	}
2557
2558	memset(&crq, 0, sizeof(crq));
2559	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2560	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2561	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2562	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2563	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2564	init_completion(&adapter->fw_done);
2565	ibmvnic_send_crq(adapter, &crq);
2566
2567	wait_for_completion(&adapter->fw_done);
2568
2569	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2570		len =
2571		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2572		    *ppos;
2573
2574	copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2575
2576	dma_free_coherent(dev,
2577			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2578			  trace, trace_tok);
2579	*ppos += len;
2580	return len;
2581}
2582
2583static const struct file_operations trace_ops = {
2584	.owner		= THIS_MODULE,
2585	.open		= ibmvnic_fw_comp_open,
2586	.read		= trace_read,
2587};
2588
2589static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2590			   loff_t *ppos)
2591{
2592	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2593	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2594	int num = ras_comp_int->num;
2595	char buff[5]; /*  1 or 0 plus \n and \0 */
2596	int size;
2597
2598	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2599
2600	if (*ppos >= size)
2601		return 0;
2602
2603	copy_to_user(user_buf, buff, size);
2604	*ppos += size;
2605	return size;
2606}
2607
2608static ssize_t paused_write(struct file *file, const char __user *user_buf,
2609			    size_t len, loff_t *ppos)
2610{
2611	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2612	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2613	int num = ras_comp_int->num;
2614	union ibmvnic_crq crq;
2615	unsigned long val = 0;
2616	char buff[9] = {0}; /* decimal max int plus \n and \0 */
2617	if (copy_from_user(buff, user_buf, sizeof(buff) - 1) ||
2618	    kstrtoul(buff, 10, &val))
2619		return -EFAULT;
2620
2621	adapter->ras_comp_int[num].paused = val ? 1 : 0;
2622
2623	memset(&crq, 0, sizeof(crq));
2624	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2625	crq.control_ras.cmd = CONTROL_RAS;
2626	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2627	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2628	ibmvnic_send_crq(adapter, &crq);
2629
2630	return len;
2631}
2632
2633static const struct file_operations paused_ops = {
2634	.owner		= THIS_MODULE,
2635	.open		= ibmvnic_fw_comp_open,
2636	.read		= paused_read,
2637	.write		= paused_write,
2638};
2639
2640static ssize_t tracing_read(struct file *file, char __user *user_buf,
2641			    size_t len, loff_t *ppos)
2642{
2643	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2644	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2645	int num = ras_comp_int->num;
2646	char buff[5]; /*  1 or 0 plus \n and \0 */
2647	int size;
2648
2649	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2650
2651	if (*ppos >= size)
2652		return 0;
2653
2654	copy_to_user(user_buf, buff, size);
2655	*ppos += size;
2656	return size;
2657}
2658
2659static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2660			     size_t len, loff_t *ppos)
2661{
2662	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2663	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2664	int num = ras_comp_int->num;
2665	union ibmvnic_crq crq;
2666	unsigned long val = 0;
2667	char buff[9] = {0}; /* decimal max int plus \n and \0 */
2668	if (copy_from_user(buff, user_buf, sizeof(buff) - 1) ||
2669	    kstrtoul(buff, 10, &val))
2670		return -EFAULT;
2671
2672	memset(&crq, 0, sizeof(crq));
2673	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2674	crq.control_ras.cmd = CONTROL_RAS;
2675	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2676	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
2677	ibmvnic_send_crq(adapter, &crq);
2678	return len;
2679}
2680
2681static const struct file_operations tracing_ops = {
2682	.owner		= THIS_MODULE,
2683	.open		= ibmvnic_fw_comp_open,
2684	.read		= tracing_read,
2685	.write		= tracing_write,
2686};
2687
2688static ssize_t error_level_read(struct file *file, char __user *user_buf,
2689				size_t len, loff_t *ppos)
2690{
2691	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2692	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2693	int num = ras_comp_int->num;
2694	char buff[5]; /* decimal max char plus \n and \0 */
2695	int size;
2696
2697	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2698
2699	if (*ppos >= size)
2700		return 0;
2701
2702	copy_to_user(user_buf, buff, size);
2703	*ppos += size;
2704	return size;
2705}
2706
2707static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2708				 size_t len, loff_t *ppos)
2709{
2710	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2711	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2712	int num = ras_comp_int->num;
2713	union ibmvnic_crq crq;
2714	unsigned long val = 0;
2715	char buff[9] = {0}; /* decimal max int plus \n and \0 */
2716	if (copy_from_user(buff, user_buf, sizeof(buff) - 1) ||
2717	    kstrtoul(buff, 10, &val))
2718		return -EFAULT;
2719
2720	if (val > 9)
2721		val = 9;
2722
2723	memset(&crq, 0, sizeof(crq));
2724	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2725	crq.control_ras.cmd = CONTROL_RAS;
2726	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2727	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2728	crq.control_ras.level = val;
2729	ibmvnic_send_crq(adapter, &crq);
2730
2731	return len;
2732}
2733
2734static const struct file_operations error_level_ops = {
2735	.owner		= THIS_MODULE,
2736	.open		= ibmvnic_fw_comp_open,
2737	.read		= error_level_read,
2738	.write		= error_level_write,
2739};
2740
2741static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2742				size_t len, loff_t *ppos)
2743{
2744	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2745	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2746	int num = ras_comp_int->num;
2747	char buff[5]; /* decimal max char plus \n and \0 */
2748	int size;
2749
2750	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2751	if (*ppos >= size)
2752		return 0;
2753
2754	copy_to_user(user_buf, buff, size);
2755	*ppos += size;
2756	return size;
2757}
2758
2759static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
2760				 size_t len, loff_t *ppos)
2761{
2762	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2763	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2764	union ibmvnic_crq crq;
2765	unsigned long val = 0;
2766	char buff[9] = {0}; /* decimal max int plus \n and \0 */
2767	if (copy_from_user(buff, user_buf, sizeof(buff) - 1) ||
2768	    kstrtoul(buff, 10, &val))
2769		return -EFAULT;
2770	if (val > 9)
2771		val = 9;
2772
2773	memset(&crq, 0, sizeof(crq));
2774	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2775	crq.control_ras.cmd = CONTROL_RAS;
2776	crq.control_ras.correlator =
2777	    adapter->ras_comps[ras_comp_int->num].correlator;
2778	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
2779	crq.control_ras.level = val;
2780	ibmvnic_send_crq(adapter, &crq);
2781
2782	return len;
2783}
2784
2785static const struct file_operations trace_level_ops = {
2786	.owner		= THIS_MODULE,
2787	.open		= ibmvnic_fw_comp_open,
2788	.read		= trace_level_read,
2789	.write		= trace_level_write,
2790};
2791
2792static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
2793				    size_t len, loff_t *ppos)
2794{
2795	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2796	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2797	int num = ras_comp_int->num;
2798	char buff[13]; /* decimal max u32 plus \n and \0 */
2799	int size;
2800
2801	size = sprintf(buff, "%u\n", be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
2802	if (*ppos >= size)
2803		return 0;
2804
2805	copy_to_user(user_buf, buff, size);
2806	*ppos += size;
2807	return size;
2808}
2809
2810static ssize_t trace_buff_size_write(struct file *file,
2811				     const char __user *user_buf, size_t len,
2812				     loff_t *ppos)
2813{
2814	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2815	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2816	union ibmvnic_crq crq;
2817	unsigned long val = 0;
2818	char buff[9] = {0}; /* decimal max int plus \n and \0 */
2819	if (copy_from_user(buff, user_buf, sizeof(buff) - 1) ||
2820	    kstrtoul(buff, 10, &val))
2821		return -EFAULT;
2822
2823	memset(&crq, 0, sizeof(crq));
2824	crq.control_ras.first = IBMVNIC_CRQ_CMD;
2825	crq.control_ras.cmd = CONTROL_RAS;
2826	crq.control_ras.correlator =
2827	    adapter->ras_comps[ras_comp_int->num].correlator;
2828	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
2829	/* trace_buff_sz is 3 bytes; send the low 24 bits big-endian */
2830	crq.control_ras.trace_buff_sz[0] = (val >> 16) & 0xff;
2831	crq.control_ras.trace_buff_sz[1] = (val >> 8) & 0xff;
2832	crq.control_ras.trace_buff_sz[2] = val & 0xff;
2833	ibmvnic_send_crq(adapter, &crq);
2834
2835	return len;
2836}
2837
2838static const struct file_operations trace_size_ops = {
2839	.owner		= THIS_MODULE,
2840	.open		= ibmvnic_fw_comp_open,
2841	.read		= trace_buff_size_read,
2842	.write		= trace_buff_size_write,
2843};
2844
2845static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
2846					 struct ibmvnic_adapter *adapter)
2847{
2848	struct device *dev = &adapter->vdev->dev;
2849	struct dentry *dir_ent;
2850	struct dentry *ent;
2851	int i;
2852
2853	debugfs_remove_recursive(adapter->ras_comps_ent);
2854
2855	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
2856						    adapter->debugfs_dir);
2857	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
2858		dev_info(dev, "debugfs create ras_comps dir failed\n");
2859		return;
2860	}
2861
2862	for (i = 0; i < adapter->ras_comp_num; i++) {
2863		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
2864					     adapter->ras_comps_ent);
2865		if (!dir_ent || IS_ERR(dir_ent)) {
2866			dev_info(dev, "debugfs create %s dir failed\n",
2867				 adapter->ras_comps[i].name);
2868			continue;
2869		}
2870
2871		adapter->ras_comp_int[i].adapter = adapter;
2872		adapter->ras_comp_int[i].num = i;
2873		adapter->ras_comp_int[i].desc_blob.data =
2874		    &adapter->ras_comps[i].description;
2875		adapter->ras_comp_int[i].desc_blob.size =
2876		    sizeof(adapter->ras_comps[i].description);
2877
2878		/* Don't need to remember the dentry's because the debugfs dir
2879		 * gets removed recursively
2880		 */
2881		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
2882					  &adapter->ras_comp_int[i].desc_blob);
2883		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
2884					  dir_ent, &adapter->ras_comp_int[i],
2885					  &trace_size_ops);
2886		ent = debugfs_create_file("trace_level",
2887					  S_IRUGO |
2888					  (adapter->ras_comps[i].trace_level !=
2889					   0xFF  ? S_IWUSR : 0),
2890					   dir_ent, &adapter->ras_comp_int[i],
2891					   &trace_level_ops);
2892		ent = debugfs_create_file("error_level",
2893					  S_IRUGO |
2894					  (adapter->
2895					   ras_comps[i].error_check_level !=
2896					   0xFF ? S_IWUSR : 0),
2897					  dir_ent, &adapter->ras_comp_int[i],
2898					  &error_level_ops);
2899		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
2900					  dir_ent, &adapter->ras_comp_int[i],
2901					  &tracing_ops);
2902		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
2903					  dir_ent, &adapter->ras_comp_int[i],
2904					  &paused_ops);
2905		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
2906					  &adapter->ras_comp_int[i],
2907					  &trace_ops);
2908	}
2909}
2910
2911static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
2912					    struct ibmvnic_adapter *adapter)
2913{
2914	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
2915	struct device *dev = &adapter->vdev->dev;
2916	union ibmvnic_crq newcrq;
2917
2918	adapter->ras_comps = dma_alloc_coherent(dev, len,
2919						&adapter->ras_comps_tok,
2920						GFP_KERNEL);
2921	if (!adapter->ras_comps) {
2922		if (!firmware_has_feature(FW_FEATURE_CMO))
2923			dev_err(dev, "Couldn't alloc fw comps buffer\n");
2924		return;
2925	}
2926
2927	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
2928					sizeof(struct ibmvnic_fw_comp_internal),
2929					GFP_KERNEL);
2930	if (!adapter->ras_comp_int) {
2931		dma_free_coherent(dev, len, adapter->ras_comps, adapter->ras_comps_tok);
2932		return;
2933	}
2934	memset(&newcrq, 0, sizeof(newcrq));
2935	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
2936	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
2937	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
2938	newcrq.request_ras_comps.len = cpu_to_be32(len);
2939	ibmvnic_send_crq(adapter, &newcrq);
2940}
2941
2942static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
2943{
2944	struct ibmvnic_inflight_cmd *inflight_cmd;
2945	struct device *dev = &adapter->vdev->dev;
2946	struct ibmvnic_error_buff *error_buff;
2947	unsigned long flags;
2948	unsigned long flags2;
2949
2950	spin_lock_irqsave(&adapter->inflight_lock, flags);
2951	list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
2952		switch (inflight_cmd->crq.generic.cmd) {
2953		case LOGIN:
2954			dma_unmap_single(dev, adapter->login_buf_token,
2955					 adapter->login_buf_sz,
2956					 DMA_BIDIRECTIONAL);
2957			dma_unmap_single(dev, adapter->login_rsp_buf_token,
2958					 adapter->login_rsp_buf_sz,
2959					 DMA_BIDIRECTIONAL);
2960			kfree(adapter->login_rsp_buf);
2961			kfree(adapter->login_buf);
2962			break;
2963		case REQUEST_DUMP:
2964			complete(&adapter->fw_done);
2965			break;
2966		case REQUEST_ERROR_INFO:
2967			spin_lock_irqsave(&adapter->error_list_lock, flags2);
2968			list_for_each_entry(error_buff, &adapter->errors,
2969					    list) {
2970				dma_unmap_single(dev, error_buff->dma,
2971						 error_buff->len,
2972						 DMA_FROM_DEVICE);
2973				kfree(error_buff->buff);
2974				list_del(&error_buff->list);
2975				kfree(error_buff);
2976			}
2977			spin_unlock_irqrestore(&adapter->error_list_lock,
2978					       flags2);
2979			break;
2980		}
2981		list_del(&inflight_cmd->list);
2982		kfree(inflight_cmd);
2983	}
2984	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2985}
2986
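/* Top-level CRQ dispatch.  The first byte of a message distinguishes
 * initialization handshakes, transport events (partition migration or
 * partner failure) and ordinary command responses; command responses
 * are then fanned out by command code to the handlers above.
 */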
2987static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
2988			       struct ibmvnic_adapter *adapter)
2989{
2990	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
2991	struct net_device *netdev = adapter->netdev;
2992	struct device *dev = &adapter->vdev->dev;
2993	long rc;
2994
2995	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
2996		   ((unsigned long int *)crq)[0],
2997		   ((unsigned long int *)crq)[1]);
2998	switch (gen_crq->first) {
2999	case IBMVNIC_CRQ_INIT_RSP:
3000		switch (gen_crq->cmd) {
3001		case IBMVNIC_CRQ_INIT:
3002			dev_info(dev, "Partner initialized\n");
3003			/* Send back a response */
3004			rc = ibmvnic_send_crq_init_complete(adapter);
3005			if (rc == 0)
3006				send_version_xchg(adapter);
3007			else
3008				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3009			break;
3010		case IBMVNIC_CRQ_INIT_COMPLETE:
3011			dev_info(dev, "Partner initialization complete\n");
3012			send_version_xchg(adapter);
3013			break;
3014		default:
3015			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3016		}
3017		return;
3018	case IBMVNIC_CRQ_XPORT_EVENT:
3019		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3020			dev_info(dev, "Re-enabling adapter\n");
3021			adapter->migrated = true;
3022			ibmvnic_free_inflight(adapter);
3023			release_sub_crqs(adapter);
3024			rc = ibmvnic_reenable_crq_queue(adapter);
3025			if (rc)
3026				dev_err(dev, "Error after enable rc=%ld\n", rc);
3027			adapter->migrated = false;
3028			rc = ibmvnic_send_crq_init(adapter);
3029			if (rc)
3030				dev_err(dev, "Error sending init rc=%ld\n", rc);
3031		} else {
3032			/* The adapter lost the connection */
3033			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3034				gen_crq->cmd);
3035			ibmvnic_free_inflight(adapter);
3036			release_sub_crqs(adapter);
3037		}
3038		return;
3039	case IBMVNIC_CRQ_CMD_RSP:
3040		break;
3041	default:
3042		dev_err(dev, "Got an invalid msg type 0x%02x\n",
3043			gen_crq->first);
3044		return;
3045	}
3046
3047	switch (gen_crq->cmd) {
3048	case VERSION_EXCHANGE_RSP:
3049		rc = crq->version_exchange_rsp.rc.code;
3050		if (rc) {
3051			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3052			break;
3053		}
3054		dev_info(dev, "Partner protocol version is %d\n",
3055			 be16_to_cpu(crq->version_exchange_rsp.version));
3056		if (be16_to_cpu(crq->version_exchange_rsp.version) <
3057		    ibmvnic_version)
3058			ibmvnic_version =
3059			    be16_to_cpu(crq->version_exchange_rsp.version);
3060		send_cap_queries(adapter);
3061		break;
3062	case QUERY_CAPABILITY_RSP:
3063		handle_query_cap_rsp(crq, adapter);
3064		break;
3065	case QUERY_MAP_RSP:
3066		handle_query_map_rsp(crq, adapter);
3067		break;
3068	case REQUEST_MAP_RSP:
3069		handle_request_map_rsp(crq, adapter);
3070		break;
3071	case REQUEST_UNMAP_RSP:
3072		handle_request_unmap_rsp(crq, adapter);
3073		break;
3074	case REQUEST_CAPABILITY_RSP:
3075		handle_request_cap_rsp(crq, adapter);
3076		break;
3077	case LOGIN_RSP:
3078		netdev_dbg(netdev, "Got Login Response\n");
3079		handle_login_rsp(crq, adapter);
3080		break;
3081	case LOGICAL_LINK_STATE_RSP:
3082		netdev_dbg(netdev, "Got Logical Link State Response\n");
3083		adapter->logical_link_state =
3084		    crq->logical_link_state_rsp.link_state;
3085		break;
3086	case LINK_STATE_INDICATION:
3087		netdev_dbg(netdev, "Got Logical Link State Indication\n");
3088		adapter->phys_link_state =
3089		    crq->link_state_indication.phys_link_state;
3090		adapter->logical_link_state =
3091		    crq->link_state_indication.logical_link_state;
3092		break;
3093	case CHANGE_MAC_ADDR_RSP:
3094		netdev_dbg(netdev, "Got MAC address change Response\n");
3095		handle_change_mac_rsp(crq, adapter);
3096		break;
3097	case ERROR_INDICATION:
3098		netdev_dbg(netdev, "Got Error Indication\n");
3099		handle_error_indication(crq, adapter);
3100		break;
3101	case REQUEST_ERROR_RSP:
3102		netdev_dbg(netdev, "Got Error Detail Response\n");
3103		handle_error_info_rsp(crq, adapter);
3104		break;
3105	case REQUEST_STATISTICS_RSP:
3106		netdev_dbg(netdev, "Got Statistics Response\n");
3107		complete(&adapter->stats_done);
3108		break;
3109	case REQUEST_DUMP_SIZE_RSP:
3110		netdev_dbg(netdev, "Got Request Dump Size Response\n");
3111		handle_dump_size_rsp(crq, adapter);
3112		break;
3113	case REQUEST_DUMP_RSP:
3114		netdev_dbg(netdev, "Got Request Dump Response\n");
3115		complete(&adapter->fw_done);
3116		break;
3117	case QUERY_IP_OFFLOAD_RSP:
3118		netdev_dbg(netdev, "Got Query IP offload Response\n");
3119		handle_query_ip_offload_rsp(adapter);
3120		break;
3121	case MULTICAST_CTRL_RSP:
3122		netdev_dbg(netdev, "Got multicast control Response\n");
3123		break;
3124	case CONTROL_IP_OFFLOAD_RSP:
3125		netdev_dbg(netdev, "Got Control IP offload Response\n");
3126		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3127				 sizeof(adapter->ip_offload_ctrl),
3128				 DMA_TO_DEVICE);
3129		/* We're done with the queries, perform the login */
3130		send_login(adapter);
3131		break;
3132	case REQUEST_RAS_COMP_NUM_RSP:
3133		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3134		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3135			netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3136			break;
3137		}
3138		adapter->ras_comp_num =
3139		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3140		handle_request_ras_comp_num_rsp(crq, adapter);
3141		break;
3142	case REQUEST_RAS_COMPS_RSP:
3143		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3144		handle_request_ras_comps_rsp(crq, adapter);
3145		break;
3146	case CONTROL_RAS_RSP:
3147		netdev_dbg(netdev, "Got Control RAS Response\n");
3148		handle_control_ras_rsp(crq, adapter);
3149		break;
3150	case COLLECT_FW_TRACE_RSP:
3151		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3152		complete(&adapter->fw_done);
3153		break;
3154	default:
3155		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3156			   gen_crq->cmd);
3157	}
3158}
3159
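/* CRQ interrupt handler.  Interrupts are disabled while the queue is
 * drained, then re-enabled and the queue checked once more; the
 * re-check closes the window where a message could land between the
 * last dequeue and the re-enable and otherwise sit unnoticed until
 * the next interrupt.
 */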
3160static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3161{
3162	struct ibmvnic_adapter *adapter = instance;
3163	struct ibmvnic_crq_queue *queue = &adapter->crq;
3164	struct vio_dev *vdev = adapter->vdev;
3165	union ibmvnic_crq *crq;
3166	unsigned long flags;
3167	bool done = false;
3168
3169	spin_lock_irqsave(&queue->lock, flags);
3170	vio_disable_interrupts(vdev);
3171	while (!done) {
3172		/* Pull all the valid messages off the CRQ */
3173		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3174			ibmvnic_handle_crq(crq, adapter);
3175			crq->generic.first = 0;
3176		}
3177		vio_enable_interrupts(vdev);
3178		crq = ibmvnic_next_crq(adapter);
3179		if (crq) {
3180			vio_disable_interrupts(vdev);
3181			ibmvnic_handle_crq(crq, adapter);
3182			crq->generic.first = 0;
3183		} else {
3184			done = true;
3185		}
3186	}
3187	spin_unlock_irqrestore(&queue->lock, flags);
3188	return IRQ_HANDLED;
3189}
3190
3191static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3192{
3193	struct vio_dev *vdev = adapter->vdev;
3194	int rc;
3195
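	/* H_ENABLE_CRQ can return busy codes while the hypervisor is still
	 * finishing a prior operation on this CRQ; retry until a final
	 * return code comes back.
	 */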
3196	do {
3197		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3198	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3199
3200	if (rc)
3201		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3202
3203	return rc;
3204}
3205
3206static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3207{
3208	struct ibmvnic_crq_queue *crq = &adapter->crq;
3209	struct device *dev = &adapter->vdev->dev;
3210	struct vio_dev *vdev = adapter->vdev;
3211	int rc;
3212
3213	/* Close the CRQ */
3214	do {
3215		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3216	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3217
3218	/* Clean out the queue */
3219	memset(crq->msgs, 0, PAGE_SIZE);
3220	crq->cur = 0;
3221
3222	/* And re-open it again */
3223	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3224				crq->msg_token, PAGE_SIZE);
3225
3226	if (rc == H_CLOSED)
3227		/* Adapter is good, but other end is not ready */
3228		dev_warn(dev, "Partner adapter not ready\n");
3229	else if (rc != 0)
3230		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3231
3232	return rc;
3233}
3234
3235static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3236{
3237	struct ibmvnic_crq_queue *crq = &adapter->crq;
3238	struct vio_dev *vdev = adapter->vdev;
3239	long rc;
3240
3241	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3242	free_irq(vdev->irq, adapter);
3243	do {
3244		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3245	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3246
3247	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3248			 DMA_BIDIRECTIONAL);
3249	free_page((unsigned long)crq->msgs);
3250}
3251
3252static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3253{
3254	struct ibmvnic_crq_queue *crq = &adapter->crq;
3255	struct device *dev = &adapter->vdev->dev;
3256	struct vio_dev *vdev = adapter->vdev;
3257	int rc, retrc = -ENOMEM;
3258
3259	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3260	/* Should we allocate more than one page? */
3261
3262	if (!crq->msgs)
3263		return -ENOMEM;
3264
3265	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3266	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3267					DMA_BIDIRECTIONAL);
3268	if (dma_mapping_error(dev, crq->msg_token))
3269		goto map_failed;
3270
3271	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3272				crq->msg_token, PAGE_SIZE);
3273
3274	if (rc == H_RESOURCE)
3275		/* maybe kexecing and resource is busy. try a reset */
3276		rc = ibmvnic_reset_crq(adapter);
3277	retrc = rc;
3278
3279	if (rc == H_CLOSED) {
3280		dev_warn(dev, "Partner adapter not ready\n");
3281	} else if (rc) {
3282		dev_warn(dev, "Error %d opening adapter\n", rc);
3283		goto reg_crq_failed;
3284	}
3285
3286	retrc = 0;
3287
3288	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3289	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3290			 adapter);
3291	if (rc) {
3292		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3293			vdev->irq, rc);
3294		goto req_irq_failed;
3295	}
3296
3297	rc = vio_enable_interrupts(vdev);
3298	if (rc) {
3299		dev_err(dev, "Error %d enabling interrupts\n", rc);
3300		goto req_irq_failed;
3301	}
3302
3303	crq->cur = 0;
3304	spin_lock_init(&crq->lock);
3305
3306	return retrc;
3307
3308req_irq_failed:
3309	do {
3310		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3311	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3312reg_crq_failed:
3313	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3314map_failed:
3315	free_page((unsigned long)crq->msgs);
3316	return retrc;
3317}
3318
3319/* debugfs for dump */
3320static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3321{
3322	struct net_device *netdev = seq->private;
3323	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3324	struct device *dev = &adapter->vdev->dev;
3325	union ibmvnic_crq crq;
3326
3327	memset(&crq, 0, sizeof(crq));
3328	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3329	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	/* Initialize the completion before sending the request so the
	 * response handler cannot signal it before we start waiting.
	 */
3330	init_completion(&adapter->fw_done);
3331	ibmvnic_send_crq(adapter, &crq);
3332
3333	wait_for_completion(&adapter->fw_done);
3334
3335	seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3336
3337	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3338			 DMA_BIDIRECTIONAL);
3339
3340	kfree(adapter->dump_data);
3341
3342	return 0;
3343}
3344
3345static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3346{
3347	return single_open(file, ibmvnic_dump_show, inode->i_private);
3348}
3349
3350static const struct file_operations ibmvnic_dump_ops = {
3351	.owner          = THIS_MODULE,
3352	.open           = ibmvnic_dump_open,
3353	.read           = seq_read,
3354	.llseek         = seq_lseek,
3355	.release        = single_release,
3356};
3357
3358static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3359{
3360	struct ibmvnic_adapter *adapter;
3361	struct net_device *netdev;
3362	unsigned char *mac_addr_p;
3363	struct dentry *ent;
3364	char buf[16]; /* debugfs name buf */
3365	int rc;
3366
3367	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3368		dev->unit_address);
3369
3370	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3371							VETH_MAC_ADDR, NULL);
3372	if (!mac_addr_p) {
3373		dev_err(&dev->dev,
3374			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3375			__FILE__, __LINE__);
3376		return -EINVAL;
3377	}
3378
3379	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3380				   IBMVNIC_MAX_TX_QUEUES);
3381	if (!netdev)
3382		return -ENOMEM;
3383
3384	adapter = netdev_priv(netdev);
3385	dev_set_drvdata(&dev->dev, netdev);
3386	adapter->vdev = dev;
3387	adapter->netdev = netdev;
3388
3389	ether_addr_copy(adapter->mac_addr, mac_addr_p);
3390	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3391	netdev->irq = dev->irq;
3392	netdev->netdev_ops = &ibmvnic_netdev_ops;
3393	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3394	SET_NETDEV_DEV(netdev, &dev->dev);
3395
3396	spin_lock_init(&adapter->stats_lock);
3397
3398	rc = ibmvnic_init_crq_queue(adapter);
3399	if (rc) {
3400		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3401		goto free_netdev;
3402	}
3403
3404	INIT_LIST_HEAD(&adapter->errors);
3405	INIT_LIST_HEAD(&adapter->inflight);
3406	spin_lock_init(&adapter->error_list_lock);
3407	spin_lock_init(&adapter->inflight_lock);
3408
3409	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3410					      sizeof(struct ibmvnic_statistics),
3411					      DMA_FROM_DEVICE);
3412	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3413		if (!firmware_has_feature(FW_FEATURE_CMO))
3414			dev_err(&dev->dev, "Couldn't map stats buffer\n");
3415		goto free_crq;
3416	}
3417
3418	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3419	ent = debugfs_create_dir(buf, NULL);
3420	if (!ent || IS_ERR(ent)) {
3421		dev_info(&dev->dev, "debugfs create directory failed\n");
3422		adapter->debugfs_dir = NULL;
3423	} else {
3424		adapter->debugfs_dir = ent;
3425		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3426					  netdev, &ibmvnic_dump_ops);
3427		if (!ent || IS_ERR(ent)) {
3428			dev_info(&dev->dev,
3429				 "debugfs create dump file failed\n");
3430			adapter->debugfs_dump = NULL;
3431		} else {
3432			adapter->debugfs_dump = ent;
3433		}
3434	}
	/* Initialize the completion before sending the init request so the
	 * response handler cannot signal it before we start waiting.
	 */
3435	init_completion(&adapter->init_done);
3436	ibmvnic_send_crq_init(adapter);
3437
3438	wait_for_completion(&adapter->init_done);
3439
3440	/* init_sub_crqs must run outside of interrupt context because it
3441	 * creates IRQ mappings for the sub-CRQ queues, which would trigger
3442	 * a kernel warning if done from the CRQ interrupt handler
3443	 */
3444	init_sub_crqs(adapter, 0);
3445
3446	reinit_completion(&adapter->init_done);
3447	wait_for_completion(&adapter->init_done);
3448
3449	/* if init_sub_crqs is partially successful, retry */
3450	while (!adapter->tx_scrq || !adapter->rx_scrq) {
3451		init_sub_crqs(adapter, 1);
3452
3453		reinit_completion(&adapter->init_done);
3454		wait_for_completion(&adapter->init_done);
3455	}
3456
3457	netdev->real_num_tx_queues = adapter->req_tx_queues;
3458
3459	rc = register_netdev(netdev);
3460	if (rc) {
3461		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3462		goto free_debugfs;
3463	}
3464	dev_info(&dev->dev, "ibmvnic registered\n");
3465
3466	return 0;
3467
3468free_debugfs:
3469	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3470		debugfs_remove_recursive(adapter->debugfs_dir);
3471free_crq:
3472	ibmvnic_release_crq_queue(adapter);
3473free_netdev:
3474	free_netdev(netdev);
3475	return rc;
3476}
3477
3478static int ibmvnic_remove(struct vio_dev *dev)
3479{
3480	struct net_device *netdev = dev_get_drvdata(&dev->dev);
3481	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3482
3483	unregister_netdev(netdev);
3484
3485	release_sub_crqs(adapter);
3486
3487	ibmvnic_release_crq_queue(adapter);
3488
3489	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3490		debugfs_remove_recursive(adapter->debugfs_dir);
3491
3492	if (adapter->ras_comps)
3493		dma_free_coherent(&dev->dev,
3494				  adapter->ras_comp_num *
3495				  sizeof(struct ibmvnic_fw_component),
3496				  adapter->ras_comps, adapter->ras_comps_tok);
3497
3498	kfree(adapter->ras_comp_int);
3499
3500	free_netdev(netdev);
3501	dev_set_drvdata(&dev->dev, NULL);
3502
3503	return 0;
3504}
3505
3506static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3507{
3508	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3509	struct ibmvnic_adapter *adapter;
3510	struct iommu_table *tbl;
3511	unsigned long ret = 0;
3512	int i;
3513
3514	tbl = get_iommu_table_base(&vdev->dev);
3515
3516	/* netdev inits at probe time along with the structures we need below */
3517	if (!netdev)
3518		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3519
3520	adapter = netdev_priv(netdev);
3521
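	/* Sum everything the driver keeps mapped long term: the CRQ page,
	 * the bounce buffer, the statistics buffer, four pages per sub-CRQ,
	 * and every buffer in each rx pool.
	 */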
3522	ret += PAGE_SIZE; /* the crq message queue */
3523	ret += adapter->bounce_buffer_size;
3524	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3525
3526	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3527		ret += 4 * PAGE_SIZE; /* the scrq message queue */
3528
3529	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3530	     i++)
3531		ret += adapter->rx_pool[i].size *
3532		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3533
3534	return ret;
3535}
3536
3537static int ibmvnic_resume(struct device *dev)
3538{
3539	struct net_device *netdev = dev_get_drvdata(dev);
3540	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3541	int i;
3542
3543	/* kick the interrupt handlers just in case we lost an interrupt */
3544	for (i = 0; i < adapter->req_rx_queues; i++)
3545		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3546				     adapter->rx_scrq[i]);
3547
3548	return 0;
3549}
3550
3551static struct vio_device_id ibmvnic_device_table[] = {
3552	{"network", "IBM,vnic"},
3553	{"", "" }
3554};
3555MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3556
3557static const struct dev_pm_ops ibmvnic_pm_ops = {
3558	.resume = ibmvnic_resume
3559};
3560
3561static struct vio_driver ibmvnic_driver = {
3562	.id_table       = ibmvnic_device_table,
3563	.probe          = ibmvnic_probe,
3564	.remove         = ibmvnic_remove,
3565	.get_desired_dma = ibmvnic_get_desired_dma,
3566	.name		= ibmvnic_driver_name,
3567	.pm		= &ibmvnic_pm_ops,
3568};
3569
3570/* module functions */
3571static int __init ibmvnic_module_init(void)
3572{
3573	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3574		IBMVNIC_DRIVER_VERSION);
3575
3576	return vio_register_driver(&ibmvnic_driver);
3577}
3578
3579static void __exit ibmvnic_module_exit(void)
3580{
3581	vio_unregister_driver(&ibmvnic_driver);
3582}
3583
3584module_init(ibmvnic_module_init);
3585module_exit(ibmvnic_module_exit);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  13/* option of the RS/6000 Platform Architecture to interface with virtual  */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
  15/*									   */
  16/* Messages are passed between the VNIC driver and the VNIC server using  */
  17/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  18/* issue and receive commands that initiate communication with the server */
  19/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  20/* are used by the driver to notify the server that a packet is           */
  21/* ready for transmission or that a buffer has been added to receive a    */
  22/* packet. Subsequently, sCRQs are used by the server to notify the       */
  23/* driver that a packet transmission has been completed or that a packet  */
  24/* has been received and placed in a waiting buffer.                      */
  25/*                                                                        */
  26/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  27/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  28/* or receive has been completed, the VNIC driver is required to use      */
  29/* "long term mapping". This entails that large, continuous DMA mapped    */
  30/* buffers are allocated on driver initialization and these buffers are   */
  31/* then continuously reused to pass skbs to and from the VNIC server.     */
  32/*                                                                        */
  33/**************************************************************************/
  34
  35#include <linux/module.h>
  36#include <linux/moduleparam.h>
  37#include <linux/types.h>
  38#include <linux/errno.h>
  39#include <linux/completion.h>
  40#include <linux/ioport.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/kernel.h>
  43#include <linux/netdevice.h>
  44#include <linux/etherdevice.h>
  45#include <linux/skbuff.h>
  46#include <linux/init.h>
  47#include <linux/delay.h>
  48#include <linux/mm.h>
  49#include <linux/ethtool.h>
  50#include <linux/proc_fs.h>
  51#include <linux/if_arp.h>
  52#include <linux/in.h>
  53#include <linux/ip.h>
  54#include <linux/ipv6.h>
  55#include <linux/irq.h>
  56#include <linux/kthread.h>
  57#include <linux/seq_file.h>
  58#include <linux/interrupt.h>
  59#include <net/net_namespace.h>
  60#include <asm/hvcall.h>
  61#include <linux/atomic.h>
  62#include <asm/vio.h>
  63#include <asm/iommu.h>
  64#include <linux/uaccess.h>
  65#include <asm/firmware.h>
  66#include <linux/workqueue.h>
  67#include <linux/if_vlan.h>
  68#include <linux/utsname.h>
  69
  70#include "ibmvnic.h"
  71
  72static const char ibmvnic_driver_name[] = "ibmvnic";
  73static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  74
  75MODULE_AUTHOR("Santiago Leon");
  76MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  77MODULE_LICENSE("GPL");
  78MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  79
  80static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
  81static int ibmvnic_remove(struct vio_dev *);
  82static void release_sub_crqs(struct ibmvnic_adapter *, bool);
  83static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  84static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  85static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  86static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
  87static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
  88		       union sub_crq *sub_crq);
  89static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
  90static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
  91static int enable_scrq_irq(struct ibmvnic_adapter *,
  92			   struct ibmvnic_sub_crq_queue *);
  93static int disable_scrq_irq(struct ibmvnic_adapter *,
  94			    struct ibmvnic_sub_crq_queue *);
  95static int pending_scrq(struct ibmvnic_adapter *,
  96			struct ibmvnic_sub_crq_queue *);
  97static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
  98					struct ibmvnic_sub_crq_queue *);
  99static int ibmvnic_poll(struct napi_struct *napi, int data);
 100static void send_map_query(struct ibmvnic_adapter *adapter);
 101static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 102static int send_request_unmap(struct ibmvnic_adapter *, u8);
 103static int send_login(struct ibmvnic_adapter *adapter);
 104static void send_cap_queries(struct ibmvnic_adapter *adapter);
 105static int init_sub_crqs(struct ibmvnic_adapter *);
 106static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 107static int ibmvnic_init(struct ibmvnic_adapter *);
 108static int ibmvnic_reset_init(struct ibmvnic_adapter *);
 109static void release_crq_queue(struct ibmvnic_adapter *);
 110static int __ibmvnic_set_mac(struct net_device *, u8 *);
 111static int init_crq_queue(struct ibmvnic_adapter *adapter);
 112static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 113
 114struct ibmvnic_stat {
 115	char name[ETH_GSTRING_LEN];
 116	int offset;
 117};
 118
 119#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 120			     offsetof(struct ibmvnic_statistics, stat))
 121#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
 122
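/* Each entry names a firmware counter and records its byte offset within
 * struct ibmvnic_adapter, so the stats code can read any counter
 * generically with IBMVNIC_GET_STAT() instead of naming fields directly.
 */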
 123static const struct ibmvnic_stat ibmvnic_stats[] = {
 124	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 125	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 126	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 127	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 128	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 129	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 130	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 131	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 132	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 133	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 134	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 135	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 136	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 137	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 138	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 139	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 140	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 141	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 142	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 143	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 144	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 145	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 146};
 147
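/* Register a sub-CRQ page with the hypervisor.  On success the hcall
 * return buffer carries the new queue's identifier (*number) and the
 * virtual interrupt assigned to it (*irq).
 */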
 148static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 149			  unsigned long length, unsigned long *number,
 150			  unsigned long *irq)
 151{
 152	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 153	long rc;
 154
 155	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 156	*number = retbuf[0];
 157	*irq = retbuf[1];
 158
 159	return rc;
 160}
 161
 162static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 163				struct ibmvnic_long_term_buff *ltb, int size)
 164{
 165	struct device *dev = &adapter->vdev->dev;
 166	int rc;
 167
 168	ltb->size = size;
 169	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
 170				       GFP_KERNEL);
 171
 172	if (!ltb->buff) {
 173		dev_err(dev, "Couldn't alloc long term buffer\n");
 174		return -ENOMEM;
 175	}
 176	ltb->map_id = adapter->map_id;
 177	adapter->map_id++;
 178
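	/* Advertise the buffer to the VNIC server under its map_id; the
	 * response completes fw_done and leaves the server's return code
	 * in fw_done_rc, which is checked below.
	 */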
 179	init_completion(&adapter->fw_done);
 180	rc = send_request_map(adapter, ltb->addr,
 181			      ltb->size, ltb->map_id);
 182	if (rc) {
 183		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 184		return rc;
 185	}
 186	wait_for_completion(&adapter->fw_done);
 187
 188	if (adapter->fw_done_rc) {
 189		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
 190			adapter->fw_done_rc);
 191		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 192		return -1;
 193	}
 194	return 0;
 195}
 196
 197static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 198				struct ibmvnic_long_term_buff *ltb)
 199{
 200	struct device *dev = &adapter->vdev->dev;
 201
 202	if (!ltb->buff)
 203		return;
 204
 205	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 206	    adapter->reset_reason != VNIC_RESET_MOBILITY)
 207		send_request_unmap(adapter, ltb->map_id);
 208	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 209}
 210
 211static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
 212				struct ibmvnic_long_term_buff *ltb)
 213{
 214	int rc;
 215
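	/* Zero the buffer and re-register it with firmware under the same
	 * map_id; if the re-map fails, fall back to freeing and
	 * reallocating the buffer from scratch.
	 */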
 216	memset(ltb->buff, 0, ltb->size);
 217
 218	init_completion(&adapter->fw_done);
 219	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 220	if (rc)
 221		return rc;
 222	wait_for_completion(&adapter->fw_done);
 223
 224	if (adapter->fw_done_rc) {
 225		dev_info(&adapter->vdev->dev,
 226			 "Reset failed, attempting to free and reallocate buffer\n");
 227		free_long_term_buff(adapter, ltb);
 228		return alloc_long_term_buff(adapter, ltb, ltb->size);
 229	}
 230	return 0;
 231}
 232
 233static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 234{
 235	int i;
 236
 237	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 238	     i++)
 239		adapter->rx_pool[i].active = 0;
 240}
 241
 242static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 243			      struct ibmvnic_rx_pool *pool)
 244{
 245	int count = pool->size - atomic_read(&pool->available);
 246	struct device *dev = &adapter->vdev->dev;
 247	int buffers_added = 0;
 248	unsigned long lpar_rc;
 249	union sub_crq sub_crq;
 250	struct sk_buff *skb;
 251	unsigned int offset;
 252	dma_addr_t dma_addr;
 253	unsigned char *dst;
 254	u64 *handle_array;
 255	int shift = 0;
 256	int index;
 257	int i;
 258
 259	if (!pool->active)
 260		return;
 261
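	/* Each new buffer is handed to firmware with an rx_add sub-CRQ
	 * whose correlator is the address of its rx_buff entry, letting
	 * the completion path recover the buffer when the server fills it.
	 */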
 262	handle_array =
 263	    (u64 *)((u8 *)(adapter->login_rsp_buf) +
 264		    be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
 265
 266	for (i = 0; i < count; ++i) {
 267		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 268		if (!skb) {
 269			dev_err(dev, "Couldn't replenish rx buff\n");
 270			adapter->replenish_no_mem++;
 271			break;
 272		}
 273
 274		index = pool->free_map[pool->next_free];
 275
 276		if (pool->rx_buff[index].skb)
 277			dev_err(dev, "Inconsistent free_map!\n");
 278
 279		/* Copy the skb to the long term mapped DMA buffer */
 280		offset = index * pool->buff_size;
 281		dst = pool->long_term_buff.buff + offset;
 282		memset(dst, 0, pool->buff_size);
 283		dma_addr = pool->long_term_buff.addr + offset;
 284		pool->rx_buff[index].data = dst;
 285
 286		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 287		pool->rx_buff[index].dma = dma_addr;
 288		pool->rx_buff[index].skb = skb;
 289		pool->rx_buff[index].pool_index = pool->index;
 290		pool->rx_buff[index].size = pool->buff_size;
 291
 292		memset(&sub_crq, 0, sizeof(sub_crq));
 293		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 294		sub_crq.rx_add.correlator =
 295		    cpu_to_be64((u64)&pool->rx_buff[index]);
 296		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 297		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
 298
 299		/* The length field of the sCRQ is defined to be 24 bits so the
 300		 * buffer size needs to be left shifted by a byte before it is
 301		 * converted to big endian to prevent the last byte from being
 302		 * truncated.
 303		 */
 304#ifdef __LITTLE_ENDIAN__
 305		shift = 8;
 306#endif
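		/* e.g. a 4096 byte buffer on little endian: 0x1000 << 8 is
		 * 0x100000, which cpu_to_be32() stores as 00 10 00 00,
		 * leaving 0x001000 (4096) in the 24 bit length field.
		 */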
 307		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 308
 309		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
 310				      &sub_crq);
 311		if (lpar_rc != H_SUCCESS)
 312			goto failure;
 313
 314		buffers_added++;
 315		adapter->replenish_add_buff_success++;
 316		pool->next_free = (pool->next_free + 1) % pool->size;
 317	}
 318	atomic_add(buffers_added, &pool->available);
 319	return;
 320
 321failure:
 322	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
 323		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
 324	pool->free_map[pool->next_free] = index;
 325	pool->rx_buff[index].skb = NULL;
 326
 327	dev_kfree_skb_any(skb);
 328	adapter->replenish_add_buff_failure++;
 329	atomic_add(buffers_added, &pool->available);
 330
 331	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 332		/* Disable buffer pool replenishment and report carrier off if
 333		 * queue is closed or pending failover.
 334		 * Firmware guarantees that a signal will be sent to the
 335		 * driver, triggering a reset.
 336		 */
 337		deactivate_rx_pools(adapter);
 338		netif_carrier_off(adapter->netdev);
 339	}
 340}
 341
 342static void replenish_pools(struct ibmvnic_adapter *adapter)
 343{
 344	int i;
 345
 346	adapter->replenish_task_cycles++;
 347	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 348	     i++) {
 349		if (adapter->rx_pool[i].active)
 350			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 351	}
 352}
 353
 354static void release_stats_buffers(struct ibmvnic_adapter *adapter)
 355{
 356	kfree(adapter->tx_stats_buffers);
 357	kfree(adapter->rx_stats_buffers);
 358	adapter->tx_stats_buffers = NULL;
 359	adapter->rx_stats_buffers = NULL;
 360}
 361
 362static int init_stats_buffers(struct ibmvnic_adapter *adapter)
 363{
 364	adapter->tx_stats_buffers =
 365				kcalloc(IBMVNIC_MAX_QUEUES,
 366					sizeof(struct ibmvnic_tx_queue_stats),
 367					GFP_KERNEL);
 368	if (!adapter->tx_stats_buffers)
 369		return -ENOMEM;
 370
 371	adapter->rx_stats_buffers =
 372				kcalloc(IBMVNIC_MAX_QUEUES,
 373					sizeof(struct ibmvnic_rx_queue_stats),
 374					GFP_KERNEL);
 375	if (!adapter->rx_stats_buffers)
 376		return -ENOMEM;
 377
 378	return 0;
 379}
 380
 381static void release_stats_token(struct ibmvnic_adapter *adapter)
 382{
 383	struct device *dev = &adapter->vdev->dev;
 384
 385	if (!adapter->stats_token)
 386		return;
 387
 388	dma_unmap_single(dev, adapter->stats_token,
 389			 sizeof(struct ibmvnic_statistics),
 390			 DMA_FROM_DEVICE);
 391	adapter->stats_token = 0;
 392}
 393
 394static int init_stats_token(struct ibmvnic_adapter *adapter)
 395{
 396	struct device *dev = &adapter->vdev->dev;
 397	dma_addr_t stok;
 398
 399	stok = dma_map_single(dev, &adapter->stats,
 400			      sizeof(struct ibmvnic_statistics),
 401			      DMA_FROM_DEVICE);
 402	if (dma_mapping_error(dev, stok)) {
 403		dev_err(dev, "Couldn't map stats buffer\n");
 404		return -1;
 405	}
 406
 407	adapter->stats_token = stok;
 408	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
 409	return 0;
 410}
 411
 412static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 413{
 414	struct ibmvnic_rx_pool *rx_pool;
 415	int rx_scrqs;
 416	int i, j, rc;
 417	u64 *size_array;
 418
 419	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 420		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 421
 422	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 423	for (i = 0; i < rx_scrqs; i++) {
 424		rx_pool = &adapter->rx_pool[i];
 425
 426		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
 427
 428		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
 429			free_long_term_buff(adapter, &rx_pool->long_term_buff);
 430			rx_pool->buff_size = be64_to_cpu(size_array[i]);
 431			rc = alloc_long_term_buff(adapter,
 432						  &rx_pool->long_term_buff,
 433						  rx_pool->size *
 434						  rx_pool->buff_size);
 435		} else {
 436			rc = reset_long_term_buff(adapter,
 437						  &rx_pool->long_term_buff);
 438		}
 439
 440		if (rc)
 441			return rc;
 442
 443		for (j = 0; j < rx_pool->size; j++)
 444			rx_pool->free_map[j] = j;
 445
 446		memset(rx_pool->rx_buff, 0,
 447		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
 448
 449		atomic_set(&rx_pool->available, 0);
 450		rx_pool->next_alloc = 0;
 451		rx_pool->next_free = 0;
 452		rx_pool->active = 1;
 453	}
 454
 455	return 0;
 456}
 457
 458static void release_rx_pools(struct ibmvnic_adapter *adapter)
 459{
 460	struct ibmvnic_rx_pool *rx_pool;
 461	int i, j;
 462
 463	if (!adapter->rx_pool)
 464		return;
 465
 466	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 467		rx_pool = &adapter->rx_pool[i];
 468
 469		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 470
 471		kfree(rx_pool->free_map);
 472		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 473
 474		if (!rx_pool->rx_buff)
 475			continue;
 476
 477		for (j = 0; j < rx_pool->size; j++) {
 478			if (rx_pool->rx_buff[j].skb) {
 479				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
 480				rx_pool->rx_buff[j].skb = NULL;
 481			}
 482		}
 483
 484		kfree(rx_pool->rx_buff);
 485	}
 486
 487	kfree(adapter->rx_pool);
 488	adapter->rx_pool = NULL;
 489	adapter->num_active_rx_pools = 0;
 490}
 491
 492static int init_rx_pools(struct net_device *netdev)
 493{
 494	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 495	struct device *dev = &adapter->vdev->dev;
 496	struct ibmvnic_rx_pool *rx_pool;
 497	int rxadd_subcrqs;
 498	u64 *size_array;
 499	int i, j;
 500
 501	rxadd_subcrqs =
 502		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 503	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 504		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 505
 506	adapter->rx_pool = kcalloc(rxadd_subcrqs,
 507				   sizeof(struct ibmvnic_rx_pool),
 508				   GFP_KERNEL);
 509	if (!adapter->rx_pool) {
 510		dev_err(dev, "Failed to allocate rx pools\n");
 511		return -1;
 512	}
 513
 514	adapter->num_active_rx_pools = rxadd_subcrqs;
 515
 516	for (i = 0; i < rxadd_subcrqs; i++) {
 517		rx_pool = &adapter->rx_pool[i];
 518
 519		netdev_dbg(adapter->netdev,
 520			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
 521			   i, adapter->req_rx_add_entries_per_subcrq,
 522			   be64_to_cpu(size_array[i]));
 523
 524		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
 525		rx_pool->index = i;
 526		rx_pool->buff_size = be64_to_cpu(size_array[i]);
 527		rx_pool->active = 1;
 528
 529		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
 530					    GFP_KERNEL);
 531		if (!rx_pool->free_map) {
 532			release_rx_pools(adapter);
 533			return -1;
 534		}
 535
 536		rx_pool->rx_buff = kcalloc(rx_pool->size,
 537					   sizeof(struct ibmvnic_rx_buff),
 538					   GFP_KERNEL);
 539		if (!rx_pool->rx_buff) {
 540			dev_err(dev, "Couldn't alloc rx buffers\n");
 541			release_rx_pools(adapter);
 542			return -1;
 543		}
 544
 545		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
 546					 rx_pool->size * rx_pool->buff_size)) {
 547			release_rx_pools(adapter);
 548			return -1;
 549		}
 550
 551		for (j = 0; j < rx_pool->size; ++j)
 552			rx_pool->free_map[j] = j;
 553
 554		atomic_set(&rx_pool->available, 0);
 555		rx_pool->next_alloc = 0;
 556		rx_pool->next_free = 0;
 557	}
 558
 559	return 0;
 560}
 561
 562static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
 563			     struct ibmvnic_tx_pool *tx_pool)
 564{
 565	int rc, i;
 566
 567	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
 568	if (rc)
 569		return rc;
 570
 571	memset(tx_pool->tx_buff, 0,
 572	       tx_pool->num_buffers *
 573	       sizeof(struct ibmvnic_tx_buff));
 574
 575	for (i = 0; i < tx_pool->num_buffers; i++)
 576		tx_pool->free_map[i] = i;
 577
 578	tx_pool->consumer_index = 0;
 579	tx_pool->producer_index = 0;
 580
 581	return 0;
 582}
 583
 584static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 585{
 586	int tx_scrqs;
 587	int i, rc;
 588
 589	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 590	for (i = 0; i < tx_scrqs; i++) {
 591		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 592		if (rc)
 593			return rc;
 594		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 595		if (rc)
 596			return rc;
 597	}
 598
 599	return 0;
 600}
 601
 602static void release_vpd_data(struct ibmvnic_adapter *adapter)
 603{
 604	if (!adapter->vpd)
 605		return;
 606
 607	kfree(adapter->vpd->buff);
 608	kfree(adapter->vpd);
 609
 610	adapter->vpd = NULL;
 611}
 612
 613static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
 614				struct ibmvnic_tx_pool *tx_pool)
 615{
 616	kfree(tx_pool->tx_buff);
 617	kfree(tx_pool->free_map);
 618	free_long_term_buff(adapter, &tx_pool->long_term_buff);
 619}
 620
 621static void release_tx_pools(struct ibmvnic_adapter *adapter)
 622{
 623	int i;
 624
 625	if (!adapter->tx_pool)
 626		return;
 627
 628	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 629		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
 630		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
 631	}
 632
 633	kfree(adapter->tx_pool);
 634	adapter->tx_pool = NULL;
 635	kfree(adapter->tso_pool);
 636	adapter->tso_pool = NULL;
 637	adapter->num_active_tx_pools = 0;
 638}
 639
 640static int init_one_tx_pool(struct net_device *netdev,
 641			    struct ibmvnic_tx_pool *tx_pool,
 642			    int num_entries, int buf_size)
 643{
 644	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 645	int i;
 646
 647	tx_pool->tx_buff = kcalloc(num_entries,
 648				   sizeof(struct ibmvnic_tx_buff),
 649				   GFP_KERNEL);
 650	if (!tx_pool->tx_buff)
 651		return -1;
 652
 653	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
 654				 num_entries * buf_size))
 655		return -1;
 656
 657	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
 658	if (!tx_pool->free_map)
 659		return -1;
 660
 661	for (i = 0; i < num_entries; i++)
 662		tx_pool->free_map[i] = i;
 663
 664	tx_pool->consumer_index = 0;
 665	tx_pool->producer_index = 0;
 666	tx_pool->num_buffers = num_entries;
 667	tx_pool->buf_size = buf_size;
 668
 669	return 0;
 670}
 671
 672static int init_tx_pools(struct net_device *netdev)
 673{
 674	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 675	int tx_subcrqs;
 676	int i, rc;
 677
 678	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 679	adapter->tx_pool = kcalloc(tx_subcrqs,
 680				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 681	if (!adapter->tx_pool)
 682		return -1;
 683
 684	adapter->tso_pool = kcalloc(tx_subcrqs,
 685				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 686	if (!adapter->tso_pool)
 687		return -1;
 688
 689	adapter->num_active_tx_pools = tx_subcrqs;
 690
 691	for (i = 0; i < tx_subcrqs; i++) {
 692		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
 693				      adapter->req_tx_entries_per_subcrq,
 694				      adapter->req_mtu + VLAN_HLEN);
 695		if (rc) {
 696			release_tx_pools(adapter);
 697			return rc;
 698		}
 699
 700		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
 701				      IBMVNIC_TSO_BUFS,
 702				      IBMVNIC_TSO_BUF_SZ);
 703		if (rc) {
 704			release_tx_pools(adapter);
 705			return rc;
 706		}
 707	}
 708
 709	return 0;
 710}
 711
 712static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
 713{
 714	int i;
 715
 716	if (adapter->napi_enabled)
 717		return;
 718
 719	for (i = 0; i < adapter->req_rx_queues; i++)
 720		napi_enable(&adapter->napi[i]);
 721
 722	adapter->napi_enabled = true;
 723}
 724
 725static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
 726{
 727	int i;
 728
 729	if (!adapter->napi_enabled)
 730		return;
 731
 732	for (i = 0; i < adapter->req_rx_queues; i++) {
 733		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
 734		napi_disable(&adapter->napi[i]);
 735	}
 736
 737	adapter->napi_enabled = false;
 738}
 739
 740static int init_napi(struct ibmvnic_adapter *adapter)
 741{
 742	int i;
 743
 744	adapter->napi = kcalloc(adapter->req_rx_queues,
 745				sizeof(struct napi_struct), GFP_KERNEL);
 746	if (!adapter->napi)
 747		return -ENOMEM;
 748
 749	for (i = 0; i < adapter->req_rx_queues; i++) {
 750		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
 751		netif_napi_add(adapter->netdev, &adapter->napi[i],
 752			       ibmvnic_poll, NAPI_POLL_WEIGHT);
 753	}
 754
 755	adapter->num_active_rx_napi = adapter->req_rx_queues;
 756	return 0;
 757}
 758
 759static void release_napi(struct ibmvnic_adapter *adapter)
 760{
 761	int i;
 762
 763	if (!adapter->napi)
 764		return;
 765
 766	for (i = 0; i < adapter->num_active_rx_napi; i++) {
 767		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
 768		netif_napi_del(&adapter->napi[i]);
 769	}
 770
 771	kfree(adapter->napi);
 772	adapter->napi = NULL;
 773	adapter->num_active_rx_napi = 0;
 774	adapter->napi_enabled = false;
 775}
 776
 777static int ibmvnic_login(struct net_device *netdev)
 778{
 779	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 780	unsigned long timeout = msecs_to_jiffies(30000);
 781	int retry_count = 0;
 782	bool retry;
 783	int rc;
 784
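	/* A login may come back PARTIALSUCCESS if the server cannot honor
	 * the negotiated queue counts; in that case capabilities are
	 * re-queried, the sub-CRQs are rebuilt, and the login is retried,
	 * up to IBMVNIC_MAX_QUEUES attempts.
	 */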
 785	do {
 786		retry = false;
 787		if (retry_count > IBMVNIC_MAX_QUEUES) {
 788			netdev_warn(netdev, "Login attempts exceeded\n");
 789			return -1;
 790		}
 791
 792		adapter->init_done_rc = 0;
 793		reinit_completion(&adapter->init_done);
 794		rc = send_login(adapter);
 795		if (rc) {
 796			netdev_warn(netdev, "Unable to login\n");
 797			return rc;
 798		}
 799
 800		if (!wait_for_completion_timeout(&adapter->init_done,
 801						 timeout)) {
 802			netdev_warn(netdev, "Login timed out\n");
 803			return -1;
 804		}
 805
 806		if (adapter->init_done_rc == PARTIALSUCCESS) {
 807			retry_count++;
 808			release_sub_crqs(adapter, 1);
 809
 810			retry = true;
 811			netdev_dbg(netdev,
 812				   "Received partial success, retrying...\n");
 813			adapter->init_done_rc = 0;
 814			reinit_completion(&adapter->init_done);
 815			send_cap_queries(adapter);
 816			if (!wait_for_completion_timeout(&adapter->init_done,
 817							 timeout)) {
 818				netdev_warn(netdev,
 819					    "Capabilities query timed out\n");
 820				return -1;
 821			}
 822
 823			rc = init_sub_crqs(adapter);
 824			if (rc) {
 825				netdev_warn(netdev,
 826					    "SCRQ initialization failed\n");
 827				return -1;
 828			}
 829
 830			rc = init_sub_crq_irqs(adapter);
 831			if (rc) {
 832				netdev_warn(netdev,
 833					    "SCRQ irq initialization failed\n");
 834				return -1;
 835			}
 836		} else if (adapter->init_done_rc) {
 837			netdev_warn(netdev, "Adapter login failed\n");
 838			return -1;
 839		}
 840	} while (retry);
 841
 842	__ibmvnic_set_mac(netdev, adapter->mac_addr);
 843
 844	return 0;
 845}
 846
 847static void release_login_buffer(struct ibmvnic_adapter *adapter)
 848{
 849	kfree(adapter->login_buf);
 850	adapter->login_buf = NULL;
 851}
 852
 853static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
 854{
 855	kfree(adapter->login_rsp_buf);
 856	adapter->login_rsp_buf = NULL;
 857}
 858
 859static void release_resources(struct ibmvnic_adapter *adapter)
 860{
 861	release_vpd_data(adapter);
 862
 863	release_tx_pools(adapter);
 864	release_rx_pools(adapter);
 865
 866	release_napi(adapter);
 867	release_login_rsp_buffer(adapter);
 868}
 869
 870static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
 871{
 872	struct net_device *netdev = adapter->netdev;
 873	unsigned long timeout = msecs_to_jiffies(30000);
 874	union ibmvnic_crq crq;
 875	bool resend;
 876	int rc;
 877
 878	netdev_dbg(netdev, "setting link state %d\n", link_state);
 879
 880	memset(&crq, 0, sizeof(crq));
 881	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 882	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 883	crq.logical_link_state.link_state = link_state;
 884
 885	do {
 886		resend = false;
 887
 888		reinit_completion(&adapter->init_done);
 889		rc = ibmvnic_send_crq(adapter, &crq);
 890		if (rc) {
 891			netdev_err(netdev, "Failed to set link state\n");
 892			return rc;
 893		}
 894
 895		if (!wait_for_completion_timeout(&adapter->init_done,
 896						 timeout)) {
 897			netdev_err(netdev, "timeout setting link state\n");
 898			return -1;
 899		}
 900
 901		if (adapter->init_done_rc == 1) {
 902		/* Partial success, delay and re-send */
 903			mdelay(1000);
 904			resend = true;
 905		} else if (adapter->init_done_rc) {
 906			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
 907				    adapter->init_done_rc);
 908			return adapter->init_done_rc;
 909		}
 910	} while (resend);
 911
 912	return 0;
 913}
 914
 915static int set_real_num_queues(struct net_device *netdev)
 916{
 917	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 918	int rc;
 919
 920	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
 921		   adapter->req_tx_queues, adapter->req_rx_queues);
 922
 923	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
 924	if (rc) {
 925		netdev_err(netdev, "failed to set the number of tx queues\n");
 926		return rc;
 927	}
 928
 929	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
 930	if (rc)
 931		netdev_err(netdev, "failed to set the number of rx queues\n");
 932
 933	return rc;
 934}
 935
 936static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 937{
 938	struct device *dev = &adapter->vdev->dev;
 939	union ibmvnic_crq crq;
 940	int len = 0;
 941	int rc;
 942
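	/* VPD is fetched in two round trips: GET_VPD_SIZE reports how
	 * large a buffer is needed, then GET_VPD DMAs the data into it.
	 */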
 943	if (adapter->vpd->buff)
 944		len = adapter->vpd->len;
 945
 946	init_completion(&adapter->fw_done);
 947	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 948	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 949	rc = ibmvnic_send_crq(adapter, &crq);
 950	if (rc)
 951		return rc;
 952	wait_for_completion(&adapter->fw_done);
 953
 954	if (!adapter->vpd->len)
 955		return -ENODATA;
 956
 957	if (!adapter->vpd->buff)
 958		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
 959	else if (adapter->vpd->len != len)
 960		adapter->vpd->buff =
 961			krealloc(adapter->vpd->buff,
 962				 adapter->vpd->len, GFP_KERNEL);
 963
 964	if (!adapter->vpd->buff) {
 965		dev_err(dev, "Could not allocate VPD buffer\n");
 966		return -ENOMEM;
 967	}
 968
 969	adapter->vpd->dma_addr =
 970		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
 971			       DMA_FROM_DEVICE);
 972	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
 973		dev_err(dev, "Could not map VPD buffer\n");
 974		kfree(adapter->vpd->buff);
 975		adapter->vpd->buff = NULL;
 976		return -ENOMEM;
 977	}
 978
 979	reinit_completion(&adapter->fw_done);
 980	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
 981	crq.get_vpd.cmd = GET_VPD;
 982	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
 983	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
 984	rc = ibmvnic_send_crq(adapter, &crq);
 985	if (rc) {
 986		kfree(adapter->vpd->buff);
 987		adapter->vpd->buff = NULL;
 988		return rc;
 989	}
 990	wait_for_completion(&adapter->fw_done);
 991
 992	return 0;
 993}
 994
 995static int init_resources(struct ibmvnic_adapter *adapter)
 996{
 997	struct net_device *netdev = adapter->netdev;
 998	int rc;
 999
1000	rc = set_real_num_queues(netdev);
1001	if (rc)
1002		return rc;
1003
1004	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1005	if (!adapter->vpd)
1006		return -ENOMEM;
1007
1008	/* Vital Product Data (VPD) */
1009	rc = ibmvnic_get_vpd(adapter);
1010	if (rc) {
1011		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1012		return rc;
1013	}
1014
1015	adapter->map_id = 1;
1016
1017	rc = init_napi(adapter);
1018	if (rc)
1019		return rc;
1020
1021	send_map_query(adapter);
1022
1023	rc = init_rx_pools(netdev);
1024	if (rc)
1025		return rc;
1026
1027	rc = init_tx_pools(netdev);
1028	return rc;
1029}
1030
1031static int __ibmvnic_open(struct net_device *netdev)
1032{
1033	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1034	enum vnic_state prev_state = adapter->state;
1035	int i, rc;
1036
1037	adapter->state = VNIC_OPENING;
1038	replenish_pools(adapter);
1039	ibmvnic_napi_enable(adapter);
1040
1041	/* We're ready to receive frames, enable the sub-crq interrupts and
1042	 * set the logical link state to up
1043	 */
1044	for (i = 0; i < adapter->req_rx_queues; i++) {
1045		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1046		if (prev_state == VNIC_CLOSED)
1047			enable_irq(adapter->rx_scrq[i]->irq);
1048		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1049	}
1050
1051	for (i = 0; i < adapter->req_tx_queues; i++) {
1052		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1053		if (prev_state == VNIC_CLOSED)
1054			enable_irq(adapter->tx_scrq[i]->irq);
1055		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1056	}
1057
1058	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1059	if (rc) {
1060		for (i = 0; i < adapter->req_rx_queues; i++)
1061			napi_disable(&adapter->napi[i]);
1062		release_resources(adapter);
1063		return rc;
1064	}
1065
1066	netif_tx_start_all_queues(netdev);
1067
1068	if (prev_state == VNIC_CLOSED) {
1069		for (i = 0; i < adapter->req_rx_queues; i++)
1070			napi_schedule(&adapter->napi[i]);
1071	}
1072
1073	adapter->state = VNIC_OPEN;
1074	return rc;
1075}
1076
1077static int ibmvnic_open(struct net_device *netdev)
1078{
1079	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1080	int rc;
1081
1082	/* If device failover is pending, just set device state and return.
1083	 * Device operation will be handled by reset routine.
1084	 */
1085	if (adapter->failover_pending) {
1086		adapter->state = VNIC_OPEN;
1087		return 0;
1088	}
1089
1090	if (adapter->state != VNIC_CLOSED) {
1091		rc = ibmvnic_login(netdev);
1092		if (rc)
1093			return rc;
1094
1095		rc = init_resources(adapter);
1096		if (rc) {
1097			netdev_err(netdev, "failed to initialize resources\n");
1098			release_resources(adapter);
1099			return rc;
1100		}
1101	}
1102
1103	rc = __ibmvnic_open(netdev);
1104
1105	return rc;
1106}
1107
1108static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1109{
1110	struct ibmvnic_rx_pool *rx_pool;
1111	struct ibmvnic_rx_buff *rx_buff;
1112	u64 rx_entries;
1113	int rx_scrqs;
1114	int i, j;
1115
1116	if (!adapter->rx_pool)
1117		return;
1118
1119	rx_scrqs = adapter->num_active_rx_pools;
1120	rx_entries = adapter->req_rx_add_entries_per_subcrq;
1121
1122	/* Free any remaining skbs in the rx buffer pools */
1123	for (i = 0; i < rx_scrqs; i++) {
1124		rx_pool = &adapter->rx_pool[i];
1125		if (!rx_pool || !rx_pool->rx_buff)
1126			continue;
1127
1128		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1129		for (j = 0; j < rx_entries; j++) {
1130			rx_buff = &rx_pool->rx_buff[j];
1131			if (rx_buff && rx_buff->skb) {
1132				dev_kfree_skb_any(rx_buff->skb);
1133				rx_buff->skb = NULL;
1134			}
1135		}
1136	}
1137}
1138
1139static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1140			      struct ibmvnic_tx_pool *tx_pool)
1141{
1142	struct ibmvnic_tx_buff *tx_buff;
1143	u64 tx_entries;
1144	int i;
1145
1146	if (!tx_pool || !tx_pool->tx_buff)
1147		return;
1148
1149	tx_entries = tx_pool->num_buffers;
1150
1151	for (i = 0; i < tx_entries; i++) {
1152		tx_buff = &tx_pool->tx_buff[i];
1153		if (tx_buff && tx_buff->skb) {
1154			dev_kfree_skb_any(tx_buff->skb);
1155			tx_buff->skb = NULL;
1156		}
1157	}
1158}
1159
1160static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1161{
1162	int tx_scrqs;
1163	int i;
1164
1165	if (!adapter->tx_pool || !adapter->tso_pool)
1166		return;
1167
1168	tx_scrqs = adapter->num_active_tx_pools;
1169
1170	/* Free any remaining skbs in the tx buffer pools */
1171	for (i = 0; i < tx_scrqs; i++) {
1172		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1173		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1174		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1175	}
1176}
1177
1178static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1179{
1180	struct net_device *netdev = adapter->netdev;
1181	int i;
1182
1183	if (adapter->tx_scrq) {
1184		for (i = 0; i < adapter->req_tx_queues; i++)
1185			if (adapter->tx_scrq[i]->irq) {
1186				netdev_dbg(netdev,
1187					   "Disabling tx_scrq[%d] irq\n", i);
1188				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1189				disable_irq(adapter->tx_scrq[i]->irq);
1190			}
1191	}
1192
1193	if (adapter->rx_scrq) {
1194		for (i = 0; i < adapter->req_rx_queues; i++) {
1195			if (adapter->rx_scrq[i]->irq) {
1196				netdev_dbg(netdev,
1197					   "Disabling rx_scrq[%d] irq\n", i);
1198				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1199				disable_irq(adapter->rx_scrq[i]->irq);
1200			}
1201		}
1202	}
1203}
1204
1205static void ibmvnic_cleanup(struct net_device *netdev)
1206{
1207	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1208
1209	/* ensure that transmissions are stopped if called by do_reset */
1210	if (test_bit(0, &adapter->resetting))
1211		netif_tx_disable(netdev);
1212	else
1213		netif_tx_stop_all_queues(netdev);
1214
1215	ibmvnic_napi_disable(adapter);
1216	ibmvnic_disable_irqs(adapter);
1217
1218	clean_rx_pools(adapter);
1219	clean_tx_pools(adapter);
1220}
1221
1222static int __ibmvnic_close(struct net_device *netdev)
1223{
1224	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1225	int rc = 0;
1226
1227	adapter->state = VNIC_CLOSING;
1228	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1229	if (rc)
1230		return rc;
1231	adapter->state = VNIC_CLOSED;
1232	return 0;
1233}
1234
1235static int ibmvnic_close(struct net_device *netdev)
1236{
1237	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1238	int rc;
1239
1240	/* If device failover is pending, just set device state and return.
1241	 * Device operation will be handled by reset routine.
1242	 */
1243	if (adapter->failover_pending) {
1244		adapter->state = VNIC_CLOSED;
1245		return 0;
1246	}
1247
1248	rc = __ibmvnic_close(netdev);
1249	ibmvnic_cleanup(netdev);
1250
1251	return rc;
1252}
1253
1254/**
1255 * build_hdr_data - creates L2/L3/L4 header data buffer
1256 * @hdr_field - bitfield determining needed headers
1257 * @skb - socket buffer
1258  * @hdr_len - array that receives the individual header lengths
1259  * @hdr_data - buffer that receives the packed header data
1260  *
1261  * Reads hdr_field to determine which headers are needed by firmware.
1262  * Builds a buffer containing these headers.  Saves individual header
1263  * lengths to hdr_len and returns the total length of the buffer.
1264 */
1265static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1266			  int *hdr_len, u8 *hdr_data)
1267{
1268	int len = 0;
1269	u8 *hdr;
1270
1271	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1272		hdr_len[0] = sizeof(struct vlan_ethhdr);
1273	else
1274		hdr_len[0] = sizeof(struct ethhdr);
1275
1276	if (skb->protocol == htons(ETH_P_IP)) {
1277		hdr_len[1] = ip_hdr(skb)->ihl * 4;
1278		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1279			hdr_len[2] = tcp_hdrlen(skb);
1280		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1281			hdr_len[2] = sizeof(struct udphdr);
1282	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1283		hdr_len[1] = sizeof(struct ipv6hdr);
1284		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1285			hdr_len[2] = tcp_hdrlen(skb);
1286		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1287			hdr_len[2] = sizeof(struct udphdr);
1288	} else if (skb->protocol == htons(ETH_P_ARP)) {
1289		hdr_len[1] = arp_hdr_len(skb->dev);
1290		hdr_len[2] = 0;
1291	}
1292
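	/* Bits 6, 5 and 4 of hdr_field select the L2, L3 and L4 headers
	 * respectively; each selected header is packed into hdr_data
	 * back to back.
	 */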
1293	memset(hdr_data, 0, 120);
1294	if ((hdr_field >> 6) & 1) {
1295		hdr = skb_mac_header(skb);
1296		memcpy(hdr_data, hdr, hdr_len[0]);
1297		len += hdr_len[0];
1298	}
1299
1300	if ((hdr_field >> 5) & 1) {
1301		hdr = skb_network_header(skb);
1302		memcpy(hdr_data + len, hdr, hdr_len[1]);
1303		len += hdr_len[1];
1304	}
1305
1306	if ((hdr_field >> 4) & 1) {
1307		hdr = skb_transport_header(skb);
1308		memcpy(hdr_data + len, hdr, hdr_len[2]);
1309		len += hdr_len[2];
1310	}
1311	return len;
1312}
1313
1314/**
1315 * create_hdr_descs - create header and header extension descriptors
1316 * @hdr_field - bitfield determining needed headers
1317 * @data - buffer containing header data
1318 * @len - length of data buffer
1319 * @hdr_len - array of individual header lengths
1320 * @scrq_arr - descriptor array
1321 *
1322 * Creates header and, if needed, header extension descriptors and
1323 * places them in a descriptor array, scrq_arr
1324 */
1325
1326static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1327			    union sub_crq *scrq_arr)
1328{
1329	union sub_crq hdr_desc;
1330	int tmp_len = len;
1331	int num_descs = 0;
1332	u8 *data, *cur;
1333	int tmp;
1334
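	/* The first descriptor carries up to 24 bytes of header data plus
	 * the per-layer lengths; each subsequent extension descriptor
	 * carries up to 29 more bytes.
	 */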
1335	while (tmp_len > 0) {
1336		cur = hdr_data + len - tmp_len;
1337
1338		memset(&hdr_desc, 0, sizeof(hdr_desc));
1339		if (cur != hdr_data) {
1340			data = hdr_desc.hdr_ext.data;
1341			tmp = tmp_len > 29 ? 29 : tmp_len;
1342			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1343			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1344			hdr_desc.hdr_ext.len = tmp;
1345		} else {
1346			data = hdr_desc.hdr.data;
1347			tmp = tmp_len > 24 ? 24 : tmp_len;
1348			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1349			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1350			hdr_desc.hdr.len = tmp;
1351			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1352			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1353			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1354			hdr_desc.hdr.flag = hdr_field << 1;
1355		}
1356		memcpy(data, cur, tmp);
1357		tmp_len -= tmp;
1358		*scrq_arr = hdr_desc;
1359		scrq_arr++;
1360		num_descs++;
1361	}
1362
1363	return num_descs;
1364}
1365
1366/**
1367 * build_hdr_descs_arr - build a header descriptor array
1368  * @txbuff - tx buffer holding the skb and the indirect descriptor array
1369  * @num_entries - pointer to descriptor count, incremented by the number
1370  *                of header descriptors created
1371 * @hdr_field - bit field determining which headers will be sent
1372 *
1373 * This function will build a TX descriptor array with applicable
1374 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1375 */
1376
1377static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1378				int *num_entries, u8 hdr_field)
1379{
1380	int hdr_len[3] = {0, 0, 0};
1381	int tot_len;
1382	u8 *hdr_data = txbuff->hdr_data;
1383
1384	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1385				 txbuff->hdr_data);
1386	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1387			 txbuff->indir_arr + 1);
1388}
1389
1390static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1391				    struct net_device *netdev)
1392{
1393	/* For some backing devices, mishandling of small packets
1394	 * can result in a loss of connection or TX stall. Device
1395	 * architects recommend that no packet should be smaller
1396	 * than the minimum MTU value provided to the driver, so
1397	 * pad any packets to that length
1398	 */
1399	if (skb->len < netdev->min_mtu)
1400		return skb_put_padto(skb, netdev->min_mtu);
1401
1402	return 0;
1403}
1404
1405static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1406{
1407	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1408	int queue_num = skb_get_queue_mapping(skb);
1409	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1410	struct device *dev = &adapter->vdev->dev;
1411	struct ibmvnic_tx_buff *tx_buff = NULL;
1412	struct ibmvnic_sub_crq_queue *tx_scrq;
1413	struct ibmvnic_tx_pool *tx_pool;
1414	unsigned int tx_send_failed = 0;
1415	unsigned int tx_map_failed = 0;
1416	unsigned int tx_dropped = 0;
1417	unsigned int tx_packets = 0;
1418	unsigned int tx_bytes = 0;
1419	dma_addr_t data_dma_addr;
1420	struct netdev_queue *txq;
1421	unsigned long lpar_rc;
1422	union sub_crq tx_crq;
1423	unsigned int offset;
1424	int num_entries = 1;
1425	unsigned char *dst;
1426	u64 *handle_array;
1427	int index = 0;
1428	u8 proto = 0;
1429	netdev_tx_t ret = NETDEV_TX_OK;
1430
1431	if (test_bit(0, &adapter->resetting)) {
1432		if (!netif_subqueue_stopped(netdev, skb))
1433			netif_stop_subqueue(netdev, queue_num);
1434		dev_kfree_skb_any(skb);
1435
1436		tx_send_failed++;
1437		tx_dropped++;
1438		ret = NETDEV_TX_OK;
1439		goto out;
1440	}
1441
1442	if (ibmvnic_xmit_workarounds(skb, netdev)) {
1443		tx_dropped++;
1444		tx_send_failed++;
1445		ret = NETDEV_TX_OK;
1446		goto out;
1447	}
1448	if (skb_is_gso(skb))
1449		tx_pool = &adapter->tso_pool[queue_num];
1450	else
1451		tx_pool = &adapter->tx_pool[queue_num];
1452
1453	tx_scrq = adapter->tx_scrq[queue_num];
1454	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1455	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1456		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1457
1458	index = tx_pool->free_map[tx_pool->consumer_index];
1459
1460	if (index == IBMVNIC_INVALID_MAP) {
1461		dev_kfree_skb_any(skb);
1462		tx_send_failed++;
1463		tx_dropped++;
1464		ret = NETDEV_TX_OK;
1465		goto out;
1466	}
1467
1468	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1469
1470	offset = index * tx_pool->buf_size;
1471	dst = tx_pool->long_term_buff.buff + offset;
1472	memset(dst, 0, tx_pool->buf_size);
1473	data_dma_addr = tx_pool->long_term_buff.addr + offset;
1474
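	/* Assemble the frame contiguously in the long term buffer slot:
	 * linear data first, then each page fragment in order.
	 */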
1475	if (skb_shinfo(skb)->nr_frags) {
1476		int cur, i;
1477
1478		/* Copy the head */
1479		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1480		cur = skb_headlen(skb);
1481
1482		/* Copy the frags */
1483		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1484			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1485
1486			memcpy(dst + cur,
1487			       page_address(skb_frag_page(frag)) +
1488			       skb_frag_off(frag), skb_frag_size(frag));
1489			cur += skb_frag_size(frag);
1490		}
1491	} else {
1492		skb_copy_from_linear_data(skb, dst, skb->len);
1493	}
1494
1495	tx_pool->consumer_index =
1496	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1497
1498	tx_buff = &tx_pool->tx_buff[index];
1499	tx_buff->skb = skb;
1500	tx_buff->data_dma[0] = data_dma_addr;
1501	tx_buff->data_len[0] = skb->len;
1502	tx_buff->index = index;
1503	tx_buff->pool_index = queue_num;
1504	tx_buff->last_frag = true;
1505
1506	memset(&tx_crq, 0, sizeof(tx_crq));
1507	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1508	tx_crq.v1.type = IBMVNIC_TX_DESC;
1509	tx_crq.v1.n_crq_elem = 1;
1510	tx_crq.v1.n_sge = 1;
1511	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1512
1513	if (skb_is_gso(skb))
1514		tx_crq.v1.correlator =
1515			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1516	else
1517		tx_crq.v1.correlator = cpu_to_be32(index);
1518	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1519	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1520	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1521
1522	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1523		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1524		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1525	}
1526
1527	if (skb->protocol == htons(ETH_P_IP)) {
1528		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1529		proto = ip_hdr(skb)->protocol;
1530	} else if (skb->protocol == htons(ETH_P_IPV6)) {
1531		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1532		proto = ipv6_hdr(skb)->nexthdr;
1533	}
1534
1535	if (proto == IPPROTO_TCP)
1536		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1537	else if (proto == IPPROTO_UDP)
1538		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1539
1540	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1541		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1542		hdrs += 2;
1543	}
1544	if (skb_is_gso(skb)) {
1545		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1546		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1547		hdrs += 2;
1548	}
1549	/* determine if l2/3/4 headers are sent to firmware */
1550	if ((*hdrs >> 7) & 1) {
1551		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1552		tx_crq.v1.n_crq_elem = num_entries;
1553		tx_buff->num_entries = num_entries;
1554		tx_buff->indir_arr[0] = tx_crq;
1555		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1556						    sizeof(tx_buff->indir_arr),
1557						    DMA_TO_DEVICE);
1558		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1559			dev_kfree_skb_any(skb);
1560			tx_buff->skb = NULL;
1561			if (!firmware_has_feature(FW_FEATURE_CMO))
1562				dev_err(dev, "tx: unable to map descriptor array\n");
1563			tx_map_failed++;
1564			tx_dropped++;
1565			ret = NETDEV_TX_OK;
1566			goto tx_err_out;
1567		}
1568		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569					       (u64)tx_buff->indir_dma,
1570					       (u64)num_entries);
1571		dma_unmap_single(dev, tx_buff->indir_dma,
1572				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1573	} else {
1574		tx_buff->num_entries = num_entries;
1575		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1576				      &tx_crq);
1577	}
1578	if (lpar_rc != H_SUCCESS) {
1579		if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1580			dev_err_ratelimited(dev, "tx: send failed\n");
1581		dev_kfree_skb_any(skb);
1582		tx_buff->skb = NULL;
1583
1584		if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1585			/* Disable TX and report carrier off if queue is closed
1586			 * or pending failover.
1587			 * Firmware guarantees that a signal will be sent to the
1588			 * driver, triggering a reset or some other action.
1589			 */
1590			netif_tx_stop_all_queues(netdev);
1591			netif_carrier_off(netdev);
1592		}
1593
1594		tx_send_failed++;
1595		tx_dropped++;
1596		ret = NETDEV_TX_OK;
1597		goto tx_err_out;
1598	}
1599
1600	if (atomic_add_return(num_entries, &tx_scrq->used)
1601					>= adapter->req_tx_entries_per_subcrq) {
1602		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1603		netif_stop_subqueue(netdev, queue_num);
1604	}
1605
1606	tx_packets++;
1607	tx_bytes += skb->len;
1608	txq->trans_start = jiffies;
1609	ret = NETDEV_TX_OK;
1610	goto out;
1611
1612tx_err_out:
1613	/* roll back consumer index and map array */
1614	if (tx_pool->consumer_index == 0)
1615		tx_pool->consumer_index =
1616			tx_pool->num_buffers - 1;
1617	else
1618		tx_pool->consumer_index--;
1619	tx_pool->free_map[tx_pool->consumer_index] = index;
1620out:
1621	netdev->stats.tx_dropped += tx_dropped;
1622	netdev->stats.tx_bytes += tx_bytes;
1623	netdev->stats.tx_packets += tx_packets;
1624	adapter->tx_send_failed += tx_send_failed;
1625	adapter->tx_map_failed += tx_map_failed;
1626	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1627	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1628	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1629
1630	return ret;
1631}
1632
1633static void ibmvnic_set_multi(struct net_device *netdev)
1634{
1635	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1636	struct netdev_hw_addr *ha;
1637	union ibmvnic_crq crq;
1638
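	/* Promiscuous mode is only honored when the server advertised
	 * support for it; otherwise program the multicast filter with
	 * MULTICAST_CTRL CRQs.
	 */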
1639	memset(&crq, 0, sizeof(crq));
1640	crq.request_capability.first = IBMVNIC_CRQ_CMD;
1641	crq.request_capability.cmd = REQUEST_CAPABILITY;
1642
1643	if (netdev->flags & IFF_PROMISC) {
1644		if (!adapter->promisc_supported)
1645			return;
1646	} else {
1647		if (netdev->flags & IFF_ALLMULTI) {
1648			/* Accept all multicast */
1649			memset(&crq, 0, sizeof(crq));
1650			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1651			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1652			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1653			ibmvnic_send_crq(adapter, &crq);
1654		} else if (netdev_mc_empty(netdev)) {
1655			/* Reject all multicast */
1656			memset(&crq, 0, sizeof(crq));
1657			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1658			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1659			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1660			ibmvnic_send_crq(adapter, &crq);
1661		} else {
1662			/* Accept one or more multicast(s) */
1663			netdev_for_each_mc_addr(ha, netdev) {
1664				memset(&crq, 0, sizeof(crq));
1665				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1666				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1667				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1668				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1669						ha->addr);
1670				ibmvnic_send_crq(adapter, &crq);
1671			}
1672		}
1673	}
1674}
1675
1676static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1677{
1678	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1679	union ibmvnic_crq crq;
1680	int rc;
1681
1682	if (!is_valid_ether_addr(dev_addr)) {
1683		rc = -EADDRNOTAVAIL;
1684		goto err;
1685	}
1686
1687	memset(&crq, 0, sizeof(crq));
1688	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1689	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1690	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1691
1692	init_completion(&adapter->fw_done);
1693	rc = ibmvnic_send_crq(adapter, &crq);
1694	if (rc) {
1695		rc = -EIO;
1696		goto err;
1697	}
1698
1699	wait_for_completion(&adapter->fw_done);
1700	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
1701	if (adapter->fw_done_rc) {
1702		rc = -EIO;
1703		goto err;
1704	}
1705
1706	return 0;
1707err:
1708	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1709	return rc;
1710}
1711
1712static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1713{
1714	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1715	struct sockaddr *addr = p;
1716	int rc;
1717
1718	rc = 0;
1719	ether_addr_copy(adapter->mac_addr, addr->sa_data);
1720	if (adapter->state != VNIC_PROBED)
1721		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1722
1723	return rc;
1724}
1725
1726/**
1727 * do_change_param_reset returns zero if we are able to keep processing reset
1728 * events, or non-zero if we hit a fatal error and must halt.
1729 */
1730static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1731				 struct ibmvnic_rwi *rwi,
1732				 u32 reset_state)
1733{
1734	struct net_device *netdev = adapter->netdev;
1735	int i, rc;
1736
1737	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1738		   rwi->reset_reason);
1739
1740	netif_carrier_off(netdev);
1741	adapter->reset_reason = rwi->reset_reason;
1742
1743	ibmvnic_cleanup(netdev);
1744
1745	if (reset_state == VNIC_OPEN) {
1746		rc = __ibmvnic_close(netdev);
1747		if (rc)
1748			return rc;
1749	}
1750
1751	release_resources(adapter);
1752	release_sub_crqs(adapter, 1);
1753	release_crq_queue(adapter);
1754
1755	adapter->state = VNIC_PROBED;
1756
1757	rc = init_crq_queue(adapter);
1758
1759	if (rc) {
1760		netdev_err(adapter->netdev,
1761			   "Couldn't initialize crq. rc=%d\n", rc);
1762		return rc;
1763	}
1764
1765	rc = ibmvnic_reset_init(adapter);
1766	if (rc)
1767		return IBMVNIC_INIT_FAILED;
1768
1769	/* If the adapter was in PROBE state prior to the reset,
1770	 * exit here.
1771	 */
1772	if (reset_state == VNIC_PROBED)
1773		return 0;
1774
1775	rc = ibmvnic_login(netdev);
1776	if (rc) {
1777		adapter->state = reset_state;
1778		return rc;
1779	}
1780
1781	rc = init_resources(adapter);
1782	if (rc)
1783		return rc;
1784
1785	ibmvnic_disable_irqs(adapter);
1786
1787	adapter->state = VNIC_CLOSED;
1788
1789	if (reset_state == VNIC_CLOSED)
1790		return 0;
1791
1792	rc = __ibmvnic_open(netdev);
1793	if (rc)
1794		return IBMVNIC_OPEN_FAILED;
1795
1796	/* refresh device's multicast list */
1797	ibmvnic_set_multi(netdev);
1798
1799	/* kick napi */
1800	for (i = 0; i < adapter->req_rx_queues; i++)
1801		napi_schedule(&adapter->napi[i]);
1802
1803	return 0;
1804}
1805
1806/**
1807 * do_reset returns zero if we are able to keep processing reset events, or
1808 * non-zero if we hit a fatal error and must halt.
1809 */
1810static int do_reset(struct ibmvnic_adapter *adapter,
1811		    struct ibmvnic_rwi *rwi, u32 reset_state)
1812{
1813	u64 old_num_rx_queues, old_num_tx_queues;
1814	u64 old_num_rx_slots, old_num_tx_slots;
1815	struct net_device *netdev = adapter->netdev;
1816	int i, rc;
1817
1818	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1819		   rwi->reset_reason);
1820
1821	rtnl_lock();
1822
1823	netif_carrier_off(netdev);
1824	adapter->reset_reason = rwi->reset_reason;
1825
1826	old_num_rx_queues = adapter->req_rx_queues;
1827	old_num_tx_queues = adapter->req_tx_queues;
1828	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1829	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1830
1831	ibmvnic_cleanup(netdev);
1832
1833	if (reset_state == VNIC_OPEN &&
1834	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
1835	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
1836		adapter->state = VNIC_CLOSING;
1837
1838		/* Release the RTNL lock before link state change and
1839		 * re-acquire after the link state change to allow
1840		 * linkwatch_event to grab the RTNL lock and run during
1841		 * a reset.
1842		 */
1843		rtnl_unlock();
1844		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1845		rtnl_lock();
1846		if (rc)
1847			goto out;
1848
1849		if (adapter->state != VNIC_CLOSING) {
1850			rc = -1;
1851			goto out;
1852		}
1853
1854		adapter->state = VNIC_CLOSED;
1855	}
1856
1857	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1858		/* remove the closed state so when we call open it appears
1859		 * we are coming from the probed state.
1860		 */
1861		adapter->state = VNIC_PROBED;
1862
1863		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1864			rc = ibmvnic_reenable_crq_queue(adapter);
1865			release_sub_crqs(adapter, 1);
1866		} else {
1867			rc = ibmvnic_reset_crq(adapter);
1868			if (!rc)
1869				rc = vio_enable_interrupts(adapter->vdev);
1870		}
1871
1872		if (rc) {
1873			netdev_err(adapter->netdev,
1874				   "Couldn't initialize crq. rc=%d\n", rc);
1875			goto out;
1876		}
1877
1878		rc = ibmvnic_reset_init(adapter);
1879		if (rc) {
1880			rc = IBMVNIC_INIT_FAILED;
1881			goto out;
1882		}
1883
1884		/* If the adapter was in PROBE state prior to the reset,
1885		 * exit here.
1886		 */
1887		if (reset_state == VNIC_PROBED) {
1888			rc = 0;
1889			goto out;
1890		}
1891
1892		rc = ibmvnic_login(netdev);
1893		if (rc) {
1894			adapter->state = reset_state;
1895			goto out;
1896		}
1897
1898		if (adapter->req_rx_queues != old_num_rx_queues ||
1899		    adapter->req_tx_queues != old_num_tx_queues ||
1900		    adapter->req_rx_add_entries_per_subcrq !=
1901		    old_num_rx_slots ||
1902		    adapter->req_tx_entries_per_subcrq !=
1903		    old_num_tx_slots) {
1904			release_rx_pools(adapter);
1905			release_tx_pools(adapter);
1906			release_napi(adapter);
1907			release_vpd_data(adapter);
1908
1909			rc = init_resources(adapter);
1910			if (rc)
1911				goto out;
1912
1913		} else {
1914			rc = reset_tx_pools(adapter);
1915			if (rc)
1916				goto out;
1917
1918			rc = reset_rx_pools(adapter);
1919			if (rc)
1920				goto out;
1921		}
1922		ibmvnic_disable_irqs(adapter);
1923	}
1924	adapter->state = VNIC_CLOSED;
1925
1926	if (reset_state == VNIC_CLOSED) {
1927		rc = 0;
1928		goto out;
1929	}
1930
1931	rc = __ibmvnic_open(netdev);
1932	if (rc) {
1933		rc = IBMVNIC_OPEN_FAILED;
1934		goto out;
1935	}
1936
1937	/* refresh device's multicast list */
1938	ibmvnic_set_multi(netdev);
1939
1940	/* kick napi */
1941	for (i = 0; i < adapter->req_rx_queues; i++)
1942		napi_schedule(&adapter->napi[i]);
1943
1944	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1945		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
1946
1947	rc = 0;
1948
1949out:
1950	rtnl_unlock();
1951
1952	return rc;
1953}
1954
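/**
 * do_hard_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */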
1955static int do_hard_reset(struct ibmvnic_adapter *adapter,
1956			 struct ibmvnic_rwi *rwi, u32 reset_state)
1957{
1958	struct net_device *netdev = adapter->netdev;
1959	int rc;
1960
1961	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1962		   rwi->reset_reason);
1963
1964	netif_carrier_off(netdev);
1965	adapter->reset_reason = rwi->reset_reason;
1966
1967	ibmvnic_cleanup(netdev);
1968	release_resources(adapter);
1969	release_sub_crqs(adapter, 0);
1970	release_crq_queue(adapter);
1971
1972	/* remove the closed state so when we call open it appears
1973	 * we are coming from the probed state.
1974	 */
1975	adapter->state = VNIC_PROBED;
1976
1977	reinit_completion(&adapter->init_done);
1978	rc = init_crq_queue(adapter);
1979	if (rc) {
1980		netdev_err(adapter->netdev,
1981			   "Couldn't initialize crq. rc=%d\n", rc);
1982		return rc;
1983	}
1984
1985	rc = ibmvnic_init(adapter);
1986	if (rc)
1987		return rc;
1988
1989	/* If the adapter was in PROBE state prior to the reset,
1990	 * exit here.
1991	 */
1992	if (reset_state == VNIC_PROBED)
1993		return 0;
1994
1995	rc = ibmvnic_login(netdev);
1996	if (rc) {
1997		adapter->state = VNIC_PROBED;
1998		return 0;
1999	}
2000
2001	rc = init_resources(adapter);
2002	if (rc)
2003		return rc;
2004
2005	ibmvnic_disable_irqs(adapter);
2006	adapter->state = VNIC_CLOSED;
2007
2008	if (reset_state == VNIC_CLOSED)
2009		return 0;
2010
2011	rc = __ibmvnic_open(netdev);
2012	if (rc)
2013		return IBMVNIC_OPEN_FAILED;
2014
2015	return 0;
2016}
2017
2018static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2019{
2020	struct ibmvnic_rwi *rwi;
2021	unsigned long flags;
2022
2023	spin_lock_irqsave(&adapter->rwi_lock, flags);
2024
2025	if (!list_empty(&adapter->rwi_list)) {
2026		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2027				       list);
2028		list_del(&rwi->list);
2029	} else {
2030		rwi = NULL;
2031	}
2032
2033	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2034	return rwi;
2035}
2036
2037static void free_all_rwi(struct ibmvnic_adapter *adapter)
2038{
2039	struct ibmvnic_rwi *rwi;
2040
2041	rwi = get_next_rwi(adapter);
2042	while (rwi) {
2043		kfree(rwi);
2044		rwi = get_next_rwi(adapter);
2045	}
2046}
2047
2048static void __ibmvnic_reset(struct work_struct *work)
2049{
2050	struct ibmvnic_rwi *rwi;
2051	struct ibmvnic_adapter *adapter;
2052	u32 reset_state;
2053	int rc = 0;
2054
2055	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2056
2057	if (test_and_set_bit_lock(0, &adapter->resetting)) {
2058		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2059				      IBMVNIC_RESET_DELAY);
2060		return;
2061	}
2062
2063	reset_state = adapter->state;
2064
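	/* Remember the adapter state at entry; each reset handler uses
	 * it to decide how far to bring the adapter back up.
	 */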
2065	rwi = get_next_rwi(adapter);
2066	while (rwi) {
2067		if (adapter->state == VNIC_REMOVING ||
2068		    adapter->state == VNIC_REMOVED) {
2069			kfree(rwi);
2070			rc = EBUSY;
2071			break;
2072		}
2073
2074		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2075			/* CHANGE_PARAM requestor holds rtnl_lock */
2076			rc = do_change_param_reset(adapter, rwi, reset_state);
2077		} else if (adapter->force_reset_recovery) {
2078			/* Transport event occurred during previous reset */
2079			if (adapter->wait_for_reset) {
2080				/* Previous was CHANGE_PARAM; caller locked */
2081				adapter->force_reset_recovery = false;
2082				rc = do_hard_reset(adapter, rwi, reset_state);
2083			} else {
2084				rtnl_lock();
2085				adapter->force_reset_recovery = false;
2086				rc = do_hard_reset(adapter, rwi, reset_state);
2087				rtnl_unlock();
2088			}
2089		} else {
2090			rc = do_reset(adapter, rwi, reset_state);
2091		}
2092		kfree(rwi);
2093		if (rc == IBMVNIC_OPEN_FAILED) {
2094			if (list_empty(&adapter->rwi_list))
2095				adapter->state = VNIC_CLOSED;
2096			else
2097				adapter->state = reset_state;
2098			rc = 0;
2099		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
2100		    !adapter->force_reset_recovery)
2101			break;
2102
2103		rwi = get_next_rwi(adapter);
2104
2105		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2106			    rwi->reset_reason == VNIC_RESET_MOBILITY))
2107			adapter->force_reset_recovery = true;
2108	}
2109
2110	if (adapter->wait_for_reset) {
2111		adapter->reset_done_rc = rc;
2112		complete(&adapter->reset_done);
2113	}
2114
2115	if (rc) {
2116		netdev_dbg(adapter->netdev, "Reset failed\n");
2117		free_all_rwi(adapter);
2118	}
2119
2120	clear_bit_unlock(0, &adapter->resetting);
2121}
2122
2123static void __ibmvnic_delayed_reset(struct work_struct *work)
2124{
2125	struct ibmvnic_adapter *adapter;
2126
2127	adapter = container_of(work, struct ibmvnic_adapter,
2128			       ibmvnic_delayed_reset.work);
2129	__ibmvnic_reset(&adapter->ibmvnic_reset);
2130}
2131
2132static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2133			 enum ibmvnic_reset_reason reason)
2134{
2135	struct list_head *entry, *tmp_entry;
2136	struct ibmvnic_rwi *rwi, *tmp;
2137	struct net_device *netdev = adapter->netdev;
2138	unsigned long flags;
2139	int ret;
2140
2141	if (adapter->state == VNIC_REMOVING ||
2142	    adapter->state == VNIC_REMOVED ||
2143	    adapter->failover_pending) {
2144		ret = EBUSY;
2145		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2146		goto err;
2147	}
2148
2149	if (adapter->state == VNIC_PROBING) {
2150		netdev_warn(netdev, "Adapter reset during probe\n");
2151		ret = adapter->init_done_rc = EAGAIN;
2152		goto err;
2153	}
2154
2155	spin_lock_irqsave(&adapter->rwi_lock, flags);
2156
2157	list_for_each(entry, &adapter->rwi_list) {
2158		tmp = list_entry(entry, struct ibmvnic_rwi, list);
2159		if (tmp->reset_reason == reason) {
2160			netdev_dbg(netdev, "Skipping matching reset\n");
2161			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2162			ret = EBUSY;
2163			goto err;
2164		}
2165	}
2166
2167	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2168	if (!rwi) {
2169		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2170		ibmvnic_close(netdev);
2171		ret = ENOMEM;
2172		goto err;
2173	}
2174	/* if we just received a transport event,
2175	 * flush reset queue and process this reset
2176	 */
2177	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2178		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2179			list_del(entry);
2180	}
2181	rwi->reset_reason = reason;
2182	list_add_tail(&rwi->list, &adapter->rwi_list);
2183	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2184	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2185	schedule_work(&adapter->ibmvnic_reset);
2186
2187	return 0;
2188err:
2189	return -ret;
2190}
2191
2192static void ibmvnic_tx_timeout(struct net_device *dev)
2193{
2194	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2195
2196	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2197}
2198
2199static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2200				  struct ibmvnic_rx_buff *rx_buff)
2201{
2202	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2203
2204	rx_buff->skb = NULL;
2205
2206	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2207	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2208
2209	atomic_dec(&pool->available);
2210}
2211
2212static int ibmvnic_poll(struct napi_struct *napi, int budget)
2213{
2214	struct net_device *netdev = napi->dev;
2215	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2216	int scrq_num = (int)(napi - adapter->napi);
2217	int frames_processed = 0;
2218
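	/* Process up to budget rx completions; each completed frame is
	 * copied out of the long term buffer into its skb before being
	 * passed up the stack.
	 */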
2219restart_poll:
2220	while (frames_processed < budget) {
2221		struct sk_buff *skb;
2222		struct ibmvnic_rx_buff *rx_buff;
2223		union sub_crq *next;
2224		u32 length;
2225		u16 offset;
2226		u8 flags = 0;
2227
2228		if (unlikely(test_bit(0, &adapter->resetting) &&
2229			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2230			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2231			napi_complete_done(napi, frames_processed);
2232			return frames_processed;
2233		}
2234
2235		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2236			break;
2237		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2238		rx_buff = (struct ibmvnic_rx_buff *)
2239			  be64_to_cpu(next->rx_comp.correlator);
2241		/* do error checking */
2242		if (next->rx_comp.rc) {
2243			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2244				   be16_to_cpu(next->rx_comp.rc));
2245			/* free the entry */
2246			next->rx_comp.first = 0;
2247			dev_kfree_skb_any(rx_buff->skb);
2248			remove_buff_from_pool(adapter, rx_buff);
2249			continue;
2250		} else if (!rx_buff->skb) {
2251			/* free the entry */
2252			next->rx_comp.first = 0;
2253			remove_buff_from_pool(adapter, rx_buff);
2254			continue;
2255		}
2256
2257		length = be32_to_cpu(next->rx_comp.len);
2258		offset = be16_to_cpu(next->rx_comp.off_frame_data);
2259		flags = next->rx_comp.flags;
2260		skb = rx_buff->skb;
2261		skb_copy_to_linear_data(skb, rx_buff->data + offset,
2262					length);
2263
2264		/* VLAN Header has been stripped by the system firmware and
2265		 * needs to be inserted by the driver
2266		 */
2267		if (adapter->rx_vlan_header_insertion &&
2268		    (flags & IBMVNIC_VLAN_STRIPPED))
2269			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2270					       ntohs(next->rx_comp.vlan_tci));
2271
2272		/* free the entry */
2273		next->rx_comp.first = 0;
2274		remove_buff_from_pool(adapter, rx_buff);
2275
2276		skb_put(skb, length);
2277		skb->protocol = eth_type_trans(skb, netdev);
2278		skb_record_rx_queue(skb, scrq_num);
2279
2280		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2281		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2282			skb->ip_summed = CHECKSUM_UNNECESSARY;
2283		}
2284
2285		length = skb->len;
2286		napi_gro_receive(napi, skb); /* send it up */
2287		netdev->stats.rx_packets++;
2288		netdev->stats.rx_bytes += length;
2289		adapter->rx_stats_buffers[scrq_num].packets++;
2290		adapter->rx_stats_buffers[scrq_num].bytes += length;
2291		frames_processed++;
2292	}
2293
2294	if (adapter->state != VNIC_CLOSING)
2295		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2296
2297	if (frames_processed < budget) {
2298		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2299		napi_complete_done(napi, frames_processed);
2300		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2301		    napi_reschedule(napi)) {
2302			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2303			goto restart_poll;
2304		}
2305	}
2306	return frames_processed;
2307}
2308
2309static int wait_for_reset(struct ibmvnic_adapter *adapter)
2310{
2311	int rc, ret;
2312
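	/* Save the currently active settings so they can be restored
	 * with a second CHANGE_PARAM reset if the requested change
	 * fails.
	 */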
2313	adapter->fallback.mtu = adapter->req_mtu;
2314	adapter->fallback.rx_queues = adapter->req_rx_queues;
2315	adapter->fallback.tx_queues = adapter->req_tx_queues;
2316	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2317	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2318
2319	init_completion(&adapter->reset_done);
2320	adapter->wait_for_reset = true;
2321	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2322	if (rc)
2323		return rc;
2324	wait_for_completion(&adapter->reset_done);
2325
2326	ret = 0;
2327	if (adapter->reset_done_rc) {
2328		ret = -EIO;
2329		adapter->desired.mtu = adapter->fallback.mtu;
2330		adapter->desired.rx_queues = adapter->fallback.rx_queues;
2331		adapter->desired.tx_queues = adapter->fallback.tx_queues;
2332		adapter->desired.rx_entries = adapter->fallback.rx_entries;
2333		adapter->desired.tx_entries = adapter->fallback.tx_entries;
2334
2335		init_completion(&adapter->reset_done);
2336		adapter->wait_for_reset = true;
2337		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2338		if (rc)
2339			return ret;
2340		wait_for_completion(&adapter->reset_done);
2341	}
2342	adapter->wait_for_reset = false;
2343
2344	return ret;
2345}
2346
2347static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2348{
2349	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2350
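	/* req_mtu is negotiated inclusive of the Ethernet header, so
	 * add ETH_HLEN to the requested value.
	 */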
2351	adapter->desired.mtu = new_mtu + ETH_HLEN;
2352
2353	return wait_for_reset(adapter);
2354}
2355
2356static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2357						struct net_device *dev,
2358						netdev_features_t features)
2359{
2360	/* Some backing hardware adapters cannot
2361	 * handle packets with an MSS less than 224
2362	 * or with only one segment.
2363	 */
2364	if (skb_is_gso(skb)) {
2365		if (skb_shinfo(skb)->gso_size < 224 ||
2366		    skb_shinfo(skb)->gso_segs == 1)
2367			features &= ~NETIF_F_GSO_MASK;
2368	}
2369
2370	return features;
2371}
2372
2373static const struct net_device_ops ibmvnic_netdev_ops = {
2374	.ndo_open		= ibmvnic_open,
2375	.ndo_stop		= ibmvnic_close,
2376	.ndo_start_xmit		= ibmvnic_xmit,
2377	.ndo_set_rx_mode	= ibmvnic_set_multi,
2378	.ndo_set_mac_address	= ibmvnic_set_mac,
2379	.ndo_validate_addr	= eth_validate_addr,
2380	.ndo_tx_timeout		= ibmvnic_tx_timeout,
2381	.ndo_change_mtu		= ibmvnic_change_mtu,
2382	.ndo_features_check     = ibmvnic_features_check,
2383};
2384
2385/* ethtool functions */
2386
2387static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2388				      struct ethtool_link_ksettings *cmd)
2389{
2390	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2391	int rc;
2392
2393	rc = send_query_phys_parms(adapter);
2394	if (rc) {
2395		adapter->speed = SPEED_UNKNOWN;
2396		adapter->duplex = DUPLEX_UNKNOWN;
2397	}
2398	cmd->base.speed = adapter->speed;
2399	cmd->base.duplex = adapter->duplex;
2400	cmd->base.port = PORT_FIBRE;
2401	cmd->base.phy_address = 0;
2402	cmd->base.autoneg = AUTONEG_ENABLE;
2403
2404	return 0;
2405}
2406
2407static void ibmvnic_get_drvinfo(struct net_device *netdev,
2408				struct ethtool_drvinfo *info)
2409{
2410	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2411
2412	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2413	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2414	strlcpy(info->fw_version, adapter->fw_version,
2415		sizeof(info->fw_version));
2416}
2417
2418static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2419{
2420	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2421
2422	return adapter->msg_enable;
2423}
2424
2425static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2426{
2427	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2428
2429	adapter->msg_enable = data;
2430}
2431
2432static u32 ibmvnic_get_link(struct net_device *netdev)
2433{
2434	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2435
2436	/* Don't need to send a query because we request a logical link up at
2437	 * init and then we wait for link state indications
2438	 */
2439	return adapter->logical_link_state;
2440}
2441
2442static void ibmvnic_get_ringparam(struct net_device *netdev,
2443				  struct ethtool_ringparam *ring)
2444{
2445	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2446
2447	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2448		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2449		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2450	} else {
2451		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2452		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2453	}
2454	ring->rx_mini_max_pending = 0;
2455	ring->rx_jumbo_max_pending = 0;
2456	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2457	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2458	ring->rx_mini_pending = 0;
2459	ring->rx_jumbo_pending = 0;
2460}
2461
2462static int ibmvnic_set_ringparam(struct net_device *netdev,
2463				 struct ethtool_ringparam *ring)
2464{
2465	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2466	int ret;
2467
2468	ret = 0;
2469	adapter->desired.rx_entries = ring->rx_pending;
2470	adapter->desired.tx_entries = ring->tx_pending;
2471
2472	ret = wait_for_reset(adapter);
2473
2474	if (!ret &&
2475	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2476	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2477		netdev_info(netdev,
2478			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2479			    ring->rx_pending, ring->tx_pending,
2480			    adapter->req_rx_add_entries_per_subcrq,
2481			    adapter->req_tx_entries_per_subcrq);
2482	return ret;
2483}
2484
2485static void ibmvnic_get_channels(struct net_device *netdev,
2486				 struct ethtool_channels *channels)
2487{
2488	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2489
2490	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2491		channels->max_rx = adapter->max_rx_queues;
2492		channels->max_tx = adapter->max_tx_queues;
2493	} else {
2494		channels->max_rx = IBMVNIC_MAX_QUEUES;
2495		channels->max_tx = IBMVNIC_MAX_QUEUES;
2496	}
2497
2498	channels->max_other = 0;
2499	channels->max_combined = 0;
2500	channels->rx_count = adapter->req_rx_queues;
2501	channels->tx_count = adapter->req_tx_queues;
2502	channels->other_count = 0;
2503	channels->combined_count = 0;
2504}
2505
2506static int ibmvnic_set_channels(struct net_device *netdev,
2507				struct ethtool_channels *channels)
2508{
2509	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2510	int ret;
2511
2512	ret = 0;
2513	adapter->desired.rx_queues = channels->rx_count;
2514	adapter->desired.tx_queues = channels->tx_count;
2515
2516	ret = wait_for_reset(adapter);
2517
2518	if (!ret &&
2519	    (adapter->req_rx_queues != channels->rx_count ||
2520	     adapter->req_tx_queues != channels->tx_count))
2521		netdev_info(netdev,
2522			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2523			    channels->rx_count, channels->tx_count,
2524			    adapter->req_rx_queues, adapter->req_tx_queues);
2525	return ret;
2526
2527}
2528
2529static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2530{
2531	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2532	int i;
2533
2534	switch (stringset) {
2535	case ETH_SS_STATS:
2536		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2537				i++, data += ETH_GSTRING_LEN)
2538			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2539
2540		for (i = 0; i < adapter->req_tx_queues; i++) {
2541			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2542			data += ETH_GSTRING_LEN;
2543
2544			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2545			data += ETH_GSTRING_LEN;
2546
2547			snprintf(data, ETH_GSTRING_LEN,
2548				 "tx%d_dropped_packets", i);
2549			data += ETH_GSTRING_LEN;
2550		}
2551
2552		for (i = 0; i < adapter->req_rx_queues; i++) {
2553			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2554			data += ETH_GSTRING_LEN;
2555
2556			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2557			data += ETH_GSTRING_LEN;
2558
2559			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2560			data += ETH_GSTRING_LEN;
2561		}
2562		break;
2563
2564	case ETH_SS_PRIV_FLAGS:
2565		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2566			strcpy(data + i * ETH_GSTRING_LEN,
2567			       ibmvnic_priv_flags[i]);
2568		break;
2569	default:
2570		return;
2571	}
2572}
2573
2574static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2575{
2576	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2577
2578	switch (sset) {
2579	case ETH_SS_STATS:
2580		return ARRAY_SIZE(ibmvnic_stats) +
2581		       adapter->req_tx_queues * NUM_TX_STATS +
2582		       adapter->req_rx_queues * NUM_RX_STATS;
2583	case ETH_SS_PRIV_FLAGS:
2584		return ARRAY_SIZE(ibmvnic_priv_flags);
2585	default:
2586		return -EOPNOTSUPP;
2587	}
2588}
2589
2590static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2591				      struct ethtool_stats *stats, u64 *data)
2592{
2593	struct ibmvnic_adapter *adapter = netdev_priv(dev);
2594	union ibmvnic_crq crq;
2595	int i, j;
2596	int rc;
2597
2598	memset(&crq, 0, sizeof(crq));
2599	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2600	crq.request_statistics.cmd = REQUEST_STATISTICS;
2601	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2602	crq.request_statistics.len =
2603	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
2604
2605	/* Wait for data to be written */
2606	init_completion(&adapter->stats_done);
2607	rc = ibmvnic_send_crq(adapter, &crq);
2608	if (rc)
2609		return;
2610	wait_for_completion(&adapter->stats_done);
2611
2612	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2613		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2614						ibmvnic_stats[i].offset));
2615
2616	for (j = 0; j < adapter->req_tx_queues; j++) {
2617		data[i] = adapter->tx_stats_buffers[j].packets;
2618		i++;
2619		data[i] = adapter->tx_stats_buffers[j].bytes;
2620		i++;
2621		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2622		i++;
2623	}
2624
2625	for (j = 0; j < adapter->req_rx_queues; j++) {
2626		data[i] = adapter->rx_stats_buffers[j].packets;
2627		i++;
2628		data[i] = adapter->rx_stats_buffers[j].bytes;
2629		i++;
2630		data[i] = adapter->rx_stats_buffers[j].interrupts;
2631		i++;
2632	}
2633}
2634
2635static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2636{
2637	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2638
2639	return adapter->priv_flags;
2640}
2641
2642static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2643{
2644	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2645	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2646
2647	if (which_maxes)
2648		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2649	else
2650		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2651
2652	return 0;
2653}
2654static const struct ethtool_ops ibmvnic_ethtool_ops = {
2655	.get_drvinfo		= ibmvnic_get_drvinfo,
2656	.get_msglevel		= ibmvnic_get_msglevel,
2657	.set_msglevel		= ibmvnic_set_msglevel,
2658	.get_link		= ibmvnic_get_link,
2659	.get_ringparam		= ibmvnic_get_ringparam,
2660	.set_ringparam		= ibmvnic_set_ringparam,
2661	.get_channels		= ibmvnic_get_channels,
2662	.set_channels		= ibmvnic_set_channels,
2663	.get_strings            = ibmvnic_get_strings,
2664	.get_sset_count         = ibmvnic_get_sset_count,
2665	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
2666	.get_link_ksettings	= ibmvnic_get_link_ksettings,
2667	.get_priv_flags		= ibmvnic_get_priv_flags,
2668	.set_priv_flags		= ibmvnic_set_priv_flags,
2669};
2670
2671/* Routines for managing CRQs/sCRQs  */
2672
2673static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2674				   struct ibmvnic_sub_crq_queue *scrq)
2675{
2676	int rc;
2677
2678	if (scrq->irq) {
2679		free_irq(scrq->irq, scrq);
2680		irq_dispose_mapping(scrq->irq);
2681		scrq->irq = 0;
2682	}
2683
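	/* The queue is an order-2 (4 page) allocation; clear it and
	 * re-register the same pages with the hypervisor.
	 */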
2684	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2685	atomic_set(&scrq->used, 0);
2686	scrq->cur = 0;
2687
2688	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2689			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2690	return rc;
2691}
2692
2693static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2694{
2695	int i, rc;
2696
2697	for (i = 0; i < adapter->req_tx_queues; i++) {
2698		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2699		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2700		if (rc)
2701			return rc;
2702	}
2703
2704	for (i = 0; i < adapter->req_rx_queues; i++) {
2705		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2706		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2707		if (rc)
2708			return rc;
2709	}
2710
2711	return rc;
2712}
2713
2714static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2715				  struct ibmvnic_sub_crq_queue *scrq,
2716				  bool do_h_free)
2717{
2718	struct device *dev = &adapter->vdev->dev;
2719	long rc;
2720
2721	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2722
2723	if (do_h_free) {
2724		/* Close the sub-crqs */
2725		do {
2726			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2727						adapter->vdev->unit_address,
2728						scrq->crq_num);
2729		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2730
2731		if (rc) {
2732			netdev_err(adapter->netdev,
2733				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
2734				   scrq->crq_num, rc);
2735		}
2736	}
2737
2738	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2739			 DMA_BIDIRECTIONAL);
2740	free_pages((unsigned long)scrq->msgs, 2);
2741	kfree(scrq);
2742}
2743
2744static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2745							*adapter)
2746{
2747	struct device *dev = &adapter->vdev->dev;
2748	struct ibmvnic_sub_crq_queue *scrq;
2749	int rc;
2750
2751	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2752	if (!scrq)
2753		return NULL;
2754
2755	scrq->msgs =
2756		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2757	if (!scrq->msgs) {
2758		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2759		goto zero_page_failed;
2760	}
2761
2762	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2763					 DMA_BIDIRECTIONAL);
2764	if (dma_mapping_error(dev, scrq->msg_token)) {
2765		dev_warn(dev, "Couldn't map crq queue messages page\n");
2766		goto map_failed;
2767	}
2768
2769	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2770			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2771
2772	if (rc == H_RESOURCE)
2773		rc = ibmvnic_reset_crq(adapter);
2774
2775	if (rc == H_CLOSED) {
2776		dev_warn(dev, "Partner adapter not ready, waiting.\n");
2777	} else if (rc) {
2778		dev_warn(dev, "Error %d registering sub-crq\n", rc);
2779		goto reg_failed;
2780	}
2781
2782	scrq->adapter = adapter;
2783	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2784	spin_lock_init(&scrq->lock);
2785
2786	netdev_dbg(adapter->netdev,
2787		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2788		   scrq->crq_num, scrq->hw_irq, scrq->irq);
2789
2790	return scrq;
2791
2792reg_failed:
2793	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2794			 DMA_BIDIRECTIONAL);
2795map_failed:
2796	free_pages((unsigned long)scrq->msgs, 2);
2797zero_page_failed:
2798	kfree(scrq);
2799
2800	return NULL;
2801}
2802
2803static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2804{
2805	int i;
2806
2807	if (adapter->tx_scrq) {
2808		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2809			if (!adapter->tx_scrq[i])
2810				continue;
2811
2812			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2813				   i);
2814			if (adapter->tx_scrq[i]->irq) {
2815				free_irq(adapter->tx_scrq[i]->irq,
2816					 adapter->tx_scrq[i]);
2817				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2818				adapter->tx_scrq[i]->irq = 0;
2819			}
2820
2821			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2822					      do_h_free);
2823		}
2824
2825		kfree(adapter->tx_scrq);
2826		adapter->tx_scrq = NULL;
2827		adapter->num_active_tx_scrqs = 0;
2828	}
2829
2830	if (adapter->rx_scrq) {
2831		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2832			if (!adapter->rx_scrq[i])
2833				continue;
2834
2835			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2836				   i);
2837			if (adapter->rx_scrq[i]->irq) {
2838				free_irq(adapter->rx_scrq[i]->irq,
2839					 adapter->rx_scrq[i]);
2840				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2841				adapter->rx_scrq[i]->irq = 0;
2842			}
2843
2844			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2845					      do_h_free);
2846		}
2847
2848		kfree(adapter->rx_scrq);
2849		adapter->rx_scrq = NULL;
2850		adapter->num_active_rx_scrqs = 0;
2851	}
2852}
2853
2854static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2855			    struct ibmvnic_sub_crq_queue *scrq)
2856{
2857	struct device *dev = &adapter->vdev->dev;
2858	unsigned long rc;
2859
2860	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2861				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2862	if (rc)
2863		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2864			scrq->hw_irq, rc);
2865	return rc;
2866}
2867
2868static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2869			   struct ibmvnic_sub_crq_queue *scrq)
2870{
2871	struct device *dev = &adapter->vdev->dev;
2872	unsigned long rc;
2873
2874	if (scrq->hw_irq > 0x100000000ULL) {
2875		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2876		return 1;
2877	}
2878
2879	if (test_bit(0, &adapter->resetting) &&
2880	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
2881		u64 val = (0xff000000) | scrq->hw_irq;
2882
2883		rc = plpar_hcall_norets(H_EOI, val);
2884		/* H_EOI would fail with rc = H_FUNCTION when running
2885		 * in XIVE mode which is expected, but not an error.
2886		 */
2887		if (rc && (rc != H_FUNCTION))
2888			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2889				val, rc);
2890	}
2891
2892	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2893				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2894	if (rc)
2895		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2896			scrq->hw_irq, rc);
2897	return rc;
2898}
2899
2900static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2901			       struct ibmvnic_sub_crq_queue *scrq)
2902{
2903	struct device *dev = &adapter->vdev->dev;
2904	struct ibmvnic_tx_pool *tx_pool;
2905	struct ibmvnic_tx_buff *txbuff;
2906	union sub_crq *next;
2907	int index;
2908	int i, j;
2909
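	/* Reap completed tx descriptors, return their buffers to the
	 * pool and wake the subqueue once usage drops to half the ring.
	 */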
2910restart_loop:
2911	while (pending_scrq(adapter, scrq)) {
2912		unsigned int pool = scrq->pool_index;
2913		int num_entries = 0;
2914
2915		next = ibmvnic_next_scrq(adapter, scrq);
2916		for (i = 0; i < next->tx_comp.num_comps; i++) {
2917			if (next->tx_comp.rcs[i]) {
2918				dev_err(dev, "tx error %x\n",
2919					next->tx_comp.rcs[i]);
2920				continue;
2921			}
2922			index = be32_to_cpu(next->tx_comp.correlators[i]);
2923			if (index & IBMVNIC_TSO_POOL_MASK) {
2924				tx_pool = &adapter->tso_pool[pool];
2925				index &= ~IBMVNIC_TSO_POOL_MASK;
2926			} else {
2927				tx_pool = &adapter->tx_pool[pool];
2928			}
2929
2930			txbuff = &tx_pool->tx_buff[index];
2931
2932			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2933				if (!txbuff->data_dma[j])
2934					continue;
2935
2936				txbuff->data_dma[j] = 0;
2937			}
2938
2939			if (txbuff->last_frag) {
2940				dev_kfree_skb_any(txbuff->skb);
2941				txbuff->skb = NULL;
2942			}
2943
2944			num_entries += txbuff->num_entries;
2945
2946			tx_pool->free_map[tx_pool->producer_index] = index;
2947			tx_pool->producer_index =
2948				(tx_pool->producer_index + 1) %
2949					tx_pool->num_buffers;
2950		}
2951		/* remove tx_comp scrq */
2952		next->tx_comp.first = 0;
2953
2954		if (atomic_sub_return(num_entries, &scrq->used) <=
2955		    (adapter->req_tx_entries_per_subcrq / 2) &&
2956		    __netif_subqueue_stopped(adapter->netdev,
2957					     scrq->pool_index)) {
2958			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2959			netdev_dbg(adapter->netdev, "Started queue %d\n",
2960				   scrq->pool_index);
2961		}
2962	}
2963
2964	enable_scrq_irq(adapter, scrq);
2965
2966	if (pending_scrq(adapter, scrq)) {
2967		disable_scrq_irq(adapter, scrq);
2968		goto restart_loop;
2969	}
2970
2971	return 0;
2972}
2973
2974static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2975{
2976	struct ibmvnic_sub_crq_queue *scrq = instance;
2977	struct ibmvnic_adapter *adapter = scrq->adapter;
2978
2979	disable_scrq_irq(adapter, scrq);
2980	ibmvnic_complete_tx(adapter, scrq);
2981
2982	return IRQ_HANDLED;
2983}
2984
2985static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2986{
2987	struct ibmvnic_sub_crq_queue *scrq = instance;
2988	struct ibmvnic_adapter *adapter = scrq->adapter;
2989
2990	/* When booting a kdump kernel we can hit pending interrupts
2991	 * prior to completing driver initialization.
2992	 */
2993	if (unlikely(adapter->state != VNIC_OPEN))
2994		return IRQ_NONE;
2995
2996	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2997
2998	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2999		disable_scrq_irq(adapter, scrq);
3000		__napi_schedule(&adapter->napi[scrq->scrq_num]);
3001	}
3002
3003	return IRQ_HANDLED;
3004}
3005
3006static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3007{
3008	struct device *dev = &adapter->vdev->dev;
3009	struct ibmvnic_sub_crq_queue *scrq;
3010	int i = 0, j = 0;
3011	int rc = 0;
3012
3013	for (i = 0; i < adapter->req_tx_queues; i++) {
3014		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3015			   i);
3016		scrq = adapter->tx_scrq[i];
3017		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3018
3019		if (!scrq->irq) {
3020			rc = -EINVAL;
3021			dev_err(dev, "Error mapping irq\n");
3022			goto req_tx_irq_failed;
3023		}
3024
3025		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3026			 adapter->vdev->unit_address, i);
3027		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3028				 0, scrq->name, scrq);
3029
3030		if (rc) {
3031			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3032				scrq->irq, rc);
3033			irq_dispose_mapping(scrq->irq);
3034			goto req_tx_irq_failed;
3035		}
3036	}
3037
3038	for (i = 0; i < adapter->req_rx_queues; i++) {
3039		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3040			   i);
3041		scrq = adapter->rx_scrq[i];
3042		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3043		if (!scrq->irq) {
3044			rc = -EINVAL;
3045			dev_err(dev, "Error mapping irq\n");
3046			goto req_rx_irq_failed;
3047		}
3048		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3049			 adapter->vdev->unit_address, i);
3050		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3051				 0, scrq->name, scrq);
3052		if (rc) {
3053			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3054				scrq->irq, rc);
3055			irq_dispose_mapping(scrq->irq);
3056			goto req_rx_irq_failed;
3057		}
3058	}
3059	return rc;
3060
3061req_rx_irq_failed:
3062	for (j = 0; j < i; j++) {
3063		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3064		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3065	}
3066	i = adapter->req_tx_queues;
3067req_tx_irq_failed:
3068	for (j = 0; j < i; j++) {
3069		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3070		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3071	}
3072	release_sub_crqs(adapter, 1);
3073	return rc;
3074}
3075
3076static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3077{
3078	struct device *dev = &adapter->vdev->dev;
3079	struct ibmvnic_sub_crq_queue **allqueues;
3080	int registered_queues = 0;
3081	int total_queues;
3082	int more = 0;
3083	int i;
3084
3085	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3086
3087	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3088	if (!allqueues)
3089		return -1;
3090
3091	for (i = 0; i < total_queues; i++) {
3092		allqueues[i] = init_sub_crq_queue(adapter);
3093		if (!allqueues[i]) {
3094			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3095			break;
3096		}
3097		registered_queues++;
3098	}
3099
3100	/* Make sure we were able to register the minimum number of queues */
3101	if (registered_queues <
3102	    adapter->min_tx_queues + adapter->min_rx_queues) {
3103		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3104		goto tx_failed;
3105	}
3106
3107	/* Distribute the shortfall from the failed queue allocations */
3108	for (i = 0; i < total_queues - registered_queues + more ; i++) {
3109		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3110		switch (i % 3) {
3111		case 0:
3112			if (adapter->req_rx_queues > adapter->min_rx_queues)
3113				adapter->req_rx_queues--;
3114			else
3115				more++;
3116			break;
3117		case 1:
3118			if (adapter->req_tx_queues > adapter->min_tx_queues)
3119				adapter->req_tx_queues--;
3120			else
3121				more++;
3122			break;
3123		}
3124	}
3125
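	/* Hand out the successfully registered queues: tx queues take
	 * the front of the allqueues array, rx queues the remainder.
	 */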
3126	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3127				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
3128	if (!adapter->tx_scrq)
3129		goto tx_failed;
3130
3131	for (i = 0; i < adapter->req_tx_queues; i++) {
3132		adapter->tx_scrq[i] = allqueues[i];
3133		adapter->tx_scrq[i]->pool_index = i;
3134		adapter->num_active_tx_scrqs++;
3135	}
3136
3137	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3138				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
3139	if (!adapter->rx_scrq)
3140		goto rx_failed;
3141
3142	for (i = 0; i < adapter->req_rx_queues; i++) {
3143		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3144		adapter->rx_scrq[i]->scrq_num = i;
3145		adapter->num_active_rx_scrqs++;
3146	}
3147
3148	kfree(allqueues);
3149	return 0;
3150
3151rx_failed:
3152	kfree(adapter->tx_scrq);
3153	adapter->tx_scrq = NULL;
3154tx_failed:
3155	for (i = 0; i < registered_queues; i++)
3156		release_sub_crq_queue(adapter, allqueues[i], 1);
3157	kfree(allqueues);
3158	return -1;
3159}
3160
3161static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3162{
3163	struct device *dev = &adapter->vdev->dev;
3164	union ibmvnic_crq crq;
3165	int max_entries;
3166
3167	if (!retry) {
3168		/* Sub-CRQ entries are 32 bytes long */
3169		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3170
3171		if (adapter->min_tx_entries_per_subcrq > entries_page ||
3172		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
3173			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3174			return;
3175		}
3176
3177		if (adapter->desired.mtu)
3178			adapter->req_mtu = adapter->desired.mtu;
3179		else
3180			adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3181
3182		if (!adapter->desired.tx_entries)
3183			adapter->desired.tx_entries =
3184					adapter->max_tx_entries_per_subcrq;
3185		if (!adapter->desired.rx_entries)
3186			adapter->desired.rx_entries =
3187					adapter->max_rx_add_entries_per_subcrq;
3188
3189		max_entries = IBMVNIC_MAX_LTB_SIZE /
3190			      (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3191
3192		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3193			adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3194			adapter->desired.tx_entries = max_entries;
3195		}
3196
3197		if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3198			adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3199			adapter->desired.rx_entries = max_entries;
3200		}
3201
3202		if (adapter->desired.tx_entries)
3203			adapter->req_tx_entries_per_subcrq =
3204					adapter->desired.tx_entries;
3205		else
3206			adapter->req_tx_entries_per_subcrq =
3207					adapter->max_tx_entries_per_subcrq;
3208
3209		if (adapter->desired.rx_entries)
3210			adapter->req_rx_add_entries_per_subcrq =
3211					adapter->desired.rx_entries;
3212		else
3213			adapter->req_rx_add_entries_per_subcrq =
3214					adapter->max_rx_add_entries_per_subcrq;
3215
3216		if (adapter->desired.tx_queues)
3217			adapter->req_tx_queues =
3218					adapter->desired.tx_queues;
3219		else
3220			adapter->req_tx_queues =
3221					adapter->opt_tx_comp_sub_queues;
3222
3223		if (adapter->desired.rx_queues)
3224			adapter->req_rx_queues =
3225					adapter->desired.rx_queues;
3226		else
3227			adapter->req_rx_queues =
3228					adapter->opt_rx_comp_queues;
3229
3230		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3231	}
3232
3233	memset(&crq, 0, sizeof(crq));
3234	crq.request_capability.first = IBMVNIC_CRQ_CMD;
3235	crq.request_capability.cmd = REQUEST_CAPABILITY;
3236
3237	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3238	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3239	atomic_inc(&adapter->running_cap_crqs);
3240	ibmvnic_send_crq(adapter, &crq);
3241
3242	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3243	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3244	atomic_inc(&adapter->running_cap_crqs);
3245	ibmvnic_send_crq(adapter, &crq);
3246
3247	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3248	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3249	atomic_inc(&adapter->running_cap_crqs);
3250	ibmvnic_send_crq(adapter, &crq);
3251
3252	crq.request_capability.capability =
3253	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3254	crq.request_capability.number =
3255	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3256	atomic_inc(&adapter->running_cap_crqs);
3257	ibmvnic_send_crq(adapter, &crq);
3258
3259	crq.request_capability.capability =
3260	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3261	crq.request_capability.number =
3262	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3263	atomic_inc(&adapter->running_cap_crqs);
3264	ibmvnic_send_crq(adapter, &crq);
3265
3266	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3267	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3268	atomic_inc(&adapter->running_cap_crqs);
3269	ibmvnic_send_crq(adapter, &crq);
3270
3271	if (adapter->netdev->flags & IFF_PROMISC) {
3272		if (adapter->promisc_supported) {
3273			crq.request_capability.capability =
3274			    cpu_to_be16(PROMISC_REQUESTED);
3275			crq.request_capability.number = cpu_to_be64(1);
3276			atomic_inc(&adapter->running_cap_crqs);
3277			ibmvnic_send_crq(adapter, &crq);
3278		}
3279	} else {
3280		crq.request_capability.capability =
3281		    cpu_to_be16(PROMISC_REQUESTED);
3282		crq.request_capability.number = cpu_to_be64(0);
3283		atomic_inc(&adapter->running_cap_crqs);
3284		ibmvnic_send_crq(adapter, &crq);
3285	}
3286}
3287
3288static int pending_scrq(struct ibmvnic_adapter *adapter,
3289			struct ibmvnic_sub_crq_queue *scrq)
3290{
3291	union sub_crq *entry = &scrq->msgs[scrq->cur];
3292
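	/* The IBMVNIC_CRQ_CMD_RSP bit in the first byte appears to act
	 * as the valid marker: the server sets it when the entry at the
	 * cursor is ready to be consumed.
	 */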
3293	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3294		return 1;
3295	else
3296		return 0;
3297}
3298
3299static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3300					struct ibmvnic_sub_crq_queue *scrq)
3301{
3302	union sub_crq *entry;
3303	unsigned long flags;
3304
3305	spin_lock_irqsave(&scrq->lock, flags);
3306	entry = &scrq->msgs[scrq->cur];
3307	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3308		if (++scrq->cur == scrq->size)
3309			scrq->cur = 0;
3310	} else {
3311		entry = NULL;
3312	}
3313	spin_unlock_irqrestore(&scrq->lock, flags);
3314
3315	return entry;
3316}
3317
3318static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3319{
3320	struct ibmvnic_crq_queue *queue = &adapter->crq;
3321	union ibmvnic_crq *crq;
3322
3323	crq = &queue->msgs[queue->cur];
3324	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3325		if (++queue->cur == queue->size)
3326			queue->cur = 0;
3327	} else {
3328		crq = NULL;
3329	}
3330
3331	return crq;
3332}
3333
3334static void print_subcrq_error(struct device *dev, int rc, const char *func)
3335{
3336	switch (rc) {
3337	case H_PARAMETER:
3338		dev_warn_ratelimited(dev,
3339				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3340				     func, rc);
3341		break;
3342	case H_CLOSED:
3343		dev_warn_ratelimited(dev,
3344				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3345				     func, rc);
3346		break;
3347	default:
3348		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3349		break;
3350	}
3351}
3352
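/* Hand a single sub-CRQ descriptor to the hypervisor. The memory
 * barrier ahead of the H_SEND_SUB_CRQ hcall ensures all four 64-bit
 * words of the descriptor are visible before the hypervisor is
 * notified.
 */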
3353static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3354		       union sub_crq *sub_crq)
3355{
3356	unsigned int ua = adapter->vdev->unit_address;
3357	struct device *dev = &adapter->vdev->dev;
3358	u64 *u64_crq = (u64 *)sub_crq;
3359	int rc;
3360
3361	netdev_dbg(adapter->netdev,
3362		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3363		   (unsigned long int)cpu_to_be64(remote_handle),
3364		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3365		   (unsigned long int)cpu_to_be64(u64_crq[1]),
3366		   (unsigned long int)cpu_to_be64(u64_crq[2]),
3367		   (unsigned long int)cpu_to_be64(u64_crq[3]));
3368
3369	/* Make sure the hypervisor sees the complete request */
3370	mb();
3371
3372	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3373				cpu_to_be64(remote_handle),
3374				cpu_to_be64(u64_crq[0]),
3375				cpu_to_be64(u64_crq[1]),
3376				cpu_to_be64(u64_crq[2]),
3377				cpu_to_be64(u64_crq[3]));
3378
3379	if (rc)
3380		print_subcrq_error(dev, rc, __func__);
3381
3382	return rc;
3383}
3384
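/* Batched variant of send_subcrq(): ioba is the DMA address of an
 * array of num_entries descriptors that the hypervisor fetches
 * directly, so a burst of frames costs one hcall instead of one per
 * descriptor.
 */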
3385static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3386				u64 remote_handle, u64 ioba, u64 num_entries)
3387{
3388	unsigned int ua = adapter->vdev->unit_address;
3389	struct device *dev = &adapter->vdev->dev;
3390	int rc;
3391
3392	/* Make sure the hypervisor sees the complete request */
3393	mb();
3394	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3395				cpu_to_be64(remote_handle),
3396				ioba, num_entries);
3397
3398	if (rc)
3399		print_subcrq_error(dev, rc, __func__);
3400
3401	return rc;
3402}
3403
3404static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3405			    union ibmvnic_crq *crq)
3406{
3407	unsigned int ua = adapter->vdev->unit_address;
3408	struct device *dev = &adapter->vdev->dev;
3409	u64 *u64_crq = (u64 *)crq;
3410	int rc;
3411
3412	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3413		   (unsigned long int)cpu_to_be64(u64_crq[0]),
3414		   (unsigned long int)cpu_to_be64(u64_crq[1]));
3415
3416	if (!adapter->crq.active &&
3417	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3418		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3419		return -EINVAL;
3420	}
3421
3422	/* Make sure the hypervisor sees the complete request */
3423	mb();
3424
3425	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3426				cpu_to_be64(u64_crq[0]),
3427				cpu_to_be64(u64_crq[1]));
3428
3429	if (rc) {
3430		if (rc == H_CLOSED) {
3431			dev_warn(dev, "CRQ Queue closed\n");
3432			if (test_bit(0, &adapter->resetting))
3433				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3434		}
3435
3436		dev_warn(dev, "Send error (rc=%d)\n", rc);
3437	}
3438
3439	return rc;
3440}
3441
3442static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3443{
3444	union ibmvnic_crq crq;
3445
3446	memset(&crq, 0, sizeof(crq));
3447	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3448	crq.generic.cmd = IBMVNIC_CRQ_INIT;
3449	netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3450
3451	return ibmvnic_send_crq(adapter, &crq);
3452}
3453
3454static int send_version_xchg(struct ibmvnic_adapter *adapter)
3455{
3456	union ibmvnic_crq crq;
3457
3458	memset(&crq, 0, sizeof(crq));
3459	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3460	crq.version_exchange.cmd = VERSION_EXCHANGE;
3461	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3462
3463	return ibmvnic_send_crq(adapter, &crq);
3464}
3465
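/* Client data is appended to the login buffer as a packed sequence of
 * (type, length, name) records; vnic_add_client_data() fills in the
 * three record types (OS name, LPAR name, device name).
 */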
3466struct vnic_login_client_data {
3467	u8	type;
3468	__be16	len;
3469	char	name[];
3470} __packed;
3471
3472static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3473{
3474	int len;
3475
3476	/* Calculate the amount of buffer space needed for the
3477	 * vnic client data in the login buffer. There are four entries,
3478	 * OS name, LPAR name, device name, and a null last entry.
3479	 */
3480	len = 4 * sizeof(struct vnic_login_client_data);
3481	len += 6; /* "Linux" plus NUL terminator */
3482	len += strlen(utsname()->nodename) + 1;
3483	len += strlen(adapter->netdev->name) + 1;
3484
3485	return len;
3486}
3487
3488static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3489				 struct vnic_login_client_data *vlcd)
3490{
3491	const char *os_name = "Linux";
3492	int len;
3493
3494	/* Type 1 - LPAR OS */
3495	vlcd->type = 1;
3496	len = strlen(os_name) + 1;
3497	vlcd->len = cpu_to_be16(len);
3498	strncpy(vlcd->name, os_name, len);
3499	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3500
3501	/* Type 2 - LPAR name */
3502	vlcd->type = 2;
3503	len = strlen(utsname()->nodename) + 1;
3504	vlcd->len = cpu_to_be16(len);
3505	strncpy(vlcd->name, utsname()->nodename, len);
3506	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3507
3508	/* Type 3 - device name */
3509	vlcd->type = 3;
3510	len = strlen(adapter->netdev->name) + 1;
3511	vlcd->len = cpu_to_be16(len);
3512	strncpy(vlcd->name, adapter->netdev->name, len);
3513}
3514
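/* Build and send the LOGIN request. The login buffer is laid out as a
 * fixed header, the list of tx sub-CRQ handles, the list of rx sub-CRQ
 * handles, and the client data records; the response buffer is mapped
 * DMA_FROM_DEVICE so the server can fill it in asynchronously.
 */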
3515static int send_login(struct ibmvnic_adapter *adapter)
3516{
3517	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3518	struct ibmvnic_login_buffer *login_buffer;
3519	struct device *dev = &adapter->vdev->dev;
3520	dma_addr_t rsp_buffer_token;
3521	dma_addr_t buffer_token;
3522	size_t rsp_buffer_size;
3523	union ibmvnic_crq crq;
3524	size_t buffer_size;
3525	__be64 *tx_list_p;
3526	__be64 *rx_list_p;
3527	int client_data_len;
3528	struct vnic_login_client_data *vlcd;
3529	int i;
3530
3531	if (!adapter->tx_scrq || !adapter->rx_scrq) {
3532		netdev_err(adapter->netdev,
3533			   "RX or TX queues are not allocated, device login failed\n");
3534		return -1;
3535	}
3536
3537	release_login_rsp_buffer(adapter);
3538	client_data_len = vnic_client_data_len(adapter);
3539
3540	buffer_size =
3541	    sizeof(struct ibmvnic_login_buffer) +
3542	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3543	    client_data_len;
3544
3545	login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3546	if (!login_buffer)
3547		goto buf_alloc_failed;
3548
3549	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3550				      DMA_TO_DEVICE);
3551	if (dma_mapping_error(dev, buffer_token)) {
3552		dev_err(dev, "Couldn't map login buffer\n");
3553		goto buf_map_failed;
3554	}
3555
3556	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3557			  sizeof(u64) * adapter->req_tx_queues +
3558			  sizeof(u64) * adapter->req_rx_queues +
3559			  sizeof(u64) * adapter->req_rx_queues +
3560			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3561
3562	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3563	if (!login_rsp_buffer)
3564		goto buf_rsp_alloc_failed;
3565
3566	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3567					  rsp_buffer_size, DMA_FROM_DEVICE);
3568	if (dma_mapping_error(dev, rsp_buffer_token)) {
3569		dev_err(dev, "Couldn't map login rsp buffer\n");
3570		goto buf_rsp_map_failed;
3571	}
3572
3573	adapter->login_buf = login_buffer;
3574	adapter->login_buf_token = buffer_token;
3575	adapter->login_buf_sz = buffer_size;
3576	adapter->login_rsp_buf = login_rsp_buffer;
3577	adapter->login_rsp_buf_token = rsp_buffer_token;
3578	adapter->login_rsp_buf_sz = rsp_buffer_size;
3579
3580	login_buffer->len = cpu_to_be32(buffer_size);
3581	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3582	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3583	login_buffer->off_txcomp_subcrqs =
3584	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3585	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3586	login_buffer->off_rxcomp_subcrqs =
3587	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3588			sizeof(u64) * adapter->req_tx_queues);
3589	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3590	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3591
3592	tx_list_p = (__be64 *)((char *)login_buffer +
3593				      sizeof(struct ibmvnic_login_buffer));
3594	rx_list_p = (__be64 *)((char *)login_buffer +
3595				      sizeof(struct ibmvnic_login_buffer) +
3596				      sizeof(u64) * adapter->req_tx_queues);
3597
3598	for (i = 0; i < adapter->req_tx_queues; i++) {
3599		if (adapter->tx_scrq[i]) {
3600			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3601						   crq_num);
3602		}
3603	}
3604
3605	for (i = 0; i < adapter->req_rx_queues; i++) {
3606		if (adapter->rx_scrq[i]) {
3607			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3608						   crq_num);
3609		}
3610	}
3611
3612	/* Insert vNIC login client data */
3613	vlcd = (struct vnic_login_client_data *)
3614		((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3615	login_buffer->client_data_offset =
3616			cpu_to_be32((char *)vlcd - (char *)login_buffer);
3617	login_buffer->client_data_len = cpu_to_be32(client_data_len);
3618
3619	vnic_add_client_data(adapter, vlcd);
3620
3621	netdev_dbg(adapter->netdev, "Login Buffer:\n");
3622	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3623		netdev_dbg(adapter->netdev, "%016lx\n",
3624			   ((unsigned long int *)(adapter->login_buf))[i]);
3625	}
3626
3627	memset(&crq, 0, sizeof(crq));
3628	crq.login.first = IBMVNIC_CRQ_CMD;
3629	crq.login.cmd = LOGIN;
3630	crq.login.ioba = cpu_to_be32(buffer_token);
3631	crq.login.len = cpu_to_be32(buffer_size);
3632	ibmvnic_send_crq(adapter, &crq);
3633
3634	return 0;
3635
3636buf_rsp_map_failed:
3637	kfree(login_rsp_buffer);
3638buf_rsp_alloc_failed:
3639	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3640buf_map_failed:
3641	kfree(login_buffer);
3642buf_alloc_failed:
3643	return -1;
3644}
3645
3646static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3647			    u32 len, u8 map_id)
3648{
3649	union ibmvnic_crq crq;
3650
3651	memset(&crq, 0, sizeof(crq));
3652	crq.request_map.first = IBMVNIC_CRQ_CMD;
3653	crq.request_map.cmd = REQUEST_MAP;
3654	crq.request_map.map_id = map_id;
3655	crq.request_map.ioba = cpu_to_be32(addr);
3656	crq.request_map.len = cpu_to_be32(len);
3657	return ibmvnic_send_crq(adapter, &crq);
3658}
3659
3660static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3661{
3662	union ibmvnic_crq crq;
3663
3664	memset(&crq, 0, sizeof(crq));
3665	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3666	crq.request_unmap.cmd = REQUEST_UNMAP;
3667	crq.request_unmap.map_id = map_id;
3668	return ibmvnic_send_crq(adapter, &crq);
3669}
3670
3671static void send_map_query(struct ibmvnic_adapter *adapter)
3672{
3673	union ibmvnic_crq crq;
3674
3675	memset(&crq, 0, sizeof(crq));
3676	crq.query_map.first = IBMVNIC_CRQ_CMD;
3677	crq.query_map.cmd = QUERY_MAP;
3678	ibmvnic_send_crq(adapter, &crq);
3679}
3680
3681/* Send a series of CRQs requesting various capabilities of the VNIC server */
3682static void send_cap_queries(struct ibmvnic_adapter *adapter)
3683{
3684	union ibmvnic_crq crq;
3685
3686	atomic_set(&adapter->running_cap_crqs, 0);
3687	memset(&crq, 0, sizeof(crq));
3688	crq.query_capability.first = IBMVNIC_CRQ_CMD;
3689	crq.query_capability.cmd = QUERY_CAPABILITY;
3690
3691	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3692	atomic_inc(&adapter->running_cap_crqs);
3693	ibmvnic_send_crq(adapter, &crq);
3694
3695	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3696	atomic_inc(&adapter->running_cap_crqs);
3697	ibmvnic_send_crq(adapter, &crq);
3698
3699	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3700	atomic_inc(&adapter->running_cap_crqs);
3701	ibmvnic_send_crq(adapter, &crq);
3702
3703	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3704	atomic_inc(&adapter->running_cap_crqs);
3705	ibmvnic_send_crq(adapter, &crq);
3706
3707	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3708	atomic_inc(&adapter->running_cap_crqs);
3709	ibmvnic_send_crq(adapter, &crq);
3710
3711	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3712	atomic_inc(&adapter->running_cap_crqs);
3713	ibmvnic_send_crq(adapter, &crq);
3714
3715	crq.query_capability.capability =
3716	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3717	atomic_inc(&adapter->running_cap_crqs);
3718	ibmvnic_send_crq(adapter, &crq);
3719
3720	crq.query_capability.capability =
3721	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3722	atomic_inc(&adapter->running_cap_crqs);
3723	ibmvnic_send_crq(adapter, &crq);
3724
3725	crq.query_capability.capability =
3726	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3727	atomic_inc(&adapter->running_cap_crqs);
3728	ibmvnic_send_crq(adapter, &crq);
3729
3730	crq.query_capability.capability =
3731	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3732	atomic_inc(&adapter->running_cap_crqs);
3733	ibmvnic_send_crq(adapter, &crq);
3734
3735	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3736	atomic_inc(&adapter->running_cap_crqs);
3737	ibmvnic_send_crq(adapter, &crq);
3738
3739	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3740	atomic_inc(&adapter->running_cap_crqs);
3741	ibmvnic_send_crq(adapter, &crq);
3742
3743	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3744	atomic_inc(&adapter->running_cap_crqs);
3745	ibmvnic_send_crq(adapter, &crq);
3746
3747	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3748	atomic_inc(&adapter->running_cap_crqs);
3749	ibmvnic_send_crq(adapter, &crq);
3750
3751	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3752	atomic_inc(&adapter->running_cap_crqs);
3753	ibmvnic_send_crq(adapter, &crq);
3754
3755	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3756	atomic_inc(&adapter->running_cap_crqs);
3757	ibmvnic_send_crq(adapter, &crq);
3758
3759	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3760	atomic_inc(&adapter->running_cap_crqs);
3761	ibmvnic_send_crq(adapter, &crq);
3762
3763	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3764	atomic_inc(&adapter->running_cap_crqs);
3765	ibmvnic_send_crq(adapter, &crq);
3766
3767	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3768	atomic_inc(&adapter->running_cap_crqs);
3769	ibmvnic_send_crq(adapter, &crq);
3770
3771	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3772	atomic_inc(&adapter->running_cap_crqs);
3773	ibmvnic_send_crq(adapter, &crq);
3774
3775	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3776	atomic_inc(&adapter->running_cap_crqs);
3777	ibmvnic_send_crq(adapter, &crq);
3778
3779	crq.query_capability.capability =
3780			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3781	atomic_inc(&adapter->running_cap_crqs);
3782	ibmvnic_send_crq(adapter, &crq);
3783
3784	crq.query_capability.capability =
3785			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3786	atomic_inc(&adapter->running_cap_crqs);
3787	ibmvnic_send_crq(adapter, &crq);
3788
3789	crq.query_capability.capability =
3790			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3791	atomic_inc(&adapter->running_cap_crqs);
3792	ibmvnic_send_crq(adapter, &crq);
3793
3794	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3795	atomic_inc(&adapter->running_cap_crqs);
3796	ibmvnic_send_crq(adapter, &crq);
3797}
3798
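/* VPD retrieval is a two-step exchange: GET_VPD_SIZE_RSP reports how
 * large a buffer to allocate, and GET_VPD_RSP delivers the data from
 * which handle_vpd_rsp() parses the firmware level.
 */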
3799static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3800				struct ibmvnic_adapter *adapter)
3801{
3802	struct device *dev = &adapter->vdev->dev;
3803
3804	if (crq->get_vpd_size_rsp.rc.code) {
3805		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3806			crq->get_vpd_size_rsp.rc.code);
3807		complete(&adapter->fw_done);
3808		return;
3809	}
3810
3811	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3812	complete(&adapter->fw_done);
3813}
3814
3815static void handle_vpd_rsp(union ibmvnic_crq *crq,
3816			   struct ibmvnic_adapter *adapter)
3817{
3818	struct device *dev = &adapter->vdev->dev;
3819	unsigned char *substr = NULL;
3820	u8 fw_level_len = 0;
3821
3822	memset(adapter->fw_version, 0, 32);
3823
3824	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3825			 DMA_FROM_DEVICE);
3826
3827	if (crq->get_vpd_rsp.rc.code) {
3828		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3829			crq->get_vpd_rsp.rc.code);
3830		goto complete;
3831	}
3832
3833	/* get the position of the firmware version info
3834	 * located after the ASCII 'RM' substring in the buffer
3835	 */
3836	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3837	if (!substr) {
3838		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3839		goto complete;
3840	}
3841
3842	/* get length of firmware level ASCII substring */
3843	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3844		fw_level_len = *(substr + 2);
3845	} else {
3846		dev_info(dev, "FW level length field extends beyond the VPD buffer\n");
3847		goto complete;
3848	}
3849
3850	/* copy firmware version string from vpd into adapter */
3851	if ((substr + 3 + fw_level_len) <
3852	    (adapter->vpd->buff + adapter->vpd->len)) {
3853		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3854	} else {
3855		dev_info(dev, "FW level substring extends beyond the VPD buffer\n");
3856	}
3857
3858complete:
3859	if (adapter->fw_version[0] == '\0')
3860		strncpy((char *)adapter->fw_version, "N/A", 3);
3861	complete(&adapter->fw_done);
3862}
3863
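/* Translate the offload capabilities reported by the server into
 * netdev features, then echo the chosen settings back in a
 * CONTROL_IP_OFFLOAD request so the server enables exactly what the
 * driver advertises.
 */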
3864static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3865{
3866	struct device *dev = &adapter->vdev->dev;
3867	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3868	netdev_features_t old_hw_features = 0;
3869	union ibmvnic_crq crq;
3870	int i;
3871
3872	dma_unmap_single(dev, adapter->ip_offload_tok,
3873			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3874
3875	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3876	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3877		netdev_dbg(adapter->netdev, "%016lx\n",
3878			   ((unsigned long int *)(buf))[i]);
3879
3880	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3881	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3882	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3883		   buf->tcp_ipv4_chksum);
3884	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3885		   buf->tcp_ipv6_chksum);
3886	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3887		   buf->udp_ipv4_chksum);
3888	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3889		   buf->udp_ipv6_chksum);
3890	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3891		   buf->large_tx_ipv4);
3892	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3893		   buf->large_tx_ipv6);
3894	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3895		   buf->large_rx_ipv4);
3896	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3897		   buf->large_rx_ipv6);
3898	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3899		   buf->max_ipv4_header_size);
3900	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3901		   buf->max_ipv6_header_size);
3902	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3903		   buf->max_tcp_header_size);
3904	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3905		   buf->max_udp_header_size);
3906	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3907		   buf->max_large_tx_size);
3908	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3909		   buf->max_large_rx_size);
3910	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3911		   buf->ipv6_extension_header);
3912	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3913		   buf->tcp_pseudosum_req);
3914	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3915		   buf->num_ipv6_ext_headers);
3916	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3917		   buf->off_ipv6_ext_headers);
3918
3919	adapter->ip_offload_ctrl_tok =
3920	    dma_map_single(dev, &adapter->ip_offload_ctrl,
3921			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3922
3923	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3924		dev_err(dev, "Couldn't map ip offload control buffer\n");
3925		return;
3926	}
3927
3928	adapter->ip_offload_ctrl.len =
3929	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3930	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3931	adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3932	adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3933	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3934	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3935	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3936	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3937	adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3938	adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3939
3940	/* large_rx disabled for now, additional features needed */
3941	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3942	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3943
3944	if (adapter->state != VNIC_PROBING) {
3945		old_hw_features = adapter->netdev->hw_features;
3946		adapter->netdev->hw_features = 0;
3947	}
3948
3949	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3950
3951	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3952		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3953
3954	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3955		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3956
3957	if ((adapter->netdev->features &
3958	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3959		adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3960
3961	if (buf->large_tx_ipv4)
3962		adapter->netdev->hw_features |= NETIF_F_TSO;
3963	if (buf->large_tx_ipv6)
3964		adapter->netdev->hw_features |= NETIF_F_TSO6;
3965
3966	if (adapter->state == VNIC_PROBING) {
3967		adapter->netdev->features |= adapter->netdev->hw_features;
3968	} else if (old_hw_features != adapter->netdev->hw_features) {
3969		netdev_features_t tmp = 0;
3970
3971		/* disable features no longer supported */
3972		adapter->netdev->features &= adapter->netdev->hw_features;
3973		/* turn on features now supported if previously enabled */
3974		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3975			adapter->netdev->hw_features;
3976		adapter->netdev->features |=
3977				tmp & adapter->netdev->wanted_features;
3978	}
3979
3980	memset(&crq, 0, sizeof(crq));
3981	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3982	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3983	crq.control_ip_offload.len =
3984	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3985	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3986	ibmvnic_send_crq(adapter, &crq);
3987}
3988
3989static const char *ibmvnic_fw_err_cause(u16 cause)
3990{
3991	switch (cause) {
3992	case ADAPTER_PROBLEM:
3993		return "adapter problem";
3994	case BUS_PROBLEM:
3995		return "bus problem";
3996	case FW_PROBLEM:
3997		return "firmware problem";
3998	case DD_PROBLEM:
3999		return "device driver problem";
4000	case EEH_RECOVERY:
4001		return "EEH recovery";
4002	case FW_UPDATED:
4003		return "firmware updated";
4004	case LOW_MEMORY:
4005		return "low memory";
4006	default:
4007		return "unknown";
4008	}
4009}
4010
4011static void handle_error_indication(union ibmvnic_crq *crq,
4012				    struct ibmvnic_adapter *adapter)
4013{
4014	struct device *dev = &adapter->vdev->dev;
4015	u16 cause;
4016
4017	cause = be16_to_cpu(crq->error_indication.error_cause);
4018
4019	dev_warn_ratelimited(dev,
4020			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
4021			     crq->error_indication.flags
4022				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4023			     ibmvnic_fw_err_cause(cause));
4024
4025	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4026		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4027	else
4028		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4029}
4030
4031static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4032				 struct ibmvnic_adapter *adapter)
4033{
4034	struct net_device *netdev = adapter->netdev;
4035	struct device *dev = &adapter->vdev->dev;
4036	long rc;
4037
4038	rc = crq->change_mac_addr_rsp.rc.code;
4039	if (rc) {
4040		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4041		goto out;
4042	}
4043	ether_addr_copy(netdev->dev_addr,
4044			&crq->change_mac_addr_rsp.mac_addr[0]);
4045out:
4046	complete(&adapter->fw_done);
4047	return rc;
4048}
4049
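/* Each REQUEST_CAPABILITY CRQ gets an individual response. On
 * PARTIALSUCCESS the server reports the value it can honor; the driver
 * adopts it (or reverts to the fallback MTU) and renegotiates the whole
 * set. Once the last outstanding response arrives, the IP offload
 * query is issued.
 */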
4050static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4051				   struct ibmvnic_adapter *adapter)
4052{
4053	struct device *dev = &adapter->vdev->dev;
4054	u64 *req_value;
4055	char *name;
4056
4057	atomic_dec(&adapter->running_cap_crqs);
4058	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4059	case REQ_TX_QUEUES:
4060		req_value = &adapter->req_tx_queues;
4061		name = "tx";
4062		break;
4063	case REQ_RX_QUEUES:
4064		req_value = &adapter->req_rx_queues;
4065		name = "rx";
4066		break;
4067	case REQ_RX_ADD_QUEUES:
4068		req_value = &adapter->req_rx_add_queues;
4069		name = "rx_add";
4070		break;
4071	case REQ_TX_ENTRIES_PER_SUBCRQ:
4072		req_value = &adapter->req_tx_entries_per_subcrq;
4073		name = "tx_entries_per_subcrq";
4074		break;
4075	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4076		req_value = &adapter->req_rx_add_entries_per_subcrq;
4077		name = "rx_add_entries_per_subcrq";
4078		break;
4079	case REQ_MTU:
4080		req_value = &adapter->req_mtu;
4081		name = "mtu";
4082		break;
4083	case PROMISC_REQUESTED:
4084		req_value = &adapter->promisc;
4085		name = "promisc";
4086		break;
4087	default:
4088		dev_err(dev, "Got invalid cap request rsp %d\n",
4089			crq->request_capability.capability);
4090		return;
4091	}
4092
4093	switch (crq->request_capability_rsp.rc.code) {
4094	case SUCCESS:
4095		break;
4096	case PARTIALSUCCESS:
4097		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4098			 *req_value,
4099			 (long int)be64_to_cpu(crq->request_capability_rsp.
4100					       number), name);
4101
4102		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4103		    REQ_MTU) {
4104			pr_err("mtu of %llu is not supported. Reverting.\n",
4105			       *req_value);
4106			*req_value = adapter->fallback.mtu;
4107		} else {
4108			*req_value =
4109				be64_to_cpu(crq->request_capability_rsp.number);
4110		}
4111
4112		ibmvnic_send_req_caps(adapter, 1);
4113		return;
4114	default:
4115		dev_err(dev, "Error %d in request cap rsp\n",
4116			crq->request_capability_rsp.rc.code);
4117		return;
4118	}
4119
4120	/* Done receiving requested capabilities, query IP offload support */
4121	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4122		union ibmvnic_crq newcrq;
4123		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4124		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4125		    &adapter->ip_offload_buf;
4126
4127		adapter->wait_capability = false;
4128		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4129							 buf_sz,
4130							 DMA_FROM_DEVICE);
4131
4132		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4133			if (!firmware_has_feature(FW_FEATURE_CMO))
4134				dev_err(dev, "Couldn't map offload buffer\n");
4135			return;
4136		}
4137
4138		memset(&newcrq, 0, sizeof(newcrq));
4139		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4140		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4141		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4142		newcrq.query_ip_offload.ioba =
4143		    cpu_to_be32(adapter->ip_offload_tok);
4144
4145		ibmvnic_send_crq(adapter, &newcrq);
4146	}
4147}
4148
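/* Process the LOGIN response: unmap the request/response buffers,
 * propagate a non-zero return code to the initialization path (which
 * resends the login with fewer queues), and sanity-check the response
 * against the request before completing init_done.
 */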
4149static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4150			    struct ibmvnic_adapter *adapter)
4151{
4152	struct device *dev = &adapter->vdev->dev;
4153	struct net_device *netdev = adapter->netdev;
4154	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4155	struct ibmvnic_login_buffer *login = adapter->login_buf;
4156	int i;
4157
4158	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4159			 DMA_TO_DEVICE);
4160	dma_unmap_single(dev, adapter->login_rsp_buf_token,
4161			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4162
4163	/* If the number of queues requested can't be allocated by the
4164	 * server, the login response will return with code 1. We will need
4165	 * to resend the login buffer with fewer queues requested.
4166	 */
4167	if (login_rsp_crq->generic.rc.code) {
4168		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4169		complete(&adapter->init_done);
4170		return 0;
4171	}
4172
4173	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4174
4175	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4176	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4177		netdev_dbg(adapter->netdev, "%016lx\n",
4178			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4179	}
4180
4181	/* Sanity checks */
4182	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4183	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
4184	     adapter->req_rx_add_queues !=
4185	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4186		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4187		ibmvnic_remove(adapter->vdev);
4188		return -EIO;
4189	}
4190	release_login_buffer(adapter);
4191	complete(&adapter->init_done);
4192
4193	return 0;
4194}
4195
4196static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4197				     struct ibmvnic_adapter *adapter)
4198{
4199	struct device *dev = &adapter->vdev->dev;
4200	long rc;
4201
4202	rc = crq->request_unmap_rsp.rc.code;
4203	if (rc)
4204		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4205}
4206
4207static void handle_query_map_rsp(union ibmvnic_crq *crq,
4208				 struct ibmvnic_adapter *adapter)
4209{
4210	struct net_device *netdev = adapter->netdev;
4211	struct device *dev = &adapter->vdev->dev;
4212	long rc;
4213
4214	rc = crq->query_map_rsp.rc.code;
4215	if (rc) {
4216		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4217		return;
4218	}
4219	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4220		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4221		   crq->query_map_rsp.free_pages);
4222}
4223
4224static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4225				 struct ibmvnic_adapter *adapter)
4226{
4227	struct net_device *netdev = adapter->netdev;
4228	struct device *dev = &adapter->vdev->dev;
4229	long rc;
4230
4231	atomic_dec(&adapter->running_cap_crqs);
4232	netdev_dbg(netdev, "Outstanding queries: %d\n",
4233		   atomic_read(&adapter->running_cap_crqs));
4234	rc = crq->query_capability.rc.code;
4235	if (rc) {
4236		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4237		goto out;
4238	}
4239
4240	switch (be16_to_cpu(crq->query_capability.capability)) {
4241	case MIN_TX_QUEUES:
4242		adapter->min_tx_queues =
4243		    be64_to_cpu(crq->query_capability.number);
4244		netdev_dbg(netdev, "min_tx_queues = %lld\n",
4245			   adapter->min_tx_queues);
4246		break;
4247	case MIN_RX_QUEUES:
4248		adapter->min_rx_queues =
4249		    be64_to_cpu(crq->query_capability.number);
4250		netdev_dbg(netdev, "min_rx_queues = %lld\n",
4251			   adapter->min_rx_queues);
4252		break;
4253	case MIN_RX_ADD_QUEUES:
4254		adapter->min_rx_add_queues =
4255		    be64_to_cpu(crq->query_capability.number);
4256		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4257			   adapter->min_rx_add_queues);
4258		break;
4259	case MAX_TX_QUEUES:
4260		adapter->max_tx_queues =
4261		    be64_to_cpu(crq->query_capability.number);
4262		netdev_dbg(netdev, "max_tx_queues = %lld\n",
4263			   adapter->max_tx_queues);
4264		break;
4265	case MAX_RX_QUEUES:
4266		adapter->max_rx_queues =
4267		    be64_to_cpu(crq->query_capability.number);
4268		netdev_dbg(netdev, "max_rx_queues = %lld\n",
4269			   adapter->max_rx_queues);
4270		break;
4271	case MAX_RX_ADD_QUEUES:
4272		adapter->max_rx_add_queues =
4273		    be64_to_cpu(crq->query_capability.number);
4274		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4275			   adapter->max_rx_add_queues);
4276		break;
4277	case MIN_TX_ENTRIES_PER_SUBCRQ:
4278		adapter->min_tx_entries_per_subcrq =
4279		    be64_to_cpu(crq->query_capability.number);
4280		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4281			   adapter->min_tx_entries_per_subcrq);
4282		break;
4283	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4284		adapter->min_rx_add_entries_per_subcrq =
4285		    be64_to_cpu(crq->query_capability.number);
4286		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4287			   adapter->min_rx_add_entries_per_subcrq);
4288		break;
4289	case MAX_TX_ENTRIES_PER_SUBCRQ:
4290		adapter->max_tx_entries_per_subcrq =
4291		    be64_to_cpu(crq->query_capability.number);
4292		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4293			   adapter->max_tx_entries_per_subcrq);
4294		break;
4295	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4296		adapter->max_rx_add_entries_per_subcrq =
4297		    be64_to_cpu(crq->query_capability.number);
4298		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4299			   adapter->max_rx_add_entries_per_subcrq);
4300		break;
4301	case TCP_IP_OFFLOAD:
4302		adapter->tcp_ip_offload =
4303		    be64_to_cpu(crq->query_capability.number);
4304		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4305			   adapter->tcp_ip_offload);
4306		break;
4307	case PROMISC_SUPPORTED:
4308		adapter->promisc_supported =
4309		    be64_to_cpu(crq->query_capability.number);
4310		netdev_dbg(netdev, "promisc_supported = %lld\n",
4311			   adapter->promisc_supported);
4312		break;
4313	case MIN_MTU:
4314		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4315		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4316		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4317		break;
4318	case MAX_MTU:
4319		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4320		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4321		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4322		break;
4323	case MAX_MULTICAST_FILTERS:
4324		adapter->max_multicast_filters =
4325		    be64_to_cpu(crq->query_capability.number);
4326		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4327			   adapter->max_multicast_filters);
4328		break;
4329	case VLAN_HEADER_INSERTION:
4330		adapter->vlan_header_insertion =
4331		    be64_to_cpu(crq->query_capability.number);
4332		if (adapter->vlan_header_insertion)
4333			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4334		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4335			   adapter->vlan_header_insertion);
4336		break;
4337	case RX_VLAN_HEADER_INSERTION:
4338		adapter->rx_vlan_header_insertion =
4339		    be64_to_cpu(crq->query_capability.number);
4340		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4341			   adapter->rx_vlan_header_insertion);
4342		break;
4343	case MAX_TX_SG_ENTRIES:
4344		adapter->max_tx_sg_entries =
4345		    be64_to_cpu(crq->query_capability.number);
4346		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4347			   adapter->max_tx_sg_entries);
4348		break;
4349	case RX_SG_SUPPORTED:
4350		adapter->rx_sg_supported =
4351		    be64_to_cpu(crq->query_capability.number);
4352		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4353			   adapter->rx_sg_supported);
4354		break;
4355	case OPT_TX_COMP_SUB_QUEUES:
4356		adapter->opt_tx_comp_sub_queues =
4357		    be64_to_cpu(crq->query_capability.number);
4358		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4359			   adapter->opt_tx_comp_sub_queues);
4360		break;
4361	case OPT_RX_COMP_QUEUES:
4362		adapter->opt_rx_comp_queues =
4363		    be64_to_cpu(crq->query_capability.number);
4364		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4365			   adapter->opt_rx_comp_queues);
4366		break;
4367	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4368		adapter->opt_rx_bufadd_q_per_rx_comp_q =
4369		    be64_to_cpu(crq->query_capability.number);
4370		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4371			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
4372		break;
4373	case OPT_TX_ENTRIES_PER_SUBCRQ:
4374		adapter->opt_tx_entries_per_subcrq =
4375		    be64_to_cpu(crq->query_capability.number);
4376		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4377			   adapter->opt_tx_entries_per_subcrq);
4378		break;
4379	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4380		adapter->opt_rxba_entries_per_subcrq =
4381		    be64_to_cpu(crq->query_capability.number);
4382		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4383			   adapter->opt_rxba_entries_per_subcrq);
4384		break;
4385	case TX_RX_DESC_REQ:
4386		adapter->tx_rx_desc_req = crq->query_capability.number;
4387		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4388			   adapter->tx_rx_desc_req);
4389		break;
4390
4391	default:
4392		netdev_err(netdev, "Got invalid cap rsp %d\n",
4393			   crq->query_capability.capability);
4394	}
4395
4396out:
4397	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4398		adapter->wait_capability = false;
4399		ibmvnic_send_req_caps(adapter, 0);
4400	}
4401}
4402
4403static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4404{
4405	union ibmvnic_crq crq;
4406	int rc;
4407
4408	memset(&crq, 0, sizeof(crq));
4409	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4410	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4411	init_completion(&adapter->fw_done);
4412	rc = ibmvnic_send_crq(adapter, &crq);
4413	if (rc)
4414		return rc;
4415	wait_for_completion(&adapter->fw_done);
4416	return adapter->fw_done_rc ? -EIO : 0;
4417}
4418
4419static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4420				       struct ibmvnic_adapter *adapter)
4421{
4422	struct net_device *netdev = adapter->netdev;
4423	int rc;
4424	u32 rspeed = be32_to_cpu(crq->query_phys_parms_rsp.speed);
4425
4426	rc = crq->query_phys_parms_rsp.rc.code;
4427	if (rc) {
4428		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4429		return rc;
4430	}
4431	switch (rspeed) {
4432	case IBMVNIC_10MBPS:
4433		adapter->speed = SPEED_10;
4434		break;
4435	case IBMVNIC_100MBPS:
4436		adapter->speed = SPEED_100;
4437		break;
4438	case IBMVNIC_1GBPS:
4439		adapter->speed = SPEED_1000;
4440		break;
4441	case IBMVNIC_10GBP:
4442		adapter->speed = SPEED_10000;
4443		break;
4444	case IBMVNIC_25GBPS:
4445		adapter->speed = SPEED_25000;
4446		break;
4447	case IBMVNIC_40GBPS:
4448		adapter->speed = SPEED_40000;
4449		break;
4450	case IBMVNIC_50GBPS:
4451		adapter->speed = SPEED_50000;
4452		break;
4453	case IBMVNIC_100GBPS:
4454		adapter->speed = SPEED_100000;
4455		break;
4456	default:
4457		if (netif_carrier_ok(netdev))
4458			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4459		adapter->speed = SPEED_UNKNOWN;
4460	}
4461	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4462		adapter->duplex = DUPLEX_FULL;
4463	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4464		adapter->duplex = DUPLEX_HALF;
4465	else
4466		adapter->duplex = DUPLEX_UNKNOWN;
4467
4468	return rc;
4469}
4470
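/* Top-level CRQ dispatcher. Transport events (migration, failover,
 * connection loss) are recognized from the first byte of the message;
 * everything else is a command response demultiplexed on gen_crq->cmd.
 */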
4471static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4472			       struct ibmvnic_adapter *adapter)
4473{
4474	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4475	struct net_device *netdev = adapter->netdev;
4476	struct device *dev = &adapter->vdev->dev;
4477	u64 *u64_crq = (u64 *)crq;
4478	long rc;
4479
4480	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4481		   (unsigned long int)cpu_to_be64(u64_crq[0]),
4482		   (unsigned long int)cpu_to_be64(u64_crq[1]));
4483	switch (gen_crq->first) {
4484	case IBMVNIC_CRQ_INIT_RSP:
4485		switch (gen_crq->cmd) {
4486		case IBMVNIC_CRQ_INIT:
4487			dev_info(dev, "Partner initialized\n");
4488			adapter->from_passive_init = true;
4489			adapter->failover_pending = false;
4490			if (!completion_done(&adapter->init_done)) {
4491				complete(&adapter->init_done);
4492				adapter->init_done_rc = -EIO;
4493			}
4494			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4495			break;
4496		case IBMVNIC_CRQ_INIT_COMPLETE:
4497			dev_info(dev, "Partner initialization complete\n");
4498			adapter->crq.active = true;
4499			send_version_xchg(adapter);
4500			break;
4501		default:
4502			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4503		}
4504		return;
4505	case IBMVNIC_CRQ_XPORT_EVENT:
4506		netif_carrier_off(netdev);
4507		adapter->crq.active = false;
4508		if (test_bit(0, &adapter->resetting))
4509			adapter->force_reset_recovery = true;
4510		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4511			dev_info(dev, "Migrated, re-enabling adapter\n");
4512			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4513		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4514			dev_info(dev, "Backing device failover detected\n");
4515			adapter->failover_pending = true;
4516		} else {
4517			/* The adapter lost the connection */
4518			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4519				gen_crq->cmd);
4520			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4521		}
4522		return;
4523	case IBMVNIC_CRQ_CMD_RSP:
4524		break;
4525	default:
4526		dev_err(dev, "Got an invalid msg type 0x%02x\n",
4527			gen_crq->first);
4528		return;
4529	}
4530
4531	switch (gen_crq->cmd) {
4532	case VERSION_EXCHANGE_RSP:
4533		rc = crq->version_exchange_rsp.rc.code;
4534		if (rc) {
4535			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4536			break;
4537		}
4538		dev_info(dev, "Partner protocol version is %d\n",
4539			 crq->version_exchange_rsp.version);
4540		if (be16_to_cpu(crq->version_exchange_rsp.version) <
4541		    ibmvnic_version)
4542			ibmvnic_version =
4543			    be16_to_cpu(crq->version_exchange_rsp.version);
4544		send_cap_queries(adapter);
4545		break;
4546	case QUERY_CAPABILITY_RSP:
4547		handle_query_cap_rsp(crq, adapter);
4548		break;
4549	case QUERY_MAP_RSP:
4550		handle_query_map_rsp(crq, adapter);
4551		break;
4552	case REQUEST_MAP_RSP:
4553		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4554		complete(&adapter->fw_done);
4555		break;
4556	case REQUEST_UNMAP_RSP:
4557		handle_request_unmap_rsp(crq, adapter);
4558		break;
4559	case REQUEST_CAPABILITY_RSP:
4560		handle_request_cap_rsp(crq, adapter);
4561		break;
4562	case LOGIN_RSP:
4563		netdev_dbg(netdev, "Got Login Response\n");
4564		handle_login_rsp(crq, adapter);
4565		break;
4566	case LOGICAL_LINK_STATE_RSP:
4567		netdev_dbg(netdev,
4568			   "Got Logical Link State Response, state: %d rc: %d\n",
4569			   crq->logical_link_state_rsp.link_state,
4570			   crq->logical_link_state_rsp.rc.code);
4571		adapter->logical_link_state =
4572		    crq->logical_link_state_rsp.link_state;
4573		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4574		complete(&adapter->init_done);
4575		break;
4576	case LINK_STATE_INDICATION:
4577		netdev_dbg(netdev, "Got Logical Link State Indication\n");
4578		adapter->phys_link_state =
4579		    crq->link_state_indication.phys_link_state;
4580		adapter->logical_link_state =
4581		    crq->link_state_indication.logical_link_state;
4582		if (adapter->phys_link_state && adapter->logical_link_state)
4583			netif_carrier_on(netdev);
4584		else
4585			netif_carrier_off(netdev);
4586		break;
4587	case CHANGE_MAC_ADDR_RSP:
4588		netdev_dbg(netdev, "Got MAC address change Response\n");
4589		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4590		break;
4591	case ERROR_INDICATION:
4592		netdev_dbg(netdev, "Got Error Indication\n");
4593		handle_error_indication(crq, adapter);
4594		break;
4595	case REQUEST_STATISTICS_RSP:
4596		netdev_dbg(netdev, "Got Statistics Response\n");
4597		complete(&adapter->stats_done);
4598		break;
4599	case QUERY_IP_OFFLOAD_RSP:
4600		netdev_dbg(netdev, "Got Query IP offload Response\n");
4601		handle_query_ip_offload_rsp(adapter);
4602		break;
4603	case MULTICAST_CTRL_RSP:
4604		netdev_dbg(netdev, "Got multicast control Response\n");
4605		break;
4606	case CONTROL_IP_OFFLOAD_RSP:
4607		netdev_dbg(netdev, "Got Control IP offload Response\n");
4608		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4609				 sizeof(adapter->ip_offload_ctrl),
4610				 DMA_TO_DEVICE);
4611		complete(&adapter->init_done);
4612		break;
4613	case COLLECT_FW_TRACE_RSP:
4614		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4615		complete(&adapter->fw_done);
4616		break;
4617	case GET_VPD_SIZE_RSP:
4618		handle_vpd_size_rsp(crq, adapter);
4619		break;
4620	case GET_VPD_RSP:
4621		handle_vpd_rsp(crq, adapter);
4622		break;
4623	case QUERY_PHYS_PARMS_RSP:
4624		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4625		complete(&adapter->fw_done);
4626		break;
4627	default:
4628		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4629			   gen_crq->cmd);
4630	}
4631}
4632
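/* The CRQ interrupt handler only schedules the tasklet; all message
 * processing happens in ibmvnic_tasklet() with the queue lock held.
 */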
4633static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4634{
4635	struct ibmvnic_adapter *adapter = instance;
4636
4637	tasklet_schedule(&adapter->tasklet);
4638	return IRQ_HANDLED;
4639}
4640
4641static void ibmvnic_tasklet(void *data)
4642{
4643	struct ibmvnic_adapter *adapter = data;
4644	struct ibmvnic_crq_queue *queue = &adapter->crq;
4645	union ibmvnic_crq *crq;
4646	unsigned long flags;
4647	bool done = false;
4648
4649	spin_lock_irqsave(&queue->lock, flags);
4650	while (!done) {
4651		/* Pull all the valid messages off the CRQ */
4652		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4653			ibmvnic_handle_crq(crq, adapter);
4654			crq->generic.first = 0;
4655		}
4656
4657		/* remain in tasklet until all
4658		 * capabilities responses are received
4659		 */
4660		if (!adapter->wait_capability)
4661			done = true;
4662	}
4663	/* if capability CRQs were sent in this tasklet, the following
4664	 * tasklet run must wait until all responses are received
4665	 */
4666	if (atomic_read(&adapter->running_cap_crqs) != 0)
4667		adapter->wait_capability = true;
4668	spin_unlock_irqrestore(&queue->lock, flags);
4669}
4670
4671static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4672{
4673	struct vio_dev *vdev = adapter->vdev;
4674	int rc;
4675
4676	do {
4677		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4678	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4679
4680	if (rc)
4681		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4682
4683	return rc;
4684}
4685
4686static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4687{
4688	struct ibmvnic_crq_queue *crq = &adapter->crq;
4689	struct device *dev = &adapter->vdev->dev;
4690	struct vio_dev *vdev = adapter->vdev;
4691	int rc;
4692
4693	/* Close the CRQ */
4694	do {
4695		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4696	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4697
4698	/* Clean out the queue */
4699	memset(crq->msgs, 0, PAGE_SIZE);
4700	crq->cur = 0;
4701	crq->active = false;
4702
4703	/* And re-open it again */
4704	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4705				crq->msg_token, PAGE_SIZE);
4706
4707	if (rc == H_CLOSED)
4708		/* Adapter is good, but other end is not ready */
4709		dev_warn(dev, "Partner adapter not ready\n");
4710	else if (rc != 0)
4711		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4712
4713	return rc;
4714}
4715
4716static void release_crq_queue(struct ibmvnic_adapter *adapter)
4717{
4718	struct ibmvnic_crq_queue *crq = &adapter->crq;
4719	struct vio_dev *vdev = adapter->vdev;
4720	long rc;
4721
4722	if (!crq->msgs)
4723		return;
4724
4725	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4726	free_irq(vdev->irq, adapter);
4727	tasklet_kill(&adapter->tasklet);
4728	do {
4729		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4730	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4731
4732	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4733			 DMA_BIDIRECTIONAL);
4734	free_page((unsigned long)crq->msgs);
4735	crq->msgs = NULL;
4736	crq->active = false;
4737}
4738
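/* Allocate the one-page CRQ ring, register it with the hypervisor via
 * H_REG_CRQ (falling back to a reset if the resource is still busy,
 * e.g. across a kexec), then wire up the interrupt and tasklet.
 */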
4739static int init_crq_queue(struct ibmvnic_adapter *adapter)
4740{
4741	struct ibmvnic_crq_queue *crq = &adapter->crq;
4742	struct device *dev = &adapter->vdev->dev;
4743	struct vio_dev *vdev = adapter->vdev;
4744	int rc, retrc = -ENOMEM;
4745
4746	if (crq->msgs)
4747		return 0;
4748
4749	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4750	/* Should we allocate more than one page? */
4751
4752	if (!crq->msgs)
4753		return -ENOMEM;
4754
4755	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4756	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4757					DMA_BIDIRECTIONAL);
4758	if (dma_mapping_error(dev, crq->msg_token))
4759		goto map_failed;
4760
4761	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4762				crq->msg_token, PAGE_SIZE);
4763
4764	if (rc == H_RESOURCE)
4765		/* maybe kexecing and resource is busy. try a reset */
4766		rc = ibmvnic_reset_crq(adapter);
4767	retrc = rc;
4768
4769	if (rc == H_CLOSED) {
4770		dev_warn(dev, "Partner adapter not ready\n");
4771	} else if (rc) {
4772		dev_warn(dev, "Error %d opening adapter\n", rc);
4773		goto reg_crq_failed;
4774	}
4775
4776	retrc = 0;
4777
4778	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4779		     (unsigned long)adapter);
4780
4781	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4782	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4783		 adapter->vdev->unit_address);
4784	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
4785	if (rc) {
4786		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4787			vdev->irq, rc);
4788		goto req_irq_failed;
4789	}
4790
4791	rc = vio_enable_interrupts(vdev);
4792	if (rc) {
4793		dev_err(dev, "Error %d enabling interrupts\n", rc);
4794		goto req_irq_failed;
4795	}
4796
4797	crq->cur = 0;
4798	spin_lock_init(&crq->lock);
4799
4800	return retrc;
4801
4802req_irq_failed:
4803	tasklet_kill(&adapter->tasklet);
4804	do {
4805		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4806	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4807reg_crq_failed:
4808	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4809map_failed:
4810	free_page((unsigned long)crq->msgs);
4811	crq->msgs = NULL;
4812	return retrc;
4813}
4814
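/* Re-run the init handshake after a reset. The CRQ itself is reused;
 * sub-CRQs are recreated only if the negotiated queue counts changed,
 * otherwise they are reset in place.
 */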
4815static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4816{
4817	struct device *dev = &adapter->vdev->dev;
4818	unsigned long timeout = msecs_to_jiffies(30000);
4819	u64 old_num_rx_queues, old_num_tx_queues;
4820	int rc;
4821
4822	adapter->from_passive_init = false;
4823
4824	old_num_rx_queues = adapter->req_rx_queues;
4825	old_num_tx_queues = adapter->req_tx_queues;
4826
4827	reinit_completion(&adapter->init_done);
4828	adapter->init_done_rc = 0;
4829	ibmvnic_send_crq_init(adapter);
4830	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4831		dev_err(dev, "Initialization sequence timed out\n");
4832		return -1;
4833	}
4834
4835	if (adapter->init_done_rc) {
4836		release_crq_queue(adapter);
4837		return adapter->init_done_rc;
4838	}
4839
4840	if (adapter->from_passive_init) {
4841		adapter->state = VNIC_OPEN;
4842		adapter->from_passive_init = false;
4843		return -1;
4844	}
4845
4846	if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
4847	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
4848		if (adapter->req_rx_queues != old_num_rx_queues ||
4849		    adapter->req_tx_queues != old_num_tx_queues) {
4850			release_sub_crqs(adapter, 0);
4851			rc = init_sub_crqs(adapter);
4852		} else {
4853			rc = reset_sub_crq_queues(adapter);
4854		}
4855	} else {
4856		rc = init_sub_crqs(adapter);
4857	}
4858
4859	if (rc) {
4860		dev_err(dev, "Initialization of sub crqs failed\n");
4861		release_crq_queue(adapter);
4862		return rc;
4863	}
4864
4865	rc = init_sub_crq_irqs(adapter);
4866	if (rc) {
4867		dev_err(dev, "Failed to initialize sub crq irqs\n");
4868		release_crq_queue(adapter);
4869	}
4870
4871	return rc;
4872}
4873
4874static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4875{
4876	struct device *dev = &adapter->vdev->dev;
4877	unsigned long timeout = msecs_to_jiffies(30000);
4878	int rc;
4879
4880	adapter->from_passive_init = false;
4881
4882	adapter->init_done_rc = 0;
4883	ibmvnic_send_crq_init(adapter);
4884	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4885		dev_err(dev, "Initialization sequence timed out\n");
4886		return -1;
4887	}
4888
4889	if (adapter->init_done_rc) {
4890		release_crq_queue(adapter);
4891		return adapter->init_done_rc;
4892	}
4893
4894	if (adapter->from_passive_init) {
4895		adapter->state = VNIC_OPEN;
4896		adapter->from_passive_init = false;
4897		return -1;
4898	}
4899
4900	rc = init_sub_crqs(adapter);
4901	if (rc) {
4902		dev_err(dev, "Initialization of sub crqs failed\n");
4903		release_crq_queue(adapter);
4904		return rc;
4905	}
4906
4907	rc = init_sub_crq_irqs(adapter);
4908	if (rc) {
4909		dev_err(dev, "Failed to initialize sub crq irqs\n");
4910		release_crq_queue(adapter);
4911	}
4912
4913	return rc;
4914}
4915
4916static struct device_attribute dev_attr_failover;
4917
4918static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4919{
4920	struct ibmvnic_adapter *adapter;
4921	struct net_device *netdev;
4922	unsigned char *mac_addr_p;
4923	int rc;
4924
4925	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4926		dev->unit_address);
4927
4928	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4929							VETH_MAC_ADDR, NULL);
4930	if (!mac_addr_p) {
4931		dev_err(&dev->dev,
4932			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4933			__FILE__, __LINE__);
4934		return 0;
4935	}
4936
4937	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4938				   IBMVNIC_MAX_QUEUES);
4939	if (!netdev)
4940		return -ENOMEM;
4941
4942	adapter = netdev_priv(netdev);
4943	adapter->state = VNIC_PROBING;
4944	dev_set_drvdata(&dev->dev, netdev);
4945	adapter->vdev = dev;
4946	adapter->netdev = netdev;
4947
4948	ether_addr_copy(adapter->mac_addr, mac_addr_p);
4949	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4950	netdev->irq = dev->irq;
4951	netdev->netdev_ops = &ibmvnic_netdev_ops;
4952	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4953	SET_NETDEV_DEV(netdev, &dev->dev);
4954
4955	spin_lock_init(&adapter->stats_lock);
4956
4957	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4958	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
4959			  __ibmvnic_delayed_reset);
4960	INIT_LIST_HEAD(&adapter->rwi_list);
4961	spin_lock_init(&adapter->rwi_lock);
4962	init_completion(&adapter->init_done);
4963	clear_bit(0, &adapter->resetting);
4964
4965	do {
4966		rc = init_crq_queue(adapter);
4967		if (rc) {
4968			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4969				rc);
4970			goto ibmvnic_init_fail;
4971		}
4972
4973		rc = ibmvnic_init(adapter);
4974		if (rc && rc != EAGAIN)
4975			goto ibmvnic_init_fail;
4976	} while (rc == EAGAIN);
4977
4978	rc = init_stats_buffers(adapter);
4979	if (rc)
4980		goto ibmvnic_init_fail;
4981
4982	rc = init_stats_token(adapter);
4983	if (rc)
4984		goto ibmvnic_stats_fail;
4985
4986	netdev->mtu = adapter->req_mtu - ETH_HLEN;
4987	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4988	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4989
4990	rc = device_create_file(&dev->dev, &dev_attr_failover);
4991	if (rc)
4992		goto ibmvnic_dev_file_err;
4993
4994	netif_carrier_off(netdev);
4995	rc = register_netdev(netdev);
4996	if (rc) {
4997		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4998		goto ibmvnic_register_fail;
4999	}
5000	dev_info(&dev->dev, "ibmvnic registered\n");
5001
5002	adapter->state = VNIC_PROBED;
5003
5004	adapter->wait_for_reset = false;
5005
5006	return 0;
5007
5008ibmvnic_register_fail:
5009	device_remove_file(&dev->dev, &dev_attr_failover);
5010
5011ibmvnic_dev_file_err:
5012	release_stats_token(adapter);
5013
5014ibmvnic_stats_fail:
5015	release_stats_buffers(adapter);
5016
5017ibmvnic_init_fail:
5018	release_sub_crqs(adapter, 1);
5019	release_crq_queue(adapter);
5020	free_netdev(netdev);
5021
5022	return rc;
5023}
5024
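/* Tear down in roughly the reverse order of probe: unregister the
 * netdev under the RTNL lock, release the queues and statistics
 * buffers, then remove the sysfs attribute and free the net_device.
 */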
5025static int ibmvnic_remove(struct vio_dev *dev)
5026{
5027	struct net_device *netdev = dev_get_drvdata(&dev->dev);
5028	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5029
5030	adapter->state = VNIC_REMOVING;
5031	rtnl_lock();
5032	unregister_netdevice(netdev);
5033
5034	release_resources(adapter);
5035	release_sub_crqs(adapter, 1);
5036	release_crq_queue(adapter);
5037
5038	release_stats_token(adapter);
5039	release_stats_buffers(adapter);
5040
5041	adapter->state = VNIC_REMOVED;
5042
5043	rtnl_unlock();
5044	device_remove_file(&dev->dev, &dev_attr_failover);
5045	free_netdev(netdev);
5046	dev_set_drvdata(&dev->dev, NULL);
5047
5048	return 0;
5049}
5050
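/* sysfs store handler for the "failover" attribute. Writing "1" fetches
 * the current session token with the H_GET_SESSION_TOKEN subfunction of
 * H_VIOCTL and then reports H_SESSION_ERR_DETECTED for that token,
 * initiating a client-side failover.
 */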
5051static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5052			      const char *buf, size_t count)
5053{
5054	struct net_device *netdev = dev_get_drvdata(dev);
5055	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5056	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5057	__be64 session_token;
5058	long rc;
5059
5060	if (!sysfs_streq(buf, "1"))
5061		return -EINVAL;
5062
5063	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5064			 H_GET_SESSION_TOKEN, 0, 0, 0);
5065	if (rc) {
5066		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5067			   rc);
5068		return -EINVAL;
5069	}
5070
5071	session_token = (__be64)retbuf[0];
5072	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5073		   be64_to_cpu(session_token));
5074	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5075				H_SESSION_ERR_DETECTED, session_token, 0, 0);
5076	if (rc) {
5077		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5078			   rc);
5079		return -EINVAL;
5080	}
5081
5082	return count;
5083}
5084
5085static DEVICE_ATTR_WO(failover);
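/* Illustrative usage from user space (the exact sysfs path depends on
 * the device's unit address; treat the path below as a sketch):
 *
 *	echo 1 > /sys/devices/vio/<unit-address>/failover
 */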
5086
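/* Estimate the DMA entitlement this device wants from the platform: one
 * page for the CRQ, an IOMMU-aligned statistics buffer, four pages per
 * sub-CRQ message queue, and every buffer of each receive pool. As a
 * rough worked example (numbers are illustrative, not from the source),
 * with 4 TX and 4 RX queues the queue portion alone is
 * 1 + (4 + 4) * 4 = 33 pages, before statistics and rx pool buffers.
 */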
5087static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5088{
5089	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5090	struct ibmvnic_adapter *adapter;
5091	struct iommu_table *tbl;
5092	unsigned long ret = 0;
5093	int i;
5094
5095	tbl = get_iommu_table_base(&vdev->dev);
5096
5097	/* netdev is initialized at probe time, along with the structures we need below */
5098	if (!netdev)
5099		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5100
5101	adapter = netdev_priv(netdev);
5102
5103	ret += PAGE_SIZE; /* the crq message queue */
5104	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5105
5106	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5107		ret += 4 * PAGE_SIZE; /* the scrq message queue */
5108
5109	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5110	     i++)
5111		ret += adapter->rx_pool[i].size *
5112		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5113
5114	return ret;
5115}
5116
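/* PM resume hook: if the adapter was open when the system went down,
 * reschedule the adapter's CRQ tasklet so any pending events are
 * processed.
 */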
5117static int ibmvnic_resume(struct device *dev)
5118{
5119	struct net_device *netdev = dev_get_drvdata(dev);
5120	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5121
5122	if (adapter->state != VNIC_OPEN)
5123		return 0;
5124
5125	tasklet_schedule(&adapter->tasklet);
5126
5127	return 0;
5128}
5129
5130static const struct vio_device_id ibmvnic_device_table[] = {
5131	{"network", "IBM,vnic"},
5132	{"", "" }
5133};
5134MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5135
5136static const struct dev_pm_ops ibmvnic_pm_ops = {
5137	.resume = ibmvnic_resume
5138};
5139
5140static struct vio_driver ibmvnic_driver = {
5141	.id_table       = ibmvnic_device_table,
5142	.probe          = ibmvnic_probe,
5143	.remove         = ibmvnic_remove,
5144	.get_desired_dma = ibmvnic_get_desired_dma,
5145	.name		= ibmvnic_driver_name,
5146	.pm		= &ibmvnic_pm_ops,
5147};
5148
5149/* module functions */
5150static int __init ibmvnic_module_init(void)
5151{
5152	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5153		IBMVNIC_DRIVER_VERSION);
5154
5155	return vio_register_driver(&ibmvnic_driver);
5156}
5157
5158static void __exit ibmvnic_module_exit(void)
5159{
5160	vio_unregister_driver(&ibmvnic_driver);
5161}
5162
5163module_init(ibmvnic_module_init);
5164module_exit(ibmvnic_module_exit);