/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

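/*
 * Illustrative note: each entry below pairs an ethtool stat string with
 * the byte offset of a u64 counter inside struct ibmveth_adapter, so
 * ibmveth_get_ethtool_stats() can fill the stats array generically,
 * e.g. data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
 */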
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

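/*
 * The receive queue is a ring shared with the hypervisor.  Each wrap of
 * the ring flips rx_queue.toggle (see ibmveth_rxq_recycle_buffer() and
 * ibmveth_rxq_harvest_buffer()), so an entry belongs to the current
 * pass - i.e. is pending - when its toggle bit matches the driver's copy.
 */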
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

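/*
 * Flush a receive buffer from the data cache one cache line at a time
 * with the dcbf instruction; only used when the rx_flush module
 * parameter is set.
 */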
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

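		/*
		 * The correlator stored in the buffer's first 8 bytes makes
		 * the round trip through the hypervisor: pool number in the
		 * upper 32 bits, buffer index in the lower 32 (e.g. pool 2,
		 * index 5 encodes as 0x0000000200000005).
		 */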
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish all active buffer pools that have fallen below threshold */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

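/*
 * Pack a 6-byte MAC address into the low 48 bits of a u64, most
 * significant byte first, as the hypervisor calls expect (e.g.
 * 02:11:22:33:44:55 encodes as 0x021122334455).
 */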
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

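/*
 * There is no physical link behind a virtual ethernet, so report fixed
 * 1Gb/s full-duplex settings rather than querying hardware.
 */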
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					 clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret,
				     ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware versions of large send offload do not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

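/* offset of v within a 4KiB page, i.e. the low 12 bits of the address */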
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					     descs[0].desc, descs[1].desc,
					     descs[2].desc, descs[3].desc,
					     descs[4].desc, descs[5].desc,
					     correlator, &correlator, mss,
					     adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
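	/*
	 * Mapping failures are expected under CMO when the partition is
	 * short on IO entitlement, so only complain when CMO is not
	 * active; either way fall back to the bounce buffer.
	 */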
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	if (skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}
	force_bounce = 1;
	goto retry_bounce;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and if the packet
					 * is large send, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

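/*
 * Interrupts signal rx queue activity: mask further interrupts and hand
 * the work to NAPI; ibmveth_poll() re-enables them once the queue is
 * drained.
 */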
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If running older firmware, TSO should not be enabled by default */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


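/*
 * sysfs interface: each receive buffer pool appears as pool%d under the
 * vio device with active/num/size attributes; writes go through
 * veth_pool_store() above, which resizes or (de)activates a pool,
 * restarting the interface if it is running.
 */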
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);