/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
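/* All three parameters are registered with mode 0644, so they can also
 * be adjusted at runtime through /sys/module/ibmveth/parameters/.
 */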

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};

/* simple methods of getting data from the current rxq entry */
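/* The producer flips the queue's toggle bit each time it wraps past the
 * end of the ring, so an entry is pending when its toggle bit matches
 * the value the driver expects (adapter->rx_queue.toggle, which is
 * flipped on every wrap in the recycle/harvest paths below).
 */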
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

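/* Flush a buffer from the data cache one SMP_CACHE_BYTES line at a
 * time; "dcbfl" is the PowerPC data-cache-block-flush instruction.
 * Only used when the rx_flush module parameter is set.
 */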
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

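		/* Tag the buffer so the receive path can find it again:
		 * pool number in the upper 32 bits of the correlator,
		 * buffer index in the lower 32 bits.
		 */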
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
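	/* Undo the work done for the failed buffer: return its index to
	 * the free map, rewind the consumer index, and unmap/free the skb.
	 */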
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

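	/* The last 8 bytes of the 4K buffer list page registered with the
	 * hypervisor hold its count of frames dropped for lack of a
	 * receive buffer; snapshot it for the ethtool statistics.
	 */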
	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter,
			adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter,
		adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

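	/* Copy the 6-byte MAC into the high-order bytes of a u64 and shift
	 * it down so it is right-justified, the layout the
	 * h_register_logical_lan call expects.
	 */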
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
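/* page_offset() yields an address's byte offset within a 4 KB page. */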

static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

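		/* NO_CSUM + CSUM_GOOD tell the receiving side the checksum
		 * is already valid, so the partial checksum the stack left
		 * in the transport header is cleared below.
		 */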
		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int __devinit ibmveth_probe(struct vio_dev *dev,
				   const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
	 * ignored) while newer boxes' OF return a 6-byte field. Note that
	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
	 * The RPA doc specifies that the first byte must be 10b, so we'll
	 * just look for it to solve this 8 vs. 6 byte field issue
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

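	/* Mark the DMA handles invalid until ibmveth_open() maps them, so
	 * ibmveth_cleanup() only unmaps what was actually mapped.
	 */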
	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show	= veth_pool_show,
	.store	= veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release	= NULL,
	.sysfs_ops	= &veth_pool_ops,
	.default_attrs	= veth_pool_attrs,
};

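/* The pool attributes appear under the VIO device in sysfs; with an
 * assumed unit address of 30000002, a typical layout would give e.g.:
 *
 *	echo 0 > /sys/devices/vio/30000002/pool3/active
 *
 * Each store kicks the interrupt handler so pool changes take effect
 * without reloading the driver.
 */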
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
1/*
2 * IBM Power Virtual Ethernet Device Driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * Copyright (C) IBM Corporation, 2003, 2010
18 *
19 * Authors: Dave Larson <larson1@us.ibm.com>
20 * Santiago Leon <santil@linux.vnet.ibm.com>
21 * Brian King <brking@linux.vnet.ibm.com>
22 * Robert Jennings <rcj@linux.vnet.ibm.com>
23 * Anton Blanchard <anton@au.ibm.com>
24 */
25
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/types.h>
29#include <linux/errno.h>
30#include <linux/dma-mapping.h>
31#include <linux/kernel.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <linux/mm.h>
38#include <linux/pm.h>
39#include <linux/ethtool.h>
40#include <linux/in.h>
41#include <linux/ip.h>
42#include <linux/ipv6.h>
43#include <linux/slab.h>
44#include <asm/hvcall.h>
45#include <linux/atomic.h>
46#include <asm/vio.h>
47#include <asm/iommu.h>
48#include <asm/firmware.h>
49
50#include "ibmveth.h"
51
52static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
53static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
54static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
55
56static struct kobj_type ktype_veth_pool;
57
58
59static const char ibmveth_driver_name[] = "ibmveth";
60static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
61#define ibmveth_driver_version "1.04"
62
63MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
64MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
65MODULE_LICENSE("GPL");
66MODULE_VERSION(ibmveth_driver_version);
67
68static unsigned int tx_copybreak __read_mostly = 128;
69module_param(tx_copybreak, uint, 0644);
70MODULE_PARM_DESC(tx_copybreak,
71 "Maximum size of packet that is copied to a new buffer on transmit");
72
73static unsigned int rx_copybreak __read_mostly = 128;
74module_param(rx_copybreak, uint, 0644);
75MODULE_PARM_DESC(rx_copybreak,
76 "Maximum size of packet that is copied to a new buffer on receive");
77
78static unsigned int rx_flush __read_mostly = 0;
79module_param(rx_flush, uint, 0644);
80MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
81
82struct ibmveth_stat {
83 char name[ETH_GSTRING_LEN];
84 int offset;
85};
86
87#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
88#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
89
90struct ibmveth_stat ibmveth_stats[] = {
91 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
92 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
93 { "replenish_add_buff_failure",
94 IBMVETH_STAT_OFF(replenish_add_buff_failure) },
95 { "replenish_add_buff_success",
96 IBMVETH_STAT_OFF(replenish_add_buff_success) },
97 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
98 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
99 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
100 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
101 { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
102 { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
103};
104
105/* simple methods of getting data from the current rxq entry */
106static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
107{
108 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
109}
110
111static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
112{
113 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
114 IBMVETH_RXQ_TOGGLE_SHIFT;
115}
116
117static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
118{
119 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
120}
121
122static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
123{
124 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
125}
126
127static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
128{
129 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
130}
131
132static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
133{
134 return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
135}
136
137static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
138{
139 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
140}
141
142/* setup the initial settings for a buffer pool */
143static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
144 u32 pool_index, u32 pool_size,
145 u32 buff_size, u32 pool_active)
146{
147 pool->size = pool_size;
148 pool->index = pool_index;
149 pool->buff_size = buff_size;
150 pool->threshold = pool_size * 7 / 8;
151 pool->active = pool_active;
152}
153
154/* allocate and setup an buffer pool - called during open */
155static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
156{
157 int i;
158
159 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
160
161 if (!pool->free_map)
162 return -1;
163
164 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
165 if (!pool->dma_addr) {
166 kfree(pool->free_map);
167 pool->free_map = NULL;
168 return -1;
169 }
170
171 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
172
173 if (!pool->skbuff) {
174 kfree(pool->dma_addr);
175 pool->dma_addr = NULL;
176
177 kfree(pool->free_map);
178 pool->free_map = NULL;
179 return -1;
180 }
181
182 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
183
184 for (i = 0; i < pool->size; ++i)
185 pool->free_map[i] = i;
186
187 atomic_set(&pool->available, 0);
188 pool->producer_index = 0;
189 pool->consumer_index = 0;
190
191 return 0;
192}
193
194static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
195{
196 unsigned long offset;
197
198 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
199 asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
200}
201
202/* replenish the buffers for a pool. note that we don't need to
203 * skb_reserve these since they are used for incoming...
204 */
205static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
206 struct ibmveth_buff_pool *pool)
207{
208 u32 i;
209 u32 count = pool->size - atomic_read(&pool->available);
210 u32 buffers_added = 0;
211 struct sk_buff *skb;
212 unsigned int free_index, index;
213 u64 correlator;
214 unsigned long lpar_rc;
215 dma_addr_t dma_addr;
216
217 mb();
218
219 for (i = 0; i < count; ++i) {
220 union ibmveth_buf_desc desc;
221
222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
223
224 if (!skb) {
225 netdev_dbg(adapter->netdev,
226 "replenish: unable to allocate skb\n");
227 adapter->replenish_no_mem++;
228 break;
229 }
230
231 free_index = pool->consumer_index;
232 pool->consumer_index++;
233 if (pool->consumer_index >= pool->size)
234 pool->consumer_index = 0;
235 index = pool->free_map[free_index];
236
237 BUG_ON(index == IBM_VETH_INVALID_MAP);
238 BUG_ON(pool->skbuff[index] != NULL);
239
240 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
241 pool->buff_size, DMA_FROM_DEVICE);
242
243 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
244 goto failure;
245
246 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
247 pool->dma_addr[index] = dma_addr;
248 pool->skbuff[index] = skb;
249
250 correlator = ((u64)pool->index << 32) | index;
251 *(u64 *)skb->data = correlator;
252
253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
254 desc.fields.address = dma_addr;
255
256 if (rx_flush) {
257 unsigned int len = min(pool->buff_size,
258 adapter->netdev->mtu +
259 IBMVETH_BUFF_OH);
260 ibmveth_flush_buffer(skb->data, len);
261 }
262 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
263 desc.desc);
264
265 if (lpar_rc != H_SUCCESS) {
266 goto failure;
267 } else {
268 buffers_added++;
269 adapter->replenish_add_buff_success++;
270 }
271 }
272
273 mb();
274 atomic_add(buffers_added, &(pool->available));
275 return;
276
277failure:
278 pool->free_map[free_index] = index;
279 pool->skbuff[index] = NULL;
280 if (pool->consumer_index == 0)
281 pool->consumer_index = pool->size - 1;
282 else
283 pool->consumer_index--;
284 if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
285 dma_unmap_single(&adapter->vdev->dev,
286 pool->dma_addr[index], pool->buff_size,
287 DMA_FROM_DEVICE);
288 dev_kfree_skb_any(skb);
289 adapter->replenish_add_buff_failure++;
290
291 mb();
292 atomic_add(buffers_added, &(pool->available));
293}
294
/* replenish routine: top up any active pool that has fallen below
 * its threshold
 */
296static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
297{
298 int i;
299
300 adapter->replenish_task_cycles++;
301
302 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
303 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
304
305 if (pool->active &&
306 (atomic_read(&pool->available) < pool->threshold))
307 ibmveth_replenish_buffer_pool(adapter, pool);
308 }
309
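	/* The last 8 bytes of the buffer list page hold a count,
	 * maintained by the hypervisor, of frames dropped because no
	 * receive buffer was available.
	 */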
	adapter->rx_no_buffer = *(u64 *)((char *)adapter->buffer_list_addr +
					 4096 - 8);
312}
313
/* empty and free a buffer pool - also used to do cleanup in error paths */
315static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
316 struct ibmveth_buff_pool *pool)
317{
318 int i;
319
320 kfree(pool->free_map);
321 pool->free_map = NULL;
322
323 if (pool->skbuff && pool->dma_addr) {
324 for (i = 0; i < pool->size; ++i) {
325 struct sk_buff *skb = pool->skbuff[i];
326 if (skb) {
327 dma_unmap_single(&adapter->vdev->dev,
328 pool->dma_addr[i],
329 pool->buff_size,
330 DMA_FROM_DEVICE);
331 dev_kfree_skb_any(skb);
332 pool->skbuff[i] = NULL;
333 }
334 }
335 }
336
337 if (pool->dma_addr) {
338 kfree(pool->dma_addr);
339 pool->dma_addr = NULL;
340 }
341
342 if (pool->skbuff) {
343 kfree(pool->skbuff);
344 pool->skbuff = NULL;
345 }
346}
347
348/* remove a buffer from a pool */
349static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
350 u64 correlator)
351{
352 unsigned int pool = correlator >> 32;
353 unsigned int index = correlator & 0xffffffffUL;
354 unsigned int free_index;
355 struct sk_buff *skb;
356
357 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
358 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
359
360 skb = adapter->rx_buff_pool[pool].skbuff[index];
361
362 BUG_ON(skb == NULL);
363
364 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
365
366 dma_unmap_single(&adapter->vdev->dev,
367 adapter->rx_buff_pool[pool].dma_addr[index],
368 adapter->rx_buff_pool[pool].buff_size,
369 DMA_FROM_DEVICE);
370
371 free_index = adapter->rx_buff_pool[pool].producer_index;
372 adapter->rx_buff_pool[pool].producer_index++;
373 if (adapter->rx_buff_pool[pool].producer_index >=
374 adapter->rx_buff_pool[pool].size)
375 adapter->rx_buff_pool[pool].producer_index = 0;
376 adapter->rx_buff_pool[pool].free_map[free_index] = index;
377
378 mb();
379
380 atomic_dec(&(adapter->rx_buff_pool[pool].available));
381}
382
383/* get the current buffer on the rx queue */
384static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
385{
386 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
387 unsigned int pool = correlator >> 32;
388 unsigned int index = correlator & 0xffffffffUL;
389
390 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
391 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
392
393 return adapter->rx_buff_pool[pool].skbuff[index];
394}
395
396/* recycle the current buffer on the rx queue */
397static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
398{
399 u32 q_index = adapter->rx_queue.index;
400 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
401 unsigned int pool = correlator >> 32;
402 unsigned int index = correlator & 0xffffffffUL;
403 union ibmveth_buf_desc desc;
404 unsigned long lpar_rc;
405 int ret = 1;
406
407 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
408 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
409
410 if (!adapter->rx_buff_pool[pool].active) {
411 ibmveth_rxq_harvest_buffer(adapter);
412 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
413 goto out;
414 }
415
416 desc.fields.flags_len = IBMVETH_BUF_VALID |
417 adapter->rx_buff_pool[pool].buff_size;
418 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
419
420 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
421
	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld\n", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, correlator);
		ret = 0;
427 }
428
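	/* The toggle bit flips each time the receive queue index wraps;
	 * entries whose toggle matches adapter->rx_queue.toggle are the
	 * ones the hypervisor has written since we last passed this slot
	 * (see ibmveth_rxq_pending_buffer()).
	 */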
429 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
430 adapter->rx_queue.index = 0;
431 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
432 }
433
434out:
435 return ret;
436}
437
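/* Unlike ibmveth_rxq_recycle_buffer(), which hands the current buffer
 * straight back to the hypervisor, harvesting removes it from its pool
 * entirely, e.g. because the skb is about to be passed up the stack.
 */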
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	u64 cor;

	cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	ibmveth_remove_buffer_from_pool(adapter, cor);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
447
448static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
449{
450 int i;
451 struct device *dev = &adapter->vdev->dev;
452
453 if (adapter->buffer_list_addr != NULL) {
454 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
455 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
456 DMA_BIDIRECTIONAL);
457 adapter->buffer_list_dma = DMA_ERROR_CODE;
458 }
459 free_page((unsigned long)adapter->buffer_list_addr);
460 adapter->buffer_list_addr = NULL;
461 }
462
463 if (adapter->filter_list_addr != NULL) {
464 if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
465 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
466 DMA_BIDIRECTIONAL);
467 adapter->filter_list_dma = DMA_ERROR_CODE;
468 }
469 free_page((unsigned long)adapter->filter_list_addr);
470 adapter->filter_list_addr = NULL;
471 }
472
473 if (adapter->rx_queue.queue_addr != NULL) {
474 dma_free_coherent(dev, adapter->rx_queue.queue_len,
475 adapter->rx_queue.queue_addr,
476 adapter->rx_queue.queue_dma);
477 adapter->rx_queue.queue_addr = NULL;
478 }
479
480 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
481 if (adapter->rx_buff_pool[i].active)
482 ibmveth_free_buffer_pool(adapter,
483 &adapter->rx_buff_pool[i]);
484
485 if (adapter->bounce_buffer != NULL) {
486 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
487 dma_unmap_single(&adapter->vdev->dev,
488 adapter->bounce_buffer_dma,
489 adapter->netdev->mtu + IBMVETH_BUFF_OH,
490 DMA_BIDIRECTIONAL);
491 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
492 }
493 kfree(adapter->bounce_buffer);
494 adapter->bounce_buffer = NULL;
495 }
496}
497
498static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
499 union ibmveth_buf_desc rxq_desc, u64 mac_address)
500{
501 int rc, try_again = 1;
502
	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. If that happens, free the logical LAN and
	 * retry the registration, but only once.
	 */
508retry:
509 rc = h_register_logical_lan(adapter->vdev->unit_address,
510 adapter->buffer_list_dma, rxq_desc.desc,
511 adapter->filter_list_dma, mac_address);
512
513 if (rc != H_SUCCESS && try_again) {
514 do {
515 rc = h_free_logical_lan(adapter->vdev->unit_address);
516 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
517
518 try_again = 0;
519 goto retry;
520 }
521
522 return rc;
523}
524
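/* Pack the 6-byte Ethernet address into the low 48 bits of a u64,
 * most significant byte first; e.g. 00:11:22:33:44:55 becomes
 * 0x0000001122334455.
 */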
525static u64 ibmveth_encode_mac_addr(u8 *mac)
526{
527 int i;
528 u64 encoded = 0;
529
530 for (i = 0; i < ETH_ALEN; i++)
531 encoded = (encoded << 8) | mac[i];
532
533 return encoded;
534}
535
536static int ibmveth_open(struct net_device *netdev)
537{
538 struct ibmveth_adapter *adapter = netdev_priv(netdev);
539 u64 mac_address;
540 int rxq_entries = 1;
541 unsigned long lpar_rc;
542 int rc;
543 union ibmveth_buf_desc rxq_desc;
544 int i;
545 struct device *dev;
546
547 netdev_dbg(netdev, "open starting\n");
548
549 napi_enable(&adapter->napi);
550
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
556
557 if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
558 netdev_err(netdev, "unable to allocate filter or buffer list "
559 "pages\n");
560 rc = -ENOMEM;
561 goto err_out;
562 }
563
564 dev = &adapter->vdev->dev;
565
566 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
567 rxq_entries;
568 adapter->rx_queue.queue_addr =
569 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
570 &adapter->rx_queue.queue_dma, GFP_KERNEL);
571 if (!adapter->rx_queue.queue_addr) {
572 rc = -ENOMEM;
573 goto err_out;
574 }
575
576 adapter->buffer_list_dma = dma_map_single(dev,
577 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
578 adapter->filter_list_dma = dma_map_single(dev,
579 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
580
581 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
582 (dma_mapping_error(dev, adapter->filter_list_dma))) {
583 netdev_err(netdev, "unable to map filter or buffer list "
584 "pages\n");
585 rc = -ENOMEM;
586 goto err_out;
587 }
588
589 adapter->rx_queue.index = 0;
590 adapter->rx_queue.num_slots = rxq_entries;
591 adapter->rx_queue.toggle = 1;
592
593 mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
594
595 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
596 adapter->rx_queue.queue_len;
597 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
598
599 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
600 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
601 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
602
603 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
604
605 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
606
607 if (lpar_rc != H_SUCCESS) {
608 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
609 lpar_rc);
610 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
611 "desc:0x%llx MAC:0x%llx\n",
612 adapter->buffer_list_dma,
613 adapter->filter_list_dma,
614 rxq_desc.desc,
615 mac_address);
616 rc = -ENONET;
617 goto err_out;
618 }
619
620 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
621 if (!adapter->rx_buff_pool[i].active)
622 continue;
623 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
624 netdev_err(netdev, "unable to alloc pool\n");
625 adapter->rx_buff_pool[i].active = 0;
626 rc = -ENOMEM;
627 goto err_out;
628 }
629 }
630
631 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
632 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
633 netdev);
634 if (rc != 0) {
635 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
636 netdev->irq, rc);
637 do {
638 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
639 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
640
641 goto err_out;
642 }
643
644 adapter->bounce_buffer =
645 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
646 if (!adapter->bounce_buffer) {
647 rc = -ENOMEM;
648 goto err_out_free_irq;
649 }
650 adapter->bounce_buffer_dma =
651 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
652 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
653 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
654 netdev_err(netdev, "unable to map bounce buffer\n");
655 rc = -ENOMEM;
656 goto err_out_free_irq;
657 }
658
659 netdev_dbg(netdev, "initial replenish cycle\n");
660 ibmveth_interrupt(netdev->irq, netdev);
661
662 netif_start_queue(netdev);
663
664 netdev_dbg(netdev, "open complete\n");
665
666 return 0;
667
668err_out_free_irq:
669 free_irq(netdev->irq, netdev);
670err_out:
671 ibmveth_cleanup(adapter);
672 napi_disable(&adapter->napi);
673 return rc;
674}
675
676static int ibmveth_close(struct net_device *netdev)
677{
678 struct ibmveth_adapter *adapter = netdev_priv(netdev);
679 long lpar_rc;
680
681 netdev_dbg(netdev, "close starting\n");
682
683 napi_disable(&adapter->napi);
684
685 if (!adapter->pool_config)
686 netif_stop_queue(netdev);
687
688 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
689
690 do {
691 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
692 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
693
694 if (lpar_rc != H_SUCCESS) {
695 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
696 "continuing with close\n", lpar_rc);
697 }
698
699 free_irq(netdev->irq, netdev);
700
701 adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
702 4096 - 8);
703
704 ibmveth_cleanup(adapter);
705
706 netdev_dbg(netdev, "close complete\n");
707
708 return 0;
709}
710
711static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
712{
713 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
714 SUPPORTED_FIBRE);
715 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
716 ADVERTISED_FIBRE);
717 ethtool_cmd_speed_set(cmd, SPEED_1000);
718 cmd->duplex = DUPLEX_FULL;
719 cmd->port = PORT_FIBRE;
720 cmd->phy_address = 0;
721 cmd->transceiver = XCVR_INTERNAL;
722 cmd->autoneg = AUTONEG_ENABLE;
723 cmd->maxtxpkt = 0;
724 cmd->maxrxpkt = 1;
725 return 0;
726}
727
728static void netdev_get_drvinfo(struct net_device *dev,
729 struct ethtool_drvinfo *info)
730{
731 strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
732 strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
733}
734
735static netdev_features_t ibmveth_fix_features(struct net_device *dev,
736 netdev_features_t features)
737{
738 /*
739 * Since the ibmveth firmware interface does not have the
740 * concept of separate tx/rx checksum offload enable, if rx
741 * checksum is disabled we also have to disable tx checksum
742 * offload. Once we disable rx checksum offload, we are no
743 * longer allowed to send tx buffers that are not properly
744 * checksummed.
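	 *
	 * For example, disabling rx checksum offload with
	 * "ethtool -K ethX rx off" ends up here via netdev_update_features()
	 * and clears the tx checksum feature bits as well.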
745 */
746
747 if (!(features & NETIF_F_RXCSUM))
748 features &= ~NETIF_F_ALL_CSUM;
749
750 return features;
751}
752
753static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
754{
755 struct ibmveth_adapter *adapter = netdev_priv(dev);
756 unsigned long set_attr, clr_attr, ret_attr;
757 unsigned long set_attr6, clr_attr6;
758 long ret, ret4, ret6;
759 int rc1 = 0, rc2 = 0;
760 int restart = 0;
761
762 if (netif_running(dev)) {
763 restart = 1;
764 adapter->pool_config = 1;
765 ibmveth_close(dev);
766 adapter->pool_config = 0;
767 }
768
769 set_attr = 0;
770 clr_attr = 0;
771 set_attr6 = 0;
772 clr_attr6 = 0;
773
774 if (data) {
775 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
776 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
777 } else {
778 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
779 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
780 }
781
782 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
783
784 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
785 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
786 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
787 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
788 set_attr, &ret_attr);
789
790 if (ret4 != H_SUCCESS) {
791 netdev_err(dev, "unable to change IPv4 checksum "
792 "offload settings. %d rc=%ld\n",
793 data, ret4);
794
795 h_illan_attributes(adapter->vdev->unit_address,
796 set_attr, clr_attr, &ret_attr);
797
798 if (data == 1)
799 dev->features &= ~NETIF_F_IP_CSUM;
800
801 } else {
802 adapter->fw_ipv4_csum_support = data;
803 }
804
805 ret6 = h_illan_attributes(adapter->vdev->unit_address,
806 clr_attr6, set_attr6, &ret_attr);
807
808 if (ret6 != H_SUCCESS) {
809 netdev_err(dev, "unable to change IPv6 checksum "
810 "offload settings. %d rc=%ld\n",
811 data, ret6);
812
813 h_illan_attributes(adapter->vdev->unit_address,
814 set_attr6, clr_attr6, &ret_attr);
815
816 if (data == 1)
817 dev->features &= ~NETIF_F_IPV6_CSUM;
818
		} else {
			adapter->fw_ipv6_csum_support = data;
		}
821
822 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
823 adapter->rx_csum = data;
824 else
825 rc1 = -EIO;
826 } else {
827 rc1 = -EIO;
828 netdev_err(dev, "unable to change checksum offload settings."
829 " %d rc=%ld ret_attr=%lx\n", data, ret,
830 ret_attr);
831 }
832
833 if (restart)
834 rc2 = ibmveth_open(dev);
835
836 return rc1 ? rc1 : rc2;
837}
838
839static int ibmveth_set_features(struct net_device *dev,
840 netdev_features_t features)
841{
842 struct ibmveth_adapter *adapter = netdev_priv(dev);
843 int rx_csum = !!(features & NETIF_F_RXCSUM);
844 int rc;
845
846 if (rx_csum == adapter->rx_csum)
847 return 0;
848
849 rc = ibmveth_set_csum_offload(dev, rx_csum);
850 if (rc && !adapter->rx_csum)
851 dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
852
853 return rc;
854}
855
856static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
857{
858 int i;
859
860 if (stringset != ETH_SS_STATS)
861 return;
862
863 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
864 memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
865}
866
867static int ibmveth_get_sset_count(struct net_device *dev, int sset)
868{
869 switch (sset) {
870 case ETH_SS_STATS:
871 return ARRAY_SIZE(ibmveth_stats);
872 default:
873 return -EOPNOTSUPP;
874 }
875}
876
877static void ibmveth_get_ethtool_stats(struct net_device *dev,
878 struct ethtool_stats *stats, u64 *data)
879{
880 int i;
881 struct ibmveth_adapter *adapter = netdev_priv(dev);
882
883 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
884 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
885}
886
887static const struct ethtool_ops netdev_ethtool_ops = {
888 .get_drvinfo = netdev_get_drvinfo,
889 .get_settings = netdev_get_settings,
890 .get_link = ethtool_op_get_link,
891 .get_strings = ibmveth_get_strings,
892 .get_sset_count = ibmveth_get_sset_count,
893 .get_ethtool_stats = ibmveth_get_ethtool_stats,
894};
895
896static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
897{
898 return -EOPNOTSUPP;
899}
900
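/* Offset of an address within its 4 KiB page; e.g. a (hypothetical)
 * buffer at 0x10001234 gives page_offset() == 0x234.
 */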
901#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
902
903static int ibmveth_send(struct ibmveth_adapter *adapter,
904 union ibmveth_buf_desc *descs)
905{
906 unsigned long correlator;
907 unsigned int retry_count;
908 unsigned long ret;
909
910 /*
911 * The retry count sets a maximum for the number of broadcast and
912 * multicast destinations within the system.
913 */
914 retry_count = 1024;
915 correlator = 0;
916 do {
917 ret = h_send_logical_lan(adapter->vdev->unit_address,
918 descs[0].desc, descs[1].desc,
919 descs[2].desc, descs[3].desc,
920 descs[4].desc, descs[5].desc,
921 correlator, &correlator);
922 } while ((ret == H_BUSY) && (retry_count--));
923
924 if (ret != H_SUCCESS && ret != H_DROPPED) {
925 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
926 "with rc=%ld\n", ret);
927 return 1;
928 }
929
930 return 0;
931}
932
933static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
934 struct net_device *netdev)
935{
936 struct ibmveth_adapter *adapter = netdev_priv(netdev);
937 unsigned int desc_flags;
938 union ibmveth_buf_desc descs[6];
939 int last, i;
940 int force_bounce = 0;
941 dma_addr_t dma_addr;
942
943 /*
944 * veth handles a maximum of 6 segments including the header, so
945 * we have to linearize the skb if there are more than this.
946 */
947 if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
948 netdev->stats.tx_dropped++;
949 goto out;
950 }
951
952 /* veth can't checksum offload UDP */
953 if (skb->ip_summed == CHECKSUM_PARTIAL &&
954 ((skb->protocol == htons(ETH_P_IP) &&
955 ip_hdr(skb)->protocol != IPPROTO_TCP) ||
956 (skb->protocol == htons(ETH_P_IPV6) &&
957 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
958 skb_checksum_help(skb)) {
959
960 netdev_err(netdev, "tx: failed to checksum packet\n");
961 netdev->stats.tx_dropped++;
962 goto out;
963 }
964
965 desc_flags = IBMVETH_BUF_VALID;
966
967 if (skb->ip_summed == CHECKSUM_PARTIAL) {
968 unsigned char *buf = skb_transport_header(skb) +
969 skb->csum_offset;
970
971 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
972
973 /* Need to zero out the checksum */
974 buf[0] = 0;
975 buf[1] = 0;
976 }
977
978retry_bounce:
979 memset(descs, 0, sizeof(descs));
980
	/*
	 * If a linear packet is below the tx_copybreak threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
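	/* Example: with the default tx_copybreak of 128, a 60-byte ARP
	 * request, say, is copied through the bounce buffer rather than
	 * TCE-mapped and unmapped.
	 */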
986 if (force_bounce || (!skb_is_nonlinear(skb) &&
987 (skb->len < tx_copybreak))) {
988 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
989 skb->len);
990
991 descs[0].fields.flags_len = desc_flags | skb->len;
992 descs[0].fields.address = adapter->bounce_buffer_dma;
993
994 if (ibmveth_send(adapter, descs)) {
995 adapter->tx_send_failed++;
996 netdev->stats.tx_dropped++;
997 } else {
998 netdev->stats.tx_packets++;
999 netdev->stats.tx_bytes += skb->len;
1000 }
1001
1002 goto out;
1003 }
1004
1005 /* Map the header */
1006 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
1007 skb_headlen(skb), DMA_TO_DEVICE);
1008 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1009 goto map_failed;
1010
1011 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1012 descs[0].fields.address = dma_addr;
1013
1014 /* Map the frags */
1015 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1016 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1017
1018 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
1019 skb_frag_size(frag), DMA_TO_DEVICE);
1020
1021 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1022 goto map_failed_frags;
1023
1024 descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
1025 descs[i+1].fields.address = dma_addr;
1026 }
1027
1028 if (ibmveth_send(adapter, descs)) {
1029 adapter->tx_send_failed++;
1030 netdev->stats.tx_dropped++;
1031 } else {
1032 netdev->stats.tx_packets++;
1033 netdev->stats.tx_bytes += skb->len;
1034 }
1035
1036 dma_unmap_single(&adapter->vdev->dev,
1037 descs[0].fields.address,
1038 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1039 DMA_TO_DEVICE);
1040
1041 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1042 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1043 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1044 DMA_TO_DEVICE);
1045
1046out:
1047 dev_consume_skb_any(skb);
1048 return NETDEV_TX_OK;
1049
map_failed_frags:
	/* descs[0] holds the header, mapped with dma_map_single(); the
	 * frag descriptors were mapped with skb_frag_dma_map(), so unmap
	 * each with the matching call.
	 */
	last = i+1;
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);
1056
1057map_failed:
1058 if (!firmware_has_feature(FW_FEATURE_CMO))
1059 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1060 adapter->tx_map_failed++;
1061 skb_linearize(skb);
1062 force_bounce = 1;
1063 goto retry_bounce;
1064}
1065
1066static int ibmveth_poll(struct napi_struct *napi, int budget)
1067{
1068 struct ibmveth_adapter *adapter =
1069 container_of(napi, struct ibmveth_adapter, napi);
1070 struct net_device *netdev = adapter->netdev;
1071 int frames_processed = 0;
1072 unsigned long lpar_rc;
1073
1074restart_poll:
1075 while (frames_processed < budget) {
1076 if (!ibmveth_rxq_pending_buffer(adapter))
1077 break;
1078
1079 smp_rmb();
1080 if (!ibmveth_rxq_buffer_valid(adapter)) {
1081 wmb(); /* suggested by larson1 */
1082 adapter->rx_invalid_buffer++;
1083 netdev_dbg(netdev, "recycling invalid buffer\n");
1084 ibmveth_rxq_recycle_buffer(adapter);
1085 } else {
1086 struct sk_buff *skb, *new_skb;
1087 int length = ibmveth_rxq_frame_length(adapter);
1088 int offset = ibmveth_rxq_frame_offset(adapter);
1089 int csum_good = ibmveth_rxq_csum_good(adapter);
1090
1091 skb = ibmveth_rxq_get_buffer(adapter);
1092
1093 new_skb = NULL;
1094 if (length < rx_copybreak)
1095 new_skb = netdev_alloc_skb(netdev, length);
1096
1097 if (new_skb) {
1098 skb_copy_to_linear_data(new_skb,
1099 skb->data + offset,
1100 length);
1101 if (rx_flush)
1102 ibmveth_flush_buffer(skb->data,
1103 length + offset);
1104 if (!ibmveth_rxq_recycle_buffer(adapter))
1105 kfree_skb(skb);
1106 skb = new_skb;
1107 } else {
1108 ibmveth_rxq_harvest_buffer(adapter);
1109 skb_reserve(skb, offset);
1110 }
1111
1112 skb_put(skb, length);
1113 skb->protocol = eth_type_trans(skb, netdev);
1114
1115 if (csum_good)
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1117
1118 netif_receive_skb(skb); /* send it up */
1119
1120 netdev->stats.rx_packets++;
1121 netdev->stats.rx_bytes += length;
1122 frames_processed++;
1123 }
1124 }
1125
1126 ibmveth_replenish_task(adapter);
1127
1128 if (frames_processed < budget) {
1129 /* We think we are done - reenable interrupts,
1130 * then check once more to make sure we are done.
1131 */
1132 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1133 VIO_IRQ_ENABLE);
1134
1135 BUG_ON(lpar_rc != H_SUCCESS);
1136
1137 napi_complete(napi);
1138
1139 if (ibmveth_rxq_pending_buffer(adapter) &&
1140 napi_reschedule(napi)) {
1141 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1142 VIO_IRQ_DISABLE);
1143 goto restart_poll;
1144 }
1145 }
1146
1147 return frames_processed;
1148}
1149
1150static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1151{
1152 struct net_device *netdev = dev_instance;
1153 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1154 unsigned long lpar_rc;
1155
1156 if (napi_schedule_prep(&adapter->napi)) {
1157 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1158 VIO_IRQ_DISABLE);
1159 BUG_ON(lpar_rc != H_SUCCESS);
1160 __napi_schedule(&adapter->napi);
1161 }
1162 return IRQ_HANDLED;
1163}
1164
1165static void ibmveth_set_multicast_list(struct net_device *netdev)
1166{
1167 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1168 unsigned long lpar_rc;
1169
1170 if ((netdev->flags & IFF_PROMISC) ||
1171 (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1172 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1173 IbmVethMcastEnableRecv |
1174 IbmVethMcastDisableFiltering,
1175 0);
1176 if (lpar_rc != H_SUCCESS) {
1177 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1178 "entering promisc mode\n", lpar_rc);
1179 }
1180 } else {
1181 struct netdev_hw_addr *ha;
1182 /* clear the filter table & disable filtering */
1183 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1184 IbmVethMcastEnableRecv |
1185 IbmVethMcastDisableFiltering |
1186 IbmVethMcastClearFilterTable,
1187 0);
1188 if (lpar_rc != H_SUCCESS) {
1189 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1190 "attempting to clear filter table\n",
1191 lpar_rc);
1192 }
1193 /* add the addresses to the filter table */
1194 netdev_for_each_mc_addr(ha, netdev) {
1195 /* add the multicast address to the filter table */
1196 u64 mcast_addr;
1197 mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1198 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1199 IbmVethMcastAddFilter,
1200 mcast_addr);
1201 if (lpar_rc != H_SUCCESS) {
1202 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1203 "when adding an entry to the filter "
1204 "table\n", lpar_rc);
1205 }
1206 }
1207
1208 /* re-enable filtering */
1209 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1210 IbmVethMcastEnableFiltering,
1211 0);
1212 if (lpar_rc != H_SUCCESS) {
1213 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1214 "enabling filtering\n", lpar_rc);
1215 }
1216 }
1217}
1218
1219static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1220{
1221 struct ibmveth_adapter *adapter = netdev_priv(dev);
1222 struct vio_dev *viodev = adapter->vdev;
1223 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1224 int i, rc;
1225 int need_restart = 0;
1226
1227 if (new_mtu < IBMVETH_MIN_MTU)
1228 return -EINVAL;
1229
1230 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1231 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
1232 break;
1233
1234 if (i == IBMVETH_NUM_BUFF_POOLS)
1235 return -EINVAL;
1236
	/* Deactivate all the buffer pools so that the next loop can activate
	 * only the buffer pools necessary to hold the new MTU
	 */
1239 if (netif_running(adapter->netdev)) {
1240 need_restart = 1;
1241 adapter->pool_config = 1;
1242 ibmveth_close(adapter->netdev);
1243 adapter->pool_config = 0;
1244 }
1245
1246 /* Look for an active buffer pool that can hold the new MTU */
1247 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1248 adapter->rx_buff_pool[i].active = 1;
1249
1250 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
1251 dev->mtu = new_mtu;
1252 vio_cmo_set_dev_desired(viodev,
1253 ibmveth_get_desired_dma
1254 (viodev));
1255 if (need_restart) {
1256 return ibmveth_open(adapter->netdev);
1257 }
1258 return 0;
1259 }
1260 }
1261
	if (need_restart) {
		rc = ibmveth_open(adapter->netdev);
		if (rc)
			return rc;
	}
1264
1265 return -EINVAL;
1266}
1267
1268#ifdef CONFIG_NET_POLL_CONTROLLER
1269static void ibmveth_poll_controller(struct net_device *dev)
1270{
1271 ibmveth_replenish_task(netdev_priv(dev));
1272 ibmveth_interrupt(dev->irq, dev);
1273}
1274#endif
1275
1276/**
1277 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1278 *
1279 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1280 *
1281 * Return value:
1282 * Number of bytes of IO data the driver will need to perform well.
1283 */
1284static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1285{
1286 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1287 struct ibmveth_adapter *adapter;
1288 struct iommu_table *tbl;
1289 unsigned long ret;
1290 int i;
1291 int rxqentries = 1;
1292
1293 tbl = get_iommu_table_base(&vdev->dev);
1294
	/* netdev inits at probe time along with the structures we need below */
1296 if (netdev == NULL)
1297 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1298
1299 adapter = netdev_priv(netdev);
1300
1301 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1302 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1303
1304 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1305 /* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret += adapter->rx_buff_pool[i].size *
			       IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size,
						tbl);
1311 rxqentries += adapter->rx_buff_pool[i].size;
1312 }
1313 /* add the size of the receive queue entries */
1314 ret += IOMMU_PAGE_ALIGN(
1315 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1316
1317 return ret;
1318}
1319
1320static const struct net_device_ops ibmveth_netdev_ops = {
1321 .ndo_open = ibmveth_open,
1322 .ndo_stop = ibmveth_close,
1323 .ndo_start_xmit = ibmveth_start_xmit,
1324 .ndo_set_rx_mode = ibmveth_set_multicast_list,
1325 .ndo_do_ioctl = ibmveth_ioctl,
1326 .ndo_change_mtu = ibmveth_change_mtu,
1327 .ndo_fix_features = ibmveth_fix_features,
1328 .ndo_set_features = ibmveth_set_features,
1329 .ndo_validate_addr = eth_validate_addr,
1330 .ndo_set_mac_address = eth_mac_addr,
1331#ifdef CONFIG_NET_POLL_CONTROLLER
1332 .ndo_poll_controller = ibmveth_poll_controller,
1333#endif
1334};
1335
1336static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1337{
1338 int rc, i, mac_len;
1339 struct net_device *netdev;
1340 struct ibmveth_adapter *adapter;
1341 unsigned char *mac_addr_p;
1342 unsigned int *mcastFilterSize_p;
1343
1344 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1345 dev->unit_address);
1346
1347 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1348 &mac_len);
1349 if (!mac_addr_p) {
1350 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1351 return -EINVAL;
1352 }
1353 /* Workaround for old/broken pHyp */
1354 if (mac_len == 8)
1355 mac_addr_p += 2;
1356 else if (mac_len != 6) {
1357 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1358 mac_len);
1359 return -EINVAL;
1360 }
1361
1362 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
1363 VETH_MCAST_FILTER_SIZE, NULL);
1364 if (!mcastFilterSize_p) {
1365 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1366 "attribute\n");
1367 return -EINVAL;
1368 }
1369
1370 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1371
1372 if (!netdev)
1373 return -ENOMEM;
1374
1375 adapter = netdev_priv(netdev);
1376 dev_set_drvdata(&dev->dev, netdev);
1377
1378 adapter->vdev = dev;
1379 adapter->netdev = netdev;
1380 adapter->mcastFilterSize = *mcastFilterSize_p;
1381 adapter->pool_config = 0;
1382
1383 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1384
1385 netdev->irq = dev->irq;
1386 netdev->netdev_ops = &ibmveth_netdev_ops;
1387 netdev->ethtool_ops = &netdev_ethtool_ops;
1388 SET_NETDEV_DEV(netdev, &dev->dev);
1389 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
1390 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1391 netdev->features |= netdev->hw_features;
1392
1393 memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1394
1395 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1396 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1397 int error;
1398
1399 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1400 pool_count[i], pool_size[i],
1401 pool_active[i]);
1402 error = kobject_init_and_add(kobj, &ktype_veth_pool,
1403 &dev->dev.kobj, "pool%d", i);
1404 if (!error)
1405 kobject_uevent(kobj, KOBJ_ADD);
1406 }
1407
1408 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1409
1410 adapter->buffer_list_dma = DMA_ERROR_CODE;
1411 adapter->filter_list_dma = DMA_ERROR_CODE;
1412 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1413
1414 netdev_dbg(netdev, "registering netdev...\n");
1415
1416 ibmveth_set_features(netdev, netdev->features);
1417
1418 rc = register_netdev(netdev);
1419
1420 if (rc) {
1421 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1422 free_netdev(netdev);
1423 return rc;
1424 }
1425
1426 netdev_dbg(netdev, "registered\n");
1427
1428 return 0;
1429}
1430
1431static int ibmveth_remove(struct vio_dev *dev)
1432{
1433 struct net_device *netdev = dev_get_drvdata(&dev->dev);
1434 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1435 int i;
1436
1437 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1438 kobject_put(&adapter->rx_buff_pool[i].kobj);
1439
1440 unregister_netdev(netdev);
1441
1442 free_netdev(netdev);
1443 dev_set_drvdata(&dev->dev, NULL);
1444
1445 return 0;
1446}
1447
1448static struct attribute veth_active_attr;
1449static struct attribute veth_num_attr;
1450static struct attribute veth_size_attr;
1451
1452static ssize_t veth_pool_show(struct kobject *kobj,
1453 struct attribute *attr, char *buf)
1454{
1455 struct ibmveth_buff_pool *pool = container_of(kobj,
1456 struct ibmveth_buff_pool,
1457 kobj);
1458
1459 if (attr == &veth_active_attr)
1460 return sprintf(buf, "%d\n", pool->active);
1461 else if (attr == &veth_num_attr)
1462 return sprintf(buf, "%d\n", pool->size);
1463 else if (attr == &veth_size_attr)
1464 return sprintf(buf, "%d\n", pool->buff_size);
1465 return 0;
1466}
1467
1468static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1469 const char *buf, size_t count)
1470{
1471 struct ibmveth_buff_pool *pool = container_of(kobj,
1472 struct ibmveth_buff_pool,
1473 kobj);
1474 struct net_device *netdev = dev_get_drvdata(
1475 container_of(kobj->parent, struct device, kobj));
1476 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1477 long value = simple_strtol(buf, NULL, 10);
1478 long rc;
1479
1480 if (attr == &veth_active_attr) {
1481 if (value && !pool->active) {
1482 if (netif_running(netdev)) {
1483 if (ibmveth_alloc_buffer_pool(pool)) {
1484 netdev_err(netdev,
1485 "unable to alloc pool\n");
1486 return -ENOMEM;
1487 }
1488 pool->active = 1;
1489 adapter->pool_config = 1;
1490 ibmveth_close(netdev);
1491 adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1494 } else {
1495 pool->active = 1;
1496 }
1497 } else if (!value && pool->active) {
1498 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1499 int i;
			/* Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU
			 */
1502 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1503 if (pool == &adapter->rx_buff_pool[i])
1504 continue;
1505 if (!adapter->rx_buff_pool[i].active)
1506 continue;
1507 if (mtu <= adapter->rx_buff_pool[i].buff_size)
1508 break;
1509 }
1510
1511 if (i == IBMVETH_NUM_BUFF_POOLS) {
1512 netdev_err(netdev, "no active pool >= MTU\n");
1513 return -EPERM;
1514 }
1515
1516 if (netif_running(netdev)) {
1517 adapter->pool_config = 1;
1518 ibmveth_close(netdev);
1519 pool->active = 0;
1520 adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1523 }
1524 pool->active = 0;
1525 }
1526 } else if (attr == &veth_num_attr) {
1527 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1528 return -EINVAL;
1529 } else {
1530 if (netif_running(netdev)) {
1531 adapter->pool_config = 1;
1532 ibmveth_close(netdev);
1533 adapter->pool_config = 0;
1534 pool->size = value;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1537 } else {
1538 pool->size = value;
1539 }
1540 }
1541 } else if (attr == &veth_size_attr) {
1542 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1543 return -EINVAL;
1544 } else {
1545 if (netif_running(netdev)) {
1546 adapter->pool_config = 1;
1547 ibmveth_close(netdev);
1548 adapter->pool_config = 0;
1549 pool->buff_size = value;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1552 } else {
1553 pool->buff_size = value;
1554 }
1555 }
1556 }
1557
1558 /* kick the interrupt handler to allocate/deallocate pools */
1559 ibmveth_interrupt(netdev->irq, netdev);
1560 return count;
1561}
1562
1563
1564#define ATTR(_name, _mode) \
1565 struct attribute veth_##_name##_attr = { \
1566 .name = __stringify(_name), .mode = _mode, \
1567 };
1568
1569static ATTR(active, 0644);
1570static ATTR(num, 0644);
1571static ATTR(size, 0644);
1572
1573static struct attribute *veth_pool_attrs[] = {
1574 &veth_active_attr,
1575 &veth_num_attr,
1576 &veth_size_attr,
1577 NULL,
1578};
1579
1580static const struct sysfs_ops veth_pool_ops = {
1581 .show = veth_pool_show,
1582 .store = veth_pool_store,
1583};
1584
1585static struct kobj_type ktype_veth_pool = {
1586 .release = NULL,
1587 .sysfs_ops = &veth_pool_ops,
1588 .default_attrs = veth_pool_attrs,
1589};
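/* Each pool appears as a sysfs directory beneath its vio device, e.g.
 * (with a hypothetical unit address):
 *
 *   /sys/devices/vio/30000002/pool0/{active,num,size}
 *
 * so "echo 1 > .../pool0/active" brings a pool online at runtime, subject
 * to the checks in veth_pool_store() above.
 */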
1590
1591static int ibmveth_resume(struct device *dev)
1592{
1593 struct net_device *netdev = dev_get_drvdata(dev);
1594 ibmveth_interrupt(netdev->irq, netdev);
1595 return 0;
1596}
1597
1598static struct vio_device_id ibmveth_device_table[] = {
1599 { "network", "IBM,l-lan"},
1600 { "", "" }
1601};
1602MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1603
1604static struct dev_pm_ops ibmveth_pm_ops = {
1605 .resume = ibmveth_resume
1606};
1607
1608static struct vio_driver ibmveth_driver = {
1609 .id_table = ibmveth_device_table,
1610 .probe = ibmveth_probe,
1611 .remove = ibmveth_remove,
1612 .get_desired_dma = ibmveth_get_desired_dma,
1613 .name = ibmveth_driver_name,
1614 .pm = &ibmveth_pm_ops,
1615};
1616
1617static int __init ibmveth_module_init(void)
1618{
1619 printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1620 ibmveth_driver_string, ibmveth_driver_version);
1621
1622 return vio_register_driver(&ibmveth_driver);
1623}
1624
1625static void __exit ibmveth_module_exit(void)
1626{
1627 vio_unregister_driver(&ibmveth_driver);
1628}
1629
1630module_init(ibmveth_module_init);
1631module_exit(ibmveth_module_exit);