/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

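/*
 * Rebuild the flat snapshot of every firmware handle (QPs, CQs, EQs and
 * memory regions) owned by the registered adapters.  The array is swapped
 * atomically under ehea_fw_handles.lock so that firmware resources can be
 * released outside the normal driver paths (e.g. on reboot or kexec); on
 * allocation failure the previous array is kept.
 */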
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

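/*
 * Rebuild the snapshot of broadcast/multicast (BCMC) registrations.  Each
 * MAC is registered twice per port - once untagged and once for all VLAN
 * IDs - which is why registrations are counted and written in pairs.
 */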
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
	return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

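/*
 * Refill receive queue 1 (the low-latency queue).  The skb array is a
 * power-of-two ring, so wrap-around is a mask with len - 1.  While
 * transfers are globally stopped (__EHEA_STOP_XFER, set during DLPAR
 * memory operations) the refill is deferred by parking the count in
 * os_skbs.
 */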
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

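/*
 * Fetch the skb posted for this completion and prefetch the next array
 * slot plus the start of its packet data, hiding cache-miss latency on
 * the following iteration of the poll loop.
 */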
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

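/*
 * Receive processing for one port resource.  Completions may refer to any
 * of the three receive queues: RQ1 completions carry the packet in the
 * immediate area behind the CQE header and are copied into a fresh skb,
 * while RQ2 (mid-sized) and RQ3 (full-sized) packets arrive in preposted
 * skbs.  All consumed WQEs are refilled in one batch at the end.
 */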
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

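/*
 * Sanity check the send queues: post a marker SWQE (wr_id ==
 * SWQE_RESTART_CHECK, flagged for purge) on every queue and wait for its
 * completion to be observed.  If the wait times out, the hardware and
 * software queues have diverged and the port is reset.
 */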
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i, k;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		k = 0;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}


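/*
 * Reap send completions: free skbs that went out via SWQE2 descriptors,
 * return entries to swqe_avail, and wake the TX queue once enough entries
 * are free again.  The stopped/avail recheck under the TX lock closes the
 * race with ehea_start_xmit() stopping the queue concurrently.
 */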
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

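/*
 * NAPI poll: drain send completions first, then receive up to the budget.
 * If the budget is not exhausted, complete NAPI and re-arm event
 * generation on both CQs; the subsequent re-poll catches completions that
 * slipped in while events were disarmed, in which case NAPI is
 * rescheduled.
 */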
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	/* Check the port before dereferencing it for the netdev */
	if (!port) {
		pr_err("unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;


	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);


	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

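/*
 * Register or deregister (H_REG_BCMC/H_DEREG_BCMC) the port's MAC address
 * as a broadcast receiver with the hypervisor.  Two registrations are
 * made: one for untagged frames and one covering all VLAN IDs.
 */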
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

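/*
 * Fill the checksum-offload fields shared by all transmit paths: the
 * hardware is told where the IP header starts and ends and where the L4
 * checksum field sits.  The same EHEA_SWQE_TCP_CHECKSUM control bit is
 * used for both TCP and UDP.
 */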
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

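/*
 * Transmit entry point.  Packets up to SWQE3_MAX_IMM bytes are copied
 * wholesale into the WQE immediate area and the skb is freed right away;
 * this path only requests a signalled completion every sig_comp_iv
 * packets.  Larger packets go out as SWQE2 with scatter/gather
 * descriptors, so the skb is parked in sq_skba until its completion
 * arrives.
 */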
2046static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2047{
2048 struct ehea_port *port = netdev_priv(dev);
2049 struct ehea_swqe *swqe;
2050 u32 lkey;
2051 int swqe_index;
2052 struct ehea_port_res *pr;
2053 struct netdev_queue *txq;
2054
2055 pr = &port->port_res[skb_get_queue_mapping(skb)];
2056 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2057
2058 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2059 memset(swqe, 0, SWQE_HEADER_SIZE);
2060 atomic_dec(&pr->swqe_avail);
2061
2062 if (vlan_tx_tag_present(skb)) {
2063 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2064 swqe->vlan_tag = vlan_tx_tag_get(skb);
2065 }
2066
2067 pr->tx_packets++;
2068 pr->tx_bytes += skb->len;
2069
2070 if (skb->len <= SWQE3_MAX_IMM) {
2071 u32 sig_iv = port->sig_comp_iv;
2072 u32 swqe_num = pr->swqe_id_counter;
2073 ehea_xmit3(skb, dev, swqe);
2074 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2075 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2076 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2077 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2078 sig_iv);
2079 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2080 pr->swqe_ll_count = 0;
2081 } else
2082 pr->swqe_ll_count += 1;
2083 } else {
2084 swqe->wr_id =
2085 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2086 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2087 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2088 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2089 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2090
2091 pr->sq_skba.index++;
2092 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2093
2094 lkey = pr->send_mr.lkey;
2095 ehea_xmit2(skb, dev, swqe, lkey);
2096 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2097 }
2098 pr->swqe_id_counter += 1;
2099
2100 netif_info(port, tx_queued, dev,
2101 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2102 if (netif_msg_tx_queued(port))
2103 ehea_dump(swqe, 512, "swqe");
2104
2105 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2106 netif_tx_stop_queue(txq);
2107 swqe->tx_control |= EHEA_SWQE_PURGE;
2108 }
2109
2110 ehea_post_swqe(pr->qp, swqe);
2111
2112 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2113 pr->p_stats.queue_stopped++;
2114 netif_tx_stop_queue(txq);
2115 }
2116
2117 return NETDEV_TX_OK;
2118}
2119
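/*
 * The port's VLAN filter in cb1 is a 4096-bit bitmap stored as u64
 * words, most significant bit first: vid / 64 selects the word and
 * 0x8000000000000000 >> (vid & 0x3F) the bit within it. For example,
 * vid 70 lands on bit 6 (counted from the MSB) of word 1.
 */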
2120static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2121{
2122 struct ehea_port *port = netdev_priv(dev);
2123 struct ehea_adapter *adapter = port->adapter;
2124 struct hcp_ehea_port_cb1 *cb1;
2125 int index;
2126 u64 hret;
2127 int err = 0;
2128
2129 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2130 if (!cb1) {
2131 pr_err("no mem for cb1\n");
2132 err = -ENOMEM;
2133 goto out;
2134 }
2135
2136 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2137 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2138 if (hret != H_SUCCESS) {
2139 pr_err("query_ehea_port failed\n");
2140 err = -EINVAL;
2141 goto out;
2142 }
2143
2144 index = (vid / 64);
2145 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2146
2147 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2148 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2149 if (hret != H_SUCCESS) {
2150 pr_err("modify_ehea_port failed\n");
2151 err = -EINVAL;
2152 }
2153out:
2154 free_page((unsigned long)cb1);
2155 return err;
2156}
2157
2158static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2159{
2160 struct ehea_port *port = netdev_priv(dev);
2161 struct ehea_adapter *adapter = port->adapter;
2162 struct hcp_ehea_port_cb1 *cb1;
2163 int index;
2164 u64 hret;
2165 int err = 0;
2166
2167 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2168 if (!cb1) {
2169 pr_err("no mem for cb1\n");
2170 err = -ENOMEM;
2171 goto out;
2172 }
2173
2174 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2175 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2176 if (hret != H_SUCCESS) {
2177 pr_err("query_ehea_port failed\n");
2178 err = -EINVAL;
2179 goto out;
2180 }
2181
2182 index = (vid / 64);
2183 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2184
2185 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2186 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2187 if (hret != H_SUCCESS) {
2188 pr_err("modify_ehea_port failed\n");
2189 err = -EINVAL;
2190 }
2191out:
2192 free_page((unsigned long)cb1);
2193 return err;
2194}
2195
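/*
 * Step the QP through its activation sequence via query/modify pairs:
 * STATE_INITIALIZED, then ENABLED | STATE_INITIALIZED, then
 * ENABLED | STATE_RDY2SND, re-reading the control block before each
 * transition.
 */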
2196static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2197{
2198 int ret = -EIO;
2199 u64 hret;
2200 u16 dummy16 = 0;
2201 u64 dummy64 = 0;
2202 struct hcp_modify_qp_cb0 *cb0;
2203
2204 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2205 if (!cb0) {
2206 ret = -ENOMEM;
2207 goto out;
2208 }
2209
2210 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2211 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2212 if (hret != H_SUCCESS) {
2213 pr_err("query_ehea_qp failed (1)\n");
2214 goto out;
2215 }
2216
2217 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2218 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2219 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2220 &dummy64, &dummy64, &dummy16, &dummy16);
2221 if (hret != H_SUCCESS) {
2222 pr_err("modify_ehea_qp failed (1)\n");
2223 goto out;
2224 }
2225
2226 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2227 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2228 if (hret != H_SUCCESS) {
2229 pr_err("query_ehea_qp failed (2)\n");
2230 goto out;
2231 }
2232
2233 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2234 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2235 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2236 &dummy64, &dummy64, &dummy16, &dummy16);
2237 if (hret != H_SUCCESS) {
2238 pr_err("modify_ehea_qp failed (2)\n");
2239 goto out;
2240 }
2241
2242 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2243 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2244 if (hret != H_SUCCESS) {
2245 pr_err("query_ehea_qp failed (3)\n");
2246 goto out;
2247 }
2248
2249 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2250 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2251 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2252 &dummy64, &dummy64, &dummy16, &dummy16);
2253 if (hret != H_SUCCESS) {
2254 pr_err("modify_ehea_qp failed (3)\n");
2255 goto out;
2256 }
2257
2258 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2259 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2260 if (hret != H_SUCCESS) {
2261 pr_err("query_ehea_qp failed (4)\n");
2262 goto out;
2263 }
2264
2265 ret = 0;
2266out:
2267 free_page((unsigned long)cb0);
2268 return ret;
2269}
2270
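/*
 * Create the per-port event queue and one full set of queue resources
 * (QP, CQs, RQs) per default QP. The receive CQ is sized to hold
 * completions from all three RQs, the send CQ at twice the SQ depth.
 */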
2271static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2272{
2273 int ret, i;
2274 	struct port_res_cfg pr_cfg;
2275 enum ehea_eq_type eq_type = EHEA_EQ;
2276
2277 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2278 EHEA_MAX_ENTRIES_EQ, 1);
2279 if (!port->qp_eq) {
2280 ret = -EINVAL;
2281 pr_err("ehea_create_eq failed (qp_eq)\n");
2282 goto out_kill_eq;
2283 }
2284
2285 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2286 pr_cfg.max_entries_scq = sq_entries * 2;
2287 pr_cfg.max_entries_sq = sq_entries;
2288 pr_cfg.max_entries_rq1 = rq1_entries;
2289 pr_cfg.max_entries_rq2 = rq2_entries;
2290 pr_cfg.max_entries_rq3 = rq3_entries;
2291
2299 for (i = 0; i < def_qps; i++) {
2300 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2301 if (ret)
2302 goto out_clean_pr;
2303 }
2310
2311 return 0;
2312
2313out_clean_pr:
2314 while (--i >= 0)
2315 ehea_clean_portres(port, &port->port_res[i]);
2316
2317out_kill_eq:
2318 ehea_destroy_eq(port->qp_eq);
2319 return ret;
2320}
2321
2322static int ehea_clean_all_portres(struct ehea_port *port)
2323{
2324 int ret = 0;
2325 int i;
2326
2327 for (i = 0; i < port->num_def_qps; i++)
2328 ret |= ehea_clean_portres(port, &port->port_res[i]);
2329
2330 ret |= ehea_destroy_eq(port->qp_eq);
2331
2332 return ret;
2333}
2334
2335static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2336{
2337 if (adapter->active_ports)
2338 return;
2339
2340 ehea_rem_mr(&adapter->mr);
2341}
2342
2343static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2344{
2345 if (adapter->active_ports)
2346 return 0;
2347
2348 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2349}
2350
2351static int ehea_up(struct net_device *dev)
2352{
2353 int ret, i;
2354 struct ehea_port *port = netdev_priv(dev);
2355
2356 if (port->state == EHEA_PORT_UP)
2357 return 0;
2358
2359 ret = ehea_port_res_setup(port, port->num_def_qps);
2360 if (ret) {
2361 		netdev_err(dev, "ehea_port_res_setup failed\n");
2362 goto out;
2363 }
2364
2365 /* Set default QP for this port */
2366 ret = ehea_configure_port(port);
2367 if (ret) {
2368 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2369 goto out_clean_pr;
2370 }
2371
2372 ret = ehea_reg_interrupts(dev);
2373 if (ret) {
2374 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2375 goto out_clean_pr;
2376 }
2377
2378 for (i = 0; i < port->num_def_qps; i++) {
2379 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2380 if (ret) {
2381 netdev_err(dev, "activate_qp failed\n");
2382 goto out_free_irqs;
2383 }
2384 }
2385
2386 for (i = 0; i < port->num_def_qps; i++) {
2387 ret = ehea_fill_port_res(&port->port_res[i]);
2388 if (ret) {
2389 			netdev_err(dev, "ehea_fill_port_res failed\n");
2390 goto out_free_irqs;
2391 }
2392 }
2393
2394 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2395 if (ret) {
2396 ret = -EIO;
2397 goto out_free_irqs;
2398 }
2399
2400 port->state = EHEA_PORT_UP;
2401
2402 ret = 0;
2403 goto out;
2404
2405out_free_irqs:
2406 ehea_free_interrupts(dev);
2407
2408out_clean_pr:
2409 ehea_clean_all_portres(port);
2410out:
2411 if (ret)
2412 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2413
2414 ehea_update_bcmc_registrations();
2415 ehea_update_firmware_handles();
2416
2417 return ret;
2418}
2419
2420static void port_napi_disable(struct ehea_port *port)
2421{
2422 int i;
2423
2424 for (i = 0; i < port->num_def_qps; i++)
2425 napi_disable(&port->port_res[i].napi);
2426}
2427
2428static void port_napi_enable(struct ehea_port *port)
2429{
2430 int i;
2431
2432 for (i = 0; i < port->num_def_qps; i++)
2433 napi_enable(&port->port_res[i].napi);
2434}
2435
2436static int ehea_open(struct net_device *dev)
2437{
2438 int ret;
2439 struct ehea_port *port = netdev_priv(dev);
2440
2441 mutex_lock(&port->port_lock);
2442
2443 netif_info(port, ifup, dev, "enabling port\n");
2444
2445 ret = ehea_up(dev);
2446 if (!ret) {
2447 port_napi_enable(port);
2448 netif_tx_start_all_queues(dev);
2449 }
2450
2451 mutex_unlock(&port->port_lock);
2452 schedule_delayed_work(&port->stats_work,
2453 round_jiffies_relative(msecs_to_jiffies(1000)));
2454
2455 return ret;
2456}
2457
2458static int ehea_down(struct net_device *dev)
2459{
2460 int ret;
2461 struct ehea_port *port = netdev_priv(dev);
2462
2463 if (port->state == EHEA_PORT_DOWN)
2464 return 0;
2465
2466 ehea_drop_multicast_list(dev);
2467 ehea_allmulti(dev, 0);
2468 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2469
2470 ehea_free_interrupts(dev);
2471
2472 port->state = EHEA_PORT_DOWN;
2473
2474 ehea_update_bcmc_registrations();
2475
2476 ret = ehea_clean_all_portres(port);
2477 if (ret)
2478 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2479
2480 ehea_update_firmware_handles();
2481
2482 return ret;
2483}
2484
2485static int ehea_stop(struct net_device *dev)
2486{
2487 int ret;
2488 struct ehea_port *port = netdev_priv(dev);
2489
2490 netif_info(port, ifdown, dev, "disabling port\n");
2491
2492 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2493 cancel_work_sync(&port->reset_task);
2494 cancel_delayed_work_sync(&port->stats_work);
2495 mutex_lock(&port->port_lock);
2496 netif_tx_stop_all_queues(dev);
2497 port_napi_disable(port);
2498 ret = ehea_down(dev);
2499 mutex_unlock(&port->port_lock);
2500 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2501 return ret;
2502}
2503
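/*
 * Operate on a stack copy of the QP so that walking the send queue to
 * mark WQEs purged does not advance the live QP's queue pointers.
 */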
2504static void ehea_purge_sq(struct ehea_qp *orig_qp)
2505{
2506 struct ehea_qp qp = *orig_qp;
2507 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2508 struct ehea_swqe *swqe;
2509 int wqe_index;
2510 int i;
2511
2512 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2513 swqe = ehea_get_swqe(&qp, &wqe_index);
2514 swqe->tx_control |= EHEA_SWQE_PURGE;
2515 }
2516}
2517
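/*
 * Wait up to 100ms per port resource until all signalled sends have
 * completed; the expected count excludes outstanding low-latency
 * SWQE3s, which do not signal a completion individually.
 */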
2518static void ehea_flush_sq(struct ehea_port *port)
2519{
2520 int i;
2521
2522 for (i = 0; i < port->num_def_qps; i++) {
2523 struct ehea_port_res *pr = &port->port_res[i];
2524 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2525 int ret;
2526
2527 ret = wait_event_timeout(port->swqe_avail_wq,
2528 atomic_read(&pr->swqe_avail) >= swqe_max,
2529 msecs_to_jiffies(100));
2530
2531 if (!ret) {
2532 pr_err("WARNING: sq not flushed completely\n");
2533 break;
2534 }
2535 }
2536}
2537
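/*
 * Quiesce all QPs before the memory map changes: mark every pending
 * SWQE purged, clear each QP's enable bit via the control register,
 * and deregister the shared memory regions that reference the old
 * memory layout.
 */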
2538static int ehea_stop_qps(struct net_device *dev)
2539{
2540 struct ehea_port *port = netdev_priv(dev);
2541 struct ehea_adapter *adapter = port->adapter;
2542 struct hcp_modify_qp_cb0 *cb0;
2543 int ret = -EIO;
2544 int dret;
2545 int i;
2546 u64 hret;
2547 u64 dummy64 = 0;
2548 u16 dummy16 = 0;
2549
2550 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2551 if (!cb0) {
2552 ret = -ENOMEM;
2553 goto out;
2554 }
2555
2556 for (i = 0; i < (port->num_def_qps); i++) {
2557 struct ehea_port_res *pr = &port->port_res[i];
2558 struct ehea_qp *qp = pr->qp;
2559
2560 /* Purge send queue */
2561 ehea_purge_sq(qp);
2562
2563 /* Disable queue pair */
2564 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2565 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2566 cb0);
2567 if (hret != H_SUCCESS) {
2568 pr_err("query_ehea_qp failed (1)\n");
2569 goto out;
2570 }
2571
2572 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2573 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2574
2575 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2576 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2577 1), cb0, &dummy64,
2578 &dummy64, &dummy16, &dummy16);
2579 if (hret != H_SUCCESS) {
2580 pr_err("modify_ehea_qp failed (1)\n");
2581 goto out;
2582 }
2583
2584 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2585 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2586 cb0);
2587 if (hret != H_SUCCESS) {
2588 pr_err("query_ehea_qp failed (2)\n");
2589 goto out;
2590 }
2591
2592 /* deregister shared memory regions */
2593 dret = ehea_rem_smrs(pr);
2594 if (dret) {
2595 pr_err("unreg shared memory region failed\n");
2596 goto out;
2597 }
2598 }
2599
2600 ret = 0;
2601out:
2602 free_page((unsigned long)cb0);
2603
2604 return ret;
2605}
2606
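/*
 * After re-registering the memory regions the receive lkey has
 * changed, so rewrite the key and re-translate the buffer address of
 * every posted RWQE in RQ2 and RQ3.
 */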
2607static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2608{
2609 struct ehea_qp qp = *orig_qp;
2610 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2611 struct ehea_rwqe *rwqe;
2612 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2613 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2614 struct sk_buff *skb;
2615 u32 lkey = pr->recv_mr.lkey;
2616
2618 int i;
2619 int index;
2620
2621 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2622 rwqe = ehea_get_next_rwqe(&qp, 2);
2623 rwqe->sg_list[0].l_key = lkey;
2624 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2625 skb = skba_rq2[index];
2626 if (skb)
2627 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2628 }
2629
2630 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2631 rwqe = ehea_get_next_rwqe(&qp, 3);
2632 rwqe->sg_list[0].l_key = lkey;
2633 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2634 skb = skba_rq3[index];
2635 if (skb)
2636 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2637 }
2638}
2639
2640static int ehea_restart_qps(struct net_device *dev)
2641{
2642 struct ehea_port *port = netdev_priv(dev);
2643 struct ehea_adapter *adapter = port->adapter;
2644 int ret = 0;
2645 int i;
2646
2647 struct hcp_modify_qp_cb0 *cb0;
2648 u64 hret;
2649 u64 dummy64 = 0;
2650 u16 dummy16 = 0;
2651
2652 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2653 if (!cb0) {
2654 ret = -ENOMEM;
2655 goto out;
2656 }
2657
2658 for (i = 0; i < (port->num_def_qps); i++) {
2659 struct ehea_port_res *pr = &port->port_res[i];
2660 struct ehea_qp *qp = pr->qp;
2661
2662 ret = ehea_gen_smrs(pr);
2663 if (ret) {
2664 netdev_err(dev, "creation of shared memory regions failed\n");
2665 goto out;
2666 }
2667
2668 ehea_update_rqs(qp, pr);
2669
2670 /* Enable queue pair */
2671 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2672 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2673 cb0);
2674 if (hret != H_SUCCESS) {
2675 netdev_err(dev, "query_ehea_qp failed (1)\n");
2676 goto out;
2677 }
2678
2679 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2680 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2681
2682 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2683 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2684 1), cb0, &dummy64,
2685 &dummy64, &dummy16, &dummy16);
2686 if (hret != H_SUCCESS) {
2687 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2688 goto out;
2689 }
2690
2691 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2692 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2693 cb0);
2694 if (hret != H_SUCCESS) {
2695 netdev_err(dev, "query_ehea_qp failed (2)\n");
2696 goto out;
2697 }
2698
2699 /* refill entire queue */
2700 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2701 ehea_refill_rq2(pr, 0);
2702 ehea_refill_rq3(pr, 0);
2703 }
2704out:
2705 free_page((unsigned long)cb0);
2706
2707 return ret;
2708}
2709
2710static void ehea_reset_port(struct work_struct *work)
2711{
2712 int ret;
2713 struct ehea_port *port =
2714 container_of(work, struct ehea_port, reset_task);
2715 struct net_device *dev = port->netdev;
2716
2717 mutex_lock(&dlpar_mem_lock);
2718 port->resets++;
2719 mutex_lock(&port->port_lock);
2720 netif_tx_disable(dev);
2721
2722 port_napi_disable(port);
2723
2724 ehea_down(dev);
2725
2726 ret = ehea_up(dev);
2727 if (ret)
2728 goto out;
2729
2730 ehea_set_multicast_list(dev);
2731
2732 netif_info(port, timer, dev, "reset successful\n");
2733
2734 port_napi_enable(port);
2735
2736 netif_tx_wake_all_queues(dev);
2737out:
2738 mutex_unlock(&port->port_lock);
2739 mutex_unlock(&dlpar_mem_lock);
2740}
2741
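/*
 * DLPAR memory change: quiesce every active port (stop TX, flush and
 * stop the QPs) and deregister the adapter-wide MR, then register a
 * new MR for the changed layout and restart the QPs. The memory
 * notifier sets __EHEA_STOP_XFER before calling in; it is cleared
 * here once the old MRs are gone.
 */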
2742static void ehea_rereg_mrs(void)
2743{
2744 int ret, i;
2745 struct ehea_adapter *adapter;
2746
2747 pr_info("LPAR memory changed - re-initializing driver\n");
2748
2749 list_for_each_entry(adapter, &adapter_list, list)
2750 if (adapter->active_ports) {
2751 /* Shutdown all ports */
2752 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2753 struct ehea_port *port = adapter->port[i];
2754 struct net_device *dev;
2755
2756 if (!port)
2757 continue;
2758
2759 dev = port->netdev;
2760
2761 if (dev->flags & IFF_UP) {
2762 mutex_lock(&port->port_lock);
2763 netif_tx_disable(dev);
2764 ehea_flush_sq(port);
2765 ret = ehea_stop_qps(dev);
2766 if (ret) {
2767 mutex_unlock(&port->port_lock);
2768 goto out;
2769 }
2770 port_napi_disable(port);
2771 mutex_unlock(&port->port_lock);
2772 }
2773 reset_sq_restart_flag(port);
2774 }
2775
2776 /* Unregister old memory region */
2777 ret = ehea_rem_mr(&adapter->mr);
2778 if (ret) {
2779 pr_err("unregister MR failed - driver inoperable!\n");
2780 goto out;
2781 }
2782 }
2783
2784 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2785
2786 list_for_each_entry(adapter, &adapter_list, list)
2787 if (adapter->active_ports) {
2788 /* Register new memory region */
2789 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2790 if (ret) {
2791 pr_err("register MR failed - driver inoperable!\n");
2792 goto out;
2793 }
2794
2795 /* Restart all ports */
2796 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2797 struct ehea_port *port = adapter->port[i];
2798
2799 if (port) {
2800 struct net_device *dev = port->netdev;
2801
2802 if (dev->flags & IFF_UP) {
2803 mutex_lock(&port->port_lock);
2804 ret = ehea_restart_qps(dev);
2805 if (!ret) {
2806 check_sqs(port);
2807 port_napi_enable(port);
2808 netif_tx_wake_all_queues(dev);
2809 } else {
2810 					netdev_err(dev, "Unable to restart QPs\n");
2811 }
2812 mutex_unlock(&port->port_lock);
2813 }
2814 }
2815 }
2816 }
2817 pr_info("re-initializing driver complete\n");
2818out:
2819 return;
2820}
2821
2822static void ehea_tx_watchdog(struct net_device *dev)
2823{
2824 struct ehea_port *port = netdev_priv(dev);
2825
2826 if (netif_carrier_ok(dev) &&
2827 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2828 ehea_schedule_port_reset(port);
2829}
2830
2831static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2832{
2833 struct hcp_query_ehea *cb;
2834 u64 hret;
2835 int ret;
2836
2837 cb = (void *)get_zeroed_page(GFP_KERNEL);
2838 if (!cb) {
2839 ret = -ENOMEM;
2840 goto out;
2841 }
2842
2843 hret = ehea_h_query_ehea(adapter->handle, cb);
2844
2845 if (hret != H_SUCCESS) {
2846 ret = -EIO;
2847 goto out_herr;
2848 }
2849
2850 adapter->max_mc_mac = cb->max_mc_mac - 1;
2851 ret = 0;
2852
2853out_herr:
2854 free_page((unsigned long)cb);
2855out:
2856 return ret;
2857}
2858
2859static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2860{
2861 struct hcp_ehea_port_cb4 *cb4;
2862 u64 hret;
2863 int ret = 0;
2864
2865 *jumbo = 0;
2866
2867 	/* (Try to) enable jumbo frames */
2868 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2869 if (!cb4) {
2870 pr_err("no mem for cb4\n");
2871 ret = -ENOMEM;
2872 goto out;
2873 } else {
2874 hret = ehea_h_query_ehea_port(port->adapter->handle,
2875 port->logical_port_id,
2876 H_PORT_CB4,
2877 H_PORT_CB4_JUMBO, cb4);
2878 if (hret == H_SUCCESS) {
2879 			if (cb4->jumbo_frame) {
2880 				*jumbo = 1;
2881 			} else {
2882 				cb4->jumbo_frame = 1;
2883 				hret = ehea_h_modify_ehea_port(
2884 						port->adapter->handle,
2885 						port->logical_port_id,
2886 						H_PORT_CB4,
2887 						H_PORT_CB4_JUMBO, cb4);
2890 if (hret == H_SUCCESS)
2891 *jumbo = 1;
2892 }
2893 } else
2894 ret = -EINVAL;
2895
2896 free_page((unsigned long)cb4);
2897 }
2898out:
2899 return ret;
2900}
2901
2902static ssize_t ehea_show_port_id(struct device *dev,
2903 struct device_attribute *attr, char *buf)
2904{
2905 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2906 	return sprintf(buf, "%u\n", port->logical_port_id);
2907}
2908
2909static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2910 NULL);
2911
2912static void __devinit logical_port_release(struct device *dev)
2913{
2914 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2915 of_node_put(port->ofdev.dev.of_node);
2916}
2917
2918static struct device *ehea_register_port(struct ehea_port *port,
2919 struct device_node *dn)
2920{
2921 int ret;
2922
2923 port->ofdev.dev.of_node = of_node_get(dn);
2924 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2925 port->ofdev.dev.bus = &ibmebus_bus_type;
2926
2927 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2928 port->ofdev.dev.release = logical_port_release;
2929
2930 ret = of_device_register(&port->ofdev);
2931 if (ret) {
2932 pr_err("failed to register device. ret=%d\n", ret);
2933 goto out;
2934 }
2935
2936 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2937 if (ret) {
2938 pr_err("failed to register attributes, ret=%d\n", ret);
2939 goto out_unreg_of_dev;
2940 }
2941
2942 return &port->ofdev.dev;
2943
2944out_unreg_of_dev:
2945 of_device_unregister(&port->ofdev);
2946out:
2947 return NULL;
2948}
2949
2950static void ehea_unregister_port(struct ehea_port *port)
2951{
2952 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2953 of_device_unregister(&port->ofdev);
2954}
2955
2956static const struct net_device_ops ehea_netdev_ops = {
2957 .ndo_open = ehea_open,
2958 .ndo_stop = ehea_stop,
2959 .ndo_start_xmit = ehea_start_xmit,
2960#ifdef CONFIG_NET_POLL_CONTROLLER
2961 .ndo_poll_controller = ehea_netpoll,
2962#endif
2963 .ndo_get_stats64 = ehea_get_stats64,
2964 .ndo_set_mac_address = ehea_set_mac_addr,
2965 .ndo_validate_addr = eth_validate_addr,
2966 .ndo_set_rx_mode = ehea_set_multicast_list,
2967 .ndo_change_mtu = ehea_change_mtu,
2968 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
2969 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
2970 .ndo_tx_timeout = ehea_tx_watchdog,
2971};
2972
2973static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2974 u32 logical_port_id,
2975 struct device_node *dn)
2976{
2977 int ret;
2978 struct net_device *dev;
2979 struct ehea_port *port;
2980 struct device *port_dev;
2981 int jumbo;
2982
2983 /* allocate memory for the port structures */
2984 dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2985
2986 if (!dev) {
2987 ret = -ENOMEM;
2988 goto out_err;
2989 }
2990
2991 port = netdev_priv(dev);
2992
2993 mutex_init(&port->port_lock);
2994 port->state = EHEA_PORT_DOWN;
2995 port->sig_comp_iv = sq_entries / 10;
2996
2997 port->adapter = adapter;
2998 port->netdev = dev;
2999 port->logical_port_id = logical_port_id;
3000
3001 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3002
3003 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3004 if (!port->mc_list) {
3005 ret = -ENOMEM;
3006 goto out_free_ethdev;
3007 }
3008
3009 INIT_LIST_HEAD(&port->mc_list->list);
3010
3011 ret = ehea_sense_port_attr(port);
3012 if (ret)
3013 goto out_free_mc_list;
3014
3015 netif_set_real_num_rx_queues(dev, port->num_def_qps);
3016 netif_set_real_num_tx_queues(dev, port->num_def_qps);
3017
3018 port_dev = ehea_register_port(port, dn);
3019 if (!port_dev)
3020 goto out_free_mc_list;
3021
3022 SET_NETDEV_DEV(dev, port_dev);
3023
3024 /* initialize net_device structure */
3025 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3026
3027 dev->netdev_ops = &ehea_netdev_ops;
3028 ehea_set_ethtool_ops(dev);
3029
3030 dev->hw_features = NETIF_F_SG | NETIF_F_TSO
3031 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
3032 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3033 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3034 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3035 | NETIF_F_RXCSUM;
3036 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3037 NETIF_F_IP_CSUM;
3038 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3039
3040 INIT_WORK(&port->reset_task, ehea_reset_port);
3041 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3042
3043 init_waitqueue_head(&port->swqe_avail_wq);
3044 init_waitqueue_head(&port->restart_wq);
3045
3046 	memset(&port->stats, 0, sizeof(port->stats));
3047 ret = register_netdev(dev);
3048 if (ret) {
3049 pr_err("register_netdev failed. ret=%d\n", ret);
3050 goto out_unreg_port;
3051 }
3052
3053 ret = ehea_get_jumboframe_status(port, &jumbo);
3054 if (ret)
3055 netdev_err(dev, "failed determining jumbo frame status\n");
3056
3057 netdev_info(dev, "Jumbo frames are %sabled\n",
3058 jumbo == 1 ? "en" : "dis");
3059
3060 adapter->active_ports++;
3061
3062 return port;
3063
3064out_unreg_port:
3065 ehea_unregister_port(port);
3066
3067out_free_mc_list:
3068 kfree(port->mc_list);
3069
3070out_free_ethdev:
3071 free_netdev(dev);
3072
3073out_err:
3074 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3075 logical_port_id, ret);
3076 return NULL;
3077}
3078
3079static void ehea_shutdown_single_port(struct ehea_port *port)
3080{
3081 struct ehea_adapter *adapter = port->adapter;
3082
3083 cancel_work_sync(&port->reset_task);
3084 cancel_delayed_work_sync(&port->stats_work);
3085 unregister_netdev(port->netdev);
3086 ehea_unregister_port(port);
3087 kfree(port->mc_list);
3088 free_netdev(port->netdev);
3089 adapter->active_ports--;
3090}
3091
3092static int ehea_setup_ports(struct ehea_adapter *adapter)
3093{
3094 struct device_node *lhea_dn;
3095 struct device_node *eth_dn = NULL;
3096
3097 const u32 *dn_log_port_id;
3098 int i = 0;
3099
3100 lhea_dn = adapter->ofdev->dev.of_node;
3101 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3102
3103 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3104 NULL);
3105 if (!dn_log_port_id) {
3106 pr_err("bad device node: eth_dn name=%s\n",
3107 eth_dn->full_name);
3108 continue;
3109 }
3110
3111 if (ehea_add_adapter_mr(adapter)) {
3112 pr_err("creating MR failed\n");
3113 of_node_put(eth_dn);
3114 return -EIO;
3115 }
3116
3117 adapter->port[i] = ehea_setup_single_port(adapter,
3118 *dn_log_port_id,
3119 eth_dn);
3120 if (adapter->port[i])
3121 netdev_info(adapter->port[i]->netdev,
3122 "logical port id #%d\n", *dn_log_port_id);
3123 else
3124 ehea_remove_adapter_mr(adapter);
3125
3126 i++;
3127 }
3128 return 0;
3129}
3130
3131static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3132 u32 logical_port_id)
3133{
3134 struct device_node *lhea_dn;
3135 struct device_node *eth_dn = NULL;
3136 const u32 *dn_log_port_id;
3137
3138 lhea_dn = adapter->ofdev->dev.of_node;
3139 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3140
3141 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3142 NULL);
3143 		if (dn_log_port_id && (*dn_log_port_id == logical_port_id))
3144 			return eth_dn;
3146 }
3147
3148 return NULL;
3149}
3150
3151static ssize_t ehea_probe_port(struct device *dev,
3152 struct device_attribute *attr,
3153 const char *buf, size_t count)
3154{
3155 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3156 struct ehea_port *port;
3157 struct device_node *eth_dn = NULL;
3158 int i;
3159
3160 u32 logical_port_id;
3161
3162 	sscanf(buf, "%u", &logical_port_id);
3163
3164 port = ehea_get_port(adapter, logical_port_id);
3165
3166 if (port) {
3167 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3168 logical_port_id);
3169 return -EINVAL;
3170 }
3171
3172 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3173
3174 if (!eth_dn) {
3175 pr_info("no logical port with id %d found\n", logical_port_id);
3176 return -EINVAL;
3177 }
3178
3179 if (ehea_add_adapter_mr(adapter)) {
3180 pr_err("creating MR failed\n");
3181 return -EIO;
3182 }
3183
3184 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3185
3186 of_node_put(eth_dn);
3187
3188 if (port) {
3189 for (i = 0; i < EHEA_MAX_PORTS; i++)
3190 if (!adapter->port[i]) {
3191 adapter->port[i] = port;
3192 break;
3193 }
3194
3195 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3196 logical_port_id);
3197 } else {
3198 ehea_remove_adapter_mr(adapter);
3199 return -EIO;
3200 }
3201
3202 return (ssize_t) count;
3203}
3204
3205static ssize_t ehea_remove_port(struct device *dev,
3206 struct device_attribute *attr,
3207 const char *buf, size_t count)
3208{
3209 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3210 struct ehea_port *port;
3211 int i;
3212 u32 logical_port_id;
3213
3214 	sscanf(buf, "%u", &logical_port_id);
3215
3216 port = ehea_get_port(adapter, logical_port_id);
3217
3218 if (port) {
3219 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3220 logical_port_id);
3221
3222 ehea_shutdown_single_port(port);
3223
3224 for (i = 0; i < EHEA_MAX_PORTS; i++)
3225 if (adapter->port[i] == port) {
3226 adapter->port[i] = NULL;
3227 break;
3228 }
3229 } else {
3230 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3231 logical_port_id);
3232 return -EINVAL;
3233 }
3234
3235 ehea_remove_adapter_mr(adapter);
3236
3237 return (ssize_t) count;
3238}
3239
3240static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3241static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3242
3243static int ehea_create_device_sysfs(struct platform_device *dev)
3244{
3245 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3246 if (ret)
3247 goto out;
3248
3249 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3250out:
3251 return ret;
3252}
3253
3254static void ehea_remove_device_sysfs(struct platform_device *dev)
3255{
3256 device_remove_file(&dev->dev, &dev_attr_probe_port);
3257 device_remove_file(&dev->dev, &dev_attr_remove_port);
3258}
3259
3260static int __devinit ehea_probe_adapter(struct platform_device *dev,
3261 const struct of_device_id *id)
3262{
3263 struct ehea_adapter *adapter;
3264 const u64 *adapter_handle;
3265 int ret;
3266 int i;
3267
3268 if (!dev || !dev->dev.of_node) {
3269 pr_err("Invalid ibmebus device probed\n");
3270 return -EINVAL;
3271 }
3272
3273 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3274 if (!adapter) {
3275 ret = -ENOMEM;
3276 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3277 goto out;
3278 }
3279
3280 list_add(&adapter->list, &adapter_list);
3281
3282 adapter->ofdev = dev;
3283
3284 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3285 NULL);
3286 if (adapter_handle)
3287 adapter->handle = *adapter_handle;
3288
3289 if (!adapter->handle) {
3290 dev_err(&dev->dev, "failed getting handle for adapter"
3291 " '%s'\n", dev->dev.of_node->full_name);
3292 ret = -ENODEV;
3293 goto out_free_ad;
3294 }
3295
3296 adapter->pd = EHEA_PD_ID;
3297
3298 dev_set_drvdata(&dev->dev, adapter);
3299
3301 /* initialize adapter and ports */
3302 /* get adapter properties */
3303 ret = ehea_sense_adapter_attr(adapter);
3304 if (ret) {
3305 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3306 goto out_free_ad;
3307 }
3308
3309 adapter->neq = ehea_create_eq(adapter,
3310 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3311 if (!adapter->neq) {
3312 ret = -EIO;
3313 dev_err(&dev->dev, "NEQ creation failed\n");
3314 goto out_free_ad;
3315 }
3316
3317 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3318 (unsigned long)adapter);
3319
3320 ret = ehea_create_device_sysfs(dev);
3321 if (ret)
3322 goto out_kill_eq;
3323
3324 ret = ehea_setup_ports(adapter);
3325 if (ret) {
3326 dev_err(&dev->dev, "setup_ports failed\n");
3327 goto out_rem_dev_sysfs;
3328 }
3329
3330 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3331 ehea_interrupt_neq, IRQF_DISABLED,
3332 "ehea_neq", adapter);
3333 if (ret) {
3334 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3335 goto out_shutdown_ports;
3336 }
3337
3338 /* Handle any events that might be pending. */
3339 tasklet_hi_schedule(&adapter->neq_tasklet);
3340
3341 ret = 0;
3342 goto out;
3343
3344out_shutdown_ports:
3345 for (i = 0; i < EHEA_MAX_PORTS; i++)
3346 if (adapter->port[i]) {
3347 ehea_shutdown_single_port(adapter->port[i]);
3348 adapter->port[i] = NULL;
3349 }
3350
3351out_rem_dev_sysfs:
3352 ehea_remove_device_sysfs(dev);
3353
3354out_kill_eq:
3355 ehea_destroy_eq(adapter->neq);
3356
3357out_free_ad:
3358 list_del(&adapter->list);
3359 kfree(adapter);
3360
3361out:
3362 ehea_update_firmware_handles();
3363
3364 return ret;
3365}
3366
3367static int __devexit ehea_remove(struct platform_device *dev)
3368{
3369 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3370 int i;
3371
3372 for (i = 0; i < EHEA_MAX_PORTS; i++)
3373 if (adapter->port[i]) {
3374 ehea_shutdown_single_port(adapter->port[i]);
3375 adapter->port[i] = NULL;
3376 }
3377
3378 ehea_remove_device_sysfs(dev);
3379
3380 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3381 tasklet_kill(&adapter->neq_tasklet);
3382
3383 ehea_destroy_eq(adapter->neq);
3384 ehea_remove_adapter_mr(adapter);
3385 list_del(&adapter->list);
3386 kfree(adapter);
3387
3388 ehea_update_firmware_handles();
3389
3390 return 0;
3391}
3392
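/*
 * Crash/kexec shutdown hook: force-free every recorded firmware
 * handle and deregister all broadcast/multicast registrations so the
 * adapter is left in a clean state for the next kernel.
 */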
3393static void ehea_crash_handler(void)
3394{
3395 int i;
3396
3397 if (ehea_fw_handles.arr)
3398 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3399 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3400 ehea_fw_handles.arr[i].fwh,
3401 FORCE_FREE);
3402
3403 if (ehea_bcmc_regs.arr)
3404 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3405 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3406 ehea_bcmc_regs.arr[i].port_id,
3407 ehea_bcmc_regs.arr[i].reg_type,
3408 ehea_bcmc_regs.arr[i].macaddr,
3409 0, H_DEREG_BCMC);
3410}
3411
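/*
 * Memory hotplug notifier: add or remove the affected sections in the
 * driver's busmap and re-register the memory regions. Returning
 * NOTIFY_BAD rejects the transition if the bitmap update fails.
 */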
3412static int ehea_mem_notifier(struct notifier_block *nb,
3413 unsigned long action, void *data)
3414{
3415 int ret = NOTIFY_BAD;
3416 struct memory_notify *arg = data;
3417
3418 mutex_lock(&dlpar_mem_lock);
3419
3420 switch (action) {
3421 case MEM_CANCEL_OFFLINE:
3422 		pr_info("memory offlining canceled\n");
3423 		/* Readd canceled memory block; fall through */
3424 	case MEM_ONLINE:
3425 		pr_info("memory is going online\n");
3426 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3427 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3428 goto out_unlock;
3429 ehea_rereg_mrs();
3430 break;
3431 case MEM_GOING_OFFLINE:
3432 		pr_info("memory is going offline\n");
3433 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3434 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3435 goto out_unlock;
3436 ehea_rereg_mrs();
3437 break;
3438 default:
3439 break;
3440 }
3441
3442 ehea_update_firmware_handles();
3443 ret = NOTIFY_OK;
3444
3445out_unlock:
3446 mutex_unlock(&dlpar_mem_lock);
3447 return ret;
3448}
3449
3450static struct notifier_block ehea_mem_nb = {
3451 .notifier_call = ehea_mem_notifier,
3452};
3453
3454static int ehea_reboot_notifier(struct notifier_block *nb,
3455 unsigned long action, void *unused)
3456{
3457 if (action == SYS_RESTART) {
3458 pr_info("Reboot: freeing all eHEA resources\n");
3459 ibmebus_unregister_driver(&ehea_driver);
3460 }
3461 return NOTIFY_DONE;
3462}
3463
3464static struct notifier_block ehea_reboot_nb = {
3465 .notifier_call = ehea_reboot_notifier,
3466};
3467
3468static int check_module_parm(void)
3469{
3470 int ret = 0;
3471
3472 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3473 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3474 pr_info("Bad parameter: rq1_entries\n");
3475 ret = -EINVAL;
3476 }
3477 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3478 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3479 pr_info("Bad parameter: rq2_entries\n");
3480 ret = -EINVAL;
3481 }
3482 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3483 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3484 pr_info("Bad parameter: rq3_entries\n");
3485 ret = -EINVAL;
3486 }
3487 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3488 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3489 pr_info("Bad parameter: sq_entries\n");
3490 ret = -EINVAL;
3491 }
3492
3493 return ret;
3494}
3495
3496static ssize_t ehea_show_capabilities(struct device_driver *drv,
3497 char *buf)
3498{
3499 	return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
3500}
3501
3502static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3503 ehea_show_capabilities, NULL);
3504
3505static int __init ehea_module_init(void)
3506{
3507 int ret;
3508
3509 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3510
3511 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3512 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3513
3514 mutex_init(&ehea_fw_handles.lock);
3515 spin_lock_init(&ehea_bcmc_regs.lock);
3516
3517 ret = check_module_parm();
3518 if (ret)
3519 goto out;
3520
3521 ret = ehea_create_busmap();
3522 if (ret)
3523 goto out;
3524
3525 ret = register_reboot_notifier(&ehea_reboot_nb);
3526 if (ret)
3527 pr_info("failed registering reboot notifier\n");
3528
3529 ret = register_memory_notifier(&ehea_mem_nb);
3530 if (ret)
3531 pr_info("failed registering memory remove notifier\n");
3532
3533 ret = crash_shutdown_register(ehea_crash_handler);
3534 if (ret)
3535 pr_info("failed registering crash handler\n");
3536
3537 ret = ibmebus_register_driver(&ehea_driver);
3538 if (ret) {
3539 pr_err("failed registering eHEA device driver on ebus\n");
3540 goto out2;
3541 }
3542
3543 ret = driver_create_file(&ehea_driver.driver,
3544 &driver_attr_capabilities);
3545 if (ret) {
3546 pr_err("failed to register capabilities attribute, ret=%d\n",
3547 ret);
3548 goto out3;
3549 }
3550
3551 return ret;
3552
3553out3:
3554 ibmebus_unregister_driver(&ehea_driver);
3555out2:
3556 unregister_memory_notifier(&ehea_mem_nb);
3557 unregister_reboot_notifier(&ehea_reboot_nb);
3558 crash_shutdown_unregister(ehea_crash_handler);
3559out:
3560 return ret;
3561}
3562
3563static void __exit ehea_module_exit(void)
3564{
3565 int ret;
3566
3567 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3568 ibmebus_unregister_driver(&ehea_driver);
3569 unregister_reboot_notifier(&ehea_reboot_nb);
3570 ret = crash_shutdown_unregister(ehea_crash_handler);
3571 if (ret)
3572 pr_info("failed unregistering crash handler\n");
3573 unregister_memory_notifier(&ehea_mem_nb);
3574 kfree(ehea_fw_handles.arr);
3575 kfree(ehea_bcmc_regs.arr);
3576 ehea_destroy_busmap();
3577}
3578
3579module_init(ehea_module_init);
3580module_exit(ehea_module_exit);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
4 *
5 * eHEA ethernet device driver for IBM eServer System p
6 *
7 * (C) Copyright IBM Corp. 2006
8 *
9 * Authors:
10 * Christoph Raisch <raisch@de.ibm.com>
11 * Jan-Bernd Themann <themann@de.ibm.com>
12 * Thomas Klein <tklein@de.ibm.com>
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/device.h>
18#include <linux/in.h>
19#include <linux/ip.h>
20#include <linux/tcp.h>
21#include <linux/udp.h>
22#include <linux/if.h>
23#include <linux/list.h>
24#include <linux/slab.h>
25#include <linux/if_ether.h>
26#include <linux/notifier.h>
27#include <linux/reboot.h>
28#include <linux/memory.h>
29#include <asm/kexec.h>
30#include <linux/mutex.h>
31#include <linux/prefetch.h>
32
33#include <net/ip.h>
34
35#include "ehea.h"
36#include "ehea_qmr.h"
37#include "ehea_phyp.h"
38
39
40MODULE_LICENSE("GPL");
41MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
42MODULE_DESCRIPTION("IBM eServer HEA Driver");
43MODULE_VERSION(DRV_VERSION);
44
45
46static int msg_level = -1;
47static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
48static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
49static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
50static int sq_entries = EHEA_DEF_ENTRIES_SQ;
51static int use_mcs = 1;
52static int prop_carrier_state;
53
54module_param(msg_level, int, 0);
55module_param(rq1_entries, int, 0);
56module_param(rq2_entries, int, 0);
57module_param(rq3_entries, int, 0);
58module_param(sq_entries, int, 0);
59module_param(prop_carrier_state, int, 0);
60module_param(use_mcs, int, 0);
61
62MODULE_PARM_DESC(msg_level, "msg_level");
63MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
64 "port to stack. 1:yes, 0:no. Default = 0 ");
65MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
66 "[2^x - 1], x = [7..14]. Default = "
67 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
68MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
69 "[2^x - 1], x = [7..14]. Default = "
70 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
71MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
72 "[2^x - 1], x = [7..14]. Default = "
73 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
74MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
75 "[2^x - 1], x = [7..14]. Default = "
76 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
77MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
78 "Default = 1");
79
80static int port_name_cnt;
81static LIST_HEAD(adapter_list);
82static unsigned long ehea_driver_flags;
83static DEFINE_MUTEX(dlpar_mem_lock);
84static struct ehea_fw_handle_array ehea_fw_handles;
85static struct ehea_bcmc_reg_array ehea_bcmc_regs;
86
87
88static int ehea_probe_adapter(struct platform_device *dev);
89
90static int ehea_remove(struct platform_device *dev);
91
92static const struct of_device_id ehea_module_device_table[] = {
93 {
94 .name = "lhea",
95 .compatible = "IBM,lhea",
96 },
97 {
98 .type = "network",
99 .compatible = "IBM,lhea-ethernet",
100 },
101 {},
102};
103MODULE_DEVICE_TABLE(of, ehea_module_device_table);
104
105static const struct of_device_id ehea_device_table[] = {
106 {
107 .name = "lhea",
108 .compatible = "IBM,lhea",
109 },
110 {},
111};
112
113static struct platform_driver ehea_driver = {
114 .driver = {
115 .name = "ehea",
116 .owner = THIS_MODULE,
117 .of_match_table = ehea_device_table,
118 },
119 .probe = ehea_probe_adapter,
120 .remove = ehea_remove,
121};
122
123void ehea_dump(void *adr, int len, char *msg)
124{
125 int x;
126 unsigned char *deb = adr;
127 for (x = 0; x < len; x += 16) {
128 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
129 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
130 deb += 16;
131 }
132}
133
134static void ehea_schedule_port_reset(struct ehea_port *port)
135{
136 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
137 schedule_work(&port->reset_task);
138}
139
140static void ehea_update_firmware_handles(void)
141{
142 struct ehea_fw_handle_entry *arr = NULL;
143 struct ehea_adapter *adapter;
144 int num_adapters = 0;
145 int num_ports = 0;
146 int num_portres = 0;
147 int i = 0;
148 int num_fw_handles, k, l;
149
150 /* Determine number of handles */
151 mutex_lock(&ehea_fw_handles.lock);
152
153 list_for_each_entry(adapter, &adapter_list, list) {
154 num_adapters++;
155
156 for (k = 0; k < EHEA_MAX_PORTS; k++) {
157 struct ehea_port *port = adapter->port[k];
158
159 if (!port || (port->state != EHEA_PORT_UP))
160 continue;
161
162 num_ports++;
163 num_portres += port->num_def_qps;
164 }
165 }
166
167 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
168 num_ports * EHEA_NUM_PORT_FW_HANDLES +
169 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
170
171 if (num_fw_handles) {
172 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
173 if (!arr)
174 goto out; /* Keep the existing array */
175 } else
176 goto out_update;
177
178 list_for_each_entry(adapter, &adapter_list, list) {
179 if (num_adapters == 0)
180 break;
181
182 for (k = 0; k < EHEA_MAX_PORTS; k++) {
183 struct ehea_port *port = adapter->port[k];
184
185 if (!port || (port->state != EHEA_PORT_UP) ||
186 (num_ports == 0))
187 continue;
188
189 for (l = 0; l < port->num_def_qps; l++) {
190 struct ehea_port_res *pr = &port->port_res[l];
191
192 arr[i].adh = adapter->handle;
193 arr[i++].fwh = pr->qp->fw_handle;
194 arr[i].adh = adapter->handle;
195 arr[i++].fwh = pr->send_cq->fw_handle;
196 arr[i].adh = adapter->handle;
197 arr[i++].fwh = pr->recv_cq->fw_handle;
198 arr[i].adh = adapter->handle;
199 arr[i++].fwh = pr->eq->fw_handle;
200 arr[i].adh = adapter->handle;
201 arr[i++].fwh = pr->send_mr.handle;
202 arr[i].adh = adapter->handle;
203 arr[i++].fwh = pr->recv_mr.handle;
204 }
205 arr[i].adh = adapter->handle;
206 arr[i++].fwh = port->qp_eq->fw_handle;
207 num_ports--;
208 }
209
210 arr[i].adh = adapter->handle;
211 arr[i++].fwh = adapter->neq->fw_handle;
212
213 if (adapter->mr.handle) {
214 arr[i].adh = adapter->handle;
215 arr[i++].fwh = adapter->mr.handle;
216 }
217 num_adapters--;
218 }
219
220out_update:
221 kfree(ehea_fw_handles.arr);
222 ehea_fw_handles.arr = arr;
223 ehea_fw_handles.num_entries = i;
224out:
225 mutex_unlock(&ehea_fw_handles.lock);
226}
227
228static void ehea_update_bcmc_registrations(void)
229{
230 unsigned long flags;
231 struct ehea_bcmc_reg_entry *arr = NULL;
232 struct ehea_adapter *adapter;
233 struct ehea_mc_list *mc_entry;
234 int num_registrations = 0;
235 int i = 0;
236 int k;
237
238 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
239
240 /* Determine number of registrations */
241 list_for_each_entry(adapter, &adapter_list, list)
242 for (k = 0; k < EHEA_MAX_PORTS; k++) {
243 struct ehea_port *port = adapter->port[k];
244
245 if (!port || (port->state != EHEA_PORT_UP))
246 continue;
247
248 num_registrations += 2; /* Broadcast registrations */
249
250 list_for_each_entry(mc_entry, &port->mc_list->list,list)
251 num_registrations += 2;
252 }
253
254 if (num_registrations) {
255 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
256 if (!arr)
257 goto out; /* Keep the existing array */
258 } else
259 goto out_update;
260
261 list_for_each_entry(adapter, &adapter_list, list) {
262 for (k = 0; k < EHEA_MAX_PORTS; k++) {
263 struct ehea_port *port = adapter->port[k];
264
265 if (!port || (port->state != EHEA_PORT_UP))
266 continue;
267
268 if (num_registrations == 0)
269 goto out_update;
270
271 arr[i].adh = adapter->handle;
272 arr[i].port_id = port->logical_port_id;
273 arr[i].reg_type = EHEA_BCMC_BROADCAST |
274 EHEA_BCMC_UNTAGGED;
275 arr[i++].macaddr = port->mac_addr;
276
277 arr[i].adh = adapter->handle;
278 arr[i].port_id = port->logical_port_id;
279 arr[i].reg_type = EHEA_BCMC_BROADCAST |
280 EHEA_BCMC_VLANID_ALL;
281 arr[i++].macaddr = port->mac_addr;
282 num_registrations -= 2;
283
284 list_for_each_entry(mc_entry,
285 &port->mc_list->list, list) {
286 if (num_registrations == 0)
287 goto out_update;
288
289 arr[i].adh = adapter->handle;
290 arr[i].port_id = port->logical_port_id;
291 arr[i].reg_type = EHEA_BCMC_MULTICAST |
292 EHEA_BCMC_UNTAGGED;
293 if (mc_entry->macaddr == 0)
294 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
295 arr[i++].macaddr = mc_entry->macaddr;
296
297 arr[i].adh = adapter->handle;
298 arr[i].port_id = port->logical_port_id;
299 arr[i].reg_type = EHEA_BCMC_MULTICAST |
300 EHEA_BCMC_VLANID_ALL;
301 if (mc_entry->macaddr == 0)
302 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
303 arr[i++].macaddr = mc_entry->macaddr;
304 num_registrations -= 2;
305 }
306 }
307 }
308
309out_update:
310 kfree(ehea_bcmc_regs.arr);
311 ehea_bcmc_regs.arr = arr;
312 ehea_bcmc_regs.num_entries = i;
313out:
314 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
315}
316
317static void ehea_get_stats64(struct net_device *dev,
318 struct rtnl_link_stats64 *stats)
319{
320 struct ehea_port *port = netdev_priv(dev);
321 u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
322 int i;
323
324 for (i = 0; i < port->num_def_qps; i++) {
325 rx_packets += port->port_res[i].rx_packets;
326 rx_bytes += port->port_res[i].rx_bytes;
327 }
328
329 for (i = 0; i < port->num_def_qps; i++) {
330 tx_packets += port->port_res[i].tx_packets;
331 tx_bytes += port->port_res[i].tx_bytes;
332 }
333
334 stats->tx_packets = tx_packets;
335 stats->rx_bytes = rx_bytes;
336 stats->tx_bytes = tx_bytes;
337 stats->rx_packets = rx_packets;
338
339 stats->multicast = port->stats.multicast;
340 stats->rx_errors = port->stats.rx_errors;
341}
342
343static void ehea_update_stats(struct work_struct *work)
344{
345 struct ehea_port *port =
346 container_of(work, struct ehea_port, stats_work.work);
347 struct net_device *dev = port->netdev;
348 struct rtnl_link_stats64 *stats = &port->stats;
349 struct hcp_ehea_port_cb2 *cb2;
350 u64 hret;
351
352 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
353 if (!cb2) {
354 netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
355 goto resched;
356 }
357
358 hret = ehea_h_query_ehea_port(port->adapter->handle,
359 port->logical_port_id,
360 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
361 if (hret != H_SUCCESS) {
362 netdev_err(dev, "query_ehea_port failed\n");
363 goto out_herr;
364 }
365
366 if (netif_msg_hw(port))
367 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
368
369 stats->multicast = cb2->rxmcp;
370 stats->rx_errors = cb2->rxuerr;
371
372out_herr:
373 free_page((unsigned long)cb2);
374resched:
375 schedule_delayed_work(&port->stats_work,
376 round_jiffies_relative(msecs_to_jiffies(1000)));
377}
378
379static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
380{
381 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
382 struct net_device *dev = pr->port->netdev;
383 int max_index_mask = pr->rq1_skba.len - 1;
384 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
385 int adder = 0;
386 int i;
387
388 pr->rq1_skba.os_skbs = 0;
389
390 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
391 if (nr_of_wqes > 0)
392 pr->rq1_skba.index = index;
393 pr->rq1_skba.os_skbs = fill_wqes;
394 return;
395 }
396
397 for (i = 0; i < fill_wqes; i++) {
398 if (!skb_arr_rq1[index]) {
399 skb_arr_rq1[index] = netdev_alloc_skb(dev,
400 EHEA_L_PKT_SIZE);
401 if (!skb_arr_rq1[index]) {
402 pr->rq1_skba.os_skbs = fill_wqes - i;
403 break;
404 }
405 }
406 index--;
407 index &= max_index_mask;
408 adder++;
409 }
410
411 if (adder == 0)
412 return;
413
414 /* Ring doorbell */
415 ehea_update_rq1a(pr->qp, adder);
416}
417
418static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
419{
420 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
421 struct net_device *dev = pr->port->netdev;
422 int i;
423
424 if (nr_rq1a > pr->rq1_skba.len) {
425 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
426 return;
427 }
428
429 for (i = 0; i < nr_rq1a; i++) {
430 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
431 if (!skb_arr_rq1[i])
432 break;
433 }
434 /* Ring doorbell */
435 ehea_update_rq1a(pr->qp, i - 1);
436}
437
438static int ehea_refill_rq_def(struct ehea_port_res *pr,
439 struct ehea_q_skb_arr *q_skba, int rq_nr,
440 int num_wqes, int wqe_type, int packet_size)
441{
442 struct net_device *dev = pr->port->netdev;
443 struct ehea_qp *qp = pr->qp;
444 struct sk_buff **skb_arr = q_skba->arr;
445 struct ehea_rwqe *rwqe;
446 int i, index, max_index_mask, fill_wqes;
447 int adder = 0;
448 int ret = 0;
449
450 fill_wqes = q_skba->os_skbs + num_wqes;
451 q_skba->os_skbs = 0;
452
453 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
454 q_skba->os_skbs = fill_wqes;
455 return ret;
456 }
457
458 index = q_skba->index;
459 max_index_mask = q_skba->len - 1;
460 for (i = 0; i < fill_wqes; i++) {
461 u64 tmp_addr;
462 struct sk_buff *skb;
463
464 skb = netdev_alloc_skb_ip_align(dev, packet_size);
465 if (!skb) {
466 q_skba->os_skbs = fill_wqes - i;
467 if (q_skba->os_skbs == q_skba->len - 2) {
468 netdev_info(pr->port->netdev,
469 "rq%i ran dry - no mem for skb\n",
470 rq_nr);
471 ret = -ENOMEM;
472 }
473 break;
474 }
475
476 skb_arr[index] = skb;
477 tmp_addr = ehea_map_vaddr(skb->data);
478 if (tmp_addr == -1) {
479 dev_consume_skb_any(skb);
480 q_skba->os_skbs = fill_wqes - i;
481 ret = 0;
482 break;
483 }
484
485 rwqe = ehea_get_next_rwqe(qp, rq_nr);
486 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
487 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
488 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
489 rwqe->sg_list[0].vaddr = tmp_addr;
490 rwqe->sg_list[0].len = packet_size;
491 rwqe->data_segments = 1;
492
493 index++;
494 index &= max_index_mask;
495 adder++;
496 }
497
498 q_skba->index = index;
499 if (adder == 0)
500 goto out;
501
502 /* Ring doorbell */
503 iosync();
504 if (rq_nr == 2)
505 ehea_update_rq2a(pr->qp, adder);
506 else
507 ehea_update_rq3a(pr->qp, adder);
508out:
509 return ret;
510}
511
512
513static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
514{
515 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
516 nr_of_wqes, EHEA_RWQE2_TYPE,
517 EHEA_RQ2_PKT_SIZE);
518}
519
520
521static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
522{
523 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
524 nr_of_wqes, EHEA_RWQE3_TYPE,
525 EHEA_MAX_PACKET_SIZE);
526}
527
528static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
529{
530 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
531 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
532 return 0;
533 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
534 (cqe->header_length == 0))
535 return 0;
536 return -EINVAL;
537}
538
539static inline void ehea_fill_skb(struct net_device *dev,
540 struct sk_buff *skb, struct ehea_cqe *cqe,
541 struct ehea_port_res *pr)
542{
543 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
544
545 skb_put(skb, length);
546 skb->protocol = eth_type_trans(skb, dev);
547
548 /* The packet was not an IPV4 packet so a complemented checksum was
549 calculated. The value is found in the Internet Checksum field. */
550 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
551 skb->ip_summed = CHECKSUM_COMPLETE;
552 skb->csum = csum_unfold(~cqe->inet_checksum_value);
553 } else
554 skb->ip_summed = CHECKSUM_UNNECESSARY;
555
556 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
557}
558
559static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
560 int arr_len,
561 struct ehea_cqe *cqe)
562{
563 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
564 struct sk_buff *skb;
565 void *pref;
566 int x;
567
568 x = skb_index + 1;
569 x &= (arr_len - 1);
570
571 pref = skb_array[x];
572 if (pref) {
573 prefetchw(pref);
574 prefetchw(pref + EHEA_CACHE_LINE);
575
576 pref = (skb_array[x]->data);
577 prefetch(pref);
578 prefetch(pref + EHEA_CACHE_LINE);
579 prefetch(pref + EHEA_CACHE_LINE * 2);
580 prefetch(pref + EHEA_CACHE_LINE * 3);
581 }
582
583 skb = skb_array[skb_index];
584 skb_array[skb_index] = NULL;
585 return skb;
586}
587
588static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
589 int arr_len, int wqe_index)
590{
591 struct sk_buff *skb;
592 void *pref;
593 int x;
594
595 x = wqe_index + 1;
596 x &= (arr_len - 1);
597
598 pref = skb_array[x];
599 if (pref) {
600 prefetchw(pref);
601 prefetchw(pref + EHEA_CACHE_LINE);
602
603 pref = (skb_array[x]->data);
604 prefetchw(pref);
605 prefetchw(pref + EHEA_CACHE_LINE);
606 }
607
608 skb = skb_array[wqe_index];
609 skb_array[wqe_index] = NULL;
610 return skb;
611}
612
613static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
614 struct ehea_cqe *cqe, int *processed_rq2,
615 int *processed_rq3)
616{
617 struct sk_buff *skb;
618
619 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
620 pr->p_stats.err_tcp_cksum++;
621 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
622 pr->p_stats.err_ip_cksum++;
623 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
624 pr->p_stats.err_frame_crc++;
625
626 if (rq == 2) {
627 *processed_rq2 += 1;
628 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
629 dev_kfree_skb(skb);
630 } else if (rq == 3) {
631 *processed_rq3 += 1;
632 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
633 dev_kfree_skb(skb);
634 }
635
636 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
637 if (netif_msg_rx_err(pr->port)) {
638 pr_err("Critical receive error for QP %d. Resetting port.\n",
639 pr->qp->init_attr.qp_nr);
640 ehea_dump(cqe, sizeof(*cqe), "CQE");
641 }
642 ehea_schedule_port_reset(pr->port);
643 return 1;
644 }
645
646 return 0;
647}
648
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

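/*
 * Magic wr_id of a marker SWQE that check_sqs() posts after a DLPAR
 * memory operation.  Its completion, detected in ehea_proc_cqes(),
 * indicates that the hardware and software send queues are still in
 * sync; a missing completion triggers a port reset.
 */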
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

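/*
 * Send completion path: process up to @my_quota CQEs from the send CQ,
 * free the skbs of completed SWQE2 transmits, replenish swqe_avail and
 * wake the TX queue if it was stopped while waiting for send WQEs.
 */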
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

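/*
 * NAPI poll: handle send completions first, then receive work.  Once
 * the budget is no longer exhausted, interrupts are re-enabled on both
 * CQs and a final poll closes the race with completions that arrived
 * in the meantime, rescheduling NAPI if anything is still pending.
 */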
static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		rmb();
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

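/*
 * Handler for QP affiliated asynchronous errors: drain the event
 * queue and schedule a port reset for unrecoverable QP, CQ or EQ
 * errors.
 */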
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1; /* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i] &&
		    adapter->port[i]->logical_port_id == logical_port)
			return adapter->port[i];
	return NULL;
}

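/*
 * Query MAC address, link speed and the number of default QPs of a
 * logical port from the hypervisor (H_PORT_CB0).
 */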
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

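/*
 * Decode one entry of the adapter's notification event queue and
 * propagate logical and physical port state changes to the stack.
 */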
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		netdev_err(NULL, "unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_info(port, link, dev,
				   "Logical port down\n");
			netif_carrier_off(dev);
			netif_tx_disable(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

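/* Prepost receive buffers to all three receive queues of a port resource */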
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

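/*
 * Set up one port resource set: its event queue, send and receive
 * completion queues, queue pair, skb tracking arrays and NAPI context.
 * The traffic counters are preserved so that they survive the re-init
 * performed during a port reset.
 */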
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

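/*
 * Fill the immediate data area of a type 2 SWQE.  Up to SWQE2_MAX_IMM
 * bytes of the linear skb data travel inside the WQE itself; for TSO
 * only the headers are copied and the rest of the linear data is
 * described by the first scatter-gather entry.
 */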
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}

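/*
 * Common TX offload setup: always request CRC generation and, for IPv4
 * frames, tell the hardware where the IP header and the TCP/UDP
 * checksum field are located so it can insert checksums on
 * CHECKSUM_PARTIAL skbs.
 */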
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_consume_skb_any(skb);
}

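/*
 * Transmit entry point.  Frames up to SWQE3_MAX_IMM bytes are sent
 * entirely as immediate data in a type 3 SWQE and the skb is consumed
 * right away; a signalled completion is requested for them only every
 * sig_comp_iv packets.  Larger frames use a type 2 SWQE with
 * descriptors, and the skb is kept in sq_skba until its completion
 * arrives.
 */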
static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

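	/* vlan_filter[] is a bitmap of VIDs: one u64 per 64 IDs, MSB first */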
	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}

static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out:
	free_page((unsigned long)cb1);
	return err;
}

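/*
 * Walk a QP through its state machine to make it operational:
 * INITIALIZED, then ENABLED, then ready-to-send, re-querying the
 * control block before each modify call.
 */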
static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "ehea_port_res_setup failed. ret:%d\n", ret);
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed. ret:%d\n",
				   ret);
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	netif_carrier_off(dev);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

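/*
 * Mark every send WQE with the PURGE bit so that the hardware discards
 * pending transmits; used while the queues are stopped for a DLPAR
 * memory change.
 */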
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

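/*
 * After the memory region has been re-registered, rewrite the buffer
 * descriptors of all preposted RQ2/RQ3 WQEs with the new lkey and the
 * skb data addresses.
 */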
static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

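/*
 * DLPAR memory add/remove: quiesce all ports, drop the kernel memory
 * region, register a new one matching the changed memory layout and
 * restart the queue pairs.
 */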
2709static void ehea_rereg_mrs(void)
2710{
2711 int ret, i;
2712 struct ehea_adapter *adapter;
2713
2714 pr_info("LPAR memory changed - re-initializing driver\n");
2715
2716 list_for_each_entry(adapter, &adapter_list, list)
2717 if (adapter->active_ports) {
2718 /* Shutdown all ports */
2719 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2720 struct ehea_port *port = adapter->port[i];
2721 struct net_device *dev;
2722
2723 if (!port)
2724 continue;
2725
2726 dev = port->netdev;
2727
2728 if (dev->flags & IFF_UP) {
2729 mutex_lock(&port->port_lock);
2730 netif_tx_disable(dev);
2731 ehea_flush_sq(port);
2732 ret = ehea_stop_qps(dev);
2733 if (ret) {
2734 mutex_unlock(&port->port_lock);
2735 goto out;
2736 }
2737 port_napi_disable(port);
2738 mutex_unlock(&port->port_lock);
2739 }
2740 reset_sq_restart_flag(port);
2741 }
2742
2743 /* Unregister old memory region */
2744 ret = ehea_rem_mr(&adapter->mr);
2745 if (ret) {
2746 pr_err("unregister MR failed - driver inoperable!\n");
2747 goto out;
2748 }
2749 }
2750
2751 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2752
2753 list_for_each_entry(adapter, &adapter_list, list)
2754 if (adapter->active_ports) {
2755 /* Register new memory region */
2756 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2757 if (ret) {
2758 pr_err("register MR failed - driver inoperable!\n");
2759 goto out;
2760 }
2761
2762 /* Restart all ports */
2763 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2764 struct ehea_port *port = adapter->port[i];
2765
2766 if (port) {
2767 struct net_device *dev = port->netdev;
2768
2769 if (dev->flags & IFF_UP) {
2770 mutex_lock(&port->port_lock);
2771 ret = ehea_restart_qps(dev);
2772 if (!ret) {
2773 check_sqs(port);
2774 port_napi_enable(port);
2775 netif_tx_wake_all_queues(dev);
2776 } else {
2777 netdev_err(dev, "Unable to restart QPS\n");
2778 }
2779 mutex_unlock(&port->port_lock);
2780 }
2781 }
2782 }
2783 }
2784 pr_info("re-initializing driver complete\n");
2785out:
2786 return;
2787}
2788
static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

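/*
 * Query adapter-wide attributes from firmware. Only the number of
 * usable multicast MAC registrations is kept; one entry of the
 * firmware-reported total is held back.
 */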
static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

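/*
 * Query whether the physical port has jumbo frames enabled and, if not,
 * try to enable them via the CB4 port control block. *jumbo is set to 1
 * when jumbo frames end up enabled, 0 otherwise.
 */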
static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo* frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO,
						       cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
out:
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);

static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

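/*
 * Register the logical port as a child device on the ibmebus bus and
 * expose its id through the log_port_id sysfs attribute. The reference
 * taken on the device-tree node here is dropped again in
 * logical_port_release() when the device goes away.
 */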
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

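/*
 * Allocate and initialize one logical port: create the multiqueue
 * net_device, sense port attributes from firmware, register the ibmebus
 * child device, wire up netdev ops, offload features and the MTU range,
 * then register the netdev. Returns the new port or NULL on error.
 */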
static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);

	if (!dev) {
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev) {
		ret = -EIO;
		goto out_free_mc_list;
	}

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
			   NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
	dev->features = NETIF_F_SG | NETIF_F_TSO |
			NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			     NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	/* MTU range: 68 - 9022 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = EHEA_MAX_PACKET_SIZE;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

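/*
 * Walk the children of the adapter's lhea device-tree node and create a
 * logical port for every child carrying an "ibm,hea-port-no" property.
 * A matching child node looks roughly like this (node names and values
 * are illustrative only):
 *
 *	ethernet@23e00100 {
 *		ibm,hea-port-no = <0x1>;
 *	};
 */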
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

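/*
 * sysfs store handler for the adapter's probe_port attribute: writing a
 * logical port id dynamically adds that port, e.g. after a DLPAR
 * operation. Illustrative usage (the device path is made up; actual
 * names depend on the system):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 */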
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		of_node_put(eth_dn);
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

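/*
 * sysfs store handler for remove_port, the counterpart to probe_port:
 * writing a logical port id shuts that port down and unregisters it.
 * Illustrative usage (device path made up as above):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */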
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%u", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);

static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

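/*
 * Memory hotplug notifier: while a section goes on- or offline, all
 * transfers are stopped via __EHEA_STOP_XFER, the driver's busmap is
 * updated for the affected pfn range and all memory regions are
 * re-registered through ehea_rereg_mrs(). Returning NOTIFY_BAD (e.g.
 * when the busmap update fails) vetoes the hotplug operation.
 */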
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled\n");
		fallthrough;	/* re-add canceled memory block */

	case MEM_ONLINE:
		pr_info("memory is going online\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;

	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;

	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

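/*
 * kexec crash shutdown hook: force-free every recorded firmware resource
 * handle and drop all broadcast/multicast registrations so the
 * hypervisor does not keep stale handles across the transition to the
 * crash kernel.
 */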
static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

static atomic_t ehea_memory_hooks_registered;

/* Register memory hooks on probe of first adapter */
static int ehea_register_memory_hooks(void)
{
	int ret = 0;

	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
		return 0;

	ret = ehea_create_busmap();
	if (ret) {
		pr_info("ehea_create_busmap failed\n");
		goto out;
	}

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret) {
		pr_info("register_reboot_notifier failed\n");
		goto out;
	}

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret) {
		pr_info("register_memory_notifier failed\n");
		goto out2;
	}

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret) {
		pr_info("crash_shutdown_register failed\n");
		goto out3;
	}

	return 0;

out3:
	unregister_memory_notifier(&ehea_mem_nb);
out2:
	unregister_reboot_notifier(&ehea_reboot_nb);
out:
	atomic_dec(&ehea_memory_hooks_registered);
	return ret;
}

static void ehea_unregister_memory_hooks(void)
{
	/* Only remove the hooks if we've registered them */
	if (atomic_read(&ehea_memory_hooks_registered) == 0)
		return;

	unregister_reboot_notifier(&ehea_reboot_nb);
	if (crash_shutdown_unregister(ehea_crash_handler))
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
}

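/*
 * Adapter probe: read the "ibm,hea-handle" firmware handle from the
 * device tree, sense adapter attributes, create the notification event
 * queue (NEQ) plus its tasklet and IRQ, create the probe_port/
 * remove_port sysfs files and set up all ports listed in the device
 * tree. The error paths unwind in reverse order.
 */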
static int ehea_probe_adapter(struct platform_device *dev)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	ret = ehea_register_memory_hooks();
	if (ret)
		return ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter '%pOF'\n",
			dev->dev.of_node);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	platform_set_drvdata(dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, 0,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);

	ehea_update_firmware_handles();

	return 0;
}

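/*
 * Validate the queue-size module parameters once at module load: each
 * receive queue and the send queue must lie between EHEA_MIN_ENTRIES_QP
 * and its respective EHEA_MAX_ENTRIES_* limit before the sizes are
 * handed to firmware at queue creation time.
 */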
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t capabilities_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR_RO(capabilities);

static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out2;
	}

	return ret;

out2:
	ibmebus_unregister_driver(&ehea_driver);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	ehea_unregister_memory_hooks();
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);