/* lio_vf_rep.c (Linux v6.13.7) */
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
static int lio_vf_get_port_parent_id(struct net_device *dev,
				     struct netdev_phys_item_id *ppid);

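/* netdev_ops shared by every VF representor. Control-path operations are
 * forwarded to firmware as soft commands; the data path tunnels packets
 * through the parent PF's instruction queue.
 */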
static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
	.ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};

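/* Build and issue a soft command carrying a lio_vf_rep_req, then wait for
 * the firmware's lio_vf_rep_resp. On IQ_SEND_FAILED the command buffer is
 * freed here; on the completion path, setting sc->caller_is_done hands the
 * buffer back to the response manager for cleanup (a reading of the
 * surrounding octeon_main.h machinery, not spelled out in this file).
 */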
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, 0);
	if (!sc)
		return -ENOMEM;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	err = wait_for_sc_completion_timeout(oct, sc, 0);
	if (err)
		return err;

	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
	else if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);

	WRITE_ONCE(sc->caller_is_done, true);
	return err;

free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

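/* ndo_open: ask firmware to mark this representor LIO_VF_REP_STATE_UP,
 * then flag the interface RUNNING and enable carrier and the TX queue.
 */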
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

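/* ndo_stop: mirror of lio_vf_rep_open; the firmware is told the representor
 * is DOWN before the local TX queue and carrier are disabled.
 */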
static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

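/* ndo_tx_timeout: there is no local TX ring to reset, so just refresh the
 * trans-start timestamp and wake the queue.
 */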
static void
lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

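/* ndo_get_stats64: report the counters cached by lio_vf_rep_fetch_stats().
 * TX and RX are swapped because the representor is the switch-port view of
 * the VF: what the VF transmits is what the representor receives.
 */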
static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	/* Swap tx and rx stats as VF rep is a switch port */
	stats64->tx_packets = vf_rep->stats.rx_packets;
	stats64->tx_bytes   = vf_rep->stats.rx_bytes;
	stats64->tx_dropped = vf_rep->stats.rx_dropped;

	stats64->rx_packets = vf_rep->stats.tx_packets;
	stats64->rx_bytes   = vf_rep->stats.tx_bytes;
	stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

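/* ndo_change_mtu: the new MTU is sent to firmware as big-endian; ndev->mtu
 * is only updated once firmware acknowledges the change.
 */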
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

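/* ndo_get_phys_port_name: names come out as "pf<N>vf<M>", with the VF index
 * recovered from the ifidx layout described in lio_vf_rep_get_ndev() below.
 */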
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

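/* Map a firmware ifidx back to a representor netdev. max_vfs works out to
 * 64 here (matching the ifidx layout set up in lio_vf_rep_create()), so
 * vfid_mask extracts the per-PF VF slot from the low bits of ifidx.
 */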
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;

	return oct->vf_rep_list.ndev[vf_id];
}

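/* Copy a received buffer into the skb. Large packets get MIN_SKB_SIZE bytes
 * copied into the linear area and the remainder attached as a page frag;
 * small packets are linearized entirely and the page reference is dropped.
 */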
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					pg_info->page,
					pg_info->page_offset + MIN_SKB_SIZE,
					len - MIN_SKB_SIZE,
					LIO_RXBUFFER_SZ);
		}
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

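/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: packets the firmware steers
 * to a representor. The ossp field of the receive header carries the ifidx;
 * anything that cannot be matched to a RUNNING representor (or that spans
 * multiple buffers) is dropped.
 */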
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];
	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

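/* TX completion: unmap the packet, free the skb and the soft command, and
 * wake the representor queue if the parent's instruction queue has room.
 */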
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	iq_no = sc->iq_no;
	octeon_free_soft_command(oct, sc);

	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

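/* ndo_start_xmit: wrap the skb in a soft command and push it down the
 * parent PF's instruction queue (parent_lio->txq). Only linear skbs are
 * supported; fragmented packets are dropped.
 */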
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	/* Multiple buffers are not used for vf_rep packets. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

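/* ndo_get_port_parent_id: all representors report the parent PF's MAC as
 * their switch ID, so user space can tell they hang off the same eswitch.
 * The +2 appears to skip the two unused leading bytes of the 64-bit
 * hw_addr field.
 */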
static int lio_vf_get_port_parent_id(struct net_device *dev,
				     struct netdev_phys_item_id *ppid)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	ppid->id_len = ETH_ALEN;
	ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);

	return 0;
}

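/* Delayed work: poll firmware for representor counters and cache them for
 * ndo_get_stats64. The response is byte-swapped 8 bytes at a time before
 * caching, and the work re-arms itself every LIO_VF_REP_STATS_POLL_TIME_MS.
 */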
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));

	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

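/* Create one representor netdev per allocated VF. Only meaningful when the
 * eswitch is in switchdev mode with SR-IOV enabled; on any mid-loop failure
 * every representor created so far is torn down again.
 */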
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies
				      (LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

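/* Tear down all representors: stop the stats worker, quiesce the queue and
 * carrier, then unregister and free each netdev.
 */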
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);

			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

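/* Netdev notifier: when a representor is registered or renamed, push the
 * new interface name to firmware so both sides agree on the device name.
 */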
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strscpy(rep_cfg.rep_name.name, ndev->name,
		sizeof(rep_cfg.rep_name.name));

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

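/* Module init/exit hooks: register and unregister the name-sync notifier
 * above.
 */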
int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}