/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *ndev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
};

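/* Completion callback for VF representor soft commands: clear
 * resp->status only if the firmware actually replied, so the waiting
 * sender can tell a real response from a timeout, then wake it up.
 */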
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
			    u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_vf_rep_sc_ctx *ctx =
		(struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	struct lio_vf_rep_resp *resp =
		(struct lio_vf_rep_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
		WRITE_ONCE(resp->status, 0);

	complete(&ctx->complete);
}

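/* Wrap the given request in a soft command, post it to the device and
 * wait (up to twice the request timeout) for the firmware to respond.
 * On success the response payload, if any, is copied into resp;
 * returns nonzero if allocation, the send, or the request itself
 * failed.
 */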
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	struct lio_vf_rep_sc_ctx *ctx;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, ctx_size);
	if (!sc)
		return -ENOMEM;

	ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	memset(ctx, 0, ctx_size);
	init_completion(&ctx->complete);

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
	sc->callback = lio_vf_rep_send_sc_complete;
	sc->callback_arg = sc;
	sc->wait_time = LIO_VF_REP_REQ_TMO_MS;

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	wait_for_completion_timeout(&ctx->complete,
				    msecs_to_jiffies(2 * LIO_VF_REP_REQ_TMO_MS));
	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
	else if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);
free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

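/* ndo_open handler: ask the firmware to bring the representor's link
 * up, then mark the interface running and start its TX queue.
 */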
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	stats64->tx_packets = vf_rep->stats.tx_packets;
	stats64->tx_bytes   = vf_rep->stats.tx_bytes;
	stats64->tx_dropped = vf_rep->stats.tx_dropped;

	stats64->rx_packets = vf_rep->stats.rx_packets;
	stats64->rx_bytes   = vf_rep->stats.rx_bytes;
	stats64->rx_dropped = vf_rep->stats.rx_dropped;
}

static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	ndev->mtu = new_mtu;

	return 0;
}

static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

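/* Map a firmware interface index back to a representor netdev.  Each
 * PF owns a block of (CN23XX_MAX_VFS_PER_PF + 1) indices and its VFs
 * occupy slots 1..N within that block, so the VF id is the offset
 * within the block minus one.
 */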
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;

	return oct->vf_rep_list.ndev[vf_id];
}

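/* Copy a received buffer into the skb.  Short packets are copied
 * entirely into the linear area; for larger ones only MIN_SKB_SIZE
 * bytes are copied and the remainder is attached as a page fragment
 * to avoid copying the full payload.
 */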
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				pg_info->page,
				pg_info->page_offset + MIN_SKB_SIZE,
				len - MIN_SKB_SIZE,
				LIO_RXBUFFER_SZ);
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

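/* Dispatch handler for packets the firmware delivers on behalf of a
 * VF.  The interface index carried in the receive header selects the
 * representor netdev, and the packet is handed to the stack with
 * netif_rx(); anything that cannot be delivered is dropped and its
 * buffers are freed.
 */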
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];
	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

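/* TX completion callback: unmap the packet buffer, free the skb and
 * the soft command, and wake the queue if the instruction queue has
 * room again.
 */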
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no = sc->iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	octeon_free_soft_command(oct, sc);

	/* sc has been freed; use only the saved iq_no below */
	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

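/* ndo_start_xmit handler: representor TX is tunnelled to the firmware
 * as a soft command on the parent PF's transmit queue, with the skb
 * attached as context so the completion callback can free it.
 */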
static int
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Multiple buffers are not used for vf_rep packets; check this
	 * before allocating the soft command so nothing leaks on the
	 * failure path.
	 */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		goto xmit_failed;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
	.switchdev_port_attr_get        = lio_vf_rep_attr_get,
};

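/* Delayed work that polls the firmware for this representor's
 * statistics and caches them for ndo_get_stats64, then reschedules
 * itself.
 */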
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));

	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

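/* Create one representor netdev per allocated VF (only in switchdev
 * eswitch mode) and register the dispatch function for representor
 * packets.  On failure, everything created so far is torn down.
 */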
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;
		SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);

			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

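/* netdev notifier: when a representor is registered or renamed, push
 * the new interface name to the firmware so its record of the VF
 * stays in sync.
 */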
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}