/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
};

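/* Send a control request to the firmware as a soft command and wait for
 * its response.  The response buffer carries a lio_vf_rep_resp header
 * followed by resp_size bytes of payload; the status field is preset to
 * nonzero here and is expected to be cleared by the firmware on success.
 * On a completion timeout the command is left for the response handler
 * to reclaim, so it is not freed on that path.
 */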
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, 0);
	if (!sc)
		return -ENOMEM;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	err = wait_for_sc_completion_timeout(oct, sc, 0);
	if (err)
		return err;

	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
	else if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);

	WRITE_ONCE(sc->caller_is_done, true);
	return err;

free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

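/* ndo_open handler: ask the firmware to mark this representor's state
 * as up, then flag the interface as running and start its TX queue.
 */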
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

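/* ndo_stop handler: the mirror image of open; push the DOWN state to
 * the firmware, clear the running flag and quiesce the TX queue.
 */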
static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

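/* ndo_tx_timeout handler: record a new transmit time and wake the queue
 * so pending packets can be retried.
 */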
static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

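/* ndo_get_stats64 handler.  The representor is the switch-port side of
 * the VF, so what the VF transmitted is what this device received and
 * vice versa; hence the tx/rx swap below.
 */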
static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	/* Swap tx and rx stats as VF rep is a switch port */
	stats64->tx_packets = vf_rep->stats.rx_packets;
	stats64->tx_bytes   = vf_rep->stats.rx_bytes;
	stats64->tx_dropped = vf_rep->stats.rx_dropped;

	stats64->rx_packets = vf_rep->stats.tx_packets;
	stats64->rx_bytes   = vf_rep->stats.tx_bytes;
	stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

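/* ndo_change_mtu handler: the MTU is enforced by the firmware, so send
 * the new value down (big-endian on the wire) before updating the
 * netdev.
 */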
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	ndev->mtu = new_mtu;

	return 0;
}

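/* ndo_get_phys_port_name handler: report the port as "pf<N>vf<M>",
 * recovering the VF number from the firmware interface index.
 */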
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

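/* Map a firmware interface index to the representor netdev it belongs
 * to.  Each PF owns a contiguous block of 64 indices; index 0 of each
 * block is the PF itself, so VF indices start at 1 within the block.
 */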
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;

	return oct->vf_rep_list.ndev[vf_id];
}

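/* Build the receive skb from the DMA page buffer.  Larger packets keep
 * the bulk of the data in a page fragment, with only the first
 * MIN_SKB_SIZE bytes copied into the linear area; small packets are
 * copied entirely into the linear area and the page is released.
 */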
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				pg_info->page,
				pg_info->page_offset + MIN_SKB_SIZE,
				len - MIN_SKB_SIZE,
				LIO_RXBUFFER_SZ);
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

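/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: deliver a packet that a
 * VF sent into the switch up through the matching representor netdev.
 * Packets arriving in more than one buffer, or for a representor that
 * is not running, are dropped.
 */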
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];
	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

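/* Completion callback for a transmitted packet: unmap and free the skb
 * and the soft command, then wake the queue if there is room again in
 * the instruction queue.
 */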
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	iq_no = sc->iq_no;
	octeon_free_soft_command(oct, sc);

	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

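/* ndo_start_xmit handler: representor TX means injecting the frame into
 * the switch on behalf of the VF.  The skb is sent as a soft command
 * through the parent PF's instruction queue; only linear skbs are
 * supported here.
 */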
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	/* Multiple buffers are not used for vf_rep packets. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

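/* switchdev attr handler: representors share the parent PF's switch ID,
 * derived from its hardware MAC address.
 */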
static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
	.switchdev_port_attr_get        = lio_vf_rep_attr_get,
};

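/* Delayed-work handler that polls the firmware for this representor's
 * packet counters and then re-arms itself.
 */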
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));

	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

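/* Create and register one representor netdev per allocated VF and hook
 * up the receive dispatch function.  No-op unless the eswitch is in
 * switchdev mode with SR-IOV enabled.
 */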
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;
		SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies
				      (LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

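/* Tear down all representor netdevs created by lio_vf_rep_create(). */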
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);

			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

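/* Netdevice notifier: when a representor is registered or renamed, push
 * its current name to the firmware so the name stays in sync on the
 * device side.
 */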
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

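/* Module init/exit: register and unregister the name-sync notifier. */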
int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}