Lines Matching refs: vinfo

37 static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,  in hfi1_vnic_update_stats()  argument
40 struct net_device *netdev = vinfo->netdev; in hfi1_vnic_update_stats()
44 for (i = 0; i < vinfo->num_tx_q; i++) { in hfi1_vnic_update_stats()
45 struct opa_vnic_stats *qstats = &vinfo->stats[i]; in hfi1_vnic_update_stats()
46 struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats; in hfi1_vnic_update_stats()
59 for (i = 0; i < vinfo->num_rx_q; i++) { in hfi1_vnic_update_stats()
60 struct opa_vnic_stats *qstats = &vinfo->stats[i]; in hfi1_vnic_update_stats()
61 struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats; in hfi1_vnic_update_stats()
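Note: the two loops at lines 44-61 walk the per-queue stats array once for the TX queues and once for the RX queues. Below is a minimal sketch of the aggregation pattern they imply, assuming the per-queue struct rtnl_link_stats64 counters are simply summed into a vport-wide total; the helper name and the exact set of summed fields are illustrative, not taken from the driver.

/* Illustrative only: fold per-queue netstats into one aggregate total. */
static void vnic_sum_queue_stats(struct hfi1_vnic_vport_info *vinfo,
                                 struct rtnl_link_stats64 *total)
{
        u8 i;

        for (i = 0; i < vinfo->num_tx_q; i++) {
                struct rtnl_link_stats64 *q = &vinfo->stats[i].netstats;

                total->tx_packets += q->tx_packets;
                total->tx_bytes += q->tx_bytes;
                total->tx_fifo_errors += q->tx_fifo_errors;
                total->tx_carrier_errors += q->tx_carrier_errors;
        }

        for (i = 0; i < vinfo->num_rx_q; i++) {
                struct rtnl_link_stats64 *q = &vinfo->stats[i].netstats;

                total->rx_packets += q->rx_packets;
                total->rx_bytes += q->rx_bytes;
                total->rx_fifo_errors += q->rx_fifo_errors;
        }
}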
123 static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo, in hfi1_vnic_update_tx_counters() argument
127 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_tx_counters()
152 static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo, in hfi1_vnic_update_rx_counters() argument
156 struct opa_vnic_stats *stats = &vinfo->stats[q_idx]; in hfi1_vnic_update_rx_counters()
185 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_vnic_get_stats64() local
187 hfi1_vnic_update_stats(vinfo, vstats); in hfi1_vnic_get_stats64()
204 static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo, in hfi1_vnic_maybe_stop_tx() argument
207 netif_stop_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
208 if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx)) in hfi1_vnic_maybe_stop_tx()
211 netif_start_subqueue(vinfo->netdev, q_idx); in hfi1_vnic_maybe_stop_tx()
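Note: lines 204-211 show nearly the entire flow-control helper. The subqueue is stopped first and only restarted if SDMA descriptor space turns out to still be available, so a wakeup racing with the stop is not lost. Reconstructed from the lines above; only the early return between lines 208 and 211 is assumed.

static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
                                    u8 q_idx)
{
        netif_stop_subqueue(vinfo->netdev, q_idx);

        /* re-check after stopping so a concurrent SDMA wakeup is not lost */
        if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
                return;

        netif_start_subqueue(vinfo->netdev, q_idx);
}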
217 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_netdev_start_xmit() local
219 struct hfi1_devdata *dd = vinfo->dd; in hfi1_netdev_start_xmit()
227 vinfo->stats[q_idx].tx_drop_state++; in hfi1_netdev_start_xmit()
235 vinfo->stats[q_idx].tx_dlid_zero++; in hfi1_netdev_start_xmit()
255 err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len); in hfi1_netdev_start_xmit()
258 vinfo->stats[q_idx].netstats.tx_fifo_errors++; in hfi1_netdev_start_xmit()
260 vinfo->stats[q_idx].netstats.tx_carrier_errors++; in hfi1_netdev_start_xmit()
266 hfi1_vnic_maybe_stop_tx(vinfo, q_idx); in hfi1_netdev_start_xmit()
273 hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err); in hfi1_netdev_start_xmit()
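Note: lines 217-273 outline the transmit path: drop and count while the vport is not operationally up, drop on an encapsulation problem (counted as tx_dlid_zero), hand the packet to the chip-specific SDMA send hook, translate its errors into netstats counters, throttle the subqueue when descriptors run out, and finally update the TX counters. The skeleton below is hedged; the metadata/dlid validation, pbc/pad_len construction, error-code mapping and exit labels are assumptions filled in around the lines actually shown.

static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
                                          struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        struct hfi1_devdata *dd = vinfo->dd;
        u8 q_idx = skb->queue_mapping;
        int err = -EINVAL, pad_len = 0;
        u64 pbc = 0;

        if (unlikely(!netif_oper_up(netdev))) {
                vinfo->stats[q_idx].tx_drop_state++;    /* line 227 */
                goto tx_finish;
        }

        /* metadata validation assumed; the listing only shows the counter */
        if (unlikely(invalid_encap_metadata(skb))) {    /* hypothetical check */
                vinfo->stats[q_idx].tx_dlid_zero++;     /* line 235 */
                goto tx_finish;
        }

        /* pbc and pad_len construction (lines 236-254) not shown */

        err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
        if (unlikely(err)) {
                if (err == -ENOMEM)                     /* assumed mapping */
                        vinfo->stats[q_idx].netstats.tx_fifo_errors++;
                else if (err != -EBUSY)
                        vinfo->stats[q_idx].netstats.tx_carrier_errors++;
        }

        if (unlikely(err == -EBUSY)) {
                /* no SDMA descriptors left: throttle this subqueue */
                hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
                return NETDEV_TX_BUSY;
        }

tx_finish:
        hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}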
282 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_vnic_select_queue() local
287 sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl); in hfi1_vnic_select_queue()
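Note: line 287 ties queue selection to the SDMA layer: an engine is picked per VL using the entropy carried in the OPA vNIC skb metadata, and the engine index presumably becomes the subqueue index. A short sketch; the metadata access and the returned field are assumptions.

static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
                                  struct net_device *sb_dev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        struct opa_vnic_skb_mdata *mdata;
        struct sdma_engine *sde;

        mdata = (struct opa_vnic_skb_mdata *)skb->data;
        sde = sdma_select_engine_vl(vinfo->dd, mdata->entropy, mdata->vl);
        return sde->this_idx;   /* assumed: queue index == engine index */
}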
295 struct hfi1_vnic_vport_info *vinfo = rxq->vinfo; in hfi1_vnic_decap_skb() local
296 int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN; in hfi1_vnic_decap_skb()
303 vinfo->stats[rxq->idx].rx_oversize++; in hfi1_vnic_decap_skb()
305 vinfo->stats[rxq->idx].rx_runt++; in hfi1_vnic_decap_skb()
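Note: lines 295-305 show the length validation done during decapsulation: anything longer than MTU plus the VLAN Ethernet header counts as oversize, anything shorter than a minimum Ethernet frame counts as a runt. Reconstructed from the lines above; the header pull and the ETH_ZLEN lower bound are assumptions.

static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
                                      struct sk_buff *skb)
{
        struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
        int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
        int rc = -EFAULT;

        skb_pull(skb, OPA_VNIC_HDR_LEN);        /* assumed: strip OPA header */

        if (unlikely(skb->len > max_len))
                vinfo->stats[rxq->idx].rx_oversize++;
        else if (unlikely(skb->len < ETH_ZLEN)) /* assumed lower bound */
                vinfo->stats[rxq->idx].rx_runt++;
        else
                rc = 0;
        return rc;
}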
321 struct hfi1_vnic_vport_info *vinfo; in get_first_vnic_port() local
324 vinfo = hfi1_netdev_get_first_data(dd, &next_id); in get_first_vnic_port()
329 return vinfo; in get_first_vnic_port()
335 struct hfi1_vnic_vport_info *vinfo = NULL; in hfi1_vnic_bypass_rcv() local
345 vinfo = get_vnic_port(dd, vesw_id); in hfi1_vnic_bypass_rcv()
351 if (unlikely(!vinfo)) { in hfi1_vnic_bypass_rcv()
363 if (unlikely(!vinfo)) { in hfi1_vnic_bypass_rcv()
370 rxq = &vinfo->rxq[q_idx]; in hfi1_vnic_bypass_rcv()
371 if (unlikely(!netif_oper_up(vinfo->netdev))) { in hfi1_vnic_bypass_rcv()
372 vinfo->stats[q_idx].rx_drop_state++; in hfi1_vnic_bypass_rcv()
376 skb = netdev_alloc_skb(vinfo->netdev, packet->tlen); in hfi1_vnic_bypass_rcv()
378 vinfo->stats[q_idx].netstats.rx_fifo_errors++; in hfi1_vnic_bypass_rcv()
392 hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc); in hfi1_vnic_bypass_rcv()
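Note: lines 335-392 outline the receive dispatch: the vport is looked up from the vESW id carried in the packet (get_vnic_port at line 345, with get_first_vnic_port at lines 321-329 available for fallback error accounting), the packet is dropped if no vport matches or the netdev is not operationally up, an skb is allocated for the packet length, and the result of decapsulation feeds the RX counters. A hedged skeleton follows; the header parsing helper, queue mapping, payload copy, trailer trimming and the final hand-off to the stack are assumptions around the lines shown.

void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
        struct hfi1_devdata *dd = packet->rcd->dd;
        struct hfi1_vnic_vport_info *vinfo = NULL;
        struct hfi1_vnic_rx_queue *rxq;
        struct sk_buff *skb;
        u8 q_idx;
        int rc;

        /* vesw_id_from_header() is hypothetical; 16B parsing not shown */
        vinfo = get_vnic_port(dd, vesw_id_from_header(packet));
        if (unlikely(!vinfo)) {
                /* lines 351-363: error accounting / bail out, not shown */
                return;
        }

        q_idx = packet->rcd->vnic_q_idx;        /* assumed queue mapping */
        rxq = &vinfo->rxq[q_idx];
        if (unlikely(!netif_oper_up(vinfo->netdev))) {
                vinfo->stats[q_idx].rx_drop_state++;
                return;
        }

        skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
        if (unlikely(!skb)) {
                vinfo->stats[q_idx].netstats.rx_fifo_errors++;
                return;
        }

        /* copy payload, trim trailer, decapsulate (lines 379-391 not shown) */
        rc = hfi1_vnic_decap_skb(rxq, skb);

        hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
        if (unlikely(rc)) {
                dev_kfree_skb_any(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, vinfo->netdev);
        netif_rx(skb);  /* simplified hand-off; actual delivery path not shown */
}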
404 static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo) in hfi1_vnic_up() argument
406 struct hfi1_devdata *dd = vinfo->dd; in hfi1_vnic_up()
407 struct net_device *netdev = vinfo->netdev; in hfi1_vnic_up()
411 if (!vinfo->vesw_id) in hfi1_vnic_up()
414 rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo); in hfi1_vnic_up()
424 set_bit(HFI1_VNIC_UP, &vinfo->flags); in hfi1_vnic_up()
429 hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id)); in hfi1_vnic_up()
433 static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo) in hfi1_vnic_down() argument
435 struct hfi1_devdata *dd = vinfo->dd; in hfi1_vnic_down()
437 clear_bit(HFI1_VNIC_UP, &vinfo->flags); in hfi1_vnic_down()
438 netif_carrier_off(vinfo->netdev); in hfi1_vnic_down()
439 netif_tx_disable(vinfo->netdev); in hfi1_vnic_down()
440 hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id)); in hfi1_vnic_down()
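Note: lines 404-440 give the up/down pair. hfi1_vnic_up refuses to start until a vESW id has been assigned, registers the vport in the per-device lookup table under VNIC_ID(vesw_id), and removes that entry again if a later step fails; hfi1_vnic_down is the reverse teardown. A sketch assembled from the lines above; the enable/disable steps between lines 414 and 429 are condensed into a hypothetical helper.

static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;
        struct net_device *netdev = vinfo->netdev;
        int rc;

        if (!vinfo->vesw_id)    /* no vESW id assigned by the EM yet */
                return -EINVAL;

        /* register for receive lookup under the vESW id */
        rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
        if (rc)
                return rc;

        rc = vnic_enable_queues(vinfo, netdev); /* hypothetical: lines 415-423 */
        if (rc)
                goto err_remove;

        set_bit(HFI1_VNIC_UP, &vinfo->flags);
        return 0;

err_remove:
        hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
        return rc;
}

static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
        struct hfi1_devdata *dd = vinfo->dd;

        clear_bit(HFI1_VNIC_UP, &vinfo->flags);
        netif_carrier_off(vinfo->netdev);
        netif_tx_disable(vinfo->netdev);
        hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
        /* any later quiescing (after line 440) is not visible in the listing */
}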
447 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_netdev_open() local
450 mutex_lock(&vinfo->lock); in hfi1_netdev_open()
451 rc = hfi1_vnic_up(vinfo); in hfi1_netdev_open()
452 mutex_unlock(&vinfo->lock); in hfi1_netdev_open()
458 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_netdev_close() local
460 mutex_lock(&vinfo->lock); in hfi1_netdev_close()
461 if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) in hfi1_netdev_close()
462 hfi1_vnic_down(vinfo); in hfi1_netdev_close()
463 mutex_unlock(&vinfo->lock); in hfi1_netdev_close()
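Note: lines 447-463 show that the ndo_open/ndo_close handlers only take vinfo->lock and delegate to the up/down helpers, with close checking HFI1_VNIC_UP first so a vport that never came up is not torn down. Reconstructed from those lines; the return values are assumed.

static int hfi1_netdev_open(struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        int rc;

        mutex_lock(&vinfo->lock);
        rc = hfi1_vnic_up(vinfo);
        mutex_unlock(&vinfo->lock);
        return rc;
}

static int hfi1_netdev_close(struct net_device *netdev)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

        mutex_lock(&vinfo->lock);
        if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
                hfi1_vnic_down(vinfo);
        mutex_unlock(&vinfo->lock);
        return 0;
}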
467 static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo) in hfi1_vnic_init() argument
469 struct hfi1_devdata *dd = vinfo->dd; in hfi1_vnic_init()
488 hfi1_vnic_sdma_init(vinfo); in hfi1_vnic_init()
498 static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo) in hfi1_vnic_deinit() argument
500 struct hfi1_devdata *dd = vinfo->dd; in hfi1_vnic_deinit()
513 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_vnic_set_vesw_id() local
520 if (id != vinfo->vesw_id) { in hfi1_vnic_set_vesw_id()
521 mutex_lock(&vinfo->lock); in hfi1_vnic_set_vesw_id()
522 if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) { in hfi1_vnic_set_vesw_id()
523 hfi1_vnic_down(vinfo); in hfi1_vnic_set_vesw_id()
527 vinfo->vesw_id = id; in hfi1_vnic_set_vesw_id()
529 hfi1_vnic_up(vinfo); in hfi1_vnic_set_vesw_id()
531 mutex_unlock(&vinfo->lock); in hfi1_vnic_set_vesw_id()
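Note: lines 513-531 show how a vESW id change is applied: only when the id actually differs, under vinfo->lock, and with the vport bounced (down, store the new id, up) if it was running so the new id takes effect. Reconstructed from those lines; the bookkeeping that decides whether to bring the port back up is assumed.

static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
{
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        bool reopen = false;    /* assumed bookkeeping */

        if (id != vinfo->vesw_id) {
                mutex_lock(&vinfo->lock);
                if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
                        hfi1_vnic_down(vinfo);
                        reopen = true;
                }

                vinfo->vesw_id = id;
                if (reopen)
                        hfi1_vnic_up(vinfo);

                mutex_unlock(&vinfo->lock);
        }
}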
546 struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); in hfi1_vnic_free_rn() local
548 hfi1_vnic_deinit(vinfo); in hfi1_vnic_free_rn()
549 mutex_destroy(&vinfo->lock); in hfi1_vnic_free_rn()
561 struct hfi1_vnic_vport_info *vinfo; in hfi1_vnic_alloc_rn() local
575 size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); in hfi1_vnic_alloc_rn()
583 vinfo = opa_vnic_dev_priv(netdev); in hfi1_vnic_alloc_rn()
584 vinfo->dd = dd; in hfi1_vnic_alloc_rn()
585 vinfo->num_tx_q = chip_sdma_engines(dd); in hfi1_vnic_alloc_rn()
586 vinfo->num_rx_q = dd->num_netdev_contexts; in hfi1_vnic_alloc_rn()
587 vinfo->netdev = netdev; in hfi1_vnic_alloc_rn()
596 mutex_init(&vinfo->lock); in hfi1_vnic_alloc_rn()
598 for (i = 0; i < vinfo->num_rx_q; i++) { in hfi1_vnic_alloc_rn()
599 struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i]; in hfi1_vnic_alloc_rn()
602 rxq->vinfo = vinfo; in hfi1_vnic_alloc_rn()
606 rc = hfi1_vnic_init(vinfo); in hfi1_vnic_alloc_rn()
612 mutex_destroy(&vinfo->lock); in hfi1_vnic_alloc_rn()
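Note: lines 561-612 sketch vport allocation: the net_device private area is sized for an opa_vnic_rdma_netdev plus the vport info, the TX queue count comes from the chip's SDMA engines and the RX queue count from the device's netdev contexts, each RX queue is pointed back at its vport, and hfi1_vnic_init finishes the setup (unwinding the mutex on failure). A condensed sketch of that constructor; the alloc_netdev_mqs call, rdma_netdev/ndo wiring and error returns between the lines shown are assumptions.

struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, u32 port_num,
                                      enum rdma_netdev_t type, const char *name,
                                      unsigned char name_assign_type,
                                      void (*setup)(struct net_device *))
{
        struct hfi1_devdata *dd = dd_from_ibdev(device);       /* assumed */
        struct hfi1_vnic_vport_info *vinfo;
        struct net_device *netdev;
        int i, size, rc;

        size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
        netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
                                  chip_sdma_engines(dd),
                                  dd->num_netdev_contexts);
        if (!netdev)
                return ERR_PTR(-ENOMEM);

        vinfo = opa_vnic_dev_priv(netdev);
        vinfo->dd = dd;
        vinfo->num_tx_q = chip_sdma_engines(dd);
        vinfo->num_rx_q = dd->num_netdev_contexts;
        vinfo->netdev = netdev;
        /* rdma_netdev callbacks / netdev_ops wiring (lines 588-595) omitted */
        mutex_init(&vinfo->lock);

        for (i = 0; i < vinfo->num_rx_q; i++) {
                struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

                rxq->idx = i;           /* assumed from the line 599 context */
                rxq->vinfo = vinfo;
        }

        rc = hfi1_vnic_init(vinfo);
        if (rc)
                goto init_fail;

        return netdev;

init_fail:
        mutex_destroy(&vinfo->lock);
        free_netdev(netdev);            /* assumed cleanup */
        return ERR_PTR(rc);
}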