Lines matching references to `vp` (struct vector_private *) in arch/um/drivers/vector_kern.c, the UML vector network driver. Each entry gives the source line number, the matching line, and the enclosing function; `argument` and `local` mark the entries where `vp` comes into scope.
70 static int vector_mmsg_rx(struct vector_private *vp, int budget);
103 static void vector_reset_stats(struct vector_private *vp) in vector_reset_stats() argument
105 vp->estats.rx_queue_max = 0; in vector_reset_stats()
106 vp->estats.rx_queue_running_average = 0; in vector_reset_stats()
107 vp->estats.tx_queue_max = 0; in vector_reset_stats()
108 vp->estats.tx_queue_running_average = 0; in vector_reset_stats()
109 vp->estats.rx_encaps_errors = 0; in vector_reset_stats()
110 vp->estats.tx_timeout_count = 0; in vector_reset_stats()
111 vp->estats.tx_restart_queue = 0; in vector_reset_stats()
112 vp->estats.tx_kicks = 0; in vector_reset_stats()
113 vp->estats.tx_flow_control_xon = 0; in vector_reset_stats()
114 vp->estats.tx_flow_control_xoff = 0; in vector_reset_stats()
115 vp->estats.sg_ok = 0; in vector_reset_stats()
116 vp->estats.sg_linearized = 0; in vector_reset_stats()
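The two *_queue_running_average counters reset here are maintained as a half-decay running average rather than a true mean (see the vector_send() and vector_mmsg_rx() entries below): each new sample carries weight 1/2 and the old value decays by 1/2. A minimal sketch of that update rule:

```c
/* Half-decay running average used for the tx/rx queue depth stats:
 * avg' = (avg + sample) / 2, so recent samples dominate and history
 * decays geometrically. Integer-only: one add, one shift.
 */
static inline unsigned long long queue_running_avg(unsigned long long avg,
						   unsigned long long sample)
{
	return (avg + sample) >> 1;
}
```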
292 static int prep_msg(struct vector_private *vp, in prep_msg() argument
305 if (vp->header_size > 0) { in prep_msg()
306 iov[iov_index].iov_len = vp->header_size; in prep_msg()
307 vp->form_header(iov[iov_index].iov_base, skb, vp); in prep_msg()
313 vp->estats.sg_ok++; in prep_msg()
335 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue() local
354 vp, in vector_enqueue()
361 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; in vector_enqueue()
362 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; in vector_enqueue()
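prep_msg() lays each outgoing packet out as an iovec, with the optional transport encapsulation header written first by form_header(), and vector_enqueue() points the surrounding mmsghdr at the remote address for connectionless transports. A simplified sketch of that layout, assuming a fully linear skb (the real prep_msg() also walks paged fragments for scatter-gather, bumping sg_ok as above):

```c
/* Sketch: build the [header][payload] iovec for one outgoing packet.
 * Simplified from prep_msg(); assumes all skb data is linear.
 */
static int fill_tx_iov(struct vector_private *vp, struct sk_buff *skb,
		       struct iovec *iov)
{
	int n = 0;

	if (vp->header_size > 0) {
		iov[n].iov_base = vp->header_txbuffer;
		iov[n].iov_len = vp->header_size;
		vp->form_header(iov[n].iov_base, skb, vp);
		n++;
	}
	iov[n].iov_base = skb->data;
	iov[n].iov_len = skb_headlen(skb);
	return n + 1;
}
```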
409 struct vector_private *vp = netdev_priv(qi->dev); in vector_send() local
429 vp->fds->tx_fd, in vector_send()
434 vp->in_write_poll = in vector_send()
444 netdev_err(vp->dev, "sendmmsg err=%i\n", in vector_send()
446 vp->in_error = true; in vector_send()
456 if (result > vp->estats.tx_queue_max) in vector_send()
457 vp->estats.tx_queue_max = result; in vector_send()
458 vp->estats.tx_queue_running_average = in vector_send()
459 (vp->estats.tx_queue_running_average + result) >> 1; in vector_send()
466 vp->estats.tx_restart_queue++; in vector_send()
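vector_send() pushes the queued window to the host with a single sendmmsg() and treats a short count as back-pressure: the unsent tail stays queued, in_write_poll is set, and transmission resumes on POLLOUT; a negative return is fatal and sets in_error. A user-space sketch of the host-side pattern (the driver reaches it through a uml_vector_* wrapper; the helper name here is hypothetical):

```c
#define _GNU_SOURCE
#include <sys/socket.h>

/* Sketch: transmit up to 'depth' queued datagrams in one syscall.
 * Returns the number still pending; a partial send is normal flow
 * control, so the caller re-arms write polling instead of looping.
 */
static int send_window(int fd, struct mmsghdr *vec, unsigned int depth)
{
	int sent = sendmmsg(fd, vec, depth, 0);

	if (sent < 0)
		return -1;		/* hard error: mark the device in_error */
	return (int)depth - sent;	/* > 0: wait for POLLOUT */
}
```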
484 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue() local
505 if ((vp->header_size > 0) && in destroy_queue()
521 struct vector_private *vp, in create_queue() argument
535 result->dev = vp->dev; in create_queue()
559 if (vp->header_size > 0) in create_queue()
578 if (vp->header_size > 0) { in create_queue()
616 struct vector_private *vp, in prep_skb() argument
619 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN; in prep_skb()
626 if (vp->req_size <= linear) in prep_skb()
629 len = vp->req_size; in prep_skb()
632 len - vp->max_packet, in prep_skb()
637 if (vp->header_size > 0) in prep_skb()
644 skb_reserve(result, vp->headroom); in prep_skb()
645 result->dev = vp->dev; in prep_skb()
646 skb_put(result, vp->max_packet); in prep_skb()
647 result->data_len = len - vp->max_packet; in prep_skb()
648 result->len += len - vp->max_packet; in prep_skb()
652 iov[iov_index].iov_len = vp->max_packet; in prep_skb()
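prep_skb() sizes each receive buffer so headroom + max_packet + SAFETY_MARGIN fits in the linear area; when req_size is larger (big-buffer mode), the excess beyond max_packet goes to paged data, which is what the data_len/len adjustments above account for. A condensed sketch of the linear-only case, assuming the driver's own field names:

```c
/* Condensed sketch of prep_skb() when vp->req_size fits the linear
 * area: reserve headroom, expose max_packet bytes, and map the
 * buffer into the iovec that recvmsg()/recvmmsg() will fill.
 */
static struct sk_buff *rx_skb_sketch(struct vector_private *vp,
				     struct iovec *iov)
{
	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	struct sk_buff *skb = alloc_skb(linear, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, vp->headroom);
	skb->dev = vp->dev;
	skb_put(skb, vp->max_packet);
	iov->iov_base = skb->data;
	iov->iov_len = vp->max_packet;
	return skb;
}
```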
675 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx() local
688 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr); in prep_queue_for_rx()
797 struct vector_private *vp; in vector_remove() local
803 vp = netdev_priv(dev); in vector_remove()
804 if (vp->fds != NULL) in vector_remove()
839 static int vector_legacy_rx(struct vector_private *vp) in vector_legacy_rx() argument
855 if (vp->header_size > 0) { in vector_legacy_rx()
856 iov[0].iov_base = vp->header_rxbuffer; in vector_legacy_rx()
857 iov[0].iov_len = vp->header_size; in vector_legacy_rx()
860 skb = prep_skb(vp, &hdr); in vector_legacy_rx()
869 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
872 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); in vector_legacy_rx()
874 vp->in_error = true; in vector_legacy_rx()
879 if (pkt_len > vp->header_size) { in vector_legacy_rx()
880 if (vp->header_size > 0) { in vector_legacy_rx()
881 header_check = vp->verify_header( in vector_legacy_rx()
882 vp->header_rxbuffer, skb, vp); in vector_legacy_rx()
885 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
886 vp->estats.rx_encaps_errors++; in vector_legacy_rx()
890 vp->estats.rx_csum_offload_good++; in vector_legacy_rx()
894 pskb_trim(skb, pkt_len - vp->rx_header_size); in vector_legacy_rx()
896 vp->dev->stats.rx_bytes += skb->len; in vector_legacy_rx()
897 vp->dev->stats.rx_packets++; in vector_legacy_rx()
898 napi_gro_receive(&vp->napi, skb); in vector_legacy_rx()
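vector_legacy_rx() reads one packet per syscall into a [header][skb] iovec pair, then either drops it when the encapsulation header fails verification or trims it to the payload and hands it to GRO. The per-packet accept/drop decision, as a sketch with the same field names (cleanup paths collapsed):

```c
/* Sketch of the legacy RX decision. pkt_len is the raw receive
 * length including any transport header.
 */
static void rx_decide(struct vector_private *vp, struct sk_buff *skb,
		      int pkt_len)
{
	if (vp->header_size > 0 &&
	    vp->verify_header(vp->header_rxbuffer, skb, vp) < 0) {
		vp->dev->stats.rx_dropped++;
		vp->estats.rx_encaps_errors++;
		dev_kfree_skb_any(skb);
		return;
	}
	pskb_trim(skb, pkt_len - vp->rx_header_size);
	vp->dev->stats.rx_bytes += skb->len;
	vp->dev->stats.rx_packets++;
	napi_gro_receive(&vp->napi, skb);
}
```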
913 static int writev_tx(struct vector_private *vp, struct sk_buff *skb) in writev_tx() argument
918 iov[0].iov_base = vp->header_txbuffer; in writev_tx()
919 iov_count = prep_msg(vp, skb, (struct iovec *) &iov); in writev_tx()
925 vp->fds->tx_fd, in writev_tx()
933 netif_trans_update(vp->dev); in writev_tx()
934 netif_wake_queue(vp->dev); in writev_tx()
937 vp->dev->stats.tx_bytes += skb->len; in writev_tx()
938 vp->dev->stats.tx_packets++; in writev_tx()
940 vp->dev->stats.tx_dropped++; in writev_tx()
945 vp->dev->stats.tx_dropped++; in writev_tx()
948 vp->in_error = true; in writev_tx()
957 static int vector_mmsg_rx(struct vector_private *vp, int budget) in vector_mmsg_rx() argument
960 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx()
978 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); in vector_mmsg_rx()
981 vp->in_error = true; in vector_mmsg_rx()
995 if (mmsg_vector->msg_len > vp->header_size) { in vector_mmsg_rx()
996 if (vp->header_size > 0) { in vector_mmsg_rx()
997 header_check = vp->verify_header( in vector_mmsg_rx()
1000 vp); in vector_mmsg_rx()
1009 vp->estats.rx_encaps_errors++; in vector_mmsg_rx()
1013 vp->estats.rx_csum_offload_good++; in vector_mmsg_rx()
1018 mmsg_vector->msg_len - vp->rx_header_size); in vector_mmsg_rx()
1024 vp->dev->stats.rx_bytes += skb->len; in vector_mmsg_rx()
1025 vp->dev->stats.rx_packets++; in vector_mmsg_rx()
1026 napi_gro_receive(&vp->napi, skb); in vector_mmsg_rx()
1041 if (vp->estats.rx_queue_max < packet_count) in vector_mmsg_rx()
1042 vp->estats.rx_queue_max = packet_count; in vector_mmsg_rx()
1043 vp->estats.rx_queue_running_average = in vector_mmsg_rx()
1044 (vp->estats.rx_queue_running_average + packet_count) >> 1; in vector_mmsg_rx()
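The mmsg path replaces N recvmsg() calls with one recvmmsg() over slots pre-prepared by prep_queue_for_rx(), then walks the filled entries; the resulting batch size feeds the rx_queue_max and running-average statistics just above. A host-side sketch of the underlying syscall:

```c
#define _GNU_SOURCE
#include <stddef.h>
#include <sys/socket.h>

/* Sketch: fill up to 'depth' pre-prepared mmsghdr slots in one
 * syscall. On return vec[i].msg_len holds the size of datagram i,
 * which the driver compares against header_size before trimming
 * the matching skb.
 */
static int recv_window(int fd, struct mmsghdr *vec, unsigned int depth)
{
	return recvmmsg(fd, vec, depth, MSG_DONTWAIT, NULL);
}
```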
1051 struct vector_private *vp = netdev_priv(dev); in vector_net_start_xmit() local
1054 if (vp->in_error) { in vector_net_start_xmit()
1055 deactivate_fd(vp->fds->rx_fd, vp->rx_irq); in vector_net_start_xmit()
1056 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0)) in vector_net_start_xmit()
1057 deactivate_fd(vp->fds->tx_fd, vp->tx_irq); in vector_net_start_xmit()
1061 if ((vp->options & VECTOR_TX) == 0) { in vector_net_start_xmit()
1062 writev_tx(vp, skb); in vector_net_start_xmit()
1070 netdev_sent_queue(vp->dev, skb->len); in vector_net_start_xmit()
1071 queue_depth = vector_enqueue(vp->tx_queue, skb); in vector_net_start_xmit()
1073 if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) { in vector_net_start_xmit()
1074 mod_timer(&vp->tl, vp->coalesce); in vector_net_start_xmit()
1077 queue_depth = vector_send(vp->tx_queue); in vector_net_start_xmit()
1079 napi_schedule(&vp->napi); in vector_net_start_xmit()
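The fast-path decision in vector_net_start_xmit(): while the stack promises more packets (netdev_xmit_more()) and the ring still has room, arm a short timer instead of flushing, so several skbs share one sendmmsg(); otherwise flush now and let NAPI reap the rest. A sketch of that branch (this sketch arms the timer `coalesce` jiffies from now, the conventional relative form):

```c
/* Sketch of the TX coalescing branch in vector_net_start_xmit(). */
static void tx_flush_or_defer(struct vector_private *vp, int queue_depth)
{
	if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
		mod_timer(&vp->tl, jiffies + vp->coalesce);
		return;
	}
	if (vector_send(vp->tx_queue) > 0)
		napi_schedule(&vp->napi);
}
```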
1088 struct vector_private *vp = netdev_priv(dev); in vector_rx_interrupt() local
1092 napi_schedule(&vp->napi); in vector_rx_interrupt()
1100 struct vector_private *vp = netdev_priv(dev); in vector_tx_interrupt() local
1111 napi_schedule(&vp->napi); in vector_tx_interrupt()
1120 struct vector_private *vp = netdev_priv(dev); in vector_net_close() local
1124 del_timer(&vp->tl); in vector_net_close()
1126 if (vp->fds == NULL) in vector_net_close()
1130 if (vp->rx_irq > 0) { in vector_net_close()
1131 um_free_irq(vp->rx_irq, dev); in vector_net_close()
1132 vp->rx_irq = 0; in vector_net_close()
1134 if (vp->tx_irq > 0) { in vector_net_close()
1135 um_free_irq(vp->tx_irq, dev); in vector_net_close()
1136 vp->tx_irq = 0; in vector_net_close()
1138 napi_disable(&vp->napi); in vector_net_close()
1139 netif_napi_del(&vp->napi); in vector_net_close()
1140 if (vp->fds->rx_fd > 0) { in vector_net_close()
1141 if (vp->bpf) in vector_net_close()
1142 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_close()
1143 os_close_file(vp->fds->rx_fd); in vector_net_close()
1144 vp->fds->rx_fd = -1; in vector_net_close()
1146 if (vp->fds->tx_fd > 0) { in vector_net_close()
1147 os_close_file(vp->fds->tx_fd); in vector_net_close()
1148 vp->fds->tx_fd = -1; in vector_net_close()
1150 if (vp->bpf != NULL) in vector_net_close()
1151 kfree(vp->bpf->filter); in vector_net_close()
1152 kfree(vp->bpf); in vector_net_close()
1153 vp->bpf = NULL; in vector_net_close()
1154 kfree(vp->fds->remote_addr); in vector_net_close()
1155 kfree(vp->transport_data); in vector_net_close()
1156 kfree(vp->header_rxbuffer); in vector_net_close()
1157 kfree(vp->header_txbuffer); in vector_net_close()
1158 if (vp->rx_queue != NULL) in vector_net_close()
1159 destroy_queue(vp->rx_queue); in vector_net_close()
1160 if (vp->tx_queue != NULL) in vector_net_close()
1161 destroy_queue(vp->tx_queue); in vector_net_close()
1162 kfree(vp->fds); in vector_net_close()
1163 vp->fds = NULL; in vector_net_close()
1164 spin_lock_irqsave(&vp->lock, flags); in vector_net_close()
1165 vp->opened = false; in vector_net_close()
1166 vp->in_error = false; in vector_net_close()
1167 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_close()
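vector_net_close() tears everything down in dependency order (timer, IRQs, NAPI, file descriptors, BPF, buffers, queues) and poisons each freed handle (fd set to -1, pointers to NULL) so a repeated close or a failed reopen cannot double-free. The fd half of that pattern as a tiny sketch:

```c
/* Sketch: close-and-poison, as applied to rx_fd/tx_fd above, so a
 * second close of the same descriptor slot is a harmless no-op.
 */
static void close_fd_once(int *fd)
{
	if (*fd > 0) {
		os_close_file(*fd);
		*fd = -1;
	}
}
```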
1173 struct vector_private *vp = container_of(napi, struct vector_private, napi); in vector_poll() local
1178 if ((vp->options & VECTOR_TX) != 0) in vector_poll()
1179 tx_enqueued = (vector_send(vp->tx_queue) > 0); in vector_poll()
1180 if ((vp->options & VECTOR_RX) > 0) in vector_poll()
1181 err = vector_mmsg_rx(vp, budget); in vector_poll()
1183 err = vector_legacy_rx(vp); in vector_poll()
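vector_poll() services both directions from one NAPI context: flush pending TX first, then receive up to `budget` packets, and only complete NAPI (re-enabling the interrupt sources) when work stayed under budget. A minimal sketch of that contract; the real function also re-schedules itself while TX work remains:

```c
/* Minimal sketch of the NAPI contract used by vector_poll(). */
static int poll_sketch(struct napi_struct *napi, int budget)
{
	struct vector_private *vp =
		container_of(napi, struct vector_private, napi);
	int work = 0;

	if (vp->options & VECTOR_TX)
		vector_send(vp->tx_queue);
	if (vp->options & VECTOR_RX)
		work = vector_mmsg_rx(vp, budget);

	if (work < budget)
		napi_complete_done(napi, work);
	return work;
}
```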
1199 struct vector_private *vp = in vector_reset_tx() local
1201 netdev_reset_queue(vp->dev); in vector_reset_tx()
1202 netif_start_queue(vp->dev); in vector_reset_tx()
1203 netif_wake_queue(vp->dev); in vector_reset_tx()
1208 struct vector_private *vp = netdev_priv(dev); in vector_net_open() local
1213 spin_lock_irqsave(&vp->lock, flags); in vector_net_open()
1214 if (vp->opened) { in vector_net_open()
1215 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1218 vp->opened = true; in vector_net_open()
1219 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1221 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed)); in vector_net_open()
1223 vp->fds = uml_vector_user_open(vp->unit, vp->parsed); in vector_net_open()
1225 if (vp->fds == NULL) in vector_net_open()
1228 if (build_transport_data(vp) < 0) in vector_net_open()
1231 if ((vp->options & VECTOR_RX) > 0) { in vector_net_open()
1232 vp->rx_queue = create_queue( in vector_net_open()
1233 vp, in vector_net_open()
1234 get_depth(vp->parsed), in vector_net_open()
1235 vp->rx_header_size, in vector_net_open()
1238 vp->rx_queue->queue_depth = get_depth(vp->parsed); in vector_net_open()
1240 vp->header_rxbuffer = kmalloc( in vector_net_open()
1241 vp->rx_header_size, in vector_net_open()
1244 if (vp->header_rxbuffer == NULL) in vector_net_open()
1247 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1248 vp->tx_queue = create_queue( in vector_net_open()
1249 vp, in vector_net_open()
1250 get_depth(vp->parsed), in vector_net_open()
1251 vp->header_size, in vector_net_open()
1255 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL); in vector_net_open()
1256 if (vp->header_txbuffer == NULL) in vector_net_open()
1260 netif_napi_add_weight(vp->dev, &vp->napi, vector_poll, in vector_net_open()
1261 get_depth(vp->parsed)); in vector_net_open()
1262 napi_enable(&vp->napi); in vector_net_open()
1266 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, in vector_net_open()
1274 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1279 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1281 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, in vector_net_open()
1290 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1294 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { in vector_net_open()
1295 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) in vector_net_open()
1296 vp->options |= VECTOR_BPF; in vector_net_open()
1298 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL)) in vector_net_open()
1299 vp->bpf = uml_vector_default_bpf(dev->dev_addr); in vector_net_open()
1301 if (vp->bpf != NULL) in vector_net_open()
1302 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_open()
1305 vector_reset_stats(vp); in vector_net_open()
1312 napi_schedule(&vp->napi); in vector_net_open()
1314 vdevice = find_device(vp->unit); in vector_net_open()
1317 if ((vp->options & VECTOR_TX) != 0) in vector_net_open()
1318 add_timer(&vp->tl); in vector_net_open()
1334 struct vector_private *vp = netdev_priv(dev); in vector_net_tx_timeout() local
1336 vp->estats.tx_timeout_count++; in vector_net_tx_timeout()
1338 schedule_work(&vp->reset_tx); in vector_net_tx_timeout()
1351 struct vector_private *vp = netdev_priv(dev); in vector_set_features() local
1358 vp->req_size = 65536; in vector_set_features()
1361 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN; in vector_set_features()
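vector_set_features() picks the RX allocation request size from the device's offload features: with large-receive buffers enabled every new buffer must hold a full 64 KiB superpacket, otherwise one maximum-sized frame plus headroom and the safety margin suffices. The arithmetic as a sketch, with the feature test abstracted to a bool:

```c
/* Sketch of the req_size selection in vector_set_features(). */
static void pick_req_size(struct vector_private *vp, bool large_buffers)
{
	if (large_buffers)
		vp->req_size = 65536;	/* one maximal superpacket */
	else
		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
}
```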
1383 struct vector_private *vp = netdev_priv(dev); in vector_net_load_bpf_flash() local
1388 if (!(vp->options & VECTOR_BPF_FLASH)) { in vector_net_load_bpf_flash()
1393 spin_lock(&vp->lock); in vector_net_load_bpf_flash()
1395 if (vp->bpf != NULL) { in vector_net_load_bpf_flash()
1396 if (vp->opened) in vector_net_load_bpf_flash()
1397 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1398 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1399 vp->bpf->filter = NULL; in vector_net_load_bpf_flash()
1401 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); in vector_net_load_bpf_flash()
1402 if (vp->bpf == NULL) { in vector_net_load_bpf_flash()
1408 vdevice = find_device(vp->unit); in vector_net_load_bpf_flash()
1413 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); in vector_net_load_bpf_flash()
1414 if (!vp->bpf->filter) in vector_net_load_bpf_flash()
1417 vp->bpf->len = fw->size / sizeof(struct sock_filter); in vector_net_load_bpf_flash()
1420 if (vp->opened) in vector_net_load_bpf_flash()
1421 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1423 spin_unlock(&vp->lock); in vector_net_load_bpf_flash()
1431 spin_unlock(&vp->lock); in vector_net_load_bpf_flash()
1432 if (vp->bpf != NULL) in vector_net_load_bpf_flash()
1433 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1434 kfree(vp->bpf); in vector_net_load_bpf_flash()
1435 vp->bpf = NULL; in vector_net_load_bpf_flash()
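vector_net_load_bpf_flash() swaps the socket filter at runtime: detach the old program if the device is open, duplicate the raw sock_filter array out of the firmware blob, then attach the new sock_fprog (or unwind everything on failure, as the error path above shows). The load step as a trimmed sketch; unlike the driver, which allocates GFP_ATOMIC under vp->lock, this standalone version can sleep:

```c
/* Sketch: build a classic-BPF program from a firmware image. The
 * blob must be a whole number of struct sock_filter entries.
 */
static struct sock_fprog *bpf_from_fw(const struct firmware *fw)
{
	struct sock_fprog *bpf;

	if (fw->size % sizeof(struct sock_filter))
		return NULL;
	bpf = kmalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return NULL;
	bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!bpf->filter) {
		kfree(bpf);
		return NULL;
	}
	bpf->len = fw->size / sizeof(struct sock_filter);
	return bpf;
}
```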
1444 struct vector_private *vp = netdev_priv(netdev); in vector_get_ringparam() local
1446 ring->rx_max_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1447 ring->tx_max_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1448 ring->rx_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1449 ring->tx_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1483 struct vector_private *vp = netdev_priv(dev); in vector_get_ethtool_stats() local
1485 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); in vector_get_ethtool_stats()
1493 struct vector_private *vp = netdev_priv(netdev); in vector_get_coalesce() local
1495 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ; in vector_get_coalesce()
1504 struct vector_private *vp = netdev_priv(netdev); in vector_set_coalesce() local
1506 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; in vector_set_coalesce()
1507 if (vp->coalesce == 0) in vector_set_coalesce()
1508 vp->coalesce = 1; in vector_set_coalesce()
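The coalesce interval is stored in jiffies, so get/set convert to and from microseconds open-coded as (usecs * HZ) / 1000000; that integer division truncates, hence the clamp to at least one jiffy. An equivalent sketch using the kernel's stock conversion helpers:

```c
#include <linux/jiffies.h>

/* Sketch: the same usecs <-> jiffies conversion via the standard
 * helpers; the clamp mirrors the driver's guard against arming a
 * zero-length coalesce timer.
 */
static unsigned long coalesce_usecs_to_jiffies(unsigned int usecs)
{
	return max(1UL, usecs_to_jiffies(usecs));
}

static unsigned int coalesce_jiffies_to_usecs(unsigned long j)
{
	return jiffies_to_usecs(j);
}
```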
1544 struct vector_private *vp = from_timer(vp, t, tl); in vector_timer_expire() local
1546 vp->estats.tx_kicks++; in vector_timer_expire()
1547 napi_schedule(&vp->napi); in vector_timer_expire()
1559 struct vector_private *vp; in vector_eth_configure() local
1586 vp = netdev_priv(dev); in vector_eth_configure()
1603 *vp = ((struct vector_private) in vector_eth_configure()
1605 .list = LIST_HEAD_INIT(vp->list), in vector_eth_configure()
1634 INIT_WORK(&vp->reset_tx, vector_reset_tx); in vector_eth_configure()
1636 timer_setup(&vp->tl, vector_timer_expire, 0); in vector_eth_configure()
1637 spin_lock_init(&vp->lock); in vector_eth_configure()