1dbddf429SAlex Dewar // SPDX-License-Identifier: GPL-2.0
249da7e64SAnton Ivanov /*
39807019aSAnton Ivanov * Copyright (C) 2017 - 2019 Cambridge Greys Limited
449da7e64SAnton Ivanov * Copyright (C) 2011 - 2014 Cisco Systems Inc
549da7e64SAnton Ivanov * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
649da7e64SAnton Ivanov * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
749da7e64SAnton Ivanov * James Leu (jleu@mindspring.net).
849da7e64SAnton Ivanov * Copyright (C) 2001 by various other people who didn't put their name here.
949da7e64SAnton Ivanov */
1049da7e64SAnton Ivanov
1157c8a661SMike Rapoport #include <linux/memblock.h>
1249da7e64SAnton Ivanov #include <linux/etherdevice.h>
1349da7e64SAnton Ivanov #include <linux/ethtool.h>
1449da7e64SAnton Ivanov #include <linux/inetdevice.h>
1549da7e64SAnton Ivanov #include <linux/init.h>
1649da7e64SAnton Ivanov #include <linux/list.h>
1749da7e64SAnton Ivanov #include <linux/netdevice.h>
1849da7e64SAnton Ivanov #include <linux/platform_device.h>
1949da7e64SAnton Ivanov #include <linux/rtnetlink.h>
2049da7e64SAnton Ivanov #include <linux/skbuff.h>
2149da7e64SAnton Ivanov #include <linux/slab.h>
2249da7e64SAnton Ivanov #include <linux/interrupt.h>
239807019aSAnton Ivanov #include <linux/firmware.h>
249807019aSAnton Ivanov #include <linux/fs.h>
259807019aSAnton Ivanov #include <uapi/linux/filter.h>
2649da7e64SAnton Ivanov #include <init.h>
2749da7e64SAnton Ivanov #include <irq_kern.h>
2849da7e64SAnton Ivanov #include <irq_user.h>
2949da7e64SAnton Ivanov #include <net_kern.h>
3049da7e64SAnton Ivanov #include <os.h>
3149da7e64SAnton Ivanov #include "mconsole_kern.h"
3249da7e64SAnton Ivanov #include "vector_user.h"
3349da7e64SAnton Ivanov #include "vector_kern.h"
3449da7e64SAnton Ivanov
3549da7e64SAnton Ivanov /*
3649da7e64SAnton Ivanov * Adapted from network devices with the following major changes:
3749da7e64SAnton Ivanov * All transports are static - simplifies the code significantly
3849da7e64SAnton Ivanov * Multiple FDs/IRQs per device
3949da7e64SAnton Ivanov * Vector IO optionally used for read/write, falling back to legacy
4049da7e64SAnton Ivanov * based on configuration and/or availability
4149da7e64SAnton Ivanov * Configuration is no longer positional - L2TPv3 and GRE require up to
4249da7e64SAnton Ivanov * 10 parameters, passing this as positional is not fit for purpose.
4349da7e64SAnton Ivanov * Only socket transports are supported
4449da7e64SAnton Ivanov */
4549da7e64SAnton Ivanov
4649da7e64SAnton Ivanov
4749da7e64SAnton Ivanov #define DRIVER_NAME "uml-vector"
/* One "vecN=..." kernel command line option, parked on vec_cmd_line
 * until late init can create the corresponding device.
 */
struct vector_cmd_line_arg {
	struct list_head list;		/* entry in vec_cmd_line */
	int unit;			/* interface number N in vecN */
	char *arguments;		/* raw option string for this unit */
};
5349da7e64SAnton Ivanov
/* Per-interface bookkeeping linking the net_device to its platform
 * device; kept on the vector_devices list under vector_devices_lock.
 */
struct vector_device {
	struct list_head list;		/* entry in vector_devices */
	struct net_device *dev;
	struct platform_device pdev;
	int unit;			/* interface number (vecN) */
	int opened;			/* non-zero once the netdev was opened */
};
6149da7e64SAnton Ivanov
6249da7e64SAnton Ivanov static LIST_HEAD(vec_cmd_line);
6349da7e64SAnton Ivanov
6449da7e64SAnton Ivanov static DEFINE_SPINLOCK(vector_devices_lock);
6549da7e64SAnton Ivanov static LIST_HEAD(vector_devices);
6649da7e64SAnton Ivanov
6749da7e64SAnton Ivanov static int driver_registered;
6849da7e64SAnton Ivanov
6949da7e64SAnton Ivanov static void vector_eth_configure(int n, struct arglist *def);
70b35507a4SAnton Ivanov static int vector_mmsg_rx(struct vector_private *vp, int budget);
7149da7e64SAnton Ivanov
7249da7e64SAnton Ivanov /* Argument accessors to set variables (and/or set default values)
7349da7e64SAnton Ivanov * mtu, buffer sizing, default headroom, etc
7449da7e64SAnton Ivanov */
7549da7e64SAnton Ivanov
7649da7e64SAnton Ivanov #define DEFAULT_HEADROOM 2
7749da7e64SAnton Ivanov #define SAFETY_MARGIN 32
7849da7e64SAnton Ivanov #define DEFAULT_VECTOR_SIZE 64
7949da7e64SAnton Ivanov #define TX_SMALL_PACKET 128
8049da7e64SAnton Ivanov #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
8149da7e64SAnton Ivanov
/* Names of the driver-private ethtool statistics. The order must match
 * the order in which the counters are reported to ethtool.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_queue_max" },
	{ "rx_queue_running_average" },
	{ "tx_queue_max" },
	{ "tx_queue_running_average" },
	{ "rx_encaps_errors" },
	{ "tx_timeout_count" },
	{ "tx_restart_queue" },
	{ "tx_kicks" },
	{ "tx_flow_control_xon" },
	{ "tx_flow_control_xoff" },
	{ "rx_csum_offload_good" },
	{ "rx_csum_offload_errors"},
	{ "sg_ok"},
	{ "sg_linearized"},
};
10049da7e64SAnton Ivanov
10149da7e64SAnton Ivanov #define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
10249da7e64SAnton Ivanov
vector_reset_stats(struct vector_private * vp)10349da7e64SAnton Ivanov static void vector_reset_stats(struct vector_private *vp)
10449da7e64SAnton Ivanov {
10549da7e64SAnton Ivanov vp->estats.rx_queue_max = 0;
10649da7e64SAnton Ivanov vp->estats.rx_queue_running_average = 0;
10749da7e64SAnton Ivanov vp->estats.tx_queue_max = 0;
10849da7e64SAnton Ivanov vp->estats.tx_queue_running_average = 0;
10949da7e64SAnton Ivanov vp->estats.rx_encaps_errors = 0;
11049da7e64SAnton Ivanov vp->estats.tx_timeout_count = 0;
11149da7e64SAnton Ivanov vp->estats.tx_restart_queue = 0;
11249da7e64SAnton Ivanov vp->estats.tx_kicks = 0;
11349da7e64SAnton Ivanov vp->estats.tx_flow_control_xon = 0;
11449da7e64SAnton Ivanov vp->estats.tx_flow_control_xoff = 0;
11549da7e64SAnton Ivanov vp->estats.sg_ok = 0;
11649da7e64SAnton Ivanov vp->estats.sg_linearized = 0;
11749da7e64SAnton Ivanov }
11849da7e64SAnton Ivanov
get_mtu(struct arglist * def)11949da7e64SAnton Ivanov static int get_mtu(struct arglist *def)
12049da7e64SAnton Ivanov {
12149da7e64SAnton Ivanov char *mtu = uml_vector_fetch_arg(def, "mtu");
12249da7e64SAnton Ivanov long result;
12349da7e64SAnton Ivanov
12449da7e64SAnton Ivanov if (mtu != NULL) {
12549da7e64SAnton Ivanov if (kstrtoul(mtu, 10, &result) == 0)
12658531931SAnton Ivanov if ((result < (1 << 16) - 1) && (result >= 576))
12749da7e64SAnton Ivanov return result;
12849da7e64SAnton Ivanov }
12949da7e64SAnton Ivanov return ETH_MAX_PACKET;
13049da7e64SAnton Ivanov }
13149da7e64SAnton Ivanov
/* Return the "bpffile=" argument (path of a BPF firmware blob),
 * or NULL when it was not supplied.
 */
static char *get_bpf_file(struct arglist *def)
{
	char *bpffile = uml_vector_fetch_arg(def, "bpffile");

	return bpffile;
}
1369807019aSAnton Ivanov
get_bpf_flash(struct arglist * def)1379807019aSAnton Ivanov static bool get_bpf_flash(struct arglist *def)
1389807019aSAnton Ivanov {
1399807019aSAnton Ivanov char *allow = uml_vector_fetch_arg(def, "bpfflash");
1409807019aSAnton Ivanov long result;
1419807019aSAnton Ivanov
1429807019aSAnton Ivanov if (allow != NULL) {
1439807019aSAnton Ivanov if (kstrtoul(allow, 10, &result) == 0)
1449807019aSAnton Ivanov return (allow > 0);
1459807019aSAnton Ivanov }
1469807019aSAnton Ivanov return false;
1479807019aSAnton Ivanov }
1489807019aSAnton Ivanov
get_depth(struct arglist * def)14949da7e64SAnton Ivanov static int get_depth(struct arglist *def)
15049da7e64SAnton Ivanov {
15149da7e64SAnton Ivanov char *mtu = uml_vector_fetch_arg(def, "depth");
15249da7e64SAnton Ivanov long result;
15349da7e64SAnton Ivanov
15449da7e64SAnton Ivanov if (mtu != NULL) {
15549da7e64SAnton Ivanov if (kstrtoul(mtu, 10, &result) == 0)
15649da7e64SAnton Ivanov return result;
15749da7e64SAnton Ivanov }
15849da7e64SAnton Ivanov return DEFAULT_VECTOR_SIZE;
15949da7e64SAnton Ivanov }
16049da7e64SAnton Ivanov
get_headroom(struct arglist * def)16149da7e64SAnton Ivanov static int get_headroom(struct arglist *def)
16249da7e64SAnton Ivanov {
16349da7e64SAnton Ivanov char *mtu = uml_vector_fetch_arg(def, "headroom");
16449da7e64SAnton Ivanov long result;
16549da7e64SAnton Ivanov
16649da7e64SAnton Ivanov if (mtu != NULL) {
16749da7e64SAnton Ivanov if (kstrtoul(mtu, 10, &result) == 0)
16849da7e64SAnton Ivanov return result;
16949da7e64SAnton Ivanov }
17049da7e64SAnton Ivanov return DEFAULT_HEADROOM;
17149da7e64SAnton Ivanov }
17249da7e64SAnton Ivanov
get_req_size(struct arglist * def)17349da7e64SAnton Ivanov static int get_req_size(struct arglist *def)
17449da7e64SAnton Ivanov {
17549da7e64SAnton Ivanov char *gro = uml_vector_fetch_arg(def, "gro");
17649da7e64SAnton Ivanov long result;
17749da7e64SAnton Ivanov
17849da7e64SAnton Ivanov if (gro != NULL) {
17949da7e64SAnton Ivanov if (kstrtoul(gro, 10, &result) == 0) {
18049da7e64SAnton Ivanov if (result > 0)
18149da7e64SAnton Ivanov return 65536;
18249da7e64SAnton Ivanov }
18349da7e64SAnton Ivanov }
18449da7e64SAnton Ivanov return get_mtu(def) + ETH_HEADER_OTHER +
18549da7e64SAnton Ivanov get_headroom(def) + SAFETY_MARGIN;
18649da7e64SAnton Ivanov }
18749da7e64SAnton Ivanov
18849da7e64SAnton Ivanov
get_transport_options(struct arglist * def)18949da7e64SAnton Ivanov static int get_transport_options(struct arglist *def)
19049da7e64SAnton Ivanov {
19149da7e64SAnton Ivanov char *transport = uml_vector_fetch_arg(def, "transport");
19249da7e64SAnton Ivanov char *vector = uml_vector_fetch_arg(def, "vec");
19349da7e64SAnton Ivanov
19449da7e64SAnton Ivanov int vec_rx = VECTOR_RX;
19549da7e64SAnton Ivanov int vec_tx = VECTOR_TX;
19649da7e64SAnton Ivanov long parsed;
1979807019aSAnton Ivanov int result = 0;
19849da7e64SAnton Ivanov
199237ce2e6SSjoerd Simons if (transport == NULL)
200237ce2e6SSjoerd Simons return -EINVAL;
201237ce2e6SSjoerd Simons
20249da7e64SAnton Ivanov if (vector != NULL) {
20349da7e64SAnton Ivanov if (kstrtoul(vector, 10, &parsed) == 0) {
20449da7e64SAnton Ivanov if (parsed == 0) {
20549da7e64SAnton Ivanov vec_rx = 0;
20649da7e64SAnton Ivanov vec_tx = 0;
20749da7e64SAnton Ivanov }
20849da7e64SAnton Ivanov }
20949da7e64SAnton Ivanov }
21049da7e64SAnton Ivanov
2119807019aSAnton Ivanov if (get_bpf_flash(def))
2129807019aSAnton Ivanov result = VECTOR_BPF_FLASH;
21349da7e64SAnton Ivanov
21449da7e64SAnton Ivanov if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
2159807019aSAnton Ivanov return result;
216b3b8ca2aSAnton Ivanov if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
2179807019aSAnton Ivanov return (result | vec_rx | VECTOR_BPF);
21849da7e64SAnton Ivanov if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
2199807019aSAnton Ivanov return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
2209807019aSAnton Ivanov return (result | vec_rx | vec_tx);
22149da7e64SAnton Ivanov }
22249da7e64SAnton Ivanov
22349da7e64SAnton Ivanov
22449da7e64SAnton Ivanov /* A mini-buffer for packet drop read
22549da7e64SAnton Ivanov * All of our supported transports are datagram oriented and we always
22649da7e64SAnton Ivanov * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
22749da7e64SAnton Ivanov * than the packet size it still counts as full packet read and will
22849da7e64SAnton Ivanov * clean the incoming stream to keep sigio/epoll happy
22949da7e64SAnton Ivanov */
23049da7e64SAnton Ivanov
23149da7e64SAnton Ivanov #define DROP_BUFFER_SIZE 32
23249da7e64SAnton Ivanov
23349da7e64SAnton Ivanov static char *drop_buffer;
23449da7e64SAnton Ivanov
23549da7e64SAnton Ivanov /* Array backed queues optimized for bulk enqueue/dequeue and
23649da7e64SAnton Ivanov * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
23749da7e64SAnton Ivanov * For more details and full design rationale see
23849da7e64SAnton Ivanov * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
23949da7e64SAnton Ivanov */
24049da7e64SAnton Ivanov
24149da7e64SAnton Ivanov
24249da7e64SAnton Ivanov /*
24349da7e64SAnton Ivanov * Advance the mmsg queue head by n = advance. Resets the queue to
24449da7e64SAnton Ivanov * maximum enqueue/dequeue-at-once capacity if possible. Called by
24549da7e64SAnton Ivanov * dequeuers. Caller must hold the head_lock!
24649da7e64SAnton Ivanov */
24749da7e64SAnton Ivanov
vector_advancehead(struct vector_queue * qi,int advance)24849da7e64SAnton Ivanov static int vector_advancehead(struct vector_queue *qi, int advance)
24949da7e64SAnton Ivanov {
25049da7e64SAnton Ivanov int queue_depth;
25149da7e64SAnton Ivanov
25249da7e64SAnton Ivanov qi->head =
25349da7e64SAnton Ivanov (qi->head + advance)
25449da7e64SAnton Ivanov % qi->max_depth;
25549da7e64SAnton Ivanov
25649da7e64SAnton Ivanov
25749da7e64SAnton Ivanov spin_lock(&qi->tail_lock);
25849da7e64SAnton Ivanov qi->queue_depth -= advance;
25949da7e64SAnton Ivanov
26049da7e64SAnton Ivanov /* we are at 0, use this to
26149da7e64SAnton Ivanov * reset head and tail so we can use max size vectors
26249da7e64SAnton Ivanov */
26349da7e64SAnton Ivanov
26449da7e64SAnton Ivanov if (qi->queue_depth == 0) {
26549da7e64SAnton Ivanov qi->head = 0;
26649da7e64SAnton Ivanov qi->tail = 0;
26749da7e64SAnton Ivanov }
26849da7e64SAnton Ivanov queue_depth = qi->queue_depth;
26949da7e64SAnton Ivanov spin_unlock(&qi->tail_lock);
27049da7e64SAnton Ivanov return queue_depth;
27149da7e64SAnton Ivanov }
27249da7e64SAnton Ivanov
27349da7e64SAnton Ivanov /* Advance the queue tail by n = advance.
27449da7e64SAnton Ivanov * This is called by enqueuers which should hold the
27549da7e64SAnton Ivanov * head lock already
27649da7e64SAnton Ivanov */
27749da7e64SAnton Ivanov
vector_advancetail(struct vector_queue * qi,int advance)27849da7e64SAnton Ivanov static int vector_advancetail(struct vector_queue *qi, int advance)
27949da7e64SAnton Ivanov {
28049da7e64SAnton Ivanov int queue_depth;
28149da7e64SAnton Ivanov
28249da7e64SAnton Ivanov qi->tail =
28349da7e64SAnton Ivanov (qi->tail + advance)
28449da7e64SAnton Ivanov % qi->max_depth;
28549da7e64SAnton Ivanov spin_lock(&qi->head_lock);
28649da7e64SAnton Ivanov qi->queue_depth += advance;
28749da7e64SAnton Ivanov queue_depth = qi->queue_depth;
28849da7e64SAnton Ivanov spin_unlock(&qi->head_lock);
28949da7e64SAnton Ivanov return queue_depth;
29049da7e64SAnton Ivanov }
29149da7e64SAnton Ivanov
prep_msg(struct vector_private * vp,struct sk_buff * skb,struct iovec * iov)29249da7e64SAnton Ivanov static int prep_msg(struct vector_private *vp,
29349da7e64SAnton Ivanov struct sk_buff *skb,
29449da7e64SAnton Ivanov struct iovec *iov)
29549da7e64SAnton Ivanov {
29649da7e64SAnton Ivanov int iov_index = 0;
29749da7e64SAnton Ivanov int nr_frags, frag;
29849da7e64SAnton Ivanov skb_frag_t *skb_frag;
29949da7e64SAnton Ivanov
30049da7e64SAnton Ivanov nr_frags = skb_shinfo(skb)->nr_frags;
30149da7e64SAnton Ivanov if (nr_frags > MAX_IOV_SIZE) {
30249da7e64SAnton Ivanov if (skb_linearize(skb) != 0)
30349da7e64SAnton Ivanov goto drop;
30449da7e64SAnton Ivanov }
30549da7e64SAnton Ivanov if (vp->header_size > 0) {
30649da7e64SAnton Ivanov iov[iov_index].iov_len = vp->header_size;
30749da7e64SAnton Ivanov vp->form_header(iov[iov_index].iov_base, skb, vp);
30849da7e64SAnton Ivanov iov_index++;
30949da7e64SAnton Ivanov }
31049da7e64SAnton Ivanov iov[iov_index].iov_base = skb->data;
31149da7e64SAnton Ivanov if (nr_frags > 0) {
31249da7e64SAnton Ivanov iov[iov_index].iov_len = skb->len - skb->data_len;
31349da7e64SAnton Ivanov vp->estats.sg_ok++;
31449da7e64SAnton Ivanov } else
31549da7e64SAnton Ivanov iov[iov_index].iov_len = skb->len;
31649da7e64SAnton Ivanov iov_index++;
31749da7e64SAnton Ivanov for (frag = 0; frag < nr_frags; frag++) {
31849da7e64SAnton Ivanov skb_frag = &skb_shinfo(skb)->frags[frag];
31949da7e64SAnton Ivanov iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
32049da7e64SAnton Ivanov iov[iov_index].iov_len = skb_frag_size(skb_frag);
32149da7e64SAnton Ivanov iov_index++;
32249da7e64SAnton Ivanov }
32349da7e64SAnton Ivanov return iov_index;
32449da7e64SAnton Ivanov drop:
32549da7e64SAnton Ivanov return -1;
32649da7e64SAnton Ivanov }
32749da7e64SAnton Ivanov /*
32849da7e64SAnton Ivanov * Generic vector enqueue with support for forming headers using transport
32949da7e64SAnton Ivanov * specific callback. Allows GRE, L2TPv3, RAW and other transports
33049da7e64SAnton Ivanov * to use a common enqueue procedure in vector mode
33149da7e64SAnton Ivanov */
33249da7e64SAnton Ivanov
/* Enqueue one skb for vector transmission.
 * Holds tail_lock for the whole enqueue; head_lock is taken only
 * briefly to snapshot the current depth. Returns the new queue
 * depth (dropped packets are counted but still return the depth so
 * the caller can decide whether to kick a send).
 */
static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	int queue_depth;
	int packet_len;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	int iov_count;

	spin_lock(&qi->tail_lock);
	spin_lock(&qi->head_lock);
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);

	if (skb)
		packet_len = skb->len;

	if (queue_depth < qi->max_depth) {

		/* claim the tail slot and build its iovec/msghdr */
		*(qi->skbuff_vector + qi->tail) = skb;
		mmsg_vector += qi->tail;
		iov_count = prep_msg(
			vp,
			skb,
			mmsg_vector->msg_hdr.msg_iov
		);
		if (iov_count < 1)
			goto drop;
		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
		queue_depth = vector_advancetail(qi, 1);
	} else
		goto drop;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
drop:
	/* queue full or prep_msg failed: drop and complete the packet
	 * towards BQL so the stack's byte accounting stays balanced
	 */
	qi->dev->stats.tx_dropped++;
	if (skb != NULL) {
		packet_len = skb->len;
		dev_consume_skb_any(skb);
		netdev_completed_queue(qi->dev, 1, packet_len);
	}
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}
37849da7e64SAnton Ivanov
consume_vector_skbs(struct vector_queue * qi,int count)37949da7e64SAnton Ivanov static int consume_vector_skbs(struct vector_queue *qi, int count)
38049da7e64SAnton Ivanov {
38149da7e64SAnton Ivanov struct sk_buff *skb;
38249da7e64SAnton Ivanov int skb_index;
38349da7e64SAnton Ivanov int bytes_compl = 0;
38449da7e64SAnton Ivanov
38549da7e64SAnton Ivanov for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
38649da7e64SAnton Ivanov skb = *(qi->skbuff_vector + skb_index);
38749da7e64SAnton Ivanov /* mark as empty to ensure correct destruction if
38849da7e64SAnton Ivanov * needed
38949da7e64SAnton Ivanov */
39049da7e64SAnton Ivanov bytes_compl += skb->len;
39149da7e64SAnton Ivanov *(qi->skbuff_vector + skb_index) = NULL;
39249da7e64SAnton Ivanov dev_consume_skb_any(skb);
39349da7e64SAnton Ivanov }
39449da7e64SAnton Ivanov qi->dev->stats.tx_bytes += bytes_compl;
39549da7e64SAnton Ivanov qi->dev->stats.tx_packets += count;
39649da7e64SAnton Ivanov netdev_completed_queue(qi->dev, count, bytes_compl);
39749da7e64SAnton Ivanov return vector_advancehead(qi, count);
39849da7e64SAnton Ivanov }
39949da7e64SAnton Ivanov
40049da7e64SAnton Ivanov /*
40149da7e64SAnton Ivanov * Generic vector deque via sendmmsg with support for forming headers
40249da7e64SAnton Ivanov * using transport specific callback. Allows GRE, L2TPv3, RAW and
40349da7e64SAnton Ivanov * other transports to use a common dequeue procedure in vector mode
40449da7e64SAnton Ivanov */
40549da7e64SAnton Ivanov
40649da7e64SAnton Ivanov
/* Transmit as many queued packets as possible via sendmmsg().
 * Uses trylocks so concurrent callers skip the send rather than
 * block; when the head lock is contended the function returns
 * max_depth without attempting anything. Otherwise returns the
 * queue depth remaining after the send loop.
 */
static int vector_send(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *send_from;
	int result = 0, send_len, queue_depth = qi->max_depth;

	if (spin_trylock(&qi->head_lock)) {
		if (spin_trylock(&qi->tail_lock)) {
			/* update queue_depth to current value */
			queue_depth = qi->queue_depth;
			spin_unlock(&qi->tail_lock);
			while (queue_depth > 0) {
				/* Calculate the start of the vector */
				send_len = queue_depth;
				send_from = qi->mmsg_vector;
				send_from += qi->head;
				/* Adjust vector size if wraparound */
				if (send_len + qi->head > qi->max_depth)
					send_len = qi->max_depth - qi->head;
				/* Try to TX as many packets as possible */
				if (send_len > 0) {
					result = uml_vector_sendmmsg(
						vp->fds->tx_fd,
						send_from,
						send_len,
						0
					);
					/* a short send means the fd is
					 * backed up; remember we are in
					 * write-poll mode
					 */
					vp->in_write_poll =
						(result != send_len);
				}
				/* For some of the sendmmsg error scenarios
				 * we may end being unsure in the TX success
				 * for all packets. It is safer to declare
				 * them all TX-ed and blame the network.
				 */
				if (result < 0) {
					if (net_ratelimit())
						netdev_err(vp->dev, "sendmmsg err=%i\n",
							result);
					vp->in_error = true;
					result = send_len;
				}
				if (result > 0) {
					queue_depth =
						consume_vector_skbs(qi, result);
					/* This is equivalent to an TX IRQ.
					 * Restart the upper layers to feed us
					 * more packets.
					 */
					if (result > vp->estats.tx_queue_max)
						vp->estats.tx_queue_max = result;
					vp->estats.tx_queue_running_average =
						(vp->estats.tx_queue_running_average + result) >> 1;
				}
				netif_wake_queue(qi->dev);
				/* if TX is busy, break out of the send loop,
				 * poll write IRQ will reschedule xmit for us
				 */
				if (result != send_len) {
					vp->estats.tx_restart_queue++;
					break;
				}
			}
		}
		spin_unlock(&qi->head_lock);
	}
	return queue_depth;
}
47549da7e64SAnton Ivanov
47649da7e64SAnton Ivanov /* Queue destructor. Deliberately stateless so we can use
47749da7e64SAnton Ivanov * it in queue cleanup if initialization fails.
47849da7e64SAnton Ivanov */
47949da7e64SAnton Ivanov
destroy_queue(struct vector_queue * qi)48049da7e64SAnton Ivanov static void destroy_queue(struct vector_queue *qi)
48149da7e64SAnton Ivanov {
48249da7e64SAnton Ivanov int i;
48349da7e64SAnton Ivanov struct iovec *iov;
48449da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(qi->dev);
48549da7e64SAnton Ivanov struct mmsghdr *mmsg_vector;
48649da7e64SAnton Ivanov
48749da7e64SAnton Ivanov if (qi == NULL)
48849da7e64SAnton Ivanov return;
48949da7e64SAnton Ivanov /* deallocate any skbuffs - we rely on any unused to be
49049da7e64SAnton Ivanov * set to NULL.
49149da7e64SAnton Ivanov */
49249da7e64SAnton Ivanov if (qi->skbuff_vector != NULL) {
49349da7e64SAnton Ivanov for (i = 0; i < qi->max_depth; i++) {
49449da7e64SAnton Ivanov if (*(qi->skbuff_vector + i) != NULL)
49549da7e64SAnton Ivanov dev_kfree_skb_any(*(qi->skbuff_vector + i));
49649da7e64SAnton Ivanov }
49749da7e64SAnton Ivanov kfree(qi->skbuff_vector);
49849da7e64SAnton Ivanov }
49949da7e64SAnton Ivanov /* deallocate matching IOV structures including header buffs */
50049da7e64SAnton Ivanov if (qi->mmsg_vector != NULL) {
50149da7e64SAnton Ivanov mmsg_vector = qi->mmsg_vector;
50249da7e64SAnton Ivanov for (i = 0; i < qi->max_depth; i++) {
50349da7e64SAnton Ivanov iov = mmsg_vector->msg_hdr.msg_iov;
50449da7e64SAnton Ivanov if (iov != NULL) {
50549da7e64SAnton Ivanov if ((vp->header_size > 0) &&
50649da7e64SAnton Ivanov (iov->iov_base != NULL))
50749da7e64SAnton Ivanov kfree(iov->iov_base);
50849da7e64SAnton Ivanov kfree(iov);
50949da7e64SAnton Ivanov }
51049da7e64SAnton Ivanov mmsg_vector++;
51149da7e64SAnton Ivanov }
51249da7e64SAnton Ivanov kfree(qi->mmsg_vector);
51349da7e64SAnton Ivanov }
51449da7e64SAnton Ivanov kfree(qi);
51549da7e64SAnton Ivanov }
51649da7e64SAnton Ivanov
51749da7e64SAnton Ivanov /*
51849da7e64SAnton Ivanov * Queue constructor. Create a queue with a given side.
51949da7e64SAnton Ivanov */
/* Queue constructor. Create a queue with a given side.
 * Allocates the mmsghdr ring, the skb pointer ring and one iovec
 * array per slot (plus a header buffer per slot when the transport
 * uses one). Returns NULL on allocation failure; partially built
 * queues are torn down via destroy_queue().
 */
static struct vector_queue *create_queue(
	struct vector_private *vp,
	int max_size,
	int header_size,
	int num_extra_frags)
{
	struct vector_queue *result;
	int i;
	struct iovec *iov;
	struct mmsghdr *mmsg_vector;

	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
	if (result == NULL)
		return NULL;
	result->max_depth = max_size;
	result->dev = vp->dev;
	result->mmsg_vector = kmalloc(
		(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
	if (result->mmsg_vector == NULL)
		goto out_mmsg_fail;
	result->skbuff_vector = kmalloc(
		(sizeof(void *) * max_size), GFP_KERNEL);
	if (result->skbuff_vector == NULL)
		goto out_skb_fail;

	/* further failures can be handled safely by destroy_queue*/

	mmsg_vector = result->mmsg_vector;
	for (i = 0; i < max_size; i++) {
		/* Clear all pointers - we use non-NULL as marking on
		 * what to free on destruction
		 */
		*(result->skbuff_vector + i) = NULL;
		mmsg_vector->msg_hdr.msg_iov = NULL;
		mmsg_vector++;
	}
	mmsg_vector = result->mmsg_vector;
	result->max_iov_frags = num_extra_frags;
	for (i = 0; i < max_size; i++) {
		/* iovec layout per slot: [header,] linear data,
		 * num_extra_frags fragment entries, one spare
		 */
		if (vp->header_size > 0)
			iov = kmalloc_array(3 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL
			);
		else
			iov = kmalloc_array(2 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL
			);
		if (iov == NULL)
			goto out_fail;
		mmsg_vector->msg_hdr.msg_iov = iov;
		mmsg_vector->msg_hdr.msg_iovlen = 1;
		mmsg_vector->msg_hdr.msg_control = NULL;
		mmsg_vector->msg_hdr.msg_controllen = 0;
		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
		mmsg_vector->msg_hdr.msg_name = NULL;
		mmsg_vector->msg_hdr.msg_namelen = 0;
		if (vp->header_size > 0) {
			/* pre-allocate the transport header buffer
			 * in iov[0] once; prep_msg() fills it per packet
			 */
			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
			if (iov->iov_base == NULL)
				goto out_fail;
			iov->iov_len = header_size;
			mmsg_vector->msg_hdr.msg_iovlen = 2;
			iov++;
		}
		iov->iov_base = NULL;
		iov->iov_len = 0;
		mmsg_vector++;
	}
	spin_lock_init(&result->head_lock);
	spin_lock_init(&result->tail_lock);
	result->queue_depth = 0;
	result->head = 0;
	result->tail = 0;
	return result;
out_skb_fail:
	kfree(result->mmsg_vector);
out_mmsg_fail:
	kfree(result);
	return NULL;
out_fail:
	destroy_queue(result);
	return NULL;
}
60549da7e64SAnton Ivanov
60649da7e64SAnton Ivanov /*
60749da7e64SAnton Ivanov * We do not use the RX queue as a proper wraparound queue for now
608b35507a4SAnton Ivanov * This is not necessary because the consumption via napi_gro_receive()
60949da7e64SAnton Ivanov * happens in-line. While we can try using the return code of
61049da7e64SAnton Ivanov * netif_rx() for flow control there are no drivers doing this today.
61149da7e64SAnton Ivanov * For this RX specific use we ignore the tail/head locks and
61249da7e64SAnton Ivanov * just read into a prepared queue filled with skbuffs.
61349da7e64SAnton Ivanov */
61449da7e64SAnton Ivanov
/* Allocate an skb sized for one receive (linear part of
 * max_packet + headroom + SAFETY_MARGIN, remainder in page
 * fragments of up to order 3) and describe its data areas in the
 * iovec array of *msg so recvmsg/recvmmsg can scatter straight
 * into it. iov[0] is skipped when a transport header is in use.
 * On allocation failure the data iovec is cleared and NULL is
 * returned; msg->msg_iovlen is always set to the entries filled.
 */
static struct sk_buff *prep_skb(
	struct vector_private *vp,
	struct user_msghdr *msg)
{
	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	struct sk_buff *result;
	int iov_index = 0, len;
	struct iovec *iov = msg->msg_iov;
	int err, nr_frags, frag;
	skb_frag_t *skb_frag;

	if (vp->req_size <= linear)
		len = linear;
	else
		len = vp->req_size;
	result = alloc_skb_with_frags(
		linear,
		len - vp->max_packet,
		3,
		&err,
		GFP_ATOMIC
	);
	/* leave iov[0] for the transport header, if any */
	if (vp->header_size > 0)
		iov_index++;
	if (result == NULL) {
		iov[iov_index].iov_base = NULL;
		iov[iov_index].iov_len = 0;
		goto done;
	}
	skb_reserve(result, vp->headroom);
	result->dev = vp->dev;
	skb_put(result, vp->max_packet);
	result->data_len = len - vp->max_packet;
	result->len += len - vp->max_packet;
	skb_reset_mac_header(result);
	result->ip_summed = CHECKSUM_NONE;
	/* linear area first... */
	iov[iov_index].iov_base = result->data;
	iov[iov_index].iov_len = vp->max_packet;
	iov_index++;

	/* ...then one iovec entry per page fragment */
	nr_frags = skb_shinfo(result)->nr_frags;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(result)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		if (iov[iov_index].iov_base != NULL)
			iov[iov_index].iov_len = skb_frag_size(skb_frag);
		else
			iov[iov_index].iov_len = 0;
		iov_index++;
	}
done:
	msg->msg_iovlen = iov_index;
	return result;
}
66949da7e64SAnton Ivanov
67049da7e64SAnton Ivanov
67149da7e64SAnton Ivanov /* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
67249da7e64SAnton Ivanov
prep_queue_for_rx(struct vector_queue * qi)67349da7e64SAnton Ivanov static void prep_queue_for_rx(struct vector_queue *qi)
67449da7e64SAnton Ivanov {
67549da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(qi->dev);
67649da7e64SAnton Ivanov struct mmsghdr *mmsg_vector = qi->mmsg_vector;
67749da7e64SAnton Ivanov void **skbuff_vector = qi->skbuff_vector;
67849da7e64SAnton Ivanov int i;
67949da7e64SAnton Ivanov
68049da7e64SAnton Ivanov if (qi->queue_depth == 0)
68149da7e64SAnton Ivanov return;
68249da7e64SAnton Ivanov for (i = 0; i < qi->queue_depth; i++) {
68349da7e64SAnton Ivanov /* it is OK if allocation fails - recvmmsg with NULL data in
68449da7e64SAnton Ivanov * iov argument still performs an RX, just drops the packet
68549da7e64SAnton Ivanov * This allows us stop faffing around with a "drop buffer"
68649da7e64SAnton Ivanov */
68749da7e64SAnton Ivanov
68849da7e64SAnton Ivanov *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
68949da7e64SAnton Ivanov skbuff_vector++;
69049da7e64SAnton Ivanov mmsg_vector++;
69149da7e64SAnton Ivanov }
69249da7e64SAnton Ivanov qi->queue_depth = 0;
69349da7e64SAnton Ivanov }
69449da7e64SAnton Ivanov
find_device(int n)69549da7e64SAnton Ivanov static struct vector_device *find_device(int n)
69649da7e64SAnton Ivanov {
69749da7e64SAnton Ivanov struct vector_device *device;
69849da7e64SAnton Ivanov struct list_head *ele;
69949da7e64SAnton Ivanov
70049da7e64SAnton Ivanov spin_lock(&vector_devices_lock);
70149da7e64SAnton Ivanov list_for_each(ele, &vector_devices) {
70249da7e64SAnton Ivanov device = list_entry(ele, struct vector_device, list);
70349da7e64SAnton Ivanov if (device->unit == n)
70449da7e64SAnton Ivanov goto out;
70549da7e64SAnton Ivanov }
70649da7e64SAnton Ivanov device = NULL;
70749da7e64SAnton Ivanov out:
70849da7e64SAnton Ivanov spin_unlock(&vector_devices_lock);
70949da7e64SAnton Ivanov return device;
71049da7e64SAnton Ivanov }
71149da7e64SAnton Ivanov
vector_parse(char * str,int * index_out,char ** str_out,char ** error_out)71249da7e64SAnton Ivanov static int vector_parse(char *str, int *index_out, char **str_out,
71349da7e64SAnton Ivanov char **error_out)
71449da7e64SAnton Ivanov {
715584bfe63SChristophe JAILLET int n, len, err;
71649da7e64SAnton Ivanov char *start = str;
71749da7e64SAnton Ivanov
71849da7e64SAnton Ivanov len = strlen(str);
71949da7e64SAnton Ivanov
72049da7e64SAnton Ivanov while ((*str != ':') && (strlen(str) > 1))
72149da7e64SAnton Ivanov str++;
72249da7e64SAnton Ivanov if (*str != ':') {
72349da7e64SAnton Ivanov *error_out = "Expected ':' after device number";
724584bfe63SChristophe JAILLET return -EINVAL;
72549da7e64SAnton Ivanov }
72649da7e64SAnton Ivanov *str = '\0';
72749da7e64SAnton Ivanov
72849da7e64SAnton Ivanov err = kstrtouint(start, 0, &n);
72949da7e64SAnton Ivanov if (err < 0) {
73049da7e64SAnton Ivanov *error_out = "Bad device number";
73149da7e64SAnton Ivanov return err;
73249da7e64SAnton Ivanov }
73349da7e64SAnton Ivanov
73449da7e64SAnton Ivanov str++;
73549da7e64SAnton Ivanov if (find_device(n)) {
73649da7e64SAnton Ivanov *error_out = "Device already configured";
737584bfe63SChristophe JAILLET return -EINVAL;
73849da7e64SAnton Ivanov }
73949da7e64SAnton Ivanov
74049da7e64SAnton Ivanov *index_out = n;
74149da7e64SAnton Ivanov *str_out = str;
74249da7e64SAnton Ivanov return 0;
74349da7e64SAnton Ivanov }
74449da7e64SAnton Ivanov
vector_config(char * str,char ** error_out)74549da7e64SAnton Ivanov static int vector_config(char *str, char **error_out)
74649da7e64SAnton Ivanov {
74749da7e64SAnton Ivanov int err, n;
74849da7e64SAnton Ivanov char *params;
74949da7e64SAnton Ivanov struct arglist *parsed;
75049da7e64SAnton Ivanov
75149da7e64SAnton Ivanov err = vector_parse(str, &n, ¶ms, error_out);
75249da7e64SAnton Ivanov if (err != 0)
75349da7e64SAnton Ivanov return err;
75449da7e64SAnton Ivanov
75549da7e64SAnton Ivanov /* This string is broken up and the pieces used by the underlying
75649da7e64SAnton Ivanov * driver. We should copy it to make sure things do not go wrong
75749da7e64SAnton Ivanov * later.
75849da7e64SAnton Ivanov */
75949da7e64SAnton Ivanov
76049da7e64SAnton Ivanov params = kstrdup(params, GFP_KERNEL);
761be967f7dSChristophe JAILLET if (params == NULL) {
76249da7e64SAnton Ivanov *error_out = "vector_config failed to strdup string";
76349da7e64SAnton Ivanov return -ENOMEM;
76449da7e64SAnton Ivanov }
76549da7e64SAnton Ivanov
76649da7e64SAnton Ivanov parsed = uml_parse_vector_ifspec(params);
76749da7e64SAnton Ivanov
76849da7e64SAnton Ivanov if (parsed == NULL) {
76949da7e64SAnton Ivanov *error_out = "vector_config failed to parse parameters";
770*8f88c73aSXiang Yang kfree(params);
77149da7e64SAnton Ivanov return -EINVAL;
77249da7e64SAnton Ivanov }
77349da7e64SAnton Ivanov
77449da7e64SAnton Ivanov vector_eth_configure(n, parsed);
77549da7e64SAnton Ivanov return 0;
77649da7e64SAnton Ivanov }
77749da7e64SAnton Ivanov
/*
 * Parse a unit id out of *str. On success the id is returned and also
 * stored in both *start_out and *end_out, and *str is advanced past
 * the digits; malformed or trailing input yields -1.
 */
static int vector_id(char **str, int *start_out, int *end_out)
{
	char *endp;
	int unit;

	unit = simple_strtoul(*str, &endp, 0);
	if (endp == *str || *endp != '\0')
		return -1;

	*start_out = unit;
	*end_out = unit;
	*str = endp;
	return unit;
}
79249da7e64SAnton Ivanov
vector_remove(int n,char ** error_out)79349da7e64SAnton Ivanov static int vector_remove(int n, char **error_out)
79449da7e64SAnton Ivanov {
79549da7e64SAnton Ivanov struct vector_device *vec_d;
79649da7e64SAnton Ivanov struct net_device *dev;
79749da7e64SAnton Ivanov struct vector_private *vp;
79849da7e64SAnton Ivanov
79949da7e64SAnton Ivanov vec_d = find_device(n);
80049da7e64SAnton Ivanov if (vec_d == NULL)
80149da7e64SAnton Ivanov return -ENODEV;
80249da7e64SAnton Ivanov dev = vec_d->dev;
80349da7e64SAnton Ivanov vp = netdev_priv(dev);
80449da7e64SAnton Ivanov if (vp->fds != NULL)
80549da7e64SAnton Ivanov return -EBUSY;
80649da7e64SAnton Ivanov unregister_netdev(dev);
80749da7e64SAnton Ivanov platform_device_unregister(&vec_d->pdev);
80849da7e64SAnton Ivanov return 0;
80949da7e64SAnton Ivanov }
81049da7e64SAnton Ivanov
81149da7e64SAnton Ivanov /*
81249da7e64SAnton Ivanov * There is no shared per-transport initialization code, so
81349da7e64SAnton Ivanov * we will just initialize each interface one by one and
81449da7e64SAnton Ivanov * add them to a list
81549da7e64SAnton Ivanov */
81649da7e64SAnton Ivanov
/* Minimal platform driver shim: vector devices register as platform
 * devices (see vector_eth_configure); there is no probe/remove logic.
 */
static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};
82249da7e64SAnton Ivanov
82349da7e64SAnton Ivanov
vector_device_release(struct device * dev)82449da7e64SAnton Ivanov static void vector_device_release(struct device *dev)
82549da7e64SAnton Ivanov {
82649da7e64SAnton Ivanov struct vector_device *device = dev_get_drvdata(dev);
82749da7e64SAnton Ivanov struct net_device *netdev = device->dev;
82849da7e64SAnton Ivanov
82949da7e64SAnton Ivanov list_del(&device->list);
83049da7e64SAnton Ivanov kfree(device);
83149da7e64SAnton Ivanov free_netdev(netdev);
83249da7e64SAnton Ivanov }
83349da7e64SAnton Ivanov
83449da7e64SAnton Ivanov /* Bog standard recv using recvmsg - not used normally unless the user
83549da7e64SAnton Ivanov * explicitly specifies not to use recvmmsg vector RX.
83649da7e64SAnton Ivanov */
83749da7e64SAnton Ivanov
/*
 * Packet-at-a-time receive via recvmsg(); used only when the user
 * disables vector (recvmmsg) RX. Returns the byte count read, 0 for a
 * handled-but-dropped packet, or a negative error, which also latches
 * vp->in_error so the TX path can shut the interface down.
 */
static int vector_legacy_rx(struct vector_private *vp)
{
	int pkt_len;
	struct user_msghdr hdr;
	struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
	int iovpos = 0;
	struct sk_buff *skb;
	int header_check;

	hdr.msg_name = NULL;
	hdr.msg_namelen = 0;
	hdr.msg_iov = (struct iovec *) &iov;
	hdr.msg_control = NULL;
	hdr.msg_controllen = 0;
	hdr.msg_flags = 0;

	/* iov[0] receives the transport overlay header, if present. */
	if (vp->header_size > 0) {
		iov[0].iov_base = vp->header_rxbuffer;
		iov[0].iov_len = vp->header_size;
	}

	skb = prep_skb(vp, &hdr);

	if (skb == NULL) {
		/* Read a packet into drop_buffer and don't do
		 * anything with it.
		 */
		iov[iovpos].iov_base = drop_buffer;
		iov[iovpos].iov_len = DROP_BUFFER_SIZE;
		hdr.msg_iovlen = 1;
		vp->dev->stats.rx_dropped++;
	}

	pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
	if (pkt_len < 0) {
		/* Transport error - remember it for the TX path. */
		vp->in_error = true;
		return pkt_len;
	}

	if (skb != NULL) {
		if (pkt_len > vp->header_size) {
			if (vp->header_size > 0) {
				/* Let the transport validate its overlay
				 * header; >0 signals good csum offload info.
				 */
				header_check = vp->verify_header(
					vp->header_rxbuffer, skb, vp);
				if (header_check < 0) {
					dev_kfree_skb_irq(skb);
					vp->dev->stats.rx_dropped++;
					vp->estats.rx_encaps_errors++;
					return 0;
				}
				if (header_check > 0) {
					vp->estats.rx_csum_offload_good++;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}
			/* Trim to the actual payload length. */
			pskb_trim(skb, pkt_len - vp->rx_header_size);
			skb->protocol = eth_type_trans(skb, skb->dev);
			vp->dev->stats.rx_bytes += skb->len;
			vp->dev->stats.rx_packets++;
			napi_gro_receive(&vp->napi, skb);
		} else {
			/* Header-only/runt read - nothing to deliver. */
			dev_kfree_skb_irq(skb);
		}
	}
	return pkt_len;
}
90449da7e64SAnton Ivanov
90549da7e64SAnton Ivanov /*
90649da7e64SAnton Ivanov * Packet at a time TX which falls back to vector TX if the
90749da7e64SAnton Ivanov * underlying transport is busy.
90849da7e64SAnton Ivanov */
90949da7e64SAnton Ivanov
91049da7e64SAnton Ivanov
91149da7e64SAnton Ivanov
/*
 * Packet-at-a-time transmit using writev(). Consumes @skb on every
 * path. Returns bytes written, 0 if nothing went out (counted as a
 * drop), or a negative error, which also latches vp->in_error.
 */
static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
{
	struct iovec iov[3 + MAX_IOV_SIZE];
	int iov_count, pkt_len = 0;

	/* iov[0] carries the transport overlay header; prep_msg() fills
	 * the remaining slots from the skb (linear part plus frags).
	 */
	iov[0].iov_base = vp->header_txbuffer;
	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);

	if (iov_count < 1)
		goto drop;

	pkt_len = uml_vector_writev(
		vp->fds->tx_fd,
		(struct iovec *) &iov,
		iov_count
	);

	if (pkt_len < 0)
		goto drop;

	netif_trans_update(vp->dev);
	netif_wake_queue(vp->dev);

	if (pkt_len > 0) {
		vp->dev->stats.tx_bytes += skb->len;
		vp->dev->stats.tx_packets++;
	} else {
		vp->dev->stats.tx_dropped++;
	}
	consume_skb(skb);
	return pkt_len;
drop:
	vp->dev->stats.tx_dropped++;
	consume_skb(skb);
	/* A negative writev() result is a transport error, not congestion. */
	if (pkt_len < 0)
		vp->in_error = true;
	return pkt_len;
}
95049da7e64SAnton Ivanov
95149da7e64SAnton Ivanov /*
95249da7e64SAnton Ivanov * Receive as many messages as we can in one call using the special
95349da7e64SAnton Ivanov * mmsg vector matched to an skb vector which we prepared earlier.
95449da7e64SAnton Ivanov */
95549da7e64SAnton Ivanov
vector_mmsg_rx(struct vector_private * vp,int budget)956b35507a4SAnton Ivanov static int vector_mmsg_rx(struct vector_private *vp, int budget)
95749da7e64SAnton Ivanov {
95849da7e64SAnton Ivanov int packet_count, i;
95949da7e64SAnton Ivanov struct vector_queue *qi = vp->rx_queue;
96049da7e64SAnton Ivanov struct sk_buff *skb;
96149da7e64SAnton Ivanov struct mmsghdr *mmsg_vector = qi->mmsg_vector;
96249da7e64SAnton Ivanov void **skbuff_vector = qi->skbuff_vector;
96349da7e64SAnton Ivanov int header_check;
96449da7e64SAnton Ivanov
96549da7e64SAnton Ivanov /* Refresh the vector and make sure it is with new skbs and the
96649da7e64SAnton Ivanov * iovs are updated to point to them.
96749da7e64SAnton Ivanov */
96849da7e64SAnton Ivanov
96949da7e64SAnton Ivanov prep_queue_for_rx(qi);
97049da7e64SAnton Ivanov
97149da7e64SAnton Ivanov /* Fire the Lazy Gun - get as many packets as we can in one go. */
97249da7e64SAnton Ivanov
973b35507a4SAnton Ivanov if (budget > qi->max_depth)
974b35507a4SAnton Ivanov budget = qi->max_depth;
975b35507a4SAnton Ivanov
97649da7e64SAnton Ivanov packet_count = uml_vector_recvmmsg(
97749da7e64SAnton Ivanov vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
97849da7e64SAnton Ivanov
979d47761dbSAnton Ivanov if (packet_count < 0)
980d47761dbSAnton Ivanov vp->in_error = true;
981d47761dbSAnton Ivanov
98249da7e64SAnton Ivanov if (packet_count <= 0)
98349da7e64SAnton Ivanov return packet_count;
98449da7e64SAnton Ivanov
98549da7e64SAnton Ivanov /* We treat packet processing as enqueue, buffer refresh as dequeue
98649da7e64SAnton Ivanov * The queue_depth tells us how many buffers have been used and how
98749da7e64SAnton Ivanov * many do we need to prep the next time prep_queue_for_rx() is called.
98849da7e64SAnton Ivanov */
98949da7e64SAnton Ivanov
99049da7e64SAnton Ivanov qi->queue_depth = packet_count;
99149da7e64SAnton Ivanov
99249da7e64SAnton Ivanov for (i = 0; i < packet_count; i++) {
99349da7e64SAnton Ivanov skb = (*skbuff_vector);
99449da7e64SAnton Ivanov if (mmsg_vector->msg_len > vp->header_size) {
99549da7e64SAnton Ivanov if (vp->header_size > 0) {
99649da7e64SAnton Ivanov header_check = vp->verify_header(
99749da7e64SAnton Ivanov mmsg_vector->msg_hdr.msg_iov->iov_base,
99849da7e64SAnton Ivanov skb,
99949da7e64SAnton Ivanov vp
100049da7e64SAnton Ivanov );
100149da7e64SAnton Ivanov if (header_check < 0) {
100249da7e64SAnton Ivanov /* Overlay header failed to verify - discard.
100349da7e64SAnton Ivanov * We can actually keep this skb and reuse it,
100449da7e64SAnton Ivanov * but that will make the prep logic too
100549da7e64SAnton Ivanov * complex.
100649da7e64SAnton Ivanov */
100749da7e64SAnton Ivanov dev_kfree_skb_irq(skb);
100849da7e64SAnton Ivanov vp->estats.rx_encaps_errors++;
100949da7e64SAnton Ivanov continue;
101049da7e64SAnton Ivanov }
101149da7e64SAnton Ivanov if (header_check > 0) {
101249da7e64SAnton Ivanov vp->estats.rx_csum_offload_good++;
101349da7e64SAnton Ivanov skb->ip_summed = CHECKSUM_UNNECESSARY;
101449da7e64SAnton Ivanov }
101549da7e64SAnton Ivanov }
101649da7e64SAnton Ivanov pskb_trim(skb,
101749da7e64SAnton Ivanov mmsg_vector->msg_len - vp->rx_header_size);
101849da7e64SAnton Ivanov skb->protocol = eth_type_trans(skb, skb->dev);
101949da7e64SAnton Ivanov /*
102049da7e64SAnton Ivanov * We do not need to lock on updating stats here
102149da7e64SAnton Ivanov * The interrupt loop is non-reentrant.
102249da7e64SAnton Ivanov */
102349da7e64SAnton Ivanov vp->dev->stats.rx_bytes += skb->len;
102449da7e64SAnton Ivanov vp->dev->stats.rx_packets++;
1025b35507a4SAnton Ivanov napi_gro_receive(&vp->napi, skb);
102649da7e64SAnton Ivanov } else {
102749da7e64SAnton Ivanov /* Overlay header too short to do anything - discard.
102849da7e64SAnton Ivanov * We can actually keep this skb and reuse it,
102949da7e64SAnton Ivanov * but that will make the prep logic too complex.
103049da7e64SAnton Ivanov */
103149da7e64SAnton Ivanov if (skb != NULL)
103249da7e64SAnton Ivanov dev_kfree_skb_irq(skb);
103349da7e64SAnton Ivanov }
103449da7e64SAnton Ivanov (*skbuff_vector) = NULL;
103549da7e64SAnton Ivanov /* Move to the next buffer element */
103649da7e64SAnton Ivanov mmsg_vector++;
103749da7e64SAnton Ivanov skbuff_vector++;
103849da7e64SAnton Ivanov }
103949da7e64SAnton Ivanov if (packet_count > 0) {
104049da7e64SAnton Ivanov if (vp->estats.rx_queue_max < packet_count)
104149da7e64SAnton Ivanov vp->estats.rx_queue_max = packet_count;
104249da7e64SAnton Ivanov vp->estats.rx_queue_running_average =
104349da7e64SAnton Ivanov (vp->estats.rx_queue_running_average + packet_count) >> 1;
104449da7e64SAnton Ivanov }
104549da7e64SAnton Ivanov return packet_count;
104649da7e64SAnton Ivanov }
104749da7e64SAnton Ivanov
/*
 * ndo_start_xmit: send or queue one skb. In packet-at-a-time mode the
 * frame is written out immediately; in vector TX mode it is enqueued
 * with BQL accounting and flushed when the queue fills, when the stack
 * stops batching (no xmit_more), or on the coalescing timer.
 */
static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	int queue_depth = 0;

	/* A transport error was recorded: stop servicing the host fds
	 * and push back on the stack until the device is closed.
	 */
	if (vp->in_error) {
		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
		return NETDEV_TX_BUSY;
	}

	if ((vp->options & VECTOR_TX) == 0) {
		writev_tx(vp, skb);
		return NETDEV_TX_OK;
	}

	/* We do BQL only in the vector path, no point doing it in
	 * packet at a time mode as there is no device queue
	 */

	netdev_sent_queue(vp->dev, skb->len);
	queue_depth = vector_enqueue(vp->tx_queue, skb);

	if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
		/* More packets are coming: defer the flush to the
		 * coalescing timer to build a bigger sendmmsg batch.
		 */
		mod_timer(&vp->tl, vp->coalesce);
		return NETDEV_TX_OK;
	} else {
		queue_depth = vector_send(vp->tx_queue);
		/* Anything left unsent is finished off in NAPI context. */
		if (queue_depth > 0)
			napi_schedule(&vp->napi);
	}

	return NETDEV_TX_OK;
}
108349da7e64SAnton Ivanov
vector_rx_interrupt(int irq,void * dev_id)108449da7e64SAnton Ivanov static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
108549da7e64SAnton Ivanov {
108649da7e64SAnton Ivanov struct net_device *dev = dev_id;
108749da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(dev);
108849da7e64SAnton Ivanov
108949da7e64SAnton Ivanov if (!netif_running(dev))
109049da7e64SAnton Ivanov return IRQ_NONE;
1091b35507a4SAnton Ivanov napi_schedule(&vp->napi);
109249da7e64SAnton Ivanov return IRQ_HANDLED;
109349da7e64SAnton Ivanov
109449da7e64SAnton Ivanov }
109549da7e64SAnton Ivanov
vector_tx_interrupt(int irq,void * dev_id)109649da7e64SAnton Ivanov static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
109749da7e64SAnton Ivanov {
109849da7e64SAnton Ivanov struct net_device *dev = dev_id;
109949da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(dev);
110049da7e64SAnton Ivanov
110149da7e64SAnton Ivanov if (!netif_running(dev))
110249da7e64SAnton Ivanov return IRQ_NONE;
110349da7e64SAnton Ivanov /* We need to pay attention to it only if we got
110449da7e64SAnton Ivanov * -EAGAIN or -ENOBUFFS from sendmmsg. Otherwise
110549da7e64SAnton Ivanov * we ignore it. In the future, it may be worth
110649da7e64SAnton Ivanov * it to improve the IRQ controller a bit to make
110749da7e64SAnton Ivanov * tweaking the IRQ mask less costly
110849da7e64SAnton Ivanov */
110949da7e64SAnton Ivanov
1110b35507a4SAnton Ivanov napi_schedule(&vp->napi);
111149da7e64SAnton Ivanov return IRQ_HANDLED;
111249da7e64SAnton Ivanov
111349da7e64SAnton Ivanov }
111449da7e64SAnton Ivanov
/* Round-robin cursor over the VECTOR_IRQ_SPACE range, used to spread
 * RX/TX IRQ numbers across newly opened devices (see vector_net_open).
 */
static int irq_rr;
111649da7e64SAnton Ivanov
/*
 * ndo_stop: tear down everything vector_net_open() set up - timer,
 * IRQs, NAPI context, BPF filter, host fds, queues and buffers - then
 * mark the device closed. Also used as the unwind path for a failed
 * open, so every step tolerates partially initialised state.
 */
static int vector_net_close(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	del_timer(&vp->tl);

	/* Nothing was opened (or an early open failure): done. */
	if (vp->fds == NULL)
		return 0;

	/* Disable and free all IRQS */
	if (vp->rx_irq > 0) {
		um_free_irq(vp->rx_irq, dev);
		vp->rx_irq = 0;
	}
	if (vp->tx_irq > 0) {
		um_free_irq(vp->tx_irq, dev);
		vp->tx_irq = 0;
	}
	napi_disable(&vp->napi);
	netif_napi_del(&vp->napi);
	/* Detach the BPF filter before closing the fd it is attached to. */
	if (vp->fds->rx_fd > 0) {
		if (vp->bpf)
			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
		os_close_file(vp->fds->rx_fd);
		vp->fds->rx_fd = -1;
	}
	if (vp->fds->tx_fd > 0) {
		os_close_file(vp->fds->tx_fd);
		vp->fds->tx_fd = -1;
	}
	if (vp->bpf != NULL)
		kfree(vp->bpf->filter);
	kfree(vp->bpf);
	vp->bpf = NULL;
	kfree(vp->fds->remote_addr);
	kfree(vp->transport_data);
	kfree(vp->header_rxbuffer);
	kfree(vp->header_txbuffer);
	if (vp->rx_queue != NULL)
		destroy_queue(vp->rx_queue);
	if (vp->tx_queue != NULL)
		destroy_queue(vp->tx_queue);
	kfree(vp->fds);
	vp->fds = NULL;
	/* Clear the open/error state under the lock that open() takes. */
	spin_lock_irqsave(&vp->lock, flags);
	vp->opened = false;
	vp->in_error = false;
	spin_unlock_irqrestore(&vp->lock, flags);
	return 0;
}
116949da7e64SAnton Ivanov
/*
 * NAPI poll callback: flush any queued vector TX first, then receive
 * up to @budget packets via the vector (recvmmsg) or legacy (recvmsg)
 * path. Reschedules itself while either direction made progress and
 * completes NAPI when a cycle processed less than the budget.
 */
static int vector_poll(struct napi_struct *napi, int budget)
{
	struct vector_private *vp = container_of(napi, struct vector_private, napi);
	int work_done = 0;
	int err;
	bool tx_enqueued = false;

	/* Push out anything the xmit path left in the TX queue. */
	if ((vp->options & VECTOR_TX) != 0)
		tx_enqueued = (vector_send(vp->tx_queue) > 0);
	if ((vp->options & VECTOR_RX) > 0)
		err = vector_mmsg_rx(vp, budget);
	else {
		err = vector_legacy_rx(vp);
		/* Legacy path returns bytes; account it as one packet. */
		if (err > 0)
			err = 1;
	}
	if (err > 0)
		work_done += err;

	/* NOTE(review): rescheduling before napi_complete_done() appears
	 * intentional (keep polling while work remains) - confirm against
	 * the NAPI state machine before restructuring.
	 */
	if (tx_enqueued || err > 0)
		napi_schedule(napi);
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}
1195b35507a4SAnton Ivanov
vector_reset_tx(struct work_struct * work)119649da7e64SAnton Ivanov static void vector_reset_tx(struct work_struct *work)
119749da7e64SAnton Ivanov {
119849da7e64SAnton Ivanov struct vector_private *vp =
119949da7e64SAnton Ivanov container_of(work, struct vector_private, reset_tx);
120049da7e64SAnton Ivanov netdev_reset_queue(vp->dev);
120149da7e64SAnton Ivanov netif_start_queue(vp->dev);
120249da7e64SAnton Ivanov netif_wake_queue(vp->dev);
120349da7e64SAnton Ivanov }
12049807019aSAnton Ivanov
/*
 * ndo_open: allocate all per-device resources - host fds, RX/TX
 * queues or single-packet buffers, NAPI context, IRQs and an optional
 * BPF filter - then start the interface. Any failure is unwound via
 * vector_net_close(). Returns 0 or a negative errno.
 */
static int vector_net_open(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;
	int err = -EINVAL;
	struct vector_device *vdevice;

	/* Guard against a concurrent second open. */
	spin_lock_irqsave(&vp->lock, flags);
	if (vp->opened) {
		spin_unlock_irqrestore(&vp->lock, flags);
		return -ENXIO;
	}
	vp->opened = true;
	spin_unlock_irqrestore(&vp->lock, flags);

	/* User-supplied BPF program, if one was named in the ifspec. */
	vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));

	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);

	if (vp->fds == NULL)
		goto out_close;

	if (build_transport_data(vp) < 0)
		goto out_close;

	/* Vector RX needs an mmsg ring; legacy RX a single header buffer. */
	if ((vp->options & VECTOR_RX) > 0) {
		vp->rx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->rx_header_size,
			MAX_IOV_SIZE
		);
		vp->rx_queue->queue_depth = get_depth(vp->parsed);
	} else {
		vp->header_rxbuffer = kmalloc(
			vp->rx_header_size,
			GFP_KERNEL
		);
		if (vp->header_rxbuffer == NULL)
			goto out_close;
	}
	/* Likewise for TX: sendmmsg ring vs. single header buffer. */
	if ((vp->options & VECTOR_TX) > 0) {
		vp->tx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->header_size,
			MAX_IOV_SIZE
		);
	} else {
		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
		if (vp->header_txbuffer == NULL)
			goto out_close;
	}

	/* NAPI weight tracks the configured queue depth. */
	netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
			      get_depth(vp->parsed));
	napi_enable(&vp->napi);

	/* READ IRQ */
	err = um_request_irq(
		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
		IRQ_READ, vector_rx_interrupt,
		IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}
	vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
	dev->irq = irq_rr + VECTOR_BASE_IRQ;
	irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;

	/* WRITE IRQ - we need it only if we have vector TX */
	if ((vp->options & VECTOR_TX) > 0) {
		err = um_request_irq(
			irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
			IRQ_WRITE, vector_tx_interrupt,
			IRQF_SHARED, dev->name, dev);
		if (err < 0) {
			netdev_err(dev,
				"vector_open: failed to get tx irq(%d)\n", err);
			err = -ENETUNREACH;
			goto out_close;
		}
		vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
		irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
	}

	/* If qdisc bypass works, filtering must happen in userspace BPF. */
	if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
			vp->options |= VECTOR_BPF;
	}
	/* Fall back to a default MAC-filter BPF if none was supplied. */
	if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
		vp->bpf = uml_vector_default_bpf(dev->dev_addr);

	if (vp->bpf != NULL)
		uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

	netif_start_queue(dev);
	vector_reset_stats(vp);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */

	napi_schedule(&vp->napi);

	vdevice = find_device(vp->unit);
	vdevice->opened = 1;

	/* The coalescing timer drives batched TX flushes. */
	if ((vp->options & VECTOR_TX) != 0)
		add_timer(&vp->tl);
	return 0;
out_close:
	vector_net_close(dev);
	return err;
}
132349da7e64SAnton Ivanov
132449da7e64SAnton Ivanov
/* .ndo_set_rx_mode hook - intentionally a no-op for now.
 * TODO: multicast filtering could be implemented with BPF games here.
 */
static void vector_net_set_multicast_list(struct net_device *dev)
{
}
133049da7e64SAnton Ivanov
vector_net_tx_timeout(struct net_device * dev,unsigned int txqueue)13310290bd29SMichael S. Tsirkin static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
133249da7e64SAnton Ivanov {
133349da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(dev);
133449da7e64SAnton Ivanov
133549da7e64SAnton Ivanov vp->estats.tx_timeout_count++;
133649da7e64SAnton Ivanov netif_trans_update(dev);
133749da7e64SAnton Ivanov schedule_work(&vp->reset_tx);
133849da7e64SAnton Ivanov }
133949da7e64SAnton Ivanov
/* .ndo_fix_features: checksum offload cannot be negotiated with the
 * host side of a raw socket, so always mask it out.
 */
static netdev_features_t vector_fix_features(struct net_device *dev,
					netdev_features_t features)
{
	return features & ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
}
134649da7e64SAnton Ivanov
/* .ndo_set_features: adjust the size requested for new frame buffers.
 * GSO/GRO cannot be negotiated on raw sockets, so only our side of the
 * link changes - GRO gets full 64K buffers, otherwise normal sizing.
 */
static int vector_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct vector_private *priv = netdev_priv(dev);

	priv->req_size = (features & NETIF_F_GRO) ?
		65536 :
		priv->max_packet + priv->headroom + SAFETY_MARGIN;
	return 0;
}
136349da7e64SAnton Ivanov
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the RX handler by hand with its IRQ masked. */
static void vector_net_poll_controller(struct net_device *dev)
{
	int irq = dev->irq;

	disable_irq(irq);
	vector_rx_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
137249da7e64SAnton Ivanov
/* ethtool .get_drvinfo: only the driver name is reported. */
static void vector_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}
137849da7e64SAnton Ivanov
/* ethtool .flash_device hook, (ab)used to load a classic BPF program:
 * the "firmware" file named by efl->data is read and installed as the
 * socket filter on the receive fd.  Returns 0 (or the attach result)
 * on success, -1 on any failure.  Only permitted when the device was
 * configured with VECTOR_BPF_FLASH.
 */
static int vector_net_load_bpf_flash(struct net_device *dev,
				struct ethtool_flash *efl)
{
	struct vector_private *vp = netdev_priv(dev);
	struct vector_device *vdevice;
	const struct firmware *fw;
	int result = 0;

	if (!(vp->options & VECTOR_BPF_FLASH)) {
		netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
		return -1;
	}

	/* NOTE(review): request_firmware() below can sleep; holding a
	 * spinlock across it looks unsafe - confirm vp->lock usage.
	 */
	spin_lock(&vp->lock);

	if (vp->bpf != NULL) {
		/* Replacing an existing program: detach it from the open
		 * socket first, then drop the old filter body but keep
		 * the sock_fprog wrapper for reuse.
		 */
		if (vp->opened)
			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
		kfree(vp->bpf->filter);
		vp->bpf->filter = NULL;
	} else {
		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
		if (vp->bpf == NULL) {
			netdev_err(dev, "failed to allocate memory for firmware\n");
			goto flash_fail;
		}
	}

	vdevice = find_device(vp->unit);

	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
		goto flash_fail;

	/* The firmware blob is the raw array of sock_filter insns. */
	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
	if (!vp->bpf->filter)
		goto free_buffer;

	vp->bpf->len = fw->size / sizeof(struct sock_filter);
	release_firmware(fw);

	/* If the device is up, install the new filter right away. */
	if (vp->opened)
		result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

	spin_unlock(&vp->lock);

	return result;

free_buffer:
	release_firmware(fw);

flash_fail:
	/* Shared failure path: on any error the device ends up with no
	 * BPF program at all (filter may be NULL here, kfree handles it).
	 */
	spin_unlock(&vp->lock);
	if (vp->bpf != NULL)
		kfree(vp->bpf->filter);
	kfree(vp->bpf);
	vp->bpf = NULL;
	return -1;
}
14379807019aSAnton Ivanov
vector_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)143849da7e64SAnton Ivanov static void vector_get_ringparam(struct net_device *netdev,
143974624944SHao Chen struct ethtool_ringparam *ring,
144074624944SHao Chen struct kernel_ethtool_ringparam *kernel_ring,
144174624944SHao Chen struct netlink_ext_ack *extack)
144249da7e64SAnton Ivanov {
144349da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(netdev);
144449da7e64SAnton Ivanov
144549da7e64SAnton Ivanov ring->rx_max_pending = vp->rx_queue->max_depth;
144649da7e64SAnton Ivanov ring->tx_max_pending = vp->tx_queue->max_depth;
144749da7e64SAnton Ivanov ring->rx_pending = vp->rx_queue->max_depth;
144849da7e64SAnton Ivanov ring->tx_pending = vp->tx_queue->max_depth;
144949da7e64SAnton Ivanov }
145049da7e64SAnton Ivanov
/* ethtool .get_strings: stats names come from ethtool_stats_keys;
 * the (empty) self-test set is just a NUL byte.
 */
static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	if (stringset == ETH_SS_TEST)
		*buf = '\0';
	else if (stringset == ETH_SS_STATS)
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
	else
		WARN_ON(1);
}
146549da7e64SAnton Ivanov
vector_get_sset_count(struct net_device * dev,int sset)146649da7e64SAnton Ivanov static int vector_get_sset_count(struct net_device *dev, int sset)
146749da7e64SAnton Ivanov {
146849da7e64SAnton Ivanov switch (sset) {
146949da7e64SAnton Ivanov case ETH_SS_TEST:
147049da7e64SAnton Ivanov return 0;
147149da7e64SAnton Ivanov case ETH_SS_STATS:
147249da7e64SAnton Ivanov return VECTOR_NUM_STATS;
147349da7e64SAnton Ivanov default:
147449da7e64SAnton Ivanov return -EOPNOTSUPP;
147549da7e64SAnton Ivanov }
147649da7e64SAnton Ivanov }
147749da7e64SAnton Ivanov
/* ethtool -S: dump the private counters in one copy.
 * Assumes struct vector_estats is a flat array of u64 counters whose
 * order matches ethtool_stats_keys - TODO confirm against the header.
 */
static void vector_get_ethtool_stats(struct net_device *dev,
				struct ethtool_stats *estats,
				u64 *tmp_stats)
{
	struct vector_private *vp = netdev_priv(dev);

	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
}
148649da7e64SAnton Ivanov
vector_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)148749da7e64SAnton Ivanov static int vector_get_coalesce(struct net_device *netdev,
14884baf0e0bSJohannes Berg struct ethtool_coalesce *ec,
14894baf0e0bSJohannes Berg struct kernel_ethtool_coalesce *kernel_coal,
14904baf0e0bSJohannes Berg struct netlink_ext_ack *extack)
149149da7e64SAnton Ivanov {
149249da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(netdev);
149349da7e64SAnton Ivanov
149449da7e64SAnton Ivanov ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
149549da7e64SAnton Ivanov return 0;
149649da7e64SAnton Ivanov }
149749da7e64SAnton Ivanov
vector_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)149849da7e64SAnton Ivanov static int vector_set_coalesce(struct net_device *netdev,
14994baf0e0bSJohannes Berg struct ethtool_coalesce *ec,
15004baf0e0bSJohannes Berg struct kernel_ethtool_coalesce *kernel_coal,
15014baf0e0bSJohannes Berg struct netlink_ext_ack *extack)
150249da7e64SAnton Ivanov {
150349da7e64SAnton Ivanov struct vector_private *vp = netdev_priv(netdev);
150449da7e64SAnton Ivanov
150549da7e64SAnton Ivanov vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
150649da7e64SAnton Ivanov if (vp->coalesce == 0)
150749da7e64SAnton Ivanov vp->coalesce = 1;
150849da7e64SAnton Ivanov return 0;
150949da7e64SAnton Ivanov }
151049da7e64SAnton Ivanov
/* ethtool operations: only the TX usecs coalescing parameter is
 * writable; everything else is informational.
 */
static const struct ethtool_ops vector_net_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
	.get_drvinfo	= vector_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_ringparam	= vector_get_ringparam,
	.get_strings	= vector_get_strings,
	.get_sset_count	= vector_get_sset_count,
	.get_ethtool_stats = vector_get_ethtool_stats,
	.get_coalesce	= vector_get_coalesce,
	.set_coalesce	= vector_set_coalesce,
	.flash_device	= vector_net_load_bpf_flash,	/* loads a BPF filter */
};
152449da7e64SAnton Ivanov
152549da7e64SAnton Ivanov
/* net_device callbacks; the receive path itself is driven by NAPI,
 * not by any hook in this table.
 */
static const struct net_device_ops vector_netdev_ops = {
	.ndo_open		= vector_net_open,
	.ndo_stop		= vector_net_close,
	.ndo_start_xmit		= vector_net_start_xmit,
	.ndo_set_rx_mode	= vector_net_set_multicast_list,
	.ndo_tx_timeout		= vector_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_fix_features	= vector_fix_features,
	.ndo_set_features	= vector_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = vector_net_poll_controller,
#endif
};
154049da7e64SAnton Ivanov
/* TX coalescing timer: kick the NAPI poll loop so queued frames are
 * flushed even when no further transmit activity arrives.
 */
static void vector_timer_expire(struct timer_list *t)
{
	struct vector_private *vp = from_timer(vp, t, tl);

	vp->estats.tx_kicks++;
	napi_schedule(&vp->napi);
}
154849da7e64SAnton Ivanov
1549b35507a4SAnton Ivanov
1550b35507a4SAnton Ivanov
/*
 * Create and register one "vecN" network device from a parsed command
 * line argument list: allocate the vector_device bookkeeping struct and
 * the etherdev, register a platform device for sysfs, initialise the
 * private area, then register the netdevice and put the device on the
 * global list.  Returns void - on failure the device simply never
 * appears (errors are only logged).
 */
static void vector_eth_configure(
		int n,
		struct arglist *def
	)
{
	struct vector_device *device;
	struct net_device *dev;
	struct vector_private *vp;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
				 "vector_device\n");
		return;
	}
	dev = alloc_etherdev(sizeof(struct vector_private));
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
				 "net_device for vec%d\n", n);
		goto out_free_device;
	}

	dev->mtu = get_mtu(def);

	INIT_LIST_HEAD(&device->list);
	device->unit = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
	vp = netdev_priv(dev);

	/* sysfs register - the platform driver is registered lazily on
	 * first device creation.
	 */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = vector_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	/* Bulk-initialise the private area with a compound literal;
	 * any field not listed (queues, stats, ...) starts out zeroed.
	 */
	*vp = ((struct vector_private)
		{
		.list			= LIST_HEAD_INIT(vp->list),
		.dev			= dev,
		.unit			= n,
		.options		= get_transport_options(def),
		.rx_irq			= 0,
		.tx_irq			= 0,
		.parsed			= def,
		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
		/* TODO - we need to calculate headroom so that ip header
		 * is 16 byte aligned all the time
		 */
		.headroom		= get_headroom(def),
		.form_header		= NULL,
		.verify_header		= NULL,
		.header_rxbuffer	= NULL,
		.header_txbuffer	= NULL,
		.header_size		= 0,
		.rx_header_size		= 0,
		.rexmit_scheduled	= false,
		.opened			= false,
		.transport_data		= NULL,
		.in_write_poll		= false,
		.coalesce		= 2,
		.req_size		= get_req_size(def),
		.in_error		= false,
		.bpf			= NULL
	});

	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
	INIT_WORK(&vp->reset_tx, vector_reset_tx);

	timer_setup(&vp->tl, vector_timer_expire, 0);
	spin_lock_init(&vp->lock);

	/* FIXME */
	dev->netdev_ops = &vector_netdev_ops;
	dev->ethtool_ops = &vector_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	/* primary IRQ - fixme */
	dev->irq = 0; /* we will adjust this once opened */

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&vector_devices_lock);
	list_add(&device->list, &vector_devices);
	spin_unlock(&vector_devices_lock);

	return;

out_undo_user_init:
	/* NOTE(review): nothing is undone here - the platform device
	 * stays registered and dev is not freed on register_netdevice()
	 * failure; looks like a leak, confirm against release callbacks.
	 */
	return;
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}
166449da7e64SAnton Ivanov
166549da7e64SAnton Ivanov
166649da7e64SAnton Ivanov
166749da7e64SAnton Ivanov
166849da7e64SAnton Ivanov /*
166949da7e64SAnton Ivanov * Invoked late in the init
167049da7e64SAnton Ivanov */
167149da7e64SAnton Ivanov
vector_init(void)167249da7e64SAnton Ivanov static int __init vector_init(void)
167349da7e64SAnton Ivanov {
167449da7e64SAnton Ivanov struct list_head *ele;
167549da7e64SAnton Ivanov struct vector_cmd_line_arg *def;
167649da7e64SAnton Ivanov struct arglist *parsed;
167749da7e64SAnton Ivanov
167849da7e64SAnton Ivanov list_for_each(ele, &vec_cmd_line) {
167949da7e64SAnton Ivanov def = list_entry(ele, struct vector_cmd_line_arg, list);
168049da7e64SAnton Ivanov parsed = uml_parse_vector_ifspec(def->arguments);
168149da7e64SAnton Ivanov if (parsed != NULL)
168249da7e64SAnton Ivanov vector_eth_configure(def->unit, parsed);
168349da7e64SAnton Ivanov }
168449da7e64SAnton Ivanov return 0;
168549da7e64SAnton Ivanov }
168649da7e64SAnton Ivanov
168749da7e64SAnton Ivanov
168849da7e64SAnton Ivanov /* Invoked at initial argument parsing, only stores
168949da7e64SAnton Ivanov * arguments until a proper vector_init is called
169049da7e64SAnton Ivanov * later
169149da7e64SAnton Ivanov */
169249da7e64SAnton Ivanov
vector_setup(char * str)169349da7e64SAnton Ivanov static int __init vector_setup(char *str)
169449da7e64SAnton Ivanov {
169549da7e64SAnton Ivanov char *error;
169649da7e64SAnton Ivanov int n, err;
169749da7e64SAnton Ivanov struct vector_cmd_line_arg *new;
169849da7e64SAnton Ivanov
169949da7e64SAnton Ivanov err = vector_parse(str, &n, &str, &error);
170049da7e64SAnton Ivanov if (err) {
170149da7e64SAnton Ivanov printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
170249da7e64SAnton Ivanov str, error);
170349da7e64SAnton Ivanov return 1;
170449da7e64SAnton Ivanov }
17057e1c4e27SMike Rapoport new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
17068a7f97b9SMike Rapoport if (!new)
17078a7f97b9SMike Rapoport panic("%s: Failed to allocate %zu bytes\n", __func__,
17088a7f97b9SMike Rapoport sizeof(*new));
170949da7e64SAnton Ivanov INIT_LIST_HEAD(&new->list);
171049da7e64SAnton Ivanov new->unit = n;
171149da7e64SAnton Ivanov new->arguments = str;
171249da7e64SAnton Ivanov list_add_tail(&new->list, &vec_cmd_line);
171349da7e64SAnton Ivanov return 1;
171449da7e64SAnton Ivanov }
171549da7e64SAnton Ivanov
/* Register the "vec" kernel command line option and its help text. */
__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
" Configure a vector io network device.\n\n"
);

/* Devices are only instantiated once the rest of the kernel is up. */
late_initcall(vector_init);
172349da7e64SAnton Ivanov
/* mconsole hook: lets "vec" devices be added, removed and identified
 * at runtime through the UML management console.
 */
static struct mc_device vector_mc = {
	.list		= LIST_HEAD_INIT(vector_mc.list),
	.name		= "vec",
	.config		= vector_config,
	.get_config	= NULL,
	.id		= vector_id,
	.remove		= vector_remove,
};
173249da7e64SAnton Ivanov
#ifdef CONFIG_INET
/* Inet address notifier - currently ignores every event; kept as a
 * placeholder matching the other UML network drivers.
 */
static int vector_inetaddr_event(
	struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block vector_inetaddr_notifier = {
	.notifier_call		= vector_inetaddr_event,
};

static void inet_register(void)
{
	register_inetaddr_notifier(&vector_inetaddr_notifier);
}
#else
/* Without CONFIG_INET there is nothing to register. */
static inline void inet_register(void)
{
}
#endif
175549da7e64SAnton Ivanov
/* Early init: register the mconsole device class and the inet
 * notifier.  Actual device creation happens later in vector_init().
 */
static int vector_net_init(void)
{
	mconsole_register_dev(&vector_mc);
	inet_register();
	return 0;
}

__initcall(vector_net_init);
176449da7e64SAnton Ivanov
176549da7e64SAnton Ivanov
176649da7e64SAnton Ivanov
1767