// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Xenbus code for netif backend
 *
 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
 * Copyright (C) 2005 XenSource Ltd
 */

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>

static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state);

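/*
 * When CONFIG_DEBUG_FS is enabled, per-queue I/O ring state is exposed
 * under the xen-netback debugfs root.  Each "io_ring_qN" entry dumps the
 * shared ring indices when read and accepts the command "kick" when
 * written (see xenvif_write_io_ring() below); a "ctrl" entry dumps the
 * hash state when a control ring is connected.
 */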
#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;

static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
	struct xenvif_queue *queue = m->private;
	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
	struct netdev_queue *dev_queue;

	if (tx_ring->sring) {
		struct xen_netif_tx_sring *sring = tx_ring->sring;

		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
			   tx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   tx_ring->req_cons,
			   tx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
			   sring->rsp_prod,
			   tx_ring->rsp_prod_pvt,
			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
			   queue->pending_prod,
			   queue->pending_cons,
			   nr_pending_reqs(queue));
		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
			   queue->dealloc_prod,
			   queue->dealloc_cons,
			   queue->dealloc_prod - queue->dealloc_cons);
	}

	if (rx_ring->sring) {
		struct xen_netif_rx_sring *sring = rx_ring->sring;

		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   rx_ring->req_cons,
			   rx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
			   sring->rsp_prod,
			   rx_ring->rsp_prod_pvt,
			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
	}

	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
		   "remaining: %lu, expires: %lu, now: %lu\n",
		   queue->napi.state, queue->napi.weight,
		   skb_queue_len(&queue->tx_queue),
		   timer_pending(&queue->credit_timeout),
		   queue->credit_bytes,
		   queue->credit_usec,
		   queue->remaining_credit,
		   queue->credit_timeout.expires,
		   jiffies);

	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

	seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
		   queue->rx_queue_len, queue->rx_queue_max,
		   skb_queue_len(&queue->rx_queue),
		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

	return 0;
}

#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32

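/*
 * Illustrative debugfs usage, assuming the top-level "xen-netback"
 * directory created elsewhere in the driver and an interface named
 * vif1.0 (names vary by domain and handle):
 *
 *   cat /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 *   echo kick > /sys/kernel/debug/xen-netback/vif1.0/io_ring_q0
 *
 * Writing anything other than "kick" is rejected with -EINVAL.
 */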
static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
		     loff_t *ppos)
{
	struct xenvif_queue *queue =
		((struct seq_file *)filp->private_data)->private;
	int len;
	char write[BUFFER_SIZE];

	/* don't allow partial writes and check the length */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(write))
		return -ENOSPC;

	len = simple_write_to_buffer(write,
				     sizeof(write) - 1,
				     ppos,
				     buf,
				     count);
	if (len < 0)
		return len;

	write[len] = '\0';

	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
		xenvif_interrupt(0, (void *)queue);
	else {
		pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
			queue->id);
		count = -EINVAL;
	}
	return count;
}

static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
	int ret;
	void *queue = NULL;

	if (inode->i_private)
		queue = inode->i_private;
	ret = single_open(filp, xenvif_read_io_ring, queue);
	filp->f_mode |= FMODE_PWRITE;
	return ret;
}

static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
	.open = xenvif_io_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
};

static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
	struct xenvif *vif = m->private;

	xenvif_dump_hash_info(vif, m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);

static void xenvif_debugfs_addif(struct xenvif *vif)
{
	int i;

	vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
						  xen_netback_dbg_root);
	for (i = 0; i < vif->num_queues; ++i) {
		char filename[sizeof("io_ring_q") + 4];

		snprintf(filename, sizeof(filename), "io_ring_q%d", i);
		debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
				    &vif->queues[i],
				    &xenvif_dbg_io_ring_ops_fops);
	}

	if (vif->ctrl_irq)
		debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
				    &xenvif_ctrl_fops);
}

static void xenvif_debugfs_delif(struct xenvif *vif)
{
	debugfs_remove_recursive(vif->xenvif_dbg_root);
	vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */

/*
 * Handle the creation of the hotplug script environment.  We add the script
 * and vif variables to the environment, for the benefit of the vif-* hotplug
 * scripts.
 */
static int netback_uevent(const struct xenbus_device *xdev,
			  struct kobj_uevent_env *env)
{
	struct backend_info *be = dev_get_drvdata(&xdev->dev);

	if (!be)
		return 0;

	if (add_uevent_var(env, "script=%s", be->hotplug_script))
		return -ENOMEM;

	if (!be->vif)
		return 0;

	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}


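/*
 * Read the frontend handle from xenstore and allocate the vif for this
 * backend.  Announcing the device with KOBJ_ONLINE here kicks the hotplug
 * scripts, which is why netback_probe() calls this as soon as the script
 * path is known.
 */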
static int backend_create_xenvif(struct backend_info *be)
{
	int err;
	long handle;
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif;

	if (be->vif != NULL)
		return 0;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		return (err < 0) ? err : -EINVAL;
	}

	vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
	if (IS_ERR(vif)) {
		err = PTR_ERR(vif);
		xenbus_dev_fatal(dev, err, "creating interface");
		return err;
	}
	be->vif = vif;
	vif->be = be;

	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
	return 0;
}

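/*
 * Tear down an active vif: drop the xenstore watches and debugfs entries,
 * disconnect the data rings, then free the queues once any in-flight
 * handlers have drained.  The control ring is disconnected last.
 */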
static void backend_disconnect(struct backend_info *be)
{
	struct xenvif *vif = be->vif;

	if (vif) {
		unsigned int num_queues = vif->num_queues;
		unsigned int queue_index;

		xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
		xenvif_disconnect_data(vif);

		/* At this point some of the handlers may still be active
		 * so we need to have additional synchronization here.
		 */
		vif->num_queues = 0;
		synchronize_net();

		for (queue_index = 0; queue_index < num_queues; ++queue_index)
			xenvif_deinit_queue(&vif->queues[queue_index]);

		vfree(vif->queues);
		vif->queues = NULL;

		xenvif_disconnect_ctrl(vif);
	}
}

static void backend_connect(struct backend_info *be)
{
	if (be->vif)
		connect(be);
}

static inline void backend_switch_state(struct backend_info *be,
					enum xenbus_state state)
{
	struct xenbus_device *dev = be->dev;

	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
	be->state = state;

	/* If we are waiting for a hotplug script then defer the
	 * actual xenbus state change.
	 */
	if (!be->have_hotplug_status_watch)
		xenbus_switch_state(dev, state);
}

/* Handle backend state transitions:
 *
 * The backend state starts in Initialising and the following transitions are
 * allowed.
 *
 * Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state)
{
	while (be->state != state) {
		switch (be->state) {
		case XenbusStateInitialising:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateInitWait:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(be);
				backend_switch_state(be, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_disconnect(be);
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		default:
			BUG();
		}
	}
}

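/*
 * The frontend advertises the XDP headroom it wants via the otherend
 * "xdp-headroom" node.  Clamp it to XEN_NETIF_MAX_XDP_HEADROOM and fall
 * back to 0 if the node is absent or unreadable.
 */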
static void read_xenbus_frontend_xdp(struct backend_info *be,
				     struct xenbus_device *dev)
{
	struct xenvif *vif = be->vif;
	u16 headroom;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "xdp-headroom", "%hu", &headroom);
	if (err != 1) {
		vif->xdp_headroom = 0;
		return;
	}
	if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
		headroom = XEN_NETIF_MAX_XDP_HEADROOM;
	vif->xdp_headroom = headroom;
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(be, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		set_backend_state(be, XenbusStateConnected);
		break;

	case XenbusStateReconfiguring:
		read_xenbus_frontend_xdp(be, dev);
		xenbus_switch_state(dev, XenbusStateReconfigured);
		break;

	case XenbusStateClosing:
		set_backend_state(be, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(be, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;	/* if not online */
	case XenbusStateUnknown:
		set_backend_state(be, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}


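/*
 * The rate limit is read from the backend's own "rate" node in the form
 * "<bytes>,<usec>".  A hypothetical toolstack-written value of
 * "10000000,50000" would allow 10,000,000 bytes per 50 ms credit period.
 */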
static void xen_net_read_rate(struct xenbus_device *dev,
			      unsigned long *bytes, unsigned long *usec)
{
	char *s, *e;
	unsigned long b, u;
	char *ratestr;

	/* Default to unlimited bandwidth. */
	*bytes = ~0UL;
	*usec = 0;

	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
	if (IS_ERR(ratestr))
		return;

	s = ratestr;
	b = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != ','))
		goto fail;

	s = e + 1;
	u = simple_strtoul(s, &e, 10);
	if ((s == e) || (*e != '\0'))
		goto fail;

	*bytes = b;
	*usec = u;

	kfree(ratestr);
	return;

 fail:
	pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
	kfree(ratestr);
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

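/*
 * Watch callback for the "rate" node: re-read the credit parameters and
 * apply them to every queue, trimming accumulated credit that exceeds the
 * new per-period allowance while the credit timer is idle.
 */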
static void xen_net_rate_changed(struct xenbus_watch *watch,
				 const char *path, const char *token)
{
	struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned int queue_index;

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
		struct xenvif_queue *queue = &vif->queues[queue_index];

		queue->credit_bytes = credit_bytes;
		queue->credit_usec = credit_usec;
		if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
		    queue->remaining_credit > queue->credit_bytes) {
			queue->remaining_credit = queue->credit_bytes;
		}
	}
}

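/* Watch the backend's "rate" node so that rate-limit changes take effect
 * without reconnecting the vif.
 */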
static int xen_register_credit_watch(struct xenbus_device *dev,
				     struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");

	if (vif->credit_watch.node)
		return -EADDRINUSE;

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	snprintf(node, maxlen, "%s/rate", dev->nodename);
	vif->credit_watch.node = node;
	vif->credit_watch.will_handle = NULL;
	vif->credit_watch.callback = xen_net_rate_changed;
	err = register_xenbus_watch(&vif->credit_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
		kfree(node);
		vif->credit_watch.node = NULL;
		vif->credit_watch.will_handle = NULL;
		vif->credit_watch.callback = NULL;
	}
	return err;
}

static void xen_unregister_credit_watch(struct xenvif *vif)
{
	if (vif->credit_watch.node) {
		unregister_xenbus_watch(&vif->credit_watch);
		kfree(vif->credit_watch.node);
		vif->credit_watch.node = NULL;
	}
}

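/*
 * Watch callback for the frontend's "request-multicast-control" node:
 * toggle multicast filtering on the vif whenever the frontend flips the
 * flag.
 */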
static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
				   const char *path, const char *token)
{
	struct xenvif *vif = container_of(watch, struct xenvif,
					  mcast_ctrl_watch);
	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);

	vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
					"request-multicast-control", 0);
}

static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
					 struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned maxlen = strlen(dev->otherend) +
		sizeof("/request-multicast-control");

	if (vif->mcast_ctrl_watch.node) {
		pr_err_ratelimited("Watch is already registered\n");
		return -EADDRINUSE;
	}

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node) {
		pr_err("Failed to allocate memory for watch\n");
		return -ENOMEM;
	}
	snprintf(node, maxlen, "%s/request-multicast-control",
		 dev->otherend);
	vif->mcast_ctrl_watch.node = node;
	vif->mcast_ctrl_watch.will_handle = NULL;
	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n",
		       vif->mcast_ctrl_watch.node);
		kfree(node);
		vif->mcast_ctrl_watch.node = NULL;
		vif->mcast_ctrl_watch.will_handle = NULL;
		vif->mcast_ctrl_watch.callback = NULL;
	}
	return err;
}

static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
{
	if (vif->mcast_ctrl_watch.node) {
		unregister_xenbus_watch(&vif->mcast_ctrl_watch);
		kfree(vif->mcast_ctrl_watch.node);
		vif->mcast_ctrl_watch.node = NULL;
	}
}

static void xen_register_watchers(struct xenbus_device *dev,
				  struct xenvif *vif)
{
	xen_register_credit_watch(dev, vif);
	xen_register_mcast_ctrl_watch(dev, vif);
}

static void xen_unregister_watchers(struct xenvif *vif)
{
	xen_unregister_mcast_ctrl_watch(vif);
	xen_unregister_credit_watch(vif);
}

static void unregister_hotplug_status_watch(struct backend_info *be)
{
	if (be->have_hotplug_status_watch) {
		unregister_xenbus_watch(&be->hotplug_status_watch);
		kfree(be->hotplug_status_watch.node);
	}
	be->have_hotplug_status_watch = 0;
}

static void hotplug_status_changed(struct xenbus_watch *watch,
				   const char *path,
				   const char *token)
{
	struct backend_info *be = container_of(watch,
					       struct backend_info,
					       hotplug_status_watch);
	char *str;
	unsigned int len;

	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
	if (IS_ERR(str))
		return;
	if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
		/* Complete any pending state change */
		xenbus_switch_state(be->dev, be->state);

		/* Not interested in this watch anymore. */
		unregister_hotplug_status_watch(be);
	}
	kfree(str);
}

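/*
 * Map the optional control ring.  The frontend publishes the following
 * keys in its own area (illustrative layout, matching the reads below):
 *
 *   <otherend>/ctrl-ring-ref      = "<grant reference>"
 *   <otherend>/event-channel-ctrl = "<event channel>"
 *
 * A missing ctrl-ring-ref simply means the frontend has no control ring
 * and is not an error.
 */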
static int connect_ctrl_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xenvif *vif = be->vif;
	unsigned int val;
	grant_ref_t ring_ref;
	unsigned int evtchn;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "ctrl-ring-ref", "%u", &val);
	if (err < 0)
		goto done; /* The frontend does not have a control ring */

	ring_ref = val;

	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "event-channel-ctrl", "%u", &val);
	if (err < 0) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/event-channel-ctrl",
				 dev->otherend);
		goto fail;
	}

	evtchn = val;

	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frame %u port %u",
				 ring_ref, evtchn);
		goto fail;
	}

done:
	return 0;

fail:
	return err;
}

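/*
 * Bring the interface up: read the MAC address, rate limit and feature
 * flags from xenstore, connect the control ring, allocate and connect the
 * requested number of queues, then turn the carrier on and wake the
 * transmit queues.
 */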
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
					"multi-queue-num-queues", 1);
	if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, -EINVAL,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	err = connect_ctrl_ring(be);
	if (err) {
		xenbus_dev_fatal(dev, err, "connecting control ring");
		return;
	}

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(array_size(requested_num_queues,
					     sizeof(struct xenvif_queue)));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
				be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to i so that
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_data_rings(be, queue);
		if (err) {
			/* connect_data_rings() cleans up after itself on
			 * failure, but we need to clean up after
			 * xenvif_init_queue() here, and also clean up any
			 * previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
		xenvif_deinit_queue(&be->vif->queues[queue_index]);
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	xenvif_disconnect_ctrl(be->vif);
	return;
}


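/*
 * For a multi-queue frontend the per-queue keys live in "queue-N"
 * subdirectories of the frontend area (illustrative layout, matching the
 * reads below):
 *
 *   <otherend>/queue-0/tx-ring-ref
 *   <otherend>/queue-0/rx-ring-ref
 *   <otherend>/queue-0/event-channel-tx   (or a single "event-channel")
 *   <otherend>/queue-0/event-channel-rx
 *
 * A single-queue frontend keeps the same keys directly in <otherend>.
 */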
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kstrdup(dev->otherend, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
				  tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}

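/*
 * Read the feature flags advertised by the frontend.  request-rx-copy is
 * mandatory (the backend only supports the rx-copy path); sg, gso,
 * checksum offloads and xdp-headroom are optional.
 */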
static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
		 */
		be->vif->drain_timeout = msecs_to_jiffies(30);
		be->vif->stall_timeout = 0;
	}

	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

	vif->gso_mask = 0;

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
		vif->gso_mask |= GSO_BIT(TCPV6);

	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
					     "feature-no-csum-offload", 0);

	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
						"feature-ipv6-csum-offload", 0);

	read_xenbus_frontend_xdp(be, dev);

	return 0;
}

static void netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	unregister_hotplug_status_watch(be);
	xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
	if (be->vif) {
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		backend_disconnect(be);
		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be->hotplug_script);
	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and switch to InitWait.
 */
static int netback_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	int sg;
	const char *script;
	struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}

	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	sg = 1;

	do {
		err = xenbus_transaction_start(&xbt);
		if (err) {
			xenbus_dev_fatal(dev, err, "starting transaction");
			goto fail;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
		if (err) {
			message = "writing feature-sg";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv4";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
				    "%d", sg);
		if (err) {
			message = "writing feature-gso-tcpv6";
			goto abort_transaction;
		}

		/* We support partial checksum setup for IPv6 packets */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-ipv6-csum-offload",
				    "%d", 1);
		if (err) {
			message = "writing feature-ipv6-csum-offload";
			goto abort_transaction;
		}

		/* We support rx-copy path. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-copy", "%d", 1);
		if (err) {
			message = "writing feature-rx-copy";
			goto abort_transaction;
		}

		/* we can adjust a headroom for netfront XDP processing */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-xdp-headroom", "%d",
				    provides_xdp_headroom);
		if (err) {
			message = "writing feature-xdp-headroom";
			goto abort_transaction;
		}

		/* We don't support rx-flip path (except old guests who
		 * don't grok this feature flag).
		 */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-rx-flip", "%d", 0);
		if (err) {
			message = "writing feature-rx-flip";
			goto abort_transaction;
		}

		/* We support dynamic multicast-control. */
		err = xenbus_printf(xbt, dev->nodename,
				    "feature-multicast-control", "%d", 1);
		if (err) {
			message = "writing feature-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_printf(xbt, dev->nodename,
				    "feature-dynamic-multicast-control",
				    "%d", 1);
		if (err) {
			message = "writing feature-dynamic-multicast-control";
			goto abort_transaction;
		}

		err = xenbus_transaction_end(xbt, 0);
	} while (err == -EAGAIN);

	if (err) {
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto fail;
	}

	/* Split event channels support, this is optional so it is not
	 * put inside the above loop.
	 */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-split-event-channels",
			    "%u", separate_tx_rx_irq);
	if (err)
		pr_debug("Error writing feature-split-event-channels\n");

	/* Multi-queue support: This is an optional feature. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenvif_max_queues);
	if (err)
		pr_debug("Error writing multi-queue-max-queues\n");

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-ctrl-ring",
			    "%u", true);
	if (err)
		pr_debug("Error writing feature-ctrl-ring\n");

	backend_switch_state(be, XenbusStateInitWait);

	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
	if (IS_ERR(script)) {
		err = PTR_ERR(script);
		xenbus_dev_fatal(dev, err, "reading script");
		goto fail;
	}

	be->hotplug_script = script;

	/* This kicks hotplug scripts, so do it immediately. */
	err = backend_create_xenvif(be);
	if (err)
		goto fail;

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
fail:
	pr_debug("failed\n");
	netback_remove(dev);
	return err;
}

static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netback_driver = {
	.ids = netback_ids,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
};

int xenvif_xenbus_init(void)
{
	return xenbus_register_backend(&netback_driver);
}

void xenvif_xenbus_fini(void)
{
	return xenbus_unregister_driver(&netback_driver);
}