// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

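/*
 * Interrupt handler for the request (control) channel: consume responses
 * from the shared ring, store their status and complete the request that
 * is waiting on channel->u.req.completion.
 */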
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

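/*
 * Interrupt handler for the event channel: consume incoming events from
 * the backend; currently only XENSND_EVT_CUR_POS is handled and forwarded
 * to the ALSA layer to update the stream position.
 */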
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure ring contents. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

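/* Push the prepared request to the shared ring and notify the backend if needed. */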
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}

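/*
 * Tear down a single channel: release any waiter with -EIO, unbind the
 * IRQ, close the event channel and free the shared ring page.
 */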
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	void *page = NULL;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release all who still wait for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	xenbus_teardown_ring(&page, 1, &channel->gref);

	memset(channel, 0, sizeof(*channel));
}

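/* Free both channels of every stream's request/event pair. */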
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}

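/*
 * Allocate and grant the shared ring page, allocate an event channel and
 * bind a threaded IRQ handler matching the channel type (request or event).
 */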
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	void *page;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref);
	if (ret)
		goto fail;

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		handler = evtchnl_interrupt_req;
	} else {
		channel->u.evt.page = page;
		handler = evtchnl_interrupt_evt;
	}

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}

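/*
 * Create a request/event channel pair for every configured playback and
 * capture stream.
 */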
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}

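/*
 * Write the channel's ring reference and event channel number to XenStore
 * under the given path as part of the supplied transaction.
 */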
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the event channel. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}

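/*
 * Publish ring references and event channels for all streams in a single
 * XenStore transaction, retrying the whole transaction on -EAGAIN.
 */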
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

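/*
 * Mark both channels of a pair as (dis)connected, taking the ring locks so
 * the interrupt handlers observe a consistent state.
 */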
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

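/* Reset the next expected request/event IDs for both channels of a pair. */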
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}