1788ef64aSOleksandr Andrushchenko // SPDX-License-Identifier: GPL-2.0 OR MIT
2788ef64aSOleksandr Andrushchenko 
3788ef64aSOleksandr Andrushchenko /*
4788ef64aSOleksandr Andrushchenko  * Xen para-virtual sound device
5788ef64aSOleksandr Andrushchenko  *
6788ef64aSOleksandr Andrushchenko  * Copyright (C) 2016-2018 EPAM Systems Inc.
7788ef64aSOleksandr Andrushchenko  *
8788ef64aSOleksandr Andrushchenko  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9788ef64aSOleksandr Andrushchenko  */
10788ef64aSOleksandr Andrushchenko 
11788ef64aSOleksandr Andrushchenko #include <xen/events.h>
12788ef64aSOleksandr Andrushchenko #include <xen/grant_table.h>
13788ef64aSOleksandr Andrushchenko #include <xen/xen.h>
14788ef64aSOleksandr Andrushchenko #include <xen/xenbus.h>
15788ef64aSOleksandr Andrushchenko 
16788ef64aSOleksandr Andrushchenko #include "xen_snd_front.h"
171cee5593SOleksandr Andrushchenko #include "xen_snd_front_alsa.h"
18788ef64aSOleksandr Andrushchenko #include "xen_snd_front_cfg.h"
19788ef64aSOleksandr Andrushchenko #include "xen_snd_front_evtchnl.h"
20788ef64aSOleksandr Andrushchenko 
/*
 * Interrupt handler of the request channel: consume responses placed on
 * the shared ring by the backend and complete the request that waits on
 * channel->u.req.completion.
 *
 * Bound as a threaded IRQ (see evtchnl_alloc()), so sleeping on the
 * ring_io_lock mutex here is safe.
 */
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	/* Ignore interrupts while the channel is not fully connected. */
	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		/* Skip responses not matching the last request sent. */
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
			/* fall through */
		case XENSND_OP_CLOSE:
			/* fall through */
		case XENSND_OP_READ:
			/* fall through */
		case XENSND_OP_WRITE:
			/* fall through */
		case XENSND_OP_TRIGGER:
			/* Status-only response: wake up the requester. */
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			/* Also copy the negotiated HW parameter payload. */
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	/* Re-check for responses that may have been queued meanwhile. */
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
90788ef64aSOleksandr Andrushchenko 
91788ef64aSOleksandr Andrushchenko static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
92788ef64aSOleksandr Andrushchenko {
93788ef64aSOleksandr Andrushchenko 	struct xen_snd_front_evtchnl *channel = dev_id;
94788ef64aSOleksandr Andrushchenko 	struct xensnd_event_page *page = channel->u.evt.page;
95788ef64aSOleksandr Andrushchenko 	u32 cons, prod;
96788ef64aSOleksandr Andrushchenko 
97788ef64aSOleksandr Andrushchenko 	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
98788ef64aSOleksandr Andrushchenko 		return IRQ_HANDLED;
99788ef64aSOleksandr Andrushchenko 
100788ef64aSOleksandr Andrushchenko 	mutex_lock(&channel->ring_io_lock);
101788ef64aSOleksandr Andrushchenko 
102788ef64aSOleksandr Andrushchenko 	prod = page->in_prod;
103788ef64aSOleksandr Andrushchenko 	/* Ensure we see ring contents up to prod. */
104788ef64aSOleksandr Andrushchenko 	virt_rmb();
105788ef64aSOleksandr Andrushchenko 	if (prod == page->in_cons)
106788ef64aSOleksandr Andrushchenko 		goto out;
107788ef64aSOleksandr Andrushchenko 
108788ef64aSOleksandr Andrushchenko 	/*
109788ef64aSOleksandr Andrushchenko 	 * Assume that the backend is trusted to always write sane values
110788ef64aSOleksandr Andrushchenko 	 * to the ring counters, so no overflow checks on frontend side
111788ef64aSOleksandr Andrushchenko 	 * are required.
112788ef64aSOleksandr Andrushchenko 	 */
113788ef64aSOleksandr Andrushchenko 	for (cons = page->in_cons; cons != prod; cons++) {
114788ef64aSOleksandr Andrushchenko 		struct xensnd_evt *event;
115788ef64aSOleksandr Andrushchenko 
116788ef64aSOleksandr Andrushchenko 		event = &XENSND_IN_RING_REF(page, cons);
117788ef64aSOleksandr Andrushchenko 		if (unlikely(event->id != channel->evt_id++))
118788ef64aSOleksandr Andrushchenko 			continue;
119788ef64aSOleksandr Andrushchenko 
120788ef64aSOleksandr Andrushchenko 		switch (event->type) {
121788ef64aSOleksandr Andrushchenko 		case XENSND_EVT_CUR_POS:
1221cee5593SOleksandr Andrushchenko 			xen_snd_front_alsa_handle_cur_pos(channel,
1231cee5593SOleksandr Andrushchenko 							  event->op.cur_pos.position);
124788ef64aSOleksandr Andrushchenko 			break;
125788ef64aSOleksandr Andrushchenko 		}
126788ef64aSOleksandr Andrushchenko 	}
127788ef64aSOleksandr Andrushchenko 
128788ef64aSOleksandr Andrushchenko 	page->in_cons = cons;
129788ef64aSOleksandr Andrushchenko 	/* Ensure ring contents. */
130788ef64aSOleksandr Andrushchenko 	virt_wmb();
131788ef64aSOleksandr Andrushchenko 
132788ef64aSOleksandr Andrushchenko out:
133788ef64aSOleksandr Andrushchenko 	mutex_unlock(&channel->ring_io_lock);
134788ef64aSOleksandr Andrushchenko 	return IRQ_HANDLED;
135788ef64aSOleksandr Andrushchenko }
136788ef64aSOleksandr Andrushchenko 
137788ef64aSOleksandr Andrushchenko void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
138788ef64aSOleksandr Andrushchenko {
139788ef64aSOleksandr Andrushchenko 	int notify;
140788ef64aSOleksandr Andrushchenko 
141788ef64aSOleksandr Andrushchenko 	channel->u.req.ring.req_prod_pvt++;
142788ef64aSOleksandr Andrushchenko 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
143788ef64aSOleksandr Andrushchenko 	if (notify)
144788ef64aSOleksandr Andrushchenko 		notify_remote_via_irq(channel->irq);
145788ef64aSOleksandr Andrushchenko }
146788ef64aSOleksandr Andrushchenko 
147788ef64aSOleksandr Andrushchenko static void evtchnl_free(struct xen_snd_front_info *front_info,
148788ef64aSOleksandr Andrushchenko 			 struct xen_snd_front_evtchnl *channel)
149788ef64aSOleksandr Andrushchenko {
150788ef64aSOleksandr Andrushchenko 	unsigned long page = 0;
151788ef64aSOleksandr Andrushchenko 
152788ef64aSOleksandr Andrushchenko 	if (channel->type == EVTCHNL_TYPE_REQ)
153788ef64aSOleksandr Andrushchenko 		page = (unsigned long)channel->u.req.ring.sring;
154788ef64aSOleksandr Andrushchenko 	else if (channel->type == EVTCHNL_TYPE_EVT)
155788ef64aSOleksandr Andrushchenko 		page = (unsigned long)channel->u.evt.page;
156788ef64aSOleksandr Andrushchenko 
157788ef64aSOleksandr Andrushchenko 	if (!page)
158788ef64aSOleksandr Andrushchenko 		return;
159788ef64aSOleksandr Andrushchenko 
160788ef64aSOleksandr Andrushchenko 	channel->state = EVTCHNL_STATE_DISCONNECTED;
161788ef64aSOleksandr Andrushchenko 	if (channel->type == EVTCHNL_TYPE_REQ) {
162788ef64aSOleksandr Andrushchenko 		/* Release all who still waits for response if any. */
163788ef64aSOleksandr Andrushchenko 		channel->u.req.resp_status = -EIO;
164788ef64aSOleksandr Andrushchenko 		complete_all(&channel->u.req.completion);
165788ef64aSOleksandr Andrushchenko 	}
166788ef64aSOleksandr Andrushchenko 
167788ef64aSOleksandr Andrushchenko 	if (channel->irq)
168788ef64aSOleksandr Andrushchenko 		unbind_from_irqhandler(channel->irq, channel);
169788ef64aSOleksandr Andrushchenko 
170788ef64aSOleksandr Andrushchenko 	if (channel->port)
171788ef64aSOleksandr Andrushchenko 		xenbus_free_evtchn(front_info->xb_dev, channel->port);
172788ef64aSOleksandr Andrushchenko 
173788ef64aSOleksandr Andrushchenko 	/* End access and free the page. */
174788ef64aSOleksandr Andrushchenko 	if (channel->gref != GRANT_INVALID_REF)
175788ef64aSOleksandr Andrushchenko 		gnttab_end_foreign_access(channel->gref, 0, page);
176788ef64aSOleksandr Andrushchenko 	else
177788ef64aSOleksandr Andrushchenko 		free_page(page);
178788ef64aSOleksandr Andrushchenko 
179788ef64aSOleksandr Andrushchenko 	memset(channel, 0, sizeof(*channel));
180788ef64aSOleksandr Andrushchenko }
181788ef64aSOleksandr Andrushchenko 
182788ef64aSOleksandr Andrushchenko void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
183788ef64aSOleksandr Andrushchenko {
184788ef64aSOleksandr Andrushchenko 	int i;
185788ef64aSOleksandr Andrushchenko 
186788ef64aSOleksandr Andrushchenko 	if (!front_info->evt_pairs)
187788ef64aSOleksandr Andrushchenko 		return;
188788ef64aSOleksandr Andrushchenko 
189788ef64aSOleksandr Andrushchenko 	for (i = 0; i < front_info->num_evt_pairs; i++) {
190788ef64aSOleksandr Andrushchenko 		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
191788ef64aSOleksandr Andrushchenko 		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
192788ef64aSOleksandr Andrushchenko 	}
193788ef64aSOleksandr Andrushchenko 
194788ef64aSOleksandr Andrushchenko 	kfree(front_info->evt_pairs);
195788ef64aSOleksandr Andrushchenko 	front_info->evt_pairs = NULL;
196788ef64aSOleksandr Andrushchenko }
197788ef64aSOleksandr Andrushchenko 
/*
 * Allocate and initialize a single channel of the given @type:
 * a zeroed shared page (a full front ring for EVTCHNL_TYPE_REQ, a raw
 * event page for EVTCHNL_TYPE_EVT) granted to the backend, plus a Xen
 * event channel with a threaded IRQ handler bound to it.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Name under which the IRQ is registered, e.g. for /proc/interrupts. */
	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			/* Clear so evtchnl_free() skips the freed page. */
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		/* Event page is shared as a raw page, not as a sring. */
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	/* Threaded IRQ: the handlers sleep on mutexes, so no hard handler. */
	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	/*
	 * NOTE(review): if we fail after granting access, the page is
	 * freed here while channel->gref and the page pointers still
	 * reference it, and evtchnl_free() will later end access on the
	 * same page — verify the teardown path cannot touch it twice.
	 */
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}
292788ef64aSOleksandr Andrushchenko 
293788ef64aSOleksandr Andrushchenko int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
294788ef64aSOleksandr Andrushchenko 				     int num_streams)
295788ef64aSOleksandr Andrushchenko {
296788ef64aSOleksandr Andrushchenko 	struct xen_front_cfg_card *cfg = &front_info->cfg;
297788ef64aSOleksandr Andrushchenko 	struct device *dev = &front_info->xb_dev->dev;
298788ef64aSOleksandr Andrushchenko 	int d, ret = 0;
299788ef64aSOleksandr Andrushchenko 
300788ef64aSOleksandr Andrushchenko 	front_info->evt_pairs =
301788ef64aSOleksandr Andrushchenko 			kcalloc(num_streams,
302788ef64aSOleksandr Andrushchenko 				sizeof(struct xen_snd_front_evtchnl_pair),
303788ef64aSOleksandr Andrushchenko 				GFP_KERNEL);
304788ef64aSOleksandr Andrushchenko 	if (!front_info->evt_pairs)
305788ef64aSOleksandr Andrushchenko 		return -ENOMEM;
306788ef64aSOleksandr Andrushchenko 
307788ef64aSOleksandr Andrushchenko 	/* Iterate over devices and their streams and create event channels. */
308788ef64aSOleksandr Andrushchenko 	for (d = 0; d < cfg->num_pcm_instances; d++) {
309788ef64aSOleksandr Andrushchenko 		struct xen_front_cfg_pcm_instance *pcm_instance;
310788ef64aSOleksandr Andrushchenko 		int s, index;
311788ef64aSOleksandr Andrushchenko 
312788ef64aSOleksandr Andrushchenko 		pcm_instance = &cfg->pcm_instances[d];
313788ef64aSOleksandr Andrushchenko 
314788ef64aSOleksandr Andrushchenko 		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
315788ef64aSOleksandr Andrushchenko 			index = pcm_instance->streams_pb[s].index;
316788ef64aSOleksandr Andrushchenko 
317788ef64aSOleksandr Andrushchenko 			ret = evtchnl_alloc(front_info, index,
318788ef64aSOleksandr Andrushchenko 					    &front_info->evt_pairs[index].req,
319788ef64aSOleksandr Andrushchenko 					    EVTCHNL_TYPE_REQ);
320788ef64aSOleksandr Andrushchenko 			if (ret < 0) {
321788ef64aSOleksandr Andrushchenko 				dev_err(dev, "Error allocating control channel\n");
322788ef64aSOleksandr Andrushchenko 				goto fail;
323788ef64aSOleksandr Andrushchenko 			}
324788ef64aSOleksandr Andrushchenko 
325788ef64aSOleksandr Andrushchenko 			ret = evtchnl_alloc(front_info, index,
326788ef64aSOleksandr Andrushchenko 					    &front_info->evt_pairs[index].evt,
327788ef64aSOleksandr Andrushchenko 					    EVTCHNL_TYPE_EVT);
328788ef64aSOleksandr Andrushchenko 			if (ret < 0) {
329788ef64aSOleksandr Andrushchenko 				dev_err(dev, "Error allocating in-event channel\n");
330788ef64aSOleksandr Andrushchenko 				goto fail;
331788ef64aSOleksandr Andrushchenko 			}
332788ef64aSOleksandr Andrushchenko 		}
333788ef64aSOleksandr Andrushchenko 
334788ef64aSOleksandr Andrushchenko 		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
335788ef64aSOleksandr Andrushchenko 			index = pcm_instance->streams_cap[s].index;
336788ef64aSOleksandr Andrushchenko 
337788ef64aSOleksandr Andrushchenko 			ret = evtchnl_alloc(front_info, index,
338788ef64aSOleksandr Andrushchenko 					    &front_info->evt_pairs[index].req,
339788ef64aSOleksandr Andrushchenko 					    EVTCHNL_TYPE_REQ);
340788ef64aSOleksandr Andrushchenko 			if (ret < 0) {
341788ef64aSOleksandr Andrushchenko 				dev_err(dev, "Error allocating control channel\n");
342788ef64aSOleksandr Andrushchenko 				goto fail;
343788ef64aSOleksandr Andrushchenko 			}
344788ef64aSOleksandr Andrushchenko 
345788ef64aSOleksandr Andrushchenko 			ret = evtchnl_alloc(front_info, index,
346788ef64aSOleksandr Andrushchenko 					    &front_info->evt_pairs[index].evt,
347788ef64aSOleksandr Andrushchenko 					    EVTCHNL_TYPE_EVT);
348788ef64aSOleksandr Andrushchenko 			if (ret < 0) {
349788ef64aSOleksandr Andrushchenko 				dev_err(dev, "Error allocating in-event channel\n");
350788ef64aSOleksandr Andrushchenko 				goto fail;
351788ef64aSOleksandr Andrushchenko 			}
352788ef64aSOleksandr Andrushchenko 		}
353788ef64aSOleksandr Andrushchenko 	}
354788ef64aSOleksandr Andrushchenko 	if (ret < 0)
355788ef64aSOleksandr Andrushchenko 		goto fail;
356788ef64aSOleksandr Andrushchenko 
357788ef64aSOleksandr Andrushchenko 	front_info->num_evt_pairs = num_streams;
358788ef64aSOleksandr Andrushchenko 	return 0;
359788ef64aSOleksandr Andrushchenko 
360788ef64aSOleksandr Andrushchenko fail:
361788ef64aSOleksandr Andrushchenko 	xen_snd_front_evtchnl_free_all(front_info);
362788ef64aSOleksandr Andrushchenko 	return ret;
363788ef64aSOleksandr Andrushchenko }
364788ef64aSOleksandr Andrushchenko 
365788ef64aSOleksandr Andrushchenko static int evtchnl_publish(struct xenbus_transaction xbt,
366788ef64aSOleksandr Andrushchenko 			   struct xen_snd_front_evtchnl *channel,
367788ef64aSOleksandr Andrushchenko 			   const char *path, const char *node_ring,
368788ef64aSOleksandr Andrushchenko 			   const char *node_chnl)
369788ef64aSOleksandr Andrushchenko {
370788ef64aSOleksandr Andrushchenko 	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
371788ef64aSOleksandr Andrushchenko 	int ret;
372788ef64aSOleksandr Andrushchenko 
373788ef64aSOleksandr Andrushchenko 	/* Write control channel ring reference. */
374788ef64aSOleksandr Andrushchenko 	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
375788ef64aSOleksandr Andrushchenko 	if (ret < 0) {
376788ef64aSOleksandr Andrushchenko 		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
377788ef64aSOleksandr Andrushchenko 		return ret;
378788ef64aSOleksandr Andrushchenko 	}
379788ef64aSOleksandr Andrushchenko 
380788ef64aSOleksandr Andrushchenko 	/* Write event channel ring reference. */
381788ef64aSOleksandr Andrushchenko 	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
382788ef64aSOleksandr Andrushchenko 	if (ret < 0) {
383788ef64aSOleksandr Andrushchenko 		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
384788ef64aSOleksandr Andrushchenko 		return ret;
385788ef64aSOleksandr Andrushchenko 	}
386788ef64aSOleksandr Andrushchenko 
387788ef64aSOleksandr Andrushchenko 	return 0;
388788ef64aSOleksandr Andrushchenko }
389788ef64aSOleksandr Andrushchenko 
/*
 * Publish ring references and event channel ports of all streams to
 * XenStore within a single transaction, restarting the transaction if
 * it is interrupted (-EAGAIN) and aborting it on any write failure.
 *
 * Returns 0 on success or a negative errno (also reported via
 * xenbus_dev_fatal()).
 */
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		/* Playback streams of this PCM instance. */
		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		/* Capture streams of this PCM instance. */
		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		/* The transaction raced with another one: redo it all. */
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	/* Abort the transaction; a failed write cannot be committed. */
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}
466788ef64aSOleksandr Andrushchenko 
467788ef64aSOleksandr Andrushchenko void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
468788ef64aSOleksandr Andrushchenko 					      bool is_connected)
469788ef64aSOleksandr Andrushchenko {
470788ef64aSOleksandr Andrushchenko 	enum xen_snd_front_evtchnl_state state;
471788ef64aSOleksandr Andrushchenko 
472788ef64aSOleksandr Andrushchenko 	if (is_connected)
473788ef64aSOleksandr Andrushchenko 		state = EVTCHNL_STATE_CONNECTED;
474788ef64aSOleksandr Andrushchenko 	else
475788ef64aSOleksandr Andrushchenko 		state = EVTCHNL_STATE_DISCONNECTED;
476788ef64aSOleksandr Andrushchenko 
477788ef64aSOleksandr Andrushchenko 	mutex_lock(&evt_pair->req.ring_io_lock);
478788ef64aSOleksandr Andrushchenko 	evt_pair->req.state = state;
479788ef64aSOleksandr Andrushchenko 	mutex_unlock(&evt_pair->req.ring_io_lock);
480788ef64aSOleksandr Andrushchenko 
481788ef64aSOleksandr Andrushchenko 	mutex_lock(&evt_pair->evt.ring_io_lock);
482788ef64aSOleksandr Andrushchenko 	evt_pair->evt.state = state;
483788ef64aSOleksandr Andrushchenko 	mutex_unlock(&evt_pair->evt.ring_io_lock);
484788ef64aSOleksandr Andrushchenko }
485788ef64aSOleksandr Andrushchenko 
486788ef64aSOleksandr Andrushchenko void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
487788ef64aSOleksandr Andrushchenko {
488788ef64aSOleksandr Andrushchenko 	mutex_lock(&evt_pair->req.ring_io_lock);
489788ef64aSOleksandr Andrushchenko 	evt_pair->req.evt_next_id = 0;
490788ef64aSOleksandr Andrushchenko 	mutex_unlock(&evt_pair->req.ring_io_lock);
491788ef64aSOleksandr Andrushchenko 
492788ef64aSOleksandr Andrushchenko 	mutex_lock(&evt_pair->evt.ring_io_lock);
493788ef64aSOleksandr Andrushchenko 	evt_pair->evt.evt_next_id = 0;
494788ef64aSOleksandr Andrushchenko 	mutex_unlock(&evt_pair->evt.ring_io_lock);
495788ef64aSOleksandr Andrushchenko }
496788ef64aSOleksandr Andrushchenko 
497