// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

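/*
 * Send an event to the host over the given event ring, starting the ring on
 * first use. The doorbell IRQ is raised unless the host requested interrupt
 * moderation via the BEI flag.
 */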
static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element event = {};

	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
}

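/*
 * The state change, EE and command completion helpers below all post their
 * events on event ring 0 (the primary event ring per the MHI spec).
 */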
int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element event = {};

	event.dword[0] = MHI_SC_EV_DWORD0(state);
	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element event = {};

	event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element event = {};

	event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event.dword[0] = MHI_CC_EV_DWORD0(code);
	event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
}

static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller (valid IDs are 0..max_chan - 1) */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_err(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
							MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create the MHI device only during UL channel start. Since MHI
		 * channels operate in pairs, we'll associate both the UL and DL
		 * channels with the same device.
		 *
		 * We also need to check for mhi_dev != NULL because the host
		 * will issue the START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);

		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

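/**
 * mhi_ep_queue_is_empty - Check if the transfer ring of a channel is empty
 * @mhi_dev: Device associated with the channel
 * @dir: DMA direction of the channel to check
 *
 * Return: true if there are no pending ring elements, false otherwise.
 */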
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return ring->rd_offset == ring->wr_offset;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

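/*
 * Read data from the host into result->buf_addr, walking the transfer ring
 * TRE by TRE until either the buffer is full or the current TD is consumed.
 * EOB/EOT completion events are sent back as requested by the host.
 */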
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
				struct mhi_ep_ring *ring,
				struct mhi_result *result,
				u32 len)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *write_addr;
	u64 read_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[ring->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;
		read_addr = mhi_chan->tre_loc + read_offset;
		write_addr = result->buf_addr + write_offset;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			return ret;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		/*
		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
		 * read completely:
		 *
		 * 1. Send completion event to the host based on the flags set in TRE.
		 * 2. Increment the local read offset of the transfer ring.
		 */
		if (!mhi_chan->tre_bytes_left) {
			/*
			 * The host will split the data packet into multiple TREs if it can't fit
			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
			 * host for all TREs except the last one.
			 */
			if (MHI_TRE_DATA_GET_CHAIN(el)) {
				/*
				 * IEOB (Interrupt on End of Block) flag will be set by the host if
				 * it expects the completion event for all TREs of a TD.
				 */
				if (MHI_TRE_DATA_GET_IEOB(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								     MHI_TRE_DATA_GET_LEN(el),
								     MHI_EV_CC_EOB);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}
			} else {
				/*
				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
				 * for the last TRE of the TD and expects the completion event for
				 * the same.
				 */
				if (MHI_TRE_DATA_GET_IEOT(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								     MHI_TRE_DATA_GET_LEN(el),
								     MHI_EV_CC_EOT);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}

				tr_done = true;
			}

			mhi_ep_ring_inc_index(ring);
		}

		result->bytes_xferd += tr_len;
	} while (buf_left && !tr_done);

	return 0;
}

static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if a transfer callback is not registered for the channel.
	 * This most likely means the client driver is not loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		result.buf_addr = kzalloc(len, GFP_KERNEL);
		if (!result.buf_addr)
			return -ENOMEM;

		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				kfree(result.buf_addr);
				return ret;
			}

			result.dir = mhi_chan->dir;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
			result.bytes_xferd = 0;
			memset(result.buf_addr, 0, len);

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

		kfree(result.buf_addr);
	}

	return 0;
}

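/**
 * mhi_ep_queue_skb - Queue an SKB for transfer to the host over a DL channel
 * @mhi_dev: Device associated with the DL channel
 * @skb: SKB to be queued
 *
 * Copies the SKB payload into the TREs queued by the host. An EOT completion
 * event is sent for the TRE carrying the end of the packet, and OVERFLOW
 * events for any earlier TREs the packet spills across.
 *
 * Return: 0 on success, a negative error code otherwise.
 */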
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	enum mhi_ev_ccs code;
	void *read_addr;
	u64 write_addr;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[ring->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;
		read_addr = skb->data + read_offset;
		write_addr = MHI_TRE_DATA_GET_PTR(el);

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;
		/*
		 * For all TREs queued by the host for the DL channel, only the EOT flag will be
		 * set. If the packet doesn't fit into a single TRE, send an OVERFLOW event to
		 * the host so that it can adjust the packet boundary to the next TREs. Otherwise,
		 * send the EOT event indicating the packet boundary.
		 */
		if (buf_left)
			code = MHI_EV_CC_OVERFLOW;
		else
			code = MHI_EV_CC_EOT;

		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
		if (ret) {
			dev_err(dev, "Error sending transfer completion event\n");
			goto err_exit;
		}

		mhi_ep_ring_inc_index(ring);
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

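/*
 * Workqueue handler for the command ring doorbell: syncs the write offset
 * from the host and processes every new command ring element.
 */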
static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring elements up to the write offset. In case of an
	 * error, just try to process the next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

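/*
 * Workqueue handler for channel doorbells: drains the list of rings queued by
 * mhi_ep_queue_channel_db() and processes one element from each ring.
 */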
static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ring_element *el;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the channel lock,
		 * so do a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kfree(itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kfree(itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (ring->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kfree(itr);
			continue;
		}

		el = &ring->ring_cache[ring->rd_offset];

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring, el);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kfree(itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kfree(itr);
	}
}

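/* Workqueue handler applying the M0/M3 state transitions requested by the host */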
static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			 mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}
		kfree(itr);
	}
}

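/*
 * Queue a ring item for each channel whose doorbell was rung and hand the
 * batch off to mhi_ep_ch_ring_worker() for processing.
 */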
static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kzalloc(sizeof(*item), GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in four 32-bit registers. To check
 * all interrupts, we need to loop through each register and then check for
 * bits set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
							mhi_cntrl->chdb[i].status);
		}
	}
}

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					 enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

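/*
 * Tear down all started channels and rings and drop the cached host context.
 * Called from mhi_ep_power_down() when the stack is being disabled.
 */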
static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	spin_lock_bh(&mhi_cntrl->state_lock);
	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;
	spin_unlock_bh(&mhi_cntrl->state_lock);

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * issue reset during shutdown also and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * can also recover from the error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

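/**
 * mhi_ep_power_up - Power up the MHI endpoint stack
 * @mhi_cntrl: MHI Endpoint controller
 *
 * Initializes MMIO and all rings, signals READY to the host, waits for it to
 * move the device to M0 and then enables the doorbell IRQ.
 *
 * Return: 0 on success, a negative error code otherwise.
 */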
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, sizeof(*mhi_cntrl->mhi_event),
					GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

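/**
 * mhi_ep_power_down - Power down the MHI endpoint stack
 * @mhi_cntrl: MHI Endpoint controller
 *
 * Counterpart of mhi_ep_power_up(). Does nothing if the stack is not enabled.
 */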
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

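/* Move all RUNNING channels to SUSPENDED, updating the cached host channel context */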
void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as the odd numbered one. This function takes the UL channel
 * (primary) as ch_id and uses the next entry in the channel list as the
 * corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
		     dev_name(&mhi_cntrl->mhi_dev->dev),
		     mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		 mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and directionless channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
				const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->state_lock);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

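/*
 * A minimal registration sketch for a controller driver (e.g. a PCI EPF
 * driver). The config values below are illustrative only; real drivers fill
 * in transport specific callback implementations:
 *
 *	static const struct mhi_ep_cntrl_config mhi_cfg = {
 *		.max_channels = 128,
 *		.num_channels = ARRAY_SIZE(mhi_channels),
 *		.ch_cfg = mhi_channels,
 *		.mhi_version = 0x1000000,
 *	};
 *
 *	mhi_cntrl->cntrl_dev = dev;
 *	mhi_cntrl->mmio = mmio_base;
 *	mhi_cntrl->irq = irq;
 *	mhi_cntrl->raise_irq = ...;	// plus read_from_host, write_to_host,
 *	mhi_cntrl->alloc_map = ...;	// unmap_free etc.
 *	ret = mhi_ep_register_controller(mhi_cntrl, &mhi_cfg);
 */
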
/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

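/*
 * A minimal client driver sketch. The channel name "MHI_TEST" and the
 * callback names are hypothetical; the mhi_ep_driver/mhi_device_id usage
 * mirrors what this file expects from clients:
 *
 *	static const struct mhi_device_id mhi_test_ids[] = {
 *		{ .chan = "MHI_TEST" },
 *		{}
 *	};
 *
 *	static struct mhi_ep_driver mhi_test_driver = {
 *		.id_table = mhi_test_ids,
 *		.probe = mhi_test_probe,
 *		.remove = mhi_test_remove,
 *		.ul_xfer_cb = mhi_test_ul_cb,
 *		.dl_xfer_cb = mhi_test_dl_cb,
 *		.driver = { .name = "mhi_test" },
 *	};
 *	module_mhi_ep_driver(mhi_test_driver);
 */
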
void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");