1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * MHI Endpoint bus stack
4 *
5 * Copyright (C) 2022 Linaro Ltd.
6 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
7 */
8
9 #include <linux/bitfield.h>
10 #include <linux/delay.h>
11 #include <linux/dma-direction.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/irq.h>
15 #include <linux/mhi_ep.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/module.h>
18 #include "internal.h"
19
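/* Polling interval (in ms) and maximum number of polls while waiting for the host to enter M0 */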
20 #define M0_WAIT_DELAY_MS 100
21 #define M0_WAIT_COUNT 100
22
23 static DEFINE_IDA(mhi_ep_cntrl_ida);
24
25 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
26 static int mhi_ep_destroy_device(struct device *dev, void *data);
27
28 static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
29 struct mhi_ring_element *el, bool bei)
30 {
31 struct device *dev = &mhi_cntrl->mhi_dev->dev;
32 union mhi_ep_ring_ctx *ctx;
33 struct mhi_ep_ring *ring;
34 int ret;
35
36 mutex_lock(&mhi_cntrl->event_lock);
37 ring = &mhi_cntrl->mhi_event[ring_idx].ring;
38 ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
39 if (!ring->started) {
40 ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
41 if (ret) {
42 dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
43 goto err_unlock;
44 }
45 }
46
47 /* Add element to the event ring */
48 ret = mhi_ep_ring_add_element(ring, el);
49 if (ret) {
50 dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
51 goto err_unlock;
52 }
53
54 mutex_unlock(&mhi_cntrl->event_lock);
55
56 /*
57 * Raise an IRQ to the host only if the BEI flag is not set in the TRE. The
58 * host might set this flag for interrupt moderation as per the MHI protocol.
59 */
60 if (!bei)
61 mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
62
63 return 0;
64
65 err_unlock:
66 mutex_unlock(&mhi_cntrl->event_lock);
67
68 return ret;
69 }
70
71 static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
72 struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
73 {
74 struct mhi_ring_element *event;
75 int ret;
76
77 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
78 if (!event)
79 return -ENOMEM;
80
81 event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
82 event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
83 event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
84
85 ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
86 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
87
88 return ret;
89 }
90
91 int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
92 {
93 struct mhi_ring_element *event;
94 int ret;
95
96 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
97 if (!event)
98 return -ENOMEM;
99
100 event->dword[0] = MHI_SC_EV_DWORD0(state);
101 event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
102
103 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
104 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
105
106 return ret;
107 }
108
109 int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
110 {
111 struct mhi_ring_element *event;
112 int ret;
113
114 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
115 if (!event)
116 return -ENOMEM;
117
118 event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
119 event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
120
121 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
122 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
123
124 return ret;
125 }
126
127 static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
128 {
129 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
130 struct mhi_ring_element *event;
131 int ret;
132
133 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
134 if (!event)
135 return -ENOMEM;
136
137 event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
138 event->dword[0] = MHI_CC_EV_DWORD0(code);
139 event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
140
141 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
142 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
143
144 return ret;
145 }
146
147 static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
148 {
149 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
150 struct device *dev = &mhi_cntrl->mhi_dev->dev;
151 struct mhi_result result = {};
152 struct mhi_ep_chan *mhi_chan;
153 struct mhi_ep_ring *ch_ring;
154 u32 tmp, ch_id;
155 int ret;
156
157 ch_id = MHI_TRE_GET_CMD_CHID(el);
158
159 /* Check if the channel is supported by the controller */
160 if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
161 dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
162 return -ENODEV;
163 }
164
165 mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
166 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
167
168 switch (MHI_TRE_GET_CMD_TYPE(el)) {
169 case MHI_PKT_TYPE_START_CHAN_CMD:
170 dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
171
172 mutex_lock(&mhi_chan->lock);
173 /* Initialize and configure the corresponding channel ring */
174 if (!ch_ring->started) {
175 ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
176 (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
177 if (ret) {
178 dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
179 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
180 MHI_EV_CC_UNDEFINED_ERR);
181 if (ret)
182 dev_err(dev, "Error sending completion event: %d\n", ret);
183
184 goto err_unlock;
185 }
186 }
187
188 /* Set channel state to RUNNING */
189 mhi_chan->state = MHI_CH_STATE_RUNNING;
190 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
191 tmp &= ~CHAN_CTX_CHSTATE_MASK;
192 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
193 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
194
195 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
196 if (ret) {
197 dev_err(dev, "Error sending command completion event (%u)\n",
198 MHI_EV_CC_SUCCESS);
199 goto err_unlock;
200 }
201
202 mutex_unlock(&mhi_chan->lock);
203
204 /*
205 * Create the MHI device only during UL channel start. Since MHI
206 * channels operate in pairs, we'll associate both the UL and DL
207 * channels with the same device.
208 *
209 * We also need to check for mhi_dev != NULL because the host
210 * will issue the START_CHAN command during resume and we don't
211 * destroy the device during suspend.
212 */
213 if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
214 ret = mhi_ep_create_device(mhi_cntrl, ch_id);
215 if (ret) {
216 dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
217 mhi_ep_handle_syserr(mhi_cntrl);
218 return ret;
219 }
220 }
221
222 /* Finally, enable DB for the channel */
223 mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
224
225 break;
226 case MHI_PKT_TYPE_STOP_CHAN_CMD:
227 dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
228 if (!ch_ring->started) {
229 dev_err(dev, "Channel (%u) not opened\n", ch_id);
230 return -ENODEV;
231 }
232
233 mutex_lock(&mhi_chan->lock);
234 /* Disable DB for the channel */
235 mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
236
237 /* Send channel disconnect status to client drivers */
238 if (mhi_chan->xfer_cb) {
239 result.transaction_status = -ENOTCONN;
240 result.bytes_xferd = 0;
241 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
242 }
243
244 /* Set channel state to STOP */
245 mhi_chan->state = MHI_CH_STATE_STOP;
246 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
247 tmp &= ~CHAN_CTX_CHSTATE_MASK;
248 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
249 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
250
251 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
252 if (ret) {
253 dev_err(dev, "Error sending command completion event (%u)\n",
254 MHI_EV_CC_SUCCESS);
255 goto err_unlock;
256 }
257
258 mutex_unlock(&mhi_chan->lock);
259 break;
260 case MHI_PKT_TYPE_RESET_CHAN_CMD:
261 dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
262 if (!ch_ring->started) {
263 dev_err(dev, "Channel (%u) not opened\n", ch_id);
264 return -ENODEV;
265 }
266
267 mutex_lock(&mhi_chan->lock);
268 /* Stop and reset the transfer ring */
269 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
270
271 /* Send channel disconnect status to client driver */
272 if (mhi_chan->xfer_cb) {
273 result.transaction_status = -ENOTCONN;
274 result.bytes_xferd = 0;
275 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
276 }
277
278 /* Set channel state to DISABLED */
279 mhi_chan->state = MHI_CH_STATE_DISABLED;
280 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
281 tmp &= ~CHAN_CTX_CHSTATE_MASK;
282 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
283 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
284
285 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
286 if (ret) {
287 dev_err(dev, "Error sending command completion event (%u)\n",
288 MHI_EV_CC_SUCCESS);
289 goto err_unlock;
290 }
291
292 mutex_unlock(&mhi_chan->lock);
293 break;
294 default:
295 dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
296 MHI_TRE_GET_CMD_TYPE(el), ch_id);
297 return -EINVAL;
298 }
299
300 return 0;
301
302 err_unlock:
303 mutex_unlock(&mhi_chan->lock);
304
305 return ret;
306 }
307
308 bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
309 {
310 struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
311 mhi_dev->ul_chan;
312 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
313 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
314
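/* The transfer ring is empty when the local read and write offsets match */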
315 return !!(ring->rd_offset == ring->wr_offset);
316 }
317 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
318
319 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
320 struct mhi_ep_ring *ring,
321 struct mhi_result *result,
322 u32 len)
323 {
324 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
325 struct device *dev = &mhi_cntrl->mhi_dev->dev;
326 size_t tr_len, read_offset, write_offset;
327 struct mhi_ep_buf_info buf_info = {};
328 struct mhi_ring_element *el;
329 bool tr_done = false;
330 u32 buf_left;
331 int ret;
332
333 buf_left = len;
334
335 do {
336 /* Don't process the transfer ring if the channel is not in RUNNING state */
337 if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
338 dev_err(dev, "Channel not available\n");
339 return -ENODEV;
340 }
341
342 el = &ring->ring_cache[ring->rd_offset];
343
344 /* Check if there is data pending to be read from previous read operation */
345 if (mhi_chan->tre_bytes_left) {
346 dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
347 tr_len = min(buf_left, mhi_chan->tre_bytes_left);
348 } else {
349 mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
350 mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
351 mhi_chan->tre_bytes_left = mhi_chan->tre_size;
352
353 tr_len = min(buf_left, mhi_chan->tre_size);
354 }
355
356 read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
357 write_offset = len - buf_left;
358
359 buf_info.host_addr = mhi_chan->tre_loc + read_offset;
360 buf_info.dev_addr = result->buf_addr + write_offset;
361 buf_info.size = tr_len;
362
363 dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
364 ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
365 if (ret < 0) {
366 dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
367 return ret;
368 }
369
370 buf_left -= tr_len;
371 mhi_chan->tre_bytes_left -= tr_len;
372
373 /*
374 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
375 * read completely:
376 *
377 * 1. Send completion event to the host based on the flags set in TRE.
378 * 2. Increment the local read offset of the transfer ring.
379 */
380 if (!mhi_chan->tre_bytes_left) {
381 /*
382 * The host will split the data packet into multiple TREs if it can't fit
383 * the packet in a single TRE. In that case, CHAIN flag will be set by the
384 * host for all TREs except the last one.
385 */
386 if (MHI_TRE_DATA_GET_CHAIN(el)) {
387 /*
388 * IEOB (Interrupt on End of Block) flag will be set by the host if
389 * it expects the completion event for all TREs of a TD.
390 */
391 if (MHI_TRE_DATA_GET_IEOB(el)) {
392 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
393 MHI_TRE_DATA_GET_LEN(el),
394 MHI_EV_CC_EOB);
395 if (ret < 0) {
396 dev_err(&mhi_chan->mhi_dev->dev,
397 "Error sending transfer compl. event\n");
398 return ret;
399 }
400 }
401 } else {
402 /*
403 * The IEOT (Interrupt on End of Transfer) flag will be set by the host
404 * for the last TRE of the TD, and the host expects the completion event
405 * for it.
406 */
407 if (MHI_TRE_DATA_GET_IEOT(el)) {
408 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
409 MHI_TRE_DATA_GET_LEN(el),
410 MHI_EV_CC_EOT);
411 if (ret < 0) {
412 dev_err(&mhi_chan->mhi_dev->dev,
413 "Error sending transfer compl. event\n");
414 return ret;
415 }
416 }
417
418 tr_done = true;
419 }
420
421 mhi_ep_ring_inc_index(ring);
422 }
423
424 result->bytes_xferd += tr_len;
425 } while (buf_left && !tr_done);
426
427 return 0;
428 }
429
430 static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
431 {
432 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
433 struct mhi_result result = {};
434 u32 len = MHI_EP_DEFAULT_MTU;
435 struct mhi_ep_chan *mhi_chan;
436 int ret;
437
438 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
439
440 /*
441 * Bail out if the transfer callback is not registered for the channel.
442 * This is most likely because the client driver is not loaded at this point.
443 */
444 if (!mhi_chan->xfer_cb) {
445 dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
446 return -ENODEV;
447 }
448
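/* Even channel IDs are UL (host to device), odd channel IDs are DL (device to host) */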
449 if (ring->ch_id % 2) {
450 /* DL channel */
451 result.dir = mhi_chan->dir;
452 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
453 } else {
454 /* UL channel */
455 result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
456 if (!result.buf_addr)
457 return -ENOMEM;
458
459 do {
460 ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
461 if (ret < 0) {
462 dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
463 kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
464 return ret;
465 }
466
467 result.dir = mhi_chan->dir;
468 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
469 result.bytes_xferd = 0;
470 memset(result.buf_addr, 0, len);
471
472 /* Read until the ring becomes empty */
473 } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
474
475 kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
476 }
477
478 return 0;
479 }
480
481 /* TODO: Handle partially formed TDs */
482 int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
483 {
484 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
485 struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
486 struct device *dev = &mhi_chan->mhi_dev->dev;
487 struct mhi_ep_buf_info buf_info = {};
488 struct mhi_ring_element *el;
489 u32 buf_left, read_offset;
490 struct mhi_ep_ring *ring;
491 enum mhi_ev_ccs code;
492 size_t tr_len;
493 u32 tre_len;
494 int ret;
495
496 buf_left = skb->len;
497 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
498
499 mutex_lock(&mhi_chan->lock);
500
501 do {
502 /* Don't process the transfer ring if the channel is not in RUNNING state */
503 if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
504 dev_err(dev, "Channel not available\n");
505 ret = -ENODEV;
506 goto err_exit;
507 }
508
509 if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
510 dev_err(dev, "TRE not available!\n");
511 ret = -ENOSPC;
512 goto err_exit;
513 }
514
515 el = &ring->ring_cache[ring->rd_offset];
516 tre_len = MHI_TRE_DATA_GET_LEN(el);
517
518 tr_len = min(buf_left, tre_len);
519 read_offset = skb->len - buf_left;
520
521 buf_info.dev_addr = skb->data + read_offset;
522 buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
523 buf_info.size = tr_len;
524
525 dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
526 ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
527 if (ret < 0) {
528 dev_err(dev, "Error writing to the channel\n");
529 goto err_exit;
530 }
531
532 buf_left -= tr_len;
533 /*
534 * For all TREs queued by the host for the DL channel, only the EOT flag will be set.
535 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
536 * the host so that the host can adjust the packet boundary to the next TREs.
537 * Otherwise, send the EOT event to the host indicating the packet boundary.
538 */
539 if (buf_left)
540 code = MHI_EV_CC_OVERFLOW;
541 else
542 code = MHI_EV_CC_EOT;
543
544 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
545 if (ret) {
546 dev_err(dev, "Error sending transfer completion event\n");
547 goto err_exit;
548 }
549
550 mhi_ep_ring_inc_index(ring);
551 } while (buf_left);
552
553 mutex_unlock(&mhi_chan->lock);
554
555 return 0;
556
557 err_exit:
558 mutex_unlock(&mhi_chan->lock);
559
560 return ret;
561 }
562 EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
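/*
 * Illustrative sketch (not part of this driver): a DL client would typically
 * make sure the host has queued a TRE before handing over an SKB. The names
 * "mhi_dev" and "skb" below are assumed to come from the client's own context:
 *
 *	if (!mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE))
 *		ret = mhi_ep_queue_skb(mhi_dev, skb);
 */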
563
564 static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
565 {
566 size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
567 struct device *dev = &mhi_cntrl->mhi_dev->dev;
568 int ret;
569
570 /* Update the number of event rings (NER) programmed by the host */
571 mhi_ep_mmio_update_ner(mhi_cntrl);
572
573 dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
574 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
575
576 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
577 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
578 cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
579
580 /* Get the channel context base pointer from host */
581 mhi_ep_mmio_get_chc_base(mhi_cntrl);
582
583 /* Allocate and map memory for caching host channel context */
584 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
585 &mhi_cntrl->ch_ctx_cache_phys,
586 (void __iomem **) &mhi_cntrl->ch_ctx_cache,
587 ch_ctx_host_size);
588 if (ret) {
589 dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
590 return ret;
591 }
592
593 /* Get the event context base pointer from host */
594 mhi_ep_mmio_get_erc_base(mhi_cntrl);
595
596 /* Allocate and map memory for caching host event context */
597 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
598 &mhi_cntrl->ev_ctx_cache_phys,
599 (void __iomem **) &mhi_cntrl->ev_ctx_cache,
600 ev_ctx_host_size);
601 if (ret) {
602 dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
603 goto err_ch_ctx;
604 }
605
606 /* Get the command context base pointer from host */
607 mhi_ep_mmio_get_crc_base(mhi_cntrl);
608
609 /* Allocate and map memory for caching host command context */
610 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
611 &mhi_cntrl->cmd_ctx_cache_phys,
612 (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
613 cmd_ctx_host_size);
614 if (ret) {
615 dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
616 goto err_ev_ctx;
617 }
618
619 /* Initialize command ring */
620 ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
621 (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
622 if (ret) {
623 dev_err(dev, "Failed to start the command ring\n");
624 goto err_cmd_ctx;
625 }
626
627 return ret;
628
629 err_cmd_ctx:
630 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
631 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
632
633 err_ev_ctx:
634 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
635 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
636
637 err_ch_ctx:
638 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
639 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
640
641 return ret;
642 }
643
644 static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
645 {
646 size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
647
648 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
649 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
650 cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
651
652 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
653 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
654
655 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
656 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
657
658 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
659 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
660 }
661
662 static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
663 {
664 /*
665 * Doorbell interrupts are enabled when the corresponding channel gets started.
666 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
667 * associated with hw channels always get triggered.
668 */
669 mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
670 mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
671 }
672
673 static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
674 {
675 struct device *dev = &mhi_cntrl->mhi_dev->dev;
676 enum mhi_state state;
677 bool mhi_reset;
678 u32 count = 0;
679 int ret;
680
681 /* Wait for Host to set the M0 state */
682 do {
683 msleep(M0_WAIT_DELAY_MS);
684 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
685 if (mhi_reset) {
686 /* Clear the MHI reset if host is in reset state */
687 mhi_ep_mmio_clear_reset(mhi_cntrl);
688 dev_info(dev, "Detected Host reset while waiting for M0\n");
689 }
690 count++;
691 } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
692
693 if (state != MHI_STATE_M0) {
694 dev_err(dev, "Host failed to enter M0\n");
695 return -ETIMEDOUT;
696 }
697
698 ret = mhi_ep_cache_host_cfg(mhi_cntrl);
699 if (ret) {
700 dev_err(dev, "Failed to cache host config\n");
701 return ret;
702 }
703
704 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
705
706 /* Enable all interrupts now */
707 mhi_ep_enable_int(mhi_cntrl);
708
709 return 0;
710 }
711
712 static void mhi_ep_cmd_ring_worker(struct work_struct *work)
713 {
714 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
715 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
716 struct device *dev = &mhi_cntrl->mhi_dev->dev;
717 struct mhi_ring_element *el;
718 int ret;
719
720 /* Update the write offset for the ring */
721 ret = mhi_ep_update_wr_offset(ring);
722 if (ret) {
723 dev_err(dev, "Error updating write offset for ring\n");
724 return;
725 }
726
727 /* Sanity check to make sure there are elements in the ring */
728 if (ring->rd_offset == ring->wr_offset)
729 return;
730
731 /*
732 * Process command ring elements up to the write offset. In case of an error,
733 * just try to process the next element.
734 */
735 while (ring->rd_offset != ring->wr_offset) {
736 el = &ring->ring_cache[ring->rd_offset];
737
738 ret = mhi_ep_process_cmd_ring(ring, el);
739 if (ret && ret != -ENODEV)
740 dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
741
742 mhi_ep_ring_inc_index(ring);
743 }
744 }
745
746 static void mhi_ep_ch_ring_worker(struct work_struct *work)
747 {
748 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
749 struct device *dev = &mhi_cntrl->mhi_dev->dev;
750 struct mhi_ep_ring_item *itr, *tmp;
751 struct mhi_ring_element *el;
752 struct mhi_ep_ring *ring;
753 struct mhi_ep_chan *chan;
754 unsigned long flags;
755 LIST_HEAD(head);
756 int ret;
757
758 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
759 list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
760 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
761
762 /* Process each queued channel ring. In case of an error, just process next element. */
763 list_for_each_entry_safe(itr, tmp, &head, node) {
764 list_del(&itr->node);
765 ring = itr->ring;
766
767 chan = &mhi_cntrl->mhi_chan[ring->ch_id];
768 mutex_lock(&chan->lock);
769
770 /*
771 * The ring could've stopped while we waited to grab the (chan->lock), so do
772 * a sanity check before going further.
773 */
774 if (!ring->started) {
775 mutex_unlock(&chan->lock);
776 kfree(itr);
777 continue;
778 }
779
780 /* Update the write offset for the ring */
781 ret = mhi_ep_update_wr_offset(ring);
782 if (ret) {
783 dev_err(dev, "Error updating write offset for ring\n");
784 mutex_unlock(&chan->lock);
785 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
786 continue;
787 }
788
789 /* Sanity check to make sure there are elements in the ring */
790 if (ring->rd_offset == ring->wr_offset) {
791 mutex_unlock(&chan->lock);
792 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
793 continue;
794 }
795
796 el = &ring->ring_cache[ring->rd_offset];
797
798 dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
799 ret = mhi_ep_process_ch_ring(ring, el);
800 if (ret) {
801 dev_err(dev, "Error processing ring for channel (%u): %d\n",
802 ring->ch_id, ret);
803 mutex_unlock(&chan->lock);
804 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
805 continue;
806 }
807
808 mutex_unlock(&chan->lock);
809 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
810 }
811 }
812
813 static void mhi_ep_state_worker(struct work_struct *work)
814 {
815 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
816 struct device *dev = &mhi_cntrl->mhi_dev->dev;
817 struct mhi_ep_state_transition *itr, *tmp;
818 unsigned long flags;
819 LIST_HEAD(head);
820 int ret;
821
822 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
823 list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
824 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
825
826 list_for_each_entry_safe(itr, tmp, &head, node) {
827 list_del(&itr->node);
828 dev_dbg(dev, "Handling MHI state transition to %s\n",
829 mhi_state_str(itr->state));
830
831 switch (itr->state) {
832 case MHI_STATE_M0:
833 ret = mhi_ep_set_m0_state(mhi_cntrl);
834 if (ret)
835 dev_err(dev, "Failed to transition to M0 state\n");
836 break;
837 case MHI_STATE_M3:
838 ret = mhi_ep_set_m3_state(mhi_cntrl);
839 if (ret)
840 dev_err(dev, "Failed to transition to M3 state\n");
841 break;
842 default:
843 dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
844 break;
845 }
846 kfree(itr);
847 }
848 }
849
850 static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
851 u32 ch_idx)
852 {
853 struct mhi_ep_ring_item *item;
854 struct mhi_ep_ring *ring;
855 bool work = !!ch_int;
856 LIST_HEAD(head);
857 u32 i;
858
859 /* First add the ring items to a local list */
860 for_each_set_bit(i, &ch_int, 32) {
861 /* Channel index varies for each register: 0, 32, 64, 96 */
862 u32 ch_id = ch_idx + i;
863
864 ring = &mhi_cntrl->mhi_chan[ch_id].ring;
865 item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
866 if (!item)
867 return;
868
869 item->ring = ring;
870 list_add_tail(&item->node, &head);
871 }
872
873 /* Now, splice the local list into ch_db_list and queue the work item */
874 if (work) {
875 spin_lock(&mhi_cntrl->list_lock);
876 list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
877 spin_unlock(&mhi_cntrl->list_lock);
878
879 queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
880 }
881 }
882
883 /*
884 * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
885 * To check all interrupts, we need to loop through each register and then
886 * check for the bits that are set.
887 */
888 static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
889 {
890 u32 ch_int, ch_idx, i;
891
892 /* Bail out if there is no channel doorbell interrupt */
893 if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
894 return;
895
896 for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
897 ch_idx = i * MHI_MASK_CH_LEN;
898
899 /* Only process channel interrupt if the mask is enabled */
900 ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
901 if (ch_int) {
902 mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
903 mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
904 mhi_cntrl->chdb[i].status);
905 }
906 }
907 }
908
909 static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
910 enum mhi_state state)
911 {
912 struct mhi_ep_state_transition *item;
913
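/* This runs in hard IRQ context, so the allocation must not sleep */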
914 item = kzalloc(sizeof(*item), GFP_ATOMIC);
915 if (!item)
916 return;
917
918 item->state = state;
919 spin_lock(&mhi_cntrl->list_lock);
920 list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
921 spin_unlock(&mhi_cntrl->list_lock);
922
923 queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
924 }
925
926 /*
927 * Interrupt handler that services interrupts raised by the host writing to
928 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
929 * channel interrupts.
930 */
931 static irqreturn_t mhi_ep_irq(int irq, void *data)
932 {
933 struct mhi_ep_cntrl *mhi_cntrl = data;
934 struct device *dev = &mhi_cntrl->mhi_dev->dev;
935 enum mhi_state state;
936 u32 int_value;
937 bool mhi_reset;
938
939 /* Acknowledge the ctrl interrupt */
940 int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
941 mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
942
943 /* Check for ctrl interrupt */
944 if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
945 dev_dbg(dev, "Processing ctrl interrupt\n");
946 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
947 if (mhi_reset) {
948 dev_info(dev, "Host triggered MHI reset!\n");
949 disable_irq_nosync(mhi_cntrl->irq);
950 schedule_work(&mhi_cntrl->reset_work);
951 return IRQ_HANDLED;
952 }
953
954 mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
955 }
956
957 /* Check for command doorbell interrupt */
958 if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
959 dev_dbg(dev, "Processing command doorbell interrupt\n");
960 queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
961 }
962
963 /* Check for channel interrupts */
964 mhi_ep_check_channel_interrupt(mhi_cntrl);
965
966 return IRQ_HANDLED;
967 }
968
969 static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
970 {
971 struct mhi_ep_ring *ch_ring, *ev_ring;
972 struct mhi_result result = {};
973 struct mhi_ep_chan *mhi_chan;
974 int i;
975
976 /* Stop all the channels */
977 for (i = 0; i < mhi_cntrl->max_chan; i++) {
978 mhi_chan = &mhi_cntrl->mhi_chan[i];
979 if (!mhi_chan->ring.started)
980 continue;
981
982 mutex_lock(&mhi_chan->lock);
983 /* Send channel disconnect status to client drivers */
984 if (mhi_chan->xfer_cb) {
985 result.transaction_status = -ENOTCONN;
986 result.bytes_xferd = 0;
987 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
988 }
989
990 mhi_chan->state = MHI_CH_STATE_DISABLED;
991 mutex_unlock(&mhi_chan->lock);
992 }
993
994 flush_workqueue(mhi_cntrl->wq);
995
996 /* Destroy devices associated with all channels */
997 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
998
999 /* Stop and reset the transfer rings */
1000 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1001 mhi_chan = &mhi_cntrl->mhi_chan[i];
1002 if (!mhi_chan->ring.started)
1003 continue;
1004
1005 ch_ring = &mhi_cntrl->mhi_chan[i].ring;
1006 mutex_lock(&mhi_chan->lock);
1007 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
1008 mutex_unlock(&mhi_chan->lock);
1009 }
1010
1011 /* Stop and reset the event rings */
1012 for (i = 0; i < mhi_cntrl->event_rings; i++) {
1013 ev_ring = &mhi_cntrl->mhi_event[i].ring;
1014 if (!ev_ring->started)
1015 continue;
1016
1017 mutex_lock(&mhi_cntrl->event_lock);
1018 mhi_ep_ring_reset(mhi_cntrl, ev_ring);
1019 mutex_unlock(&mhi_cntrl->event_lock);
1020 }
1021
1022 /* Stop and reset the command ring */
1023 mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
1024
1025 mhi_ep_free_host_cfg(mhi_cntrl);
1026 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
1027
1028 mhi_cntrl->enabled = false;
1029 }
1030
1031 static void mhi_ep_reset_worker(struct work_struct *work)
1032 {
1033 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
1034 enum mhi_state cur_state;
1035
1036 mhi_ep_power_down(mhi_cntrl);
1037
1038 mutex_lock(&mhi_cntrl->state_lock);
1039
1040 /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
1041 mhi_ep_mmio_reset(mhi_cntrl);
1042 cur_state = mhi_cntrl->mhi_state;
1043
1044 /*
1045 * Only proceed further if the reset is due to SYS_ERR. The host will
1046 * also issue a reset during shutdown, and we don't need to re-init in
1047 * that case.
1048 */
1049 if (cur_state == MHI_STATE_SYS_ERR)
1050 mhi_ep_power_up(mhi_cntrl);
1051
1052 mutex_unlock(&mhi_cntrl->state_lock);
1053 }
1054
1055 /*
1056 * We don't need to do anything special other than setting the MHI SYS_ERR
1057 * state. The host will reset all contexts and issue MHI RESET so that we
1058 * could also recover from error state.
1059 */
1060 void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
1061 {
1062 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1063 int ret;
1064
1065 ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1066 if (ret)
1067 return;
1068
1069 /* Signal host that the device went to SYS_ERR state */
1070 ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
1071 if (ret)
1072 dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
1073 }
1074
1075 int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
1076 {
1077 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1078 int ret, i;
1079
1080 /*
1081 * Mask all interrupts until the state machine is ready. Interrupts will
1082 * be enabled later with mhi_ep_enable().
1083 */
1084 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
1085 mhi_ep_mmio_init(mhi_cntrl);
1086
1087 mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
1088 GFP_KERNEL);
1089 if (!mhi_cntrl->mhi_event)
1090 return -ENOMEM;
1091
1092 /* Initialize command, channel and event rings */
1093 mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
1094 for (i = 0; i < mhi_cntrl->max_chan; i++)
1095 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
1096 for (i = 0; i < mhi_cntrl->event_rings; i++)
1097 mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
1098
1099 mhi_cntrl->mhi_state = MHI_STATE_RESET;
1100
1101 /* Set AMSS EE before signaling ready state */
1102 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1103
1104 /* All set, notify the host that we are ready */
1105 ret = mhi_ep_set_ready_state(mhi_cntrl);
1106 if (ret)
1107 goto err_free_event;
1108
1109 dev_dbg(dev, "READY state notification sent to the host\n");
1110
1111 ret = mhi_ep_enable(mhi_cntrl);
1112 if (ret) {
1113 dev_err(dev, "Failed to enable MHI endpoint\n");
1114 goto err_free_event;
1115 }
1116
1117 enable_irq(mhi_cntrl->irq);
1118 mhi_cntrl->enabled = true;
1119
1120 return 0;
1121
1122 err_free_event:
1123 kfree(mhi_cntrl->mhi_event);
1124
1125 return ret;
1126 }
1127 EXPORT_SYMBOL_GPL(mhi_ep_power_up);
1128
1129 void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
1130 {
1131 if (mhi_cntrl->enabled) {
1132 mhi_ep_abort_transfer(mhi_cntrl);
1133 kfree(mhi_cntrl->mhi_event);
1134 disable_irq(mhi_cntrl->irq);
1135 }
1136 }
1137 EXPORT_SYMBOL_GPL(mhi_ep_power_down);
1138
1139 void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
1140 {
1141 struct mhi_ep_chan *mhi_chan;
1142 u32 tmp;
1143 int i;
1144
1145 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1146 mhi_chan = &mhi_cntrl->mhi_chan[i];
1147
1148 if (!mhi_chan->mhi_dev)
1149 continue;
1150
1151 mutex_lock(&mhi_chan->lock);
1152 /* Skip if the channel is not currently running */
1153 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1154 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
1155 mutex_unlock(&mhi_chan->lock);
1156 continue;
1157 }
1158
1159 dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
1160 /* Set channel state to SUSPENDED */
1161 mhi_chan->state = MHI_CH_STATE_SUSPENDED;
1162 tmp &= ~CHAN_CTX_CHSTATE_MASK;
1163 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
1164 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1165 mutex_unlock(&mhi_chan->lock);
1166 }
1167 }
1168
1169 void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
1170 {
1171 struct mhi_ep_chan *mhi_chan;
1172 u32 tmp;
1173 int i;
1174
1175 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1176 mhi_chan = &mhi_cntrl->mhi_chan[i];
1177
1178 if (!mhi_chan->mhi_dev)
1179 continue;
1180
1181 mutex_lock(&mhi_chan->lock);
1182 /* Skip if the channel is not currently suspended */
1183 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1184 if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
1185 mutex_unlock(&mhi_chan->lock);
1186 continue;
1187 }
1188
1189 dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
1190 /* Set channel state to RUNNING */
1191 mhi_chan->state = MHI_CH_STATE_RUNNING;
1192 tmp &= ~CHAN_CTX_CHSTATE_MASK;
1193 tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
1194 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1195 mutex_unlock(&mhi_chan->lock);
1196 }
1197 }
1198
1199 static void mhi_ep_release_device(struct device *dev)
1200 {
1201 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1202
1203 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1204 mhi_dev->mhi_cntrl->mhi_dev = NULL;
1205
1206 /*
1207 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
1208 * devices for the channels will only get created in mhi_ep_create_device()
1209 * if the mhi_dev associated with the channel is NULL.
1210 */
1211 if (mhi_dev->ul_chan)
1212 mhi_dev->ul_chan->mhi_dev = NULL;
1213
1214 if (mhi_dev->dl_chan)
1215 mhi_dev->dl_chan->mhi_dev = NULL;
1216
1217 kfree(mhi_dev);
1218 }
1219
1220 static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
1221 enum mhi_device_type dev_type)
1222 {
1223 struct mhi_ep_device *mhi_dev;
1224 struct device *dev;
1225
1226 mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
1227 if (!mhi_dev)
1228 return ERR_PTR(-ENOMEM);
1229
1230 dev = &mhi_dev->dev;
1231 device_initialize(dev);
1232 dev->bus = &mhi_ep_bus_type;
1233 dev->release = mhi_ep_release_device;
1234
1235 /* Controller device is always allocated first */
1236 if (dev_type == MHI_DEVICE_CONTROLLER)
1237 /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
1238 dev->parent = mhi_cntrl->cntrl_dev;
1239 else
1240 /* for MHI client devices, parent is the MHI controller device */
1241 dev->parent = &mhi_cntrl->mhi_dev->dev;
1242
1243 mhi_dev->mhi_cntrl = mhi_cntrl;
1244 mhi_dev->dev_type = dev_type;
1245
1246 return mhi_dev;
1247 }
1248
1249 /*
1250 * MHI channels are always defined in pairs, with UL as the even numbered
1251 * channel and DL as the odd numbered one. This function takes the UL channel
1252 * (primary) as ch_id and always looks at the next entry in the channel list
1253 * for the corresponding DL channel (secondary).
1254 */
1255 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
1256 {
1257 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
1258 struct device *dev = mhi_cntrl->cntrl_dev;
1259 struct mhi_ep_device *mhi_dev;
1260 int ret;
1261
1262 /* Check if the channel name is the same for both UL and DL */
1263 if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
1264 dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
1265 mhi_chan->name, mhi_chan[1].name);
1266 return -EINVAL;
1267 }
1268
1269 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
1270 if (IS_ERR(mhi_dev))
1271 return PTR_ERR(mhi_dev);
1272
1273 /* Configure primary channel */
1274 mhi_dev->ul_chan = mhi_chan;
1275 get_device(&mhi_dev->dev);
1276 mhi_chan->mhi_dev = mhi_dev;
1277
1278 /* Configure secondary channel as well */
1279 mhi_chan++;
1280 mhi_dev->dl_chan = mhi_chan;
1281 get_device(&mhi_dev->dev);
1282 mhi_chan->mhi_dev = mhi_dev;
1283
1284 /* Channel name is same for both UL and DL */
1285 mhi_dev->name = mhi_chan->name;
1286 ret = dev_set_name(&mhi_dev->dev, "%s_%s",
1287 dev_name(&mhi_cntrl->mhi_dev->dev),
1288 mhi_dev->name);
1289 if (ret) {
1290 put_device(&mhi_dev->dev);
1291 return ret;
1292 }
1293
1294 ret = device_add(&mhi_dev->dev);
1295 if (ret)
1296 put_device(&mhi_dev->dev);
1297
1298 return ret;
1299 }
1300
1301 static int mhi_ep_destroy_device(struct device *dev, void *data)
1302 {
1303 struct mhi_ep_device *mhi_dev;
1304 struct mhi_ep_cntrl *mhi_cntrl;
1305 struct mhi_ep_chan *ul_chan, *dl_chan;
1306
1307 if (dev->bus != &mhi_ep_bus_type)
1308 return 0;
1309
1310 mhi_dev = to_mhi_ep_device(dev);
1311 mhi_cntrl = mhi_dev->mhi_cntrl;
1312
1313 /* Only destroy devices created for channels */
1314 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1315 return 0;
1316
1317 ul_chan = mhi_dev->ul_chan;
1318 dl_chan = mhi_dev->dl_chan;
1319
1320 if (ul_chan)
1321 put_device(&ul_chan->mhi_dev->dev);
1322
1323 if (dl_chan)
1324 put_device(&dl_chan->mhi_dev->dev);
1325
1326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
1327 mhi_dev->name);
1328
1329 /* Notify the client and remove the device from MHI bus */
1330 device_del(dev);
1331 put_device(dev);
1332
1333 return 0;
1334 }
1335
1336 static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
1337 const struct mhi_ep_cntrl_config *config)
1338 {
1339 const struct mhi_ep_channel_config *ch_cfg;
1340 struct device *dev = mhi_cntrl->cntrl_dev;
1341 u32 chan, i;
1342 int ret = -EINVAL;
1343
1344 mhi_cntrl->max_chan = config->max_channels;
1345
1346 /*
1347 * Allocate space for the max_channels supported by the MHI endpoint and
1348 * populate only the defined channels.
1349 */
1350 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
1351 GFP_KERNEL);
1352 if (!mhi_cntrl->mhi_chan)
1353 return -ENOMEM;
1354
1355 for (i = 0; i < config->num_channels; i++) {
1356 struct mhi_ep_chan *mhi_chan;
1357
1358 ch_cfg = &config->ch_cfg[i];
1359
1360 chan = ch_cfg->num;
1361 if (chan >= mhi_cntrl->max_chan) {
1362 dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
1363 chan, mhi_cntrl->max_chan);
1364 goto error_chan_cfg;
1365 }
1366
1367 /* Bi-directional and direction less channels are not supported */
1368 if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
1369 dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
1370 ch_cfg->dir, chan);
1371 goto error_chan_cfg;
1372 }
1373
1374 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1375 mhi_chan->name = ch_cfg->name;
1376 mhi_chan->chan = chan;
1377 mhi_chan->dir = ch_cfg->dir;
1378 mutex_init(&mhi_chan->lock);
1379 }
1380
1381 return 0;
1382
1383 error_chan_cfg:
1384 kfree(mhi_cntrl->mhi_chan);
1385
1386 return ret;
1387 }
1388
1389 /*
1390 * Allocate channel and command rings here. Event rings will be allocated
1391 * in mhi_ep_power_up() as the config comes from the host.
1392 */
1393 int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
1394 const struct mhi_ep_cntrl_config *config)
1395 {
1396 struct mhi_ep_device *mhi_dev;
1397 int ret;
1398
1399 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
1400 return -EINVAL;
1401
1402 ret = mhi_ep_chan_init(mhi_cntrl, config);
1403 if (ret)
1404 return ret;
1405
1406 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
1407 if (!mhi_cntrl->mhi_cmd) {
1408 ret = -ENOMEM;
1409 goto err_free_ch;
1410 }
1411
1412 mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
1413 sizeof(struct mhi_ring_element), 0,
1414 SLAB_CACHE_DMA, NULL);
1415 if (!mhi_cntrl->ev_ring_el_cache) {
1416 ret = -ENOMEM;
1417 goto err_free_cmd;
1418 }
1419
1420 mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
1421 SLAB_CACHE_DMA, NULL);
1422 if (!mhi_cntrl->tre_buf_cache) {
1423 ret = -ENOMEM;
1424 goto err_destroy_ev_ring_el_cache;
1425 }
1426
1427 mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
1428 sizeof(struct mhi_ep_ring_item), 0,
1429 0, NULL);
1430 if (!mhi_cntrl->ring_item_cache) {
1431 ret = -ENOMEM;
1432 goto err_destroy_tre_buf_cache;
1433 }
1434 INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
1435 INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
1436 INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
1437 INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
1438
1439 mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
1440 if (!mhi_cntrl->wq) {
1441 ret = -ENOMEM;
1442 goto err_destroy_ring_item_cache;
1443 }
1444
1445 INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
1446 INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
1447 spin_lock_init(&mhi_cntrl->list_lock);
1448 mutex_init(&mhi_cntrl->state_lock);
1449 mutex_init(&mhi_cntrl->event_lock);
1450
1451 /* Set MHI version and AMSS EE before enumeration */
1452 mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
1453 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1454
1455 /* Set controller index */
1456 ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
1457 if (ret < 0)
1458 goto err_destroy_wq;
1459
1460 mhi_cntrl->index = ret;
1461
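/* Keep the IRQ disabled until mhi_ep_power_up() enables it */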
1462 irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
1463 ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
1464 "doorbell_irq", mhi_cntrl);
1465 if (ret) {
1466 dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
1467 goto err_ida_free;
1468 }
1469
1470 /* Allocate the controller device */
1471 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
1472 if (IS_ERR(mhi_dev)) {
1473 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
1474 ret = PTR_ERR(mhi_dev);
1475 goto err_free_irq;
1476 }
1477
1478 ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
1479 if (ret)
1480 goto err_put_dev;
1481
1482 mhi_dev->name = dev_name(&mhi_dev->dev);
1483 mhi_cntrl->mhi_dev = mhi_dev;
1484
1485 ret = device_add(&mhi_dev->dev);
1486 if (ret)
1487 goto err_put_dev;
1488
1489 dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
1490
1491 return 0;
1492
1493 err_put_dev:
1494 put_device(&mhi_dev->dev);
1495 err_free_irq:
1496 free_irq(mhi_cntrl->irq, mhi_cntrl);
1497 err_ida_free:
1498 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
1499 err_destroy_wq:
1500 destroy_workqueue(mhi_cntrl->wq);
1501 err_destroy_ring_item_cache:
1502 kmem_cache_destroy(mhi_cntrl->ring_item_cache);
1503 err_destroy_ev_ring_el_cache:
1504 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
1505 err_destroy_tre_buf_cache:
1506 kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
1507 err_free_cmd:
1508 kfree(mhi_cntrl->mhi_cmd);
1509 err_free_ch:
1510 kfree(mhi_cntrl->mhi_chan);
1511
1512 return ret;
1513 }
1514 EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
1515
1516 /*
1517 * It is expected that the controller drivers will power down the MHI EP stack
1518 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
1519 */
1520 void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
1521 {
1522 struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
1523
1524 destroy_workqueue(mhi_cntrl->wq);
1525
1526 free_irq(mhi_cntrl->irq, mhi_cntrl);
1527
1528 kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
1529 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
1530 kmem_cache_destroy(mhi_cntrl->ring_item_cache);
1531 kfree(mhi_cntrl->mhi_cmd);
1532 kfree(mhi_cntrl->mhi_chan);
1533
1534 device_del(&mhi_dev->dev);
1535 put_device(&mhi_dev->dev);
1536
1537 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
1538 }
1539 EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
1540
1541 static int mhi_ep_driver_probe(struct device *dev)
1542 {
1543 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1544 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
1545 struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
1546 struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
1547
1548 ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
1549 dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
1550
1551 return mhi_drv->probe(mhi_dev, mhi_dev->id);
1552 }
1553
1554 static int mhi_ep_driver_remove(struct device *dev)
1555 {
1556 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1557 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
1558 struct mhi_result result = {};
1559 struct mhi_ep_chan *mhi_chan;
1560 int dir;
1561
1562 /* Skip if it is a controller device */
1563 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1564 return 0;
1565
1566 /* Disconnect the channels associated with the driver */
1567 for (dir = 0; dir < 2; dir++) {
1568 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1569
1570 if (!mhi_chan)
1571 continue;
1572
1573 mutex_lock(&mhi_chan->lock);
1574 /* Send channel disconnect status to the client driver */
1575 if (mhi_chan->xfer_cb) {
1576 result.transaction_status = -ENOTCONN;
1577 result.bytes_xferd = 0;
1578 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1579 }
1580
1581 mhi_chan->state = MHI_CH_STATE_DISABLED;
1582 mhi_chan->xfer_cb = NULL;
1583 mutex_unlock(&mhi_chan->lock);
1584 }
1585
1586 /* Remove the client driver now */
1587 mhi_drv->remove(mhi_dev);
1588
1589 return 0;
1590 }
1591
1592 int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
1593 {
1594 struct device_driver *driver = &mhi_drv->driver;
1595
1596 if (!mhi_drv->probe || !mhi_drv->remove)
1597 return -EINVAL;
1598
1599 /* Client drivers should have callbacks defined for both channels */
1600 if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
1601 return -EINVAL;
1602
1603 driver->bus = &mhi_ep_bus_type;
1604 driver->owner = owner;
1605 driver->probe = mhi_ep_driver_probe;
1606 driver->remove = mhi_ep_driver_remove;
1607
1608 return driver_register(driver);
1609 }
1610 EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
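/*
 * Illustrative sketch of a client driver registration. All "foo_*" names and
 * the channel name are hypothetical, and the module registration helper is
 * assumed to be provided by <linux/mhi_ep.h>:
 *
 *	static const struct mhi_device_id foo_id_table[] = {
 *		{ .chan = "FOO" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.ul_xfer_cb = foo_ul_callback,
 *		.dl_xfer_cb = foo_dl_callback,
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *	module_mhi_ep_driver(foo_driver);
 */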
1611
1612 void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
1613 {
1614 driver_unregister(&mhi_drv->driver);
1615 }
1616 EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
1617
1618 static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
1619 {
1620 const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1621
1622 return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
1623 mhi_dev->name);
1624 }
1625
1626 static int mhi_ep_match(struct device *dev, struct device_driver *drv)
1627 {
1628 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
1629 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
1630 const struct mhi_device_id *id;
1631
1632 /*
1633 * If the device is a controller type, then there is no client driver
1634 * associated with it.
1635 */
1636 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1637 return 0;
1638
1639 for (id = mhi_drv->id_table; id->chan[0]; id++)
1640 if (!strcmp(mhi_dev->name, id->chan)) {
1641 mhi_dev->id = id;
1642 return 1;
1643 }
1644
1645 return 0;
1646 };
1647
1648 struct bus_type mhi_ep_bus_type = {
1649 .name = "mhi_ep",
1650 .dev_name = "mhi_ep",
1651 .match = mhi_ep_match,
1652 .uevent = mhi_ep_uevent,
1653 };
1654
1655 static int __init mhi_ep_init(void)
1656 {
1657 return bus_register(&mhi_ep_bus_type);
1658 }
1659
1660 static void __exit mhi_ep_exit(void)
1661 {
1662 bus_unregister(&mhi_ep_bus_type);
1663 }
1664
1665 postcore_initcall(mhi_ep_init);
1666 module_exit(mhi_ep_exit);
1667
1668 MODULE_LICENSE("GPL v2");
1669 MODULE_DESCRIPTION("MHI Bus Endpoint stack");
1670 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
1671