xref: /openbmc/linux/drivers/bus/mhi/host/init.c (revision 228662b0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/debugfs.h>
9 #include <linux/device.h>
10 #include <linux/dma-direction.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/idr.h>
13 #include <linux/interrupt.h>
14 #include <linux/list.h>
15 #include <linux/mhi.h>
16 #include <linux/mod_devicetable.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/wait.h>
21 #include "internal.h"
22 
23 static DEFINE_IDA(mhi_controller_ida);
24 
25 const char * const mhi_ee_str[MHI_EE_MAX] = {
26 	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
27 	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
28 	[MHI_EE_AMSS] = "MISSION MODE",
29 	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
30 	[MHI_EE_WFW] = "WLAN FIRMWARE",
31 	[MHI_EE_PTHRU] = "PASS THROUGH",
32 	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
33 	[MHI_EE_FP] = "FLASH PROGRAMMER",
34 	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
35 	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
36 };
37 
38 const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
39 	[DEV_ST_TRANSITION_PBL] = "PBL",
40 	[DEV_ST_TRANSITION_READY] = "READY",
41 	[DEV_ST_TRANSITION_SBL] = "SBL",
42 	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
43 	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
44 	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
45 	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
46 };
47 
48 const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
49 	[MHI_CH_STATE_TYPE_RESET] = "RESET",
50 	[MHI_CH_STATE_TYPE_STOP] = "STOP",
51 	[MHI_CH_STATE_TYPE_START] = "START",
52 };
53 
54 static const char * const mhi_pm_state_str[] = {
55 	[MHI_PM_STATE_DISABLE] = "DISABLE",
56 	[MHI_PM_STATE_POR] = "POWER ON RESET",
57 	[MHI_PM_STATE_M0] = "M0",
58 	[MHI_PM_STATE_M2] = "M2",
59 	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
60 	[MHI_PM_STATE_M3] = "M3",
61 	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
62 	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
63 	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
64 	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
65 	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
66 	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
67 };
68 
69 const char *to_mhi_pm_state_str(u32 state)
70 {
71 	int index;
72 
73 	if (state)
74 		index = __fls(state);
75 
76 	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
77 		return "Invalid State";
78 
79 	return mhi_pm_state_str[index];
80 }
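
The MHI PM states are bit flags whose values are chosen so that the position of the highest set bit indexes the table above; the core uses this helper purely for logging. A hedged usage sketch (the surrounding device and controller variables are assumed, not defined here):

	/* pm_state is a bitmask; the highest set bit selects the string */
	dev_dbg(dev, "MHI PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));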
81 
82 static ssize_t serial_number_show(struct device *dev,
83 				  struct device_attribute *attr,
84 				  char *buf)
85 {
86 	struct mhi_device *mhi_dev = to_mhi_device(dev);
87 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
88 
89 	return sysfs_emit(buf, "Serial Number: %u\n",
90 			mhi_cntrl->serial_number);
91 }
92 static DEVICE_ATTR_RO(serial_number);
93 
94 static ssize_t oem_pk_hash_show(struct device *dev,
95 				struct device_attribute *attr,
96 				char *buf)
97 {
98 	struct mhi_device *mhi_dev = to_mhi_device(dev);
99 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
100 	int i, cnt = 0;
101 
102 	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
103 		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
104 				i, mhi_cntrl->oem_pk_hash[i]);
105 
106 	return cnt;
107 }
108 static DEVICE_ATTR_RO(oem_pk_hash);
109 
110 static struct attribute *mhi_dev_attrs[] = {
111 	&dev_attr_serial_number.attr,
112 	&dev_attr_oem_pk_hash.attr,
113 	NULL,
114 };
115 ATTRIBUTE_GROUPS(mhi_dev);
116 
117 /* MHI protocol requires the transfer ring to be aligned to the ring length */
118 static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
119 				  struct mhi_ring *ring,
120 				  u64 len)
121 {
122 	ring->alloc_size = len + (len - 1);
123 	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
124 					       &ring->dma_handle, GFP_KERNEL);
125 	if (!ring->pre_aligned)
126 		return -ENOMEM;
127 
128 	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
129 	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
130 
131 	return 0;
132 }
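
dma_alloc_coherent() gives no alignment guarantee beyond the platform's, so the helper above over-allocates by len - 1 bytes and rounds the returned bus address up to the next multiple of len. A worked example with hypothetical values (the mask arithmetic assumes len is a power of two):

	/*
	 * len         = 0x1000                        ring of 4 KiB
	 * alloc_size  = 0x1000 + 0xfff = 0x1fff       worst-case slack included
	 * dma_handle  = 0x80001234                    from dma_alloc_coherent()
	 * iommu_base  = (0x80001234 + 0xfff) & ~0xfff = 0x80002000
	 * base        = pre_aligned + (0x80002000 - 0x80001234)
	 *
	 * The device-visible ring base is now aligned to the ring length and
	 * the CPU-visible base points at the same offset inside the buffer.
	 */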
133 
134 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
135 {
136 	int i;
137 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
138 
139 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
140 		if (mhi_event->offload_ev)
141 			continue;
142 
143 		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
144 	}
145 
146 	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
147 }
148 
149 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
150 {
151 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
152 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
153 	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
154 	int i, ret;
155 
156 	/* if controller driver has set irq_flags, use it */
157 	if (mhi_cntrl->irq_flags)
158 		irq_flags = mhi_cntrl->irq_flags;
159 
160 	/* Setup BHI_INTVEC IRQ */
161 	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
162 				   mhi_intvec_threaded_handler,
163 				   irq_flags,
164 				   "bhi", mhi_cntrl);
165 	if (ret)
166 		return ret;
167 
168 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
169 		if (mhi_event->offload_ev)
170 			continue;
171 
172 		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
173 			dev_err(dev, "irq %d not available for event ring\n",
174 				mhi_event->irq);
175 			ret = -EINVAL;
176 			goto error_request;
177 		}
178 
179 		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
180 				  mhi_irq_handler,
181 				  irq_flags,
182 				  "mhi", mhi_event);
183 		if (ret) {
184 			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
185 				mhi_cntrl->irq[mhi_event->irq], i);
186 			goto error_request;
187 		}
188 	}
189 
190 	return 0;
191 
192 error_request:
193 	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
194 		if (mhi_event->offload_ev)
195 			continue;
196 
197 		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
198 	}
199 	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
200 
201 	return ret;
202 }
203 
204 void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
205 {
206 	int i;
207 	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
208 	struct mhi_cmd *mhi_cmd;
209 	struct mhi_event *mhi_event;
210 	struct mhi_ring *ring;
211 
212 	mhi_cmd = mhi_cntrl->mhi_cmd;
213 	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
214 		ring = &mhi_cmd->ring;
215 		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
216 				  ring->pre_aligned, ring->dma_handle);
217 		ring->base = NULL;
218 		ring->iommu_base = 0;
219 	}
220 
221 	dma_free_coherent(mhi_cntrl->cntrl_dev,
222 			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
223 			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
224 
225 	mhi_event = mhi_cntrl->mhi_event;
226 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
227 		if (mhi_event->offload_ev)
228 			continue;
229 
230 		ring = &mhi_event->ring;
231 		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
232 				  ring->pre_aligned, ring->dma_handle);
233 		ring->base = NULL;
234 		ring->iommu_base = 0;
235 	}
236 
237 	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
238 			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
239 			  mhi_ctxt->er_ctxt_addr);
240 
241 	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
242 			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
243 			  mhi_ctxt->chan_ctxt_addr);
244 
245 	kfree(mhi_ctxt);
246 	mhi_cntrl->mhi_ctxt = NULL;
247 }
248 
249 int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
250 {
251 	struct mhi_ctxt *mhi_ctxt;
252 	struct mhi_chan_ctxt *chan_ctxt;
253 	struct mhi_event_ctxt *er_ctxt;
254 	struct mhi_cmd_ctxt *cmd_ctxt;
255 	struct mhi_chan *mhi_chan;
256 	struct mhi_event *mhi_event;
257 	struct mhi_cmd *mhi_cmd;
258 	u32 tmp;
259 	int ret = -ENOMEM, i;
260 
261 	atomic_set(&mhi_cntrl->dev_wake, 0);
262 	atomic_set(&mhi_cntrl->pending_pkts, 0);
263 
264 	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
265 	if (!mhi_ctxt)
266 		return -ENOMEM;
267 
268 	/* Setup channel ctxt */
269 	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
270 						 sizeof(*mhi_ctxt->chan_ctxt) *
271 						 mhi_cntrl->max_chan,
272 						 &mhi_ctxt->chan_ctxt_addr,
273 						 GFP_KERNEL);
274 	if (!mhi_ctxt->chan_ctxt)
275 		goto error_alloc_chan_ctxt;
276 
277 	mhi_chan = mhi_cntrl->mhi_chan;
278 	chan_ctxt = mhi_ctxt->chan_ctxt;
279 	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
280 		/* Skip if it is an offload channel */
281 		if (mhi_chan->offload_ch)
282 			continue;
283 
284 		tmp = le32_to_cpu(chan_ctxt->chcfg);
285 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
286 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
287 		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
288 		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
289 		tmp &= ~CHAN_CTX_POLLCFG_MASK;
290 		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
291 		chan_ctxt->chcfg = cpu_to_le32(tmp);
292 
293 		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
294 		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
295 
296 		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
297 		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
298 	}
299 
300 	/* Setup event context */
301 	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
302 					       sizeof(*mhi_ctxt->er_ctxt) *
303 					       mhi_cntrl->total_ev_rings,
304 					       &mhi_ctxt->er_ctxt_addr,
305 					       GFP_KERNEL);
306 	if (!mhi_ctxt->er_ctxt)
307 		goto error_alloc_er_ctxt;
308 
309 	er_ctxt = mhi_ctxt->er_ctxt;
310 	mhi_event = mhi_cntrl->mhi_event;
311 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
312 		     mhi_event++) {
313 		struct mhi_ring *ring = &mhi_event->ring;
314 
315 		/* Skip if it is an offload event */
316 		if (mhi_event->offload_ev)
317 			continue;
318 
319 		tmp = le32_to_cpu(er_ctxt->intmod);
320 		tmp &= ~EV_CTX_INTMODC_MASK;
321 		tmp &= ~EV_CTX_INTMODT_MASK;
322 		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
323 		er_ctxt->intmod = cpu_to_le32(tmp);
324 
325 		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
326 		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
327 		mhi_event->db_cfg.db_mode = true;
328 
329 		ring->el_size = sizeof(struct mhi_ring_element);
330 		ring->len = ring->el_size * ring->elements;
331 		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
332 		if (ret)
333 			goto error_alloc_er;
334 
335 		/*
336 		 * If the read pointer equals the write pointer, then the
337 		 * ring is empty
338 		 */
339 		ring->rp = ring->wp = ring->base;
340 		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
341 		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
342 		er_ctxt->rlen = cpu_to_le64(ring->len);
343 		ring->ctxt_wp = &er_ctxt->wp;
344 	}
345 
346 	/* Setup cmd context */
347 	ret = -ENOMEM;
348 	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
349 						sizeof(*mhi_ctxt->cmd_ctxt) *
350 						NR_OF_CMD_RINGS,
351 						&mhi_ctxt->cmd_ctxt_addr,
352 						GFP_KERNEL);
353 	if (!mhi_ctxt->cmd_ctxt)
354 		goto error_alloc_er;
355 
356 	mhi_cmd = mhi_cntrl->mhi_cmd;
357 	cmd_ctxt = mhi_ctxt->cmd_ctxt;
358 	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
359 		struct mhi_ring *ring = &mhi_cmd->ring;
360 
361 		ring->el_size = sizeof(struct mhi_ring_element);
362 		ring->elements = CMD_EL_PER_RING;
363 		ring->len = ring->el_size * ring->elements;
364 		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
365 		if (ret)
366 			goto error_alloc_cmd;
367 
368 		ring->rp = ring->wp = ring->base;
369 		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
370 		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
371 		cmd_ctxt->rlen = cpu_to_le64(ring->len);
372 		ring->ctxt_wp = &cmd_ctxt->wp;
373 	}
374 
375 	mhi_cntrl->mhi_ctxt = mhi_ctxt;
376 
377 	return 0;
378 
379 error_alloc_cmd:
380 	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
381 		struct mhi_ring *ring = &mhi_cmd->ring;
382 
383 		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
384 				  ring->pre_aligned, ring->dma_handle);
385 	}
386 	dma_free_coherent(mhi_cntrl->cntrl_dev,
387 			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
388 			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
389 	i = mhi_cntrl->total_ev_rings;
390 	mhi_event = mhi_cntrl->mhi_event + i;
391 
392 error_alloc_er:
393 	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
394 		struct mhi_ring *ring = &mhi_event->ring;
395 
396 		if (mhi_event->offload_ev)
397 			continue;
398 
399 		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
400 				  ring->pre_aligned, ring->dma_handle);
401 	}
402 	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
403 			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
404 			  mhi_ctxt->er_ctxt_addr);
405 
406 error_alloc_er_ctxt:
407 	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
408 			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
409 			  mhi_ctxt->chan_ctxt_addr);
410 
411 error_alloc_chan_ctxt:
412 	kfree(mhi_ctxt);
413 
414 	return ret;
415 }
416 
417 int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
418 {
419 	u32 val;
420 	int i, ret;
421 	struct mhi_chan *mhi_chan;
422 	struct mhi_event *mhi_event;
423 	void __iomem *base = mhi_cntrl->regs;
424 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
425 	struct {
426 		u32 offset;
427 		u32 mask;
428 		u32 val;
429 	} reg_info[] = {
430 		{
431 			CCABAP_HIGHER, U32_MAX,
432 			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
433 		},
434 		{
435 			CCABAP_LOWER, U32_MAX,
436 			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
437 		},
438 		{
439 			ECABAP_HIGHER, U32_MAX,
440 			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
441 		},
442 		{
443 			ECABAP_LOWER, U32_MAX,
444 			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
445 		},
446 		{
447 			CRCBAP_HIGHER, U32_MAX,
448 			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
449 		},
450 		{
451 			CRCBAP_LOWER, U32_MAX,
452 			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
453 		},
454 		{
455 			MHICFG, MHICFG_NER_MASK,
456 			mhi_cntrl->total_ev_rings,
457 		},
458 		{
459 			MHICFG, MHICFG_NHWER_MASK,
460 			mhi_cntrl->hw_ev_rings,
461 		},
462 		{
463 			MHICTRLBASE_HIGHER, U32_MAX,
464 			upper_32_bits(mhi_cntrl->iova_start),
465 		},
466 		{
467 			MHICTRLBASE_LOWER, U32_MAX,
468 			lower_32_bits(mhi_cntrl->iova_start),
469 		},
470 		{
471 			MHIDATABASE_HIGHER, U32_MAX,
472 			upper_32_bits(mhi_cntrl->iova_start),
473 		},
474 		{
475 			MHIDATABASE_LOWER, U32_MAX,
476 			lower_32_bits(mhi_cntrl->iova_start),
477 		},
478 		{
479 			MHICTRLLIMIT_HIGHER, U32_MAX,
480 			upper_32_bits(mhi_cntrl->iova_stop),
481 		},
482 		{
483 			MHICTRLLIMIT_LOWER, U32_MAX,
484 			lower_32_bits(mhi_cntrl->iova_stop),
485 		},
486 		{
487 			MHIDATALIMIT_HIGHER, U32_MAX,
488 			upper_32_bits(mhi_cntrl->iova_stop),
489 		},
490 		{
491 			MHIDATALIMIT_LOWER, U32_MAX,
492 			lower_32_bits(mhi_cntrl->iova_stop),
493 		},
494 		{ 0, 0, 0 }
495 	};
496 
497 	dev_dbg(dev, "Initializing MHI registers\n");
498 
499 	/* Read channel db offset */
500 	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
501 	if (ret) {
502 		dev_err(dev, "Unable to read CHDBOFF register\n");
503 		return -EIO;
504 	}
505 
506 	/* Setup wake db */
507 	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
508 	mhi_cntrl->wake_set = false;
509 
510 	/* Setup channel db address for each channel in tre_ring */
511 	mhi_chan = mhi_cntrl->mhi_chan;
512 	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
513 		mhi_chan->tre_ring.db_addr = base + val;
514 
515 	/* Read event ring db offset */
516 	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
517 	if (ret) {
518 		dev_err(dev, "Unable to read ERDBOFF register\n");
519 		return -EIO;
520 	}
521 
522 	/* Setup event db address for each ev_ring */
523 	mhi_event = mhi_cntrl->mhi_event;
524 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
525 		if (mhi_event->offload_ev)
526 			continue;
527 
528 		mhi_event->ring.db_addr = base + val;
529 	}
530 
531 	/* Setup DB register for primary CMD rings */
532 	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
533 
534 	/* Write to MMIO registers */
535 	for (i = 0; reg_info[i].offset; i++)
536 		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
537 				    reg_info[i].mask, reg_info[i].val);
538 
539 	return 0;
540 }
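
Each reg_info[] entry above becomes one masked register update through mhi_write_reg_field(). A hedged sketch of what such a masked read-modify-write amounts to (illustrative only; the driver's actual helper routes the I/O through the controller's read_reg/write_reg callbacks):

	#include <linux/bitops.h>
	#include <linux/io.h>

	/* Illustrative only: update the bits selected by mask to val */
	static void example_write_field(void __iomem *addr, u32 mask, u32 val)
	{
		u32 tmp = readl(addr);

		tmp &= ~mask;			/* clear the target field   */
		tmp |= val << __ffs(mask);	/* shift new value in place */
		writel(tmp, addr);
	}

For the entries using a U32_MAX mask this degenerates into a plain 32-bit write of the value.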
541 
542 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
543 			  struct mhi_chan *mhi_chan)
544 {
545 	struct mhi_ring *buf_ring;
546 	struct mhi_ring *tre_ring;
547 	struct mhi_chan_ctxt *chan_ctxt;
548 	u32 tmp;
549 
550 	buf_ring = &mhi_chan->buf_ring;
551 	tre_ring = &mhi_chan->tre_ring;
552 	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
553 
554 	if (!chan_ctxt->rbase) /* Already uninitialized */
555 		return;
556 
557 	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
558 			  tre_ring->pre_aligned, tre_ring->dma_handle);
559 	vfree(buf_ring->base);
560 
561 	buf_ring->base = tre_ring->base = NULL;
562 	tre_ring->ctxt_wp = NULL;
563 	chan_ctxt->rbase = 0;
564 	chan_ctxt->rlen = 0;
565 	chan_ctxt->rp = 0;
566 	chan_ctxt->wp = 0;
567 
568 	tmp = le32_to_cpu(chan_ctxt->chcfg);
569 	tmp &= ~CHAN_CTX_CHSTATE_MASK;
570 	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
571 	chan_ctxt->chcfg = cpu_to_le32(tmp);
572 
573 	/* Update to all cores */
574 	smp_wmb();
575 }
576 
577 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
578 		       struct mhi_chan *mhi_chan)
579 {
580 	struct mhi_ring *buf_ring;
581 	struct mhi_ring *tre_ring;
582 	struct mhi_chan_ctxt *chan_ctxt;
583 	u32 tmp;
584 	int ret;
585 
586 	buf_ring = &mhi_chan->buf_ring;
587 	tre_ring = &mhi_chan->tre_ring;
588 	tre_ring->el_size = sizeof(struct mhi_ring_element);
589 	tre_ring->len = tre_ring->el_size * tre_ring->elements;
590 	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
591 	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
592 	if (ret)
593 		return -ENOMEM;
594 
595 	buf_ring->el_size = sizeof(struct mhi_buf_info);
596 	buf_ring->len = buf_ring->el_size * buf_ring->elements;
597 	buf_ring->base = vzalloc(buf_ring->len);
598 
599 	if (!buf_ring->base) {
600 		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
601 				  tre_ring->pre_aligned, tre_ring->dma_handle);
602 		return -ENOMEM;
603 	}
604 
605 	tmp = le32_to_cpu(chan_ctxt->chcfg);
606 	tmp &= ~CHAN_CTX_CHSTATE_MASK;
607 	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
608 	chan_ctxt->chcfg = cpu_to_le32(tmp);
609 
610 	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
611 	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
612 	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
613 	tre_ring->ctxt_wp = &chan_ctxt->wp;
614 
615 	tre_ring->rp = tre_ring->wp = tre_ring->base;
616 	buf_ring->rp = buf_ring->wp = buf_ring->base;
617 	mhi_chan->db_cfg.db_mode = 1;
618 
619 	/* Update to all cores */
620 	smp_wmb();
621 
622 	return 0;
623 }
624 
625 static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
626 			const struct mhi_controller_config *config)
627 {
628 	struct mhi_event *mhi_event;
629 	const struct mhi_event_config *event_cfg;
630 	struct device *dev = mhi_cntrl->cntrl_dev;
631 	int i, num;
632 
633 	num = config->num_events;
634 	mhi_cntrl->total_ev_rings = num;
635 	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
636 				       GFP_KERNEL);
637 	if (!mhi_cntrl->mhi_event)
638 		return -ENOMEM;
639 
640 	/* Populate event ring */
641 	mhi_event = mhi_cntrl->mhi_event;
642 	for (i = 0; i < num; i++) {
643 		event_cfg = &config->event_cfg[i];
644 
645 		mhi_event->er_index = i;
646 		mhi_event->ring.elements = event_cfg->num_elements;
647 		mhi_event->intmod = event_cfg->irq_moderation_ms;
648 		mhi_event->irq = event_cfg->irq;
649 
650 		if (event_cfg->channel != U32_MAX) {
651 			/* This event ring has a dedicated channel */
652 			mhi_event->chan = event_cfg->channel;
653 			if (mhi_event->chan >= mhi_cntrl->max_chan) {
654 				dev_err(dev,
655 					"Event Ring channel not available\n");
656 				goto error_ev_cfg;
657 			}
658 
659 			mhi_event->mhi_chan =
660 				&mhi_cntrl->mhi_chan[mhi_event->chan];
661 		}
662 
663 		/* Priority is fixed to 1 for now */
664 		mhi_event->priority = 1;
665 
666 		mhi_event->db_cfg.brstmode = event_cfg->mode;
667 		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
668 			goto error_ev_cfg;
669 
670 		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
671 			mhi_event->db_cfg.process_db = mhi_db_brstmode;
672 		else
673 			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
674 
675 		mhi_event->data_type = event_cfg->data_type;
676 
677 		switch (mhi_event->data_type) {
678 		case MHI_ER_DATA:
679 			mhi_event->process_event = mhi_process_data_event_ring;
680 			break;
681 		case MHI_ER_CTRL:
682 			mhi_event->process_event = mhi_process_ctrl_ev_ring;
683 			break;
684 		default:
685 			dev_err(dev, "Event Ring type not supported\n");
686 			goto error_ev_cfg;
687 		}
688 
689 		mhi_event->hw_ring = event_cfg->hardware_event;
690 		if (mhi_event->hw_ring)
691 			mhi_cntrl->hw_ev_rings++;
692 		else
693 			mhi_cntrl->sw_ev_rings++;
694 
695 		mhi_event->cl_manage = event_cfg->client_managed;
696 		mhi_event->offload_ev = event_cfg->offload_channel;
697 		mhi_event++;
698 	}
699 
700 	return 0;
701 
702 error_ev_cfg:
703 
704 	kfree(mhi_cntrl->mhi_event);
705 	return -EINVAL;
706 }
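
For reference, the event_cfg array that parse_ev_cfg() consumes is normally a static table in the controller driver. A minimal hypothetical example (names and sizes are illustrative, not taken from any real controller):

	#include <linux/mhi.h>

	static struct mhi_event_config example_mhi_events[] = {
		/* Event ring 0: control events, not bound to any channel */
		{
			.num_elements = 32,
			.irq_moderation_ms = 0,
			.irq = 1,
			.channel = U32_MAX,	/* 0 would wrongly bind it to channel 0 */
			.mode = MHI_DB_BRST_DISABLE,	/* 0 is an invalid burst mode */
			.data_type = MHI_ER_CTRL,
		},
		/* Event ring 1: transfer completions */
		{
			.num_elements = 256,
			.irq_moderation_ms = 5,
			.irq = 2,
			.channel = U32_MAX,
			.mode = MHI_DB_BRST_DISABLE,
			.data_type = MHI_ER_DATA,
		},
	};

Fields left out default to zero/false; note that .irq is an index into the controller's irq[] array rather than a Linux IRQ number, and .priority is currently overridden to 1 by the core.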
707 
708 static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
709 			const struct mhi_controller_config *config)
710 {
711 	const struct mhi_channel_config *ch_cfg;
712 	struct device *dev = mhi_cntrl->cntrl_dev;
713 	int i;
714 	u32 chan;
715 
716 	mhi_cntrl->max_chan = config->max_channels;
717 
718 	/*
719 	 * The allocation of MHI channels can exceed 32KB in some scenarios,
720 	 * so to avoid any possible memory allocation failures, vzalloc is
721 	 * used here
722 	 */
723 	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
724 				      sizeof(*mhi_cntrl->mhi_chan));
725 	if (!mhi_cntrl->mhi_chan)
726 		return -ENOMEM;
727 
728 	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
729 
730 	/* Populate channel configurations */
731 	for (i = 0; i < config->num_channels; i++) {
732 		struct mhi_chan *mhi_chan;
733 
734 		ch_cfg = &config->ch_cfg[i];
735 
736 		chan = ch_cfg->num;
737 		if (chan >= mhi_cntrl->max_chan) {
738 			dev_err(dev, "Channel %d not available\n", chan);
739 			goto error_chan_cfg;
740 		}
741 
742 		mhi_chan = &mhi_cntrl->mhi_chan[chan];
743 		mhi_chan->name = ch_cfg->name;
744 		mhi_chan->chan = chan;
745 
746 		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
747 		if (!mhi_chan->tre_ring.elements)
748 			goto error_chan_cfg;
749 
750 		/*
751 		 * For some channels, the local ring length should be bigger than
752 		 * the transfer ring length due to internal logical channels in
753 		 * the device, so the host can queue more buffers than the
754 		 * transfer ring length. For example, RSC channels should have a
755 		 * larger local channel length than the transfer ring length.
756 		 */
757 		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
758 		if (!mhi_chan->buf_ring.elements)
759 			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
760 		mhi_chan->er_index = ch_cfg->event_ring;
761 		mhi_chan->dir = ch_cfg->dir;
762 
763 		/*
764 		 * For most channels, chtype is identical to the channel direction.
765 		 * So, if it is not defined, assign the channel direction to
766 		 * chtype
767 		 */
768 		mhi_chan->type = ch_cfg->type;
769 		if (!mhi_chan->type)
770 			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
771 
772 		mhi_chan->ee_mask = ch_cfg->ee_mask;
773 		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
774 		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
775 		mhi_chan->offload_ch = ch_cfg->offload_channel;
776 		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
777 		mhi_chan->pre_alloc = ch_cfg->auto_queue;
778 		mhi_chan->wake_capable = ch_cfg->wake_capable;
779 
780 		/*
781 		 * If MHI host allocates buffers, then the channel direction
782 		 * should be DMA_FROM_DEVICE
783 		 */
784 		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
785 			dev_err(dev, "Invalid channel configuration\n");
786 			goto error_chan_cfg;
787 		}
788 
789 		/*
790 		 * Bi-directional and directionless channels must be
791 		 * offload channels
792 		 */
793 		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
794 		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
795 			dev_err(dev, "Invalid channel configuration\n");
796 			goto error_chan_cfg;
797 		}
798 
799 		if (!mhi_chan->offload_ch) {
800 			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
801 			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
802 				dev_err(dev, "Invalid Door bell mode\n");
803 				goto error_chan_cfg;
804 			}
805 		}
806 
807 		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
808 			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
809 		else
810 			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
811 
812 		mhi_chan->configured = true;
813 
814 		if (mhi_chan->lpm_notify)
815 			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
816 	}
817 
818 	return 0;
819 
820 error_chan_cfg:
821 	vfree(mhi_cntrl->mhi_chan);
822 
823 	return -EINVAL;
824 }
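
The matching channel table is usually a static array as well. A hypothetical uplink/downlink pair of a loopback-style channel, again with illustrative values only:

	static const struct mhi_channel_config example_mhi_channels[] = {
		{
			.name = "LOOPBACK",
			.num = 0,
			.num_elements = 64,
			.event_ring = 1,
			.dir = DMA_TO_DEVICE,
			.ee_mask = BIT(MHI_EE_AMSS),
			.doorbell = MHI_DB_BRST_DISABLE,	/* must not be left 0 */
		},
		{
			.name = "LOOPBACK",
			.num = 1,
			.num_elements = 64,
			.event_ring = 1,
			.dir = DMA_FROM_DEVICE,
			.ee_mask = BIT(MHI_EE_AMSS),
			.doorbell = MHI_DB_BRST_DISABLE,
		},
	};

Channels that set .auto_queue must be DMA_FROM_DEVICE, and bidirectional or direction-less channels are only accepted when .offload_channel is set, matching the checks above.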
825 
826 static int parse_config(struct mhi_controller *mhi_cntrl,
827 			const struct mhi_controller_config *config)
828 {
829 	int ret;
830 
831 	/* Parse MHI channel configuration */
832 	ret = parse_ch_cfg(mhi_cntrl, config);
833 	if (ret)
834 		return ret;
835 
836 	/* Parse MHI event configuration */
837 	ret = parse_ev_cfg(mhi_cntrl, config);
838 	if (ret)
839 		goto error_ev_cfg;
840 
841 	mhi_cntrl->timeout_ms = config->timeout_ms;
842 	if (!mhi_cntrl->timeout_ms)
843 		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
844 
845 	mhi_cntrl->bounce_buf = config->use_bounce_buf;
846 	mhi_cntrl->buffer_len = config->buf_len;
847 	if (!mhi_cntrl->buffer_len)
848 		mhi_cntrl->buffer_len = MHI_MAX_MTU;
849 
850 	/* By default, host is allowed to ring DB in both M0 and M2 states */
851 	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
852 	if (config->m2_no_db)
853 		mhi_cntrl->db_access &= ~MHI_PM_M2;
854 
855 	return 0;
856 
857 error_ev_cfg:
858 	vfree(mhi_cntrl->mhi_chan);
859 
860 	return ret;
861 }
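
Tying the two tables together, a hypothetical top-level configuration passed to mhi_register_controller() could look like this (reusing the illustrative arrays sketched above):

	static const struct mhi_controller_config example_mhi_config = {
		.max_channels = 128,
		.timeout_ms = 2000,	/* 0 would fall back to MHI_TIMEOUT_MS */
		.buf_len = 0,		/* 0 falls back to MHI_MAX_MTU */
		.num_channels = ARRAY_SIZE(example_mhi_channels),
		.ch_cfg = example_mhi_channels,
		.num_events = ARRAY_SIZE(example_mhi_events),
		.event_cfg = example_mhi_events,
		.use_bounce_buf = false,
		.m2_no_db = false,
	};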
862 
863 int mhi_register_controller(struct mhi_controller *mhi_cntrl,
864 			    const struct mhi_controller_config *config)
865 {
866 	struct mhi_event *mhi_event;
867 	struct mhi_chan *mhi_chan;
868 	struct mhi_cmd *mhi_cmd;
869 	struct mhi_device *mhi_dev;
870 	u32 soc_info;
871 	int ret, i;
872 
873 	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
874 	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
875 	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
876 	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
877 	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
878 		return -EINVAL;
879 
880 	ret = parse_config(mhi_cntrl, config);
881 	if (ret)
882 		return -EINVAL;
883 
884 	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
885 				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
886 	if (!mhi_cntrl->mhi_cmd) {
887 		ret = -ENOMEM;
888 		goto err_free_event;
889 	}
890 
891 	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
892 	mutex_init(&mhi_cntrl->pm_mutex);
893 	rwlock_init(&mhi_cntrl->pm_lock);
894 	spin_lock_init(&mhi_cntrl->transition_lock);
895 	spin_lock_init(&mhi_cntrl->wlock);
896 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
897 	init_waitqueue_head(&mhi_cntrl->state_event);
898 
899 	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
900 	if (!mhi_cntrl->hiprio_wq) {
901 		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
902 		ret = -ENOMEM;
903 		goto err_free_cmd;
904 	}
905 
906 	mhi_cmd = mhi_cntrl->mhi_cmd;
907 	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
908 		spin_lock_init(&mhi_cmd->lock);
909 
910 	mhi_event = mhi_cntrl->mhi_event;
911 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
912 		/* Skip for offload events */
913 		if (mhi_event->offload_ev)
914 			continue;
915 
916 		mhi_event->mhi_cntrl = mhi_cntrl;
917 		spin_lock_init(&mhi_event->lock);
918 		if (mhi_event->data_type == MHI_ER_CTRL)
919 			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
920 				     (ulong)mhi_event);
921 		else
922 			tasklet_init(&mhi_event->task, mhi_ev_task,
923 				     (ulong)mhi_event);
924 	}
925 
926 	mhi_chan = mhi_cntrl->mhi_chan;
927 	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
928 		mutex_init(&mhi_chan->mutex);
929 		init_completion(&mhi_chan->completion);
930 		rwlock_init(&mhi_chan->lock);
931 
932 		/* used in setting bei field of TRE */
933 		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
934 		mhi_chan->intmod = mhi_event->intmod;
935 	}
936 
937 	if (mhi_cntrl->bounce_buf) {
938 		mhi_cntrl->map_single = mhi_map_single_use_bb;
939 		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
940 	} else {
941 		mhi_cntrl->map_single = mhi_map_single_no_bb;
942 		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
943 	}
944 
945 	/* Read the MHI device info */
946 	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
947 			   SOC_HW_VERSION_OFFS, &soc_info);
948 	if (ret)
949 		goto err_destroy_wq;
950 
951 	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
952 	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
953 	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
954 	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
955 
956 	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
957 	if (mhi_cntrl->index < 0) {
958 		ret = mhi_cntrl->index;
959 		goto err_destroy_wq;
960 	}
961 
962 	/* Register controller with MHI bus */
963 	mhi_dev = mhi_alloc_device(mhi_cntrl);
964 	if (IS_ERR(mhi_dev)) {
965 		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
966 		ret = PTR_ERR(mhi_dev);
967 		goto err_ida_free;
968 	}
969 
970 	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
971 	mhi_dev->mhi_cntrl = mhi_cntrl;
972 	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
973 	mhi_dev->name = dev_name(&mhi_dev->dev);
974 
975 	/* Init wakeup source */
976 	device_init_wakeup(&mhi_dev->dev, true);
977 
978 	ret = device_add(&mhi_dev->dev);
979 	if (ret)
980 		goto err_release_dev;
981 
982 	mhi_cntrl->mhi_dev = mhi_dev;
983 
984 	mhi_create_debugfs(mhi_cntrl);
985 
986 	return 0;
987 
988 err_release_dev:
989 	put_device(&mhi_dev->dev);
990 err_ida_free:
991 	ida_free(&mhi_controller_ida, mhi_cntrl->index);
992 err_destroy_wq:
993 	destroy_workqueue(mhi_cntrl->hiprio_wq);
994 err_free_cmd:
995 	kfree(mhi_cntrl->mhi_cmd);
996 err_free_event:
997 	kfree(mhi_cntrl->mhi_event);
998 	vfree(mhi_cntrl->mhi_chan);
999 
1000 	return ret;
1001 }
1002 EXPORT_SYMBOL_GPL(mhi_register_controller);
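
A controller driver (typically PCI or platform glue) has to populate the mandatory fields checked at the top of mhi_register_controller() before calling it. A hedged sketch, in which the register callbacks, the example_* names and the resource parameters are all hypothetical:

	#include <linux/io.h>
	#include <linux/mhi.h>

	static int example_read_reg(struct mhi_controller *mhi_cntrl,
				    void __iomem *addr, u32 *out)
	{
		*out = readl(addr);
		return 0;
	}

	static void example_write_reg(struct mhi_controller *mhi_cntrl,
				      void __iomem *addr, u32 val)
	{
		writel(val, addr);
	}

	static void example_status_cb(struct mhi_controller *mhi_cntrl,
				      enum mhi_callback cb)
	{
		/* Handle MHI_CB_SYS_ERROR, MHI_CB_EE_RDDM, ... as needed */
	}

	static void example_runtime_get(struct mhi_controller *mhi_cntrl) { }
	static void example_runtime_put(struct mhi_controller *mhi_cntrl) { }

	static int example_register(struct device *parent, void __iomem *regs,
				    size_t reg_len, int *irqs, u32 nr_irqs)
	{
		struct mhi_controller *mhi_cntrl;
		int ret;

		mhi_cntrl = mhi_alloc_controller();
		if (!mhi_cntrl)
			return -ENOMEM;

		mhi_cntrl->cntrl_dev = parent;	/* e.g. &pci_dev->dev */
		mhi_cntrl->regs = regs;		/* ioremapped MHI MMIO space */
		mhi_cntrl->reg_len = reg_len;
		mhi_cntrl->irq = irqs;		/* Linux IRQ per MSI vector */
		mhi_cntrl->nr_irqs = nr_irqs;
		mhi_cntrl->read_reg = example_read_reg;
		mhi_cntrl->write_reg = example_write_reg;
		mhi_cntrl->status_cb = example_status_cb;
		mhi_cntrl->runtime_get = example_runtime_get;
		mhi_cntrl->runtime_put = example_runtime_put;

		ret = mhi_register_controller(mhi_cntrl, &example_mhi_config);
		if (ret)
			mhi_free_controller(mhi_cntrl);

		return ret;
	}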
1003 
1004 void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
1005 {
1006 	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
1007 	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
1008 	unsigned int i;
1009 
1010 	mhi_destroy_debugfs(mhi_cntrl);
1011 
1012 	destroy_workqueue(mhi_cntrl->hiprio_wq);
1013 	kfree(mhi_cntrl->mhi_cmd);
1014 	kfree(mhi_cntrl->mhi_event);
1015 
1016 	/* Drop the references to MHI devices created for channels */
1017 	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
1018 		if (!mhi_chan->mhi_dev)
1019 			continue;
1020 
1021 		put_device(&mhi_chan->mhi_dev->dev);
1022 	}
1023 	vfree(mhi_cntrl->mhi_chan);
1024 
1025 	device_del(&mhi_dev->dev);
1026 	put_device(&mhi_dev->dev);
1027 
1028 	ida_free(&mhi_controller_ida, mhi_cntrl->index);
1029 }
1030 EXPORT_SYMBOL_GPL(mhi_unregister_controller);
1031 
1032 struct mhi_controller *mhi_alloc_controller(void)
1033 {
1034 	struct mhi_controller *mhi_cntrl;
1035 
1036 	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
1037 
1038 	return mhi_cntrl;
1039 }
1040 EXPORT_SYMBOL_GPL(mhi_alloc_controller);
1041 
1042 void mhi_free_controller(struct mhi_controller *mhi_cntrl)
1043 {
1044 	kfree(mhi_cntrl);
1045 }
1046 EXPORT_SYMBOL_GPL(mhi_free_controller);
1047 
1048 int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
1049 {
1050 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1051 	u32 bhi_off, bhie_off;
1052 	int ret;
1053 
1054 	mutex_lock(&mhi_cntrl->pm_mutex);
1055 
1056 	ret = mhi_init_dev_ctxt(mhi_cntrl);
1057 	if (ret)
1058 		goto error_dev_ctxt;
1059 
1060 	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
1061 	if (ret) {
1062 		dev_err(dev, "Error getting BHI offset\n");
1063 		goto error_reg_offset;
1064 	}
1065 
1066 	if (bhi_off >= mhi_cntrl->reg_len) {
1067 		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
1068 			bhi_off, mhi_cntrl->reg_len);
1069 		ret = -EINVAL;
1070 		goto error_reg_offset;
1071 	}
1072 	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
1073 
1074 	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
1075 		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
1076 				   &bhie_off);
1077 		if (ret) {
1078 			dev_err(dev, "Error getting BHIE offset\n");
1079 			goto error_reg_offset;
1080 		}
1081 
1082 		if (bhie_off >= mhi_cntrl->reg_len) {
1083 			dev_err(dev,
1084 				"BHIe offset: 0x%x is out of range: 0x%zx\n",
1085 				bhie_off, mhi_cntrl->reg_len);
1086 			ret = -EINVAL;
1087 			goto error_reg_offset;
1088 		}
1089 		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
1090 	}
1091 
1092 	if (mhi_cntrl->rddm_size) {
1093 		/*
1094 		 * This controller supports RDDM, so we need to manually clear
1095 		 * BHIE RX registers since POR values are undefined.
1096 		 */
1097 		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
1098 			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
1099 			  4);
1100 		/*
1101 		 * Allocate RDDM table for debugging purpose if specified
1102 		 */
1103 		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
1104 				     mhi_cntrl->rddm_size);
1105 		if (mhi_cntrl->rddm_image)
1106 			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
1107 	}
1108 
1109 	mutex_unlock(&mhi_cntrl->pm_mutex);
1110 
1111 	return 0;
1112 
1113 error_reg_offset:
1114 	mhi_deinit_dev_ctxt(mhi_cntrl);
1115 
1116 error_dev_ctxt:
1117 	mutex_unlock(&mhi_cntrl->pm_mutex);
1118 
1119 	return ret;
1120 }
1121 EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
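
Once the controller is registered, bringing the device up (and tearing it down again) is typically sequenced as below; mhi_sync_power_up(), mhi_power_down() and mhi_unprepare_after_power_down() are the public counterparts exported elsewhere in the MHI core:

	/* Power-up path, e.g. at the end of the glue driver's probe */
	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	ret = mhi_sync_power_up(mhi_cntrl);
	if (ret) {
		mhi_unprepare_after_power_down(mhi_cntrl);
		return ret;
	}

	/* Power-down path, e.g. in remove(): graceful shutdown, then cleanup */
	mhi_power_down(mhi_cntrl, true);
	mhi_unprepare_after_power_down(mhi_cntrl);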
1122 
1123 void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
1124 {
1125 	if (mhi_cntrl->fbc_image) {
1126 		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1127 		mhi_cntrl->fbc_image = NULL;
1128 	}
1129 
1130 	if (mhi_cntrl->rddm_image) {
1131 		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
1132 		mhi_cntrl->rddm_image = NULL;
1133 	}
1134 
1135 	mhi_cntrl->bhi = NULL;
1136 	mhi_cntrl->bhie = NULL;
1137 
1138 	mhi_deinit_dev_ctxt(mhi_cntrl);
1139 }
1140 EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
1141 
1142 static void mhi_release_device(struct device *dev)
1143 {
1144 	struct mhi_device *mhi_dev = to_mhi_device(dev);
1145 
1146 	/*
1147 	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
1148 	 * devices for the channels will only get created if the mhi_dev
1149 	 * associated with it is NULL. This scenario will happen during the
1150 	 * controller suspend and resume.
1151 	 */
1152 	if (mhi_dev->ul_chan)
1153 		mhi_dev->ul_chan->mhi_dev = NULL;
1154 
1155 	if (mhi_dev->dl_chan)
1156 		mhi_dev->dl_chan->mhi_dev = NULL;
1157 
1158 	kfree(mhi_dev);
1159 }
1160 
1161 struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
1162 {
1163 	struct mhi_device *mhi_dev;
1164 	struct device *dev;
1165 
1166 	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
1167 	if (!mhi_dev)
1168 		return ERR_PTR(-ENOMEM);
1169 
1170 	dev = &mhi_dev->dev;
1171 	device_initialize(dev);
1172 	dev->bus = &mhi_bus_type;
1173 	dev->release = mhi_release_device;
1174 
1175 	if (mhi_cntrl->mhi_dev) {
1176 		/* for MHI client devices, parent is the MHI controller device */
1177 		dev->parent = &mhi_cntrl->mhi_dev->dev;
1178 	} else {
1179 		/* for MHI controller device, parent is the bus device (e.g. pci device) */
1180 		dev->parent = mhi_cntrl->cntrl_dev;
1181 	}
1182 
1183 	mhi_dev->mhi_cntrl = mhi_cntrl;
1184 	mhi_dev->dev_wake = 0;
1185 
1186 	return mhi_dev;
1187 }
1188 
1189 static int mhi_driver_probe(struct device *dev)
1190 {
1191 	struct mhi_device *mhi_dev = to_mhi_device(dev);
1192 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1193 	struct device_driver *drv = dev->driver;
1194 	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
1195 	struct mhi_event *mhi_event;
1196 	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
1197 	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
1198 	int ret;
1199 
1200 	/* Bring device out of LPM */
1201 	ret = mhi_device_get_sync(mhi_dev);
1202 	if (ret)
1203 		return ret;
1204 
1205 	ret = -EINVAL;
1206 
1207 	if (ul_chan) {
1208 		/*
1209 		 * If channel supports LPM notifications then status_cb should
1210 		 * be provided
1211 		 */
1212 		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
1213 			goto exit_probe;
1214 
1215 		/* For non-offload channels, xfer_cb should be provided */
1216 		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
1217 			goto exit_probe;
1218 
1219 		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
1220 	}
1221 
1222 	ret = -EINVAL;
1223 	if (dl_chan) {
1224 		/*
1225 		 * If channel supports LPM notifications then status_cb should
1226 		 * be provided
1227 		 */
1228 		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
1229 			goto exit_probe;
1230 
1231 		/* For non-offload channels, xfer_cb should be provided */
1232 		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
1233 			goto exit_probe;
1234 
1235 		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
1236 
1237 		/*
1238 		 * If the channel event ring is managed by client, then
1239 		 * status_cb must be provided so that the framework can
1240 		 * notify pending data
1241 		 */
1242 		if (mhi_event->cl_manage && !mhi_drv->status_cb)
1243 			goto exit_probe;
1244 
1245 		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
1246 	}
1247 
1248 	/* Call the user provided probe function */
1249 	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
1250 	if (ret)
1251 		goto exit_probe;
1252 
1253 	mhi_device_put(mhi_dev);
1254 
1255 	return ret;
1256 
1257 exit_probe:
1258 	mhi_unprepare_from_transfer(mhi_dev);
1259 
1260 	mhi_device_put(mhi_dev);
1261 
1262 	return ret;
1263 }
1264 
1265 static int mhi_driver_remove(struct device *dev)
1266 {
1267 	struct mhi_device *mhi_dev = to_mhi_device(dev);
1268 	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
1269 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1270 	struct mhi_chan *mhi_chan;
1271 	enum mhi_ch_state ch_state[] = {
1272 		MHI_CH_STATE_DISABLED,
1273 		MHI_CH_STATE_DISABLED
1274 	};
1275 	int dir;
1276 
1277 	/* Skip if it is a controller device */
1278 	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1279 		return 0;
1280 
1281 	/* Reset both channels */
1282 	for (dir = 0; dir < 2; dir++) {
1283 		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1284 
1285 		if (!mhi_chan)
1286 			continue;
1287 
1288 		/* Wake all threads waiting for completion */
1289 		write_lock_irq(&mhi_chan->lock);
1290 		mhi_chan->ccs = MHI_EV_CC_INVALID;
1291 		complete_all(&mhi_chan->completion);
1292 		write_unlock_irq(&mhi_chan->lock);
1293 
1294 		/* Set the channel state to disabled */
1295 		mutex_lock(&mhi_chan->mutex);
1296 		write_lock_irq(&mhi_chan->lock);
1297 		ch_state[dir] = mhi_chan->ch_state;
1298 		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
1299 		write_unlock_irq(&mhi_chan->lock);
1300 
1301 		/* Reset the non-offload channel */
1302 		if (!mhi_chan->offload_ch)
1303 			mhi_reset_chan(mhi_cntrl, mhi_chan);
1304 
1305 		mutex_unlock(&mhi_chan->mutex);
1306 	}
1307 
1308 	mhi_drv->remove(mhi_dev);
1309 
1310 	/* De-init channel if it was enabled */
1311 	for (dir = 0; dir < 2; dir++) {
1312 		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1313 
1314 		if (!mhi_chan)
1315 			continue;
1316 
1317 		mutex_lock(&mhi_chan->mutex);
1318 
1319 		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
1320 		     ch_state[dir] == MHI_CH_STATE_STOP) &&
1321 		    !mhi_chan->offload_ch)
1322 			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1323 
1324 		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1325 
1326 		mutex_unlock(&mhi_chan->mutex);
1327 	}
1328 
1329 	while (mhi_dev->dev_wake)
1330 		mhi_device_put(mhi_dev);
1331 
1332 	return 0;
1333 }
1334 
1335 int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
1336 {
1337 	struct device_driver *driver = &mhi_drv->driver;
1338 
1339 	if (!mhi_drv->probe || !mhi_drv->remove)
1340 		return -EINVAL;
1341 
1342 	driver->bus = &mhi_bus_type;
1343 	driver->owner = owner;
1344 	driver->probe = mhi_driver_probe;
1345 	driver->remove = mhi_driver_remove;
1346 
1347 	return driver_register(driver);
1348 }
1349 EXPORT_SYMBOL_GPL(__mhi_driver_register);
1350 
1351 void mhi_driver_unregister(struct mhi_driver *mhi_drv)
1352 {
1353 	driver_unregister(&mhi_drv->driver);
1354 }
1355 EXPORT_SYMBOL_GPL(mhi_driver_unregister);
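
On the client side, a channel driver registers a struct mhi_driver whose id_table names the channels it binds to. A minimal hypothetical client (the channel name, callbacks and queueing policy are illustrative only):

	#include <linux/mhi.h>
	#include <linux/module.h>

	static int example_probe(struct mhi_device *mhi_dev,
				 const struct mhi_device_id *id)
	{
		/* Start both channels backing this device */
		return mhi_prepare_for_transfer(mhi_dev);
	}

	static void example_remove(struct mhi_device *mhi_dev)
	{
		mhi_unprepare_from_transfer(mhi_dev);
	}

	static void example_ul_xfer_cb(struct mhi_device *mhi_dev,
				       struct mhi_result *result)
	{
		/* An uplink buffer queued via mhi_queue_buf() has completed */
	}

	static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
				       struct mhi_result *result)
	{
		/* result->buf_addr / result->bytes_xferd describe the data */
	}

	static const struct mhi_device_id example_id_table[] = {
		{ .chan = "LOOPBACK" },
		{}
	};
	MODULE_DEVICE_TABLE(mhi, example_id_table);

	static struct mhi_driver example_driver = {
		.id_table = example_id_table,
		.probe = example_probe,
		.remove = example_remove,
		.ul_xfer_cb = example_ul_xfer_cb,
		.dl_xfer_cb = example_dl_xfer_cb,
		.driver = {
			.name = "example_mhi_client",
		},
	};
	module_mhi_driver(example_driver);

	MODULE_LICENSE("GPL");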
1356 
1357 static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
1358 {
1359 	struct mhi_device *mhi_dev = to_mhi_device(dev);
1360 
1361 	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
1362 					mhi_dev->name);
1363 }
1364 
1365 static int mhi_match(struct device *dev, struct device_driver *drv)
1366 {
1367 	struct mhi_device *mhi_dev = to_mhi_device(dev);
1368 	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
1369 	const struct mhi_device_id *id;
1370 
1371 	/*
1372 	 * If the device is a controller type then there is no client driver
1373 	 * associated with it
1374 	 */
1375 	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
1376 		return 0;
1377 
1378 	for (id = mhi_drv->id_table; id->chan[0]; id++)
1379 		if (!strcmp(mhi_dev->name, id->chan)) {
1380 			mhi_dev->id = id;
1381 			return 1;
1382 		}
1383 
1384 	return 0;
1385 };
1386 
1387 struct bus_type mhi_bus_type = {
1388 	.name = "mhi",
1389 	.dev_name = "mhi",
1390 	.match = mhi_match,
1391 	.uevent = mhi_uevent,
1392 	.dev_groups = mhi_dev_groups,
1393 };
1394 
1395 static int __init mhi_init(void)
1396 {
1397 	mhi_debugfs_init();
1398 	return bus_register(&mhi_bus_type);
1399 }
1400 
1401 static void __exit mhi_exit(void)
1402 {
1403 	mhi_debugfs_exit();
1404 	bus_unregister(&mhi_bus_type);
1405 }
1406 
1407 postcore_initcall(mhi_init);
1408 module_exit(mhi_exit);
1409 
1410 MODULE_LICENSE("GPL v2");
1411 MODULE_DESCRIPTION("MHI Host Interface");
1412