1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
28 
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
32 
33 #include <linux/qcom_scm.h>
34 
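/* SMEM item id that holds the modem subsystem's crash reason string */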
35 #define MPSS_CRASH_REASON_SMEM		421
36 
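/*
 * The RMB is a small register-based mailbox through which the driver and the
 * boot loaders running on the Hexagon core (PBL and MBA) exchange commands
 * and status.
 */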
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS			0x1
39 
40 #define RMB_MBA_XPU_UNLOCKED		0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
43 #define RMB_MBA_AUTH_COMPLETE		0x4
44 
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG		0x00
47 #define RMB_PBL_STATUS_REG		0x04
48 #define RMB_MBA_COMMAND_REG		0x08
49 #define RMB_MBA_STATUS_REG		0x0C
50 #define RMB_PMI_META_DATA_REG		0x10
51 #define RMB_PMI_CODE_START_REG		0x14
52 #define RMB_PMI_CODE_LENGTH_REG		0x18
53 #define RMB_MBA_MSS_STATUS		0x40
54 #define RMB_MBA_ALT_RESET		0x44
55 
56 #define RMB_CMD_META_DATA_READY		0x1
57 #define RMB_CMD_LOAD_READY		0x2
58 
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG		0x014
61 #define QDSP6SS_GFMUX_CTL_REG		0x020
62 #define QDSP6SS_PWR_CTL_REG		0x030
63 #define QDSP6SS_MEM_PWR_CTL		0x0B0
64 #define QDSP6V6SS_MEM_PWR_CTL		0x034
65 #define QDSP6SS_STRAP_ACC		0x110
66 
67 /* AXI Halt Register Offsets */
68 #define AXI_HALTREQ_REG			0x0
69 #define AXI_HALTACK_REG			0x4
70 #define AXI_IDLE_REG			0x8
71 
72 #define HALT_ACK_TIMEOUT_MS		100
73 
74 /* QDSP6SS_RESET */
75 #define Q6SS_STOP_CORE			BIT(0)
76 #define Q6SS_CORE_ARES			BIT(1)
77 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
78 
79 /* QDSP6SS_GFMUX_CTL */
80 #define Q6SS_CLK_ENABLE			BIT(1)
81 
82 /* QDSP6SS_PWR_CTL */
83 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
84 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
85 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
86 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
87 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
88 #define Q6SS_L2DATA_STBY_N		BIT(18)
89 #define Q6SS_SLP_RET_N			BIT(19)
90 #define Q6SS_CLAMP_IO			BIT(20)
91 #define QDSS_BHS_ON			BIT(21)
92 #define QDSS_LDO_BYP			BIT(22)
93 
94 /* QDSP6v56 parameters */
95 #define QDSP6v56_LDO_BYP		BIT(25)
96 #define QDSP6v56_BHS_ON		BIT(24)
97 #define QDSP6v56_CLAMP_WL		BIT(21)
98 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
99 #define HALT_CHECK_MAX_LOOPS		200
100 #define QDSP6SS_XO_CBCR		0x0038
101 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
102 
103 /* QDSP6v65 parameters */
104 #define QDSP6SS_SLEEP                   0x3C
105 #define QDSP6SS_BOOT_CORE_START         0x400
106 #define QDSP6SS_BOOT_CMD                0x404
107 #define SLEEP_CHECK_MAX_LOOPS           200
108 #define BOOT_FSM_TIMEOUT                10000
109 
110 struct reg_info {
111 	struct regulator *reg;
112 	int uV;
113 	int uA;
114 };
115 
116 struct qcom_mss_reg_res {
117 	const char *supply;
118 	int uV;
119 	int uA;
120 };
121 
122 struct rproc_hexagon_res {
123 	const char *hexagon_mba_image;
124 	struct qcom_mss_reg_res *proxy_supply;
125 	struct qcom_mss_reg_res *active_supply;
126 	char **proxy_clk_names;
127 	char **reset_clk_names;
128 	char **active_clk_names;
129 	char **active_pd_names;
130 	char **proxy_pd_names;
131 	int version;
132 	bool need_mem_protection;
133 	bool has_alt_reset;
134 };
135 
136 struct q6v5 {
137 	struct device *dev;
138 	struct rproc *rproc;
139 
140 	void __iomem *reg_base;
141 	void __iomem *rmb_base;
142 
143 	struct regmap *halt_map;
144 	u32 halt_q6;
145 	u32 halt_modem;
146 	u32 halt_nc;
147 
148 	struct reset_control *mss_restart;
149 	struct reset_control *pdc_reset;
150 
151 	struct qcom_q6v5 q6v5;
152 
153 	struct clk *active_clks[8];
154 	struct clk *reset_clks[4];
155 	struct clk *proxy_clks[4];
156 	struct device *active_pds[1];
157 	struct device *proxy_pds[3];
158 	int active_clk_count;
159 	int reset_clk_count;
160 	int proxy_clk_count;
161 	int active_pd_count;
162 	int proxy_pd_count;
163 
164 	struct reg_info active_regs[1];
165 	struct reg_info proxy_regs[3];
166 	int active_reg_count;
167 	int proxy_reg_count;
168 
169 	bool running;
170 
171 	bool dump_mba_loaded;
172 	unsigned long dump_segment_mask;
173 	unsigned long dump_complete_mask;
174 
175 	phys_addr_t mba_phys;
176 	void *mba_region;
177 	size_t mba_size;
178 
179 	phys_addr_t mpss_phys;
180 	phys_addr_t mpss_reloc;
181 	void *mpss_region;
182 	size_t mpss_size;
183 
184 	struct qcom_rproc_glink glink_subdev;
185 	struct qcom_rproc_subdev smd_subdev;
186 	struct qcom_rproc_ssr ssr_subdev;
187 	struct qcom_sysmon *sysmon;
188 	bool need_mem_protection;
189 	bool has_alt_reset;
190 	int mpss_perm;
191 	int mba_perm;
192 	const char *hexagon_mdt_image;
193 	int version;
194 };
195 
196 enum {
197 	MSS_MSM8916,
198 	MSS_MSM8974,
199 	MSS_MSM8996,
200 	MSS_MSM8998,
201 	MSS_SDM845,
202 };
203 
204 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
205 			       const struct qcom_mss_reg_res *reg_res)
206 {
207 	int rc;
208 	int i;
209 
210 	if (!reg_res)
211 		return 0;
212 
213 	for (i = 0; reg_res[i].supply; i++) {
214 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
215 		if (IS_ERR(regs[i].reg)) {
216 			rc = PTR_ERR(regs[i].reg);
217 			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
220 			return rc;
221 		}
222 
223 		regs[i].uV = reg_res[i].uV;
224 		regs[i].uA = reg_res[i].uA;
225 	}
226 
227 	return i;
228 }
229 
230 static int q6v5_regulator_enable(struct q6v5 *qproc,
231 				 struct reg_info *regs, int count)
232 {
233 	int ret;
234 	int i;
235 
236 	for (i = 0; i < count; i++) {
237 		if (regs[i].uV > 0) {
238 			ret = regulator_set_voltage(regs[i].reg,
239 					regs[i].uV, INT_MAX);
240 			if (ret) {
				dev_err(qproc->dev,
					"Failed to set voltage for regulator %d\n",
					i);
244 				goto err;
245 			}
246 		}
247 
248 		if (regs[i].uA > 0) {
249 			ret = regulator_set_load(regs[i].reg,
250 						 regs[i].uA);
251 			if (ret < 0) {
252 				dev_err(qproc->dev,
253 					"Failed to set regulator mode\n");
254 				goto err;
255 			}
256 		}
257 
258 		ret = regulator_enable(regs[i].reg);
259 		if (ret) {
260 			dev_err(qproc->dev, "Regulator enable failed\n");
261 			goto err;
262 		}
263 	}
264 
265 	return 0;
err:
	/* regs[i] was never enabled; only undo its voltage and load votes */
	if (regs[i].uV > 0)
		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
	if (regs[i].uA > 0)
		regulator_set_load(regs[i].reg, 0);

	for (i--; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
276 
277 	return ret;
278 }
279 
280 static void q6v5_regulator_disable(struct q6v5 *qproc,
281 				   struct reg_info *regs, int count)
282 {
283 	int i;
284 
285 	for (i = 0; i < count; i++) {
286 		if (regs[i].uV > 0)
287 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
288 
289 		if (regs[i].uA > 0)
290 			regulator_set_load(regs[i].reg, 0);
291 
292 		regulator_disable(regs[i].reg);
293 	}
294 }
295 
296 static int q6v5_clk_enable(struct device *dev,
297 			   struct clk **clks, int count)
298 {
299 	int rc;
300 	int i;
301 
302 	for (i = 0; i < count; i++) {
303 		rc = clk_prepare_enable(clks[i]);
304 		if (rc) {
305 			dev_err(dev, "Clock enable failed\n");
306 			goto err;
307 		}
308 	}
309 
310 	return 0;
311 err:
312 	for (i--; i >= 0; i--)
313 		clk_disable_unprepare(clks[i]);
314 
315 	return rc;
316 }
317 
318 static void q6v5_clk_disable(struct device *dev,
319 			     struct clk **clks, int count)
320 {
321 	int i;
322 
323 	for (i = 0; i < count; i++)
324 		clk_disable_unprepare(clks[i]);
325 }
326 
327 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
328 			   size_t pd_count)
329 {
330 	int ret;
331 	int i;
332 
333 	for (i = 0; i < pd_count; i++) {
334 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
335 		ret = pm_runtime_get_sync(pds[i]);
336 		if (ret < 0)
337 			goto unroll_pd_votes;
338 	}
339 
340 	return 0;
341 
342 unroll_pd_votes:
343 	for (i--; i >= 0; i--) {
344 		dev_pm_genpd_set_performance_state(pds[i], 0);
345 		pm_runtime_put(pds[i]);
346 	}
347 
348 	return ret;
}
350 
351 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
352 			     size_t pd_count)
353 {
354 	int i;
355 
356 	for (i = 0; i < pd_count; i++) {
357 		dev_pm_genpd_set_performance_state(pds[i], 0);
358 		pm_runtime_put(pds[i]);
359 	}
360 }
361 
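/*
 * Hand ownership of a physical memory range to the modem (MSA VM) or back to
 * HLOS via an SCM assign call. This is a no-op when memory protection is not
 * required or when the range already has the requested owner.
 */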
362 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
363 				   bool remote_owner, phys_addr_t addr,
364 				   size_t size)
365 {
366 	struct qcom_scm_vmperm next;
367 
368 	if (!qproc->need_mem_protection)
369 		return 0;
370 	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
371 		return 0;
372 	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
373 		return 0;
374 
375 	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
376 	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
377 
378 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
379 				   current_perm, &next, 1);
380 }
381 
382 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
383 {
384 	struct q6v5 *qproc = rproc->priv;
385 
386 	memcpy(qproc->mba_region, fw->data, fw->size);
387 
388 	return 0;
389 }
390 
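/*
 * On SoCs with the alternate (PDC) reset, the MSS restart has to be sequenced
 * around the PDC reset and, on deassert, the MBA_ALT_RESET register;
 * otherwise a plain assert/deassert of the mss_restart line is used.
 */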
391 static int q6v5_reset_assert(struct q6v5 *qproc)
392 {
393 	int ret;
394 
395 	if (qproc->has_alt_reset) {
396 		reset_control_assert(qproc->pdc_reset);
397 		ret = reset_control_reset(qproc->mss_restart);
398 		reset_control_deassert(qproc->pdc_reset);
399 	} else {
400 		ret = reset_control_assert(qproc->mss_restart);
401 	}
402 
403 	return ret;
404 }
405 
406 static int q6v5_reset_deassert(struct q6v5 *qproc)
407 {
408 	int ret;
409 
410 	if (qproc->has_alt_reset) {
411 		reset_control_assert(qproc->pdc_reset);
412 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
413 		ret = reset_control_reset(qproc->mss_restart);
414 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
415 		reset_control_deassert(qproc->pdc_reset);
416 	} else {
417 		ret = reset_control_deassert(qproc->mss_restart);
418 	}
419 
420 	return ret;
421 }
422 
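/* Poll the PBL status register until it reports a result or @ms elapses */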
423 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
424 {
425 	unsigned long timeout;
426 	s32 val;
427 
428 	timeout = jiffies + msecs_to_jiffies(ms);
429 	for (;;) {
430 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
431 		if (val)
432 			break;
433 
434 		if (time_after(jiffies, timeout))
435 			return -ETIMEDOUT;
436 
437 		msleep(1);
438 	}
439 
440 	return val;
441 }
442 
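/*
 * Poll the MBA status register until it reports @status (or any non-zero
 * value when @status is 0), an error (negative value) or @ms elapses.
 */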
443 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
444 {
446 	unsigned long timeout;
447 	s32 val;
448 
449 	timeout = jiffies + msecs_to_jiffies(ms);
450 	for (;;) {
451 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
452 		if (val < 0)
453 			break;
454 
455 		if (!status && val)
456 			break;
457 		else if (status && val == status)
458 			break;
459 
460 		if (time_after(jiffies, timeout))
461 			return -ETIMEDOUT;
462 
463 		msleep(1);
464 	}
465 
466 	return val;
467 }
468 
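/*
 * Bring the Hexagon core out of reset using the version-specific power-up
 * sequence, then wait for the PBL to report its boot status through the RMB.
 */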
469 static int q6v5proc_reset(struct q6v5 *qproc)
470 {
471 	u32 val;
472 	int ret;
473 	int i;
474 
475 	if (qproc->version == MSS_SDM845) {
476 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
477 		val |= 0x1;
478 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
479 
480 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
481 					 val, !(val & BIT(31)), 1,
482 					 SLEEP_CHECK_MAX_LOOPS);
483 		if (ret) {
484 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
485 			return -ETIMEDOUT;
486 		}
487 
488 		/* De-assert QDSP6 stop core */
489 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
490 		/* Trigger boot FSM */
491 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
492 
493 		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
494 				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
495 		if (ret) {
496 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
497 			/* Reset the modem so that boot FSM is in reset state */
498 			q6v5_reset_deassert(qproc);
499 			return ret;
500 		}
501 
502 		goto pbl_wait;
503 	} else if (qproc->version == MSS_MSM8996 ||
504 		   qproc->version == MSS_MSM8998) {
505 		int mem_pwr_ctl;
506 
507 		/* Override the ACC value if required */
508 		writel(QDSP6SS_ACC_OVERRIDE_VAL,
509 		       qproc->reg_base + QDSP6SS_STRAP_ACC);
510 
511 		/* Assert resets, stop core */
512 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
513 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
514 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
515 
		/* BHS requires the XO CBCR clock to be enabled */
517 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
518 		val |= 0x1;
519 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
520 
		/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
522 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
523 					 val, !(val & BIT(31)), 1,
524 					 HALT_CHECK_MAX_LOOPS);
525 		if (ret) {
526 			dev_err(qproc->dev,
527 				"xo cbcr enabling timed out (rc:%d)\n", ret);
528 			return ret;
529 		}
530 		/* Enable power block headswitch and wait for it to stabilize */
531 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
532 		val |= QDSP6v56_BHS_ON;
533 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
534 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
535 		udelay(1);
536 
537 		/* Put LDO in bypass mode */
538 		val |= QDSP6v56_LDO_BYP;
539 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
540 
541 		/* Deassert QDSP6 compiler memory clamp */
542 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
543 		val &= ~QDSP6v56_CLAMP_QMC_MEM;
544 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
545 
546 		/* Deassert memory peripheral sleep and L2 memory standby */
547 		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
548 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
549 
		/* Turn on L1, L2, ETB and JU memories one at a time */
551 		if (qproc->version == MSS_MSM8996) {
552 			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
553 			i = 19;
554 		} else {
555 			/* MSS_MSM8998 */
556 			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
557 			i = 28;
558 		}
559 		val = readl(qproc->reg_base + mem_pwr_ctl);
560 		for (; i >= 0; i--) {
561 			val |= BIT(i);
562 			writel(val, qproc->reg_base + mem_pwr_ctl);
563 			/*
			 * Read back the value to ensure the write has completed,
			 * then wait 1 us for both the memory peripheral and the
			 * data array to turn on.
567 			 */
568 			val |= readl(qproc->reg_base + mem_pwr_ctl);
569 			udelay(1);
570 		}
571 		/* Remove word line clamp */
572 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
573 		val &= ~QDSP6v56_CLAMP_WL;
574 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
575 	} else {
576 		/* Assert resets, stop core */
577 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
578 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
579 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
580 
581 		/* Enable power block headswitch and wait for it to stabilize */
582 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
583 		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
584 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
585 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
586 		udelay(1);
587 		/*
588 		 * Turn on memories. L2 banks should be done individually
589 		 * to minimize inrush current.
590 		 */
591 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
592 		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
593 			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
594 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
595 		val |= Q6SS_L2DATA_SLP_NRET_N_2;
596 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
597 		val |= Q6SS_L2DATA_SLP_NRET_N_1;
598 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
599 		val |= Q6SS_L2DATA_SLP_NRET_N_0;
600 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
601 	}
602 	/* Remove IO clamp */
603 	val &= ~Q6SS_CLAMP_IO;
604 	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
605 
606 	/* Bring core out of reset */
607 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
608 	val &= ~Q6SS_CORE_ARES;
609 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
610 
611 	/* Turn on core clock */
612 	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
613 	val |= Q6SS_CLK_ENABLE;
614 	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
615 
616 	/* Start core execution */
617 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
618 	val &= ~Q6SS_STOP_CORE;
619 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
620 
621 pbl_wait:
622 	/* Wait for PBL status */
623 	ret = q6v5_rmb_pbl_wait(qproc, 1000);
624 	if (ret == -ETIMEDOUT) {
625 		dev_err(qproc->dev, "PBL boot timed out\n");
626 	} else if (ret != RMB_PBL_SUCCESS) {
627 		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
628 		ret = -EINVAL;
629 	} else {
630 		ret = 0;
631 	}
632 
633 	return ret;
634 }
635 
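/*
 * Request a halt of the given AXI port through the halt syscon and wait for
 * the acknowledgment; the port remains halted until the next MSS reset.
 */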
636 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
637 				   struct regmap *halt_map,
638 				   u32 offset)
639 {
640 	unsigned long timeout;
641 	unsigned int val;
642 	int ret;
643 
644 	/* Check if we're already idle */
645 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
646 	if (!ret && val)
647 		return;
648 
649 	/* Assert halt request */
650 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
651 
652 	/* Wait for halt */
653 	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
654 	for (;;) {
655 		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
656 		if (ret || val || time_after(jiffies, timeout))
657 			break;
658 
659 		msleep(1);
660 	}
661 
662 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
663 	if (ret || !val)
		dev_err(qproc->dev, "port failed to halt\n");
665 
666 	/* Clear halt request (port will remain halted until reset) */
667 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
668 }
669 
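/*
 * Stage the MDT metadata (ELF headers and hash segment) in a physically
 * contiguous buffer, grant the modem access to it and ask the MBA to
 * authenticate it, then reclaim and free the buffer.
 */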
670 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
671 {
672 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
673 	dma_addr_t phys;
674 	void *metadata;
675 	int mdata_perm;
676 	int xferop_ret;
677 	size_t size;
678 	void *ptr;
679 	int ret;
680 
681 	metadata = qcom_mdt_read_metadata(fw, &size);
682 	if (IS_ERR(metadata))
683 		return PTR_ERR(metadata);
684 
685 	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
686 	if (!ptr) {
687 		kfree(metadata);
688 		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
689 		return -ENOMEM;
690 	}
691 
692 	memcpy(ptr, metadata, size);
693 
	/* Assign the metadata region to the modem via the hypervisor */
695 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
696 	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
697 	if (ret) {
698 		dev_err(qproc->dev,
699 			"assigning Q6 access to metadata failed: %d\n", ret);
700 		ret = -EAGAIN;
701 		goto free_dma_attrs;
702 	}
703 
704 	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
705 	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
706 
707 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
708 	if (ret == -ETIMEDOUT)
709 		dev_err(qproc->dev, "MPSS header authentication timed out\n");
710 	else if (ret < 0)
711 		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
712 
713 	/* Metadata authentication done, remove modem access */
714 	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
715 	if (xferop_ret)
716 		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed, system may become unstable\n");
718 
719 free_dma_attrs:
720 	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
721 	kfree(metadata);
722 
723 	return ret < 0 ? ret : 0;
724 }
725 
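/* Only loadable, non-hash segments with a non-zero memory size are loaded */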
726 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
727 {
728 	if (phdr->p_type != PT_LOAD)
729 		return false;
730 
731 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
732 		return false;
733 
734 	if (!phdr->p_memsz)
735 		return false;
736 
737 	return true;
738 }
739 
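/*
 * Power up the subsystem (power domains, regulators, clocks and resets),
 * hand the MBA region over to the modem, release the Hexagon core and wait
 * for the MBA to report that it is ready to authenticate firmware.
 */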
740 static int q6v5_mba_load(struct q6v5 *qproc)
741 {
742 	int ret;
743 	int xfermemop_ret;
744 
745 	qcom_q6v5_prepare(&qproc->q6v5);
746 
747 	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
748 	if (ret < 0) {
749 		dev_err(qproc->dev, "failed to enable active power domains\n");
750 		goto disable_irqs;
751 	}
752 
753 	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
754 	if (ret < 0) {
755 		dev_err(qproc->dev, "failed to enable proxy power domains\n");
756 		goto disable_active_pds;
757 	}
758 
759 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
760 				    qproc->proxy_reg_count);
761 	if (ret) {
762 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
763 		goto disable_proxy_pds;
764 	}
765 
766 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
767 			      qproc->proxy_clk_count);
768 	if (ret) {
769 		dev_err(qproc->dev, "failed to enable proxy clocks\n");
770 		goto disable_proxy_reg;
771 	}
772 
773 	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
774 				    qproc->active_reg_count);
775 	if (ret) {
776 		dev_err(qproc->dev, "failed to enable supplies\n");
777 		goto disable_proxy_clk;
778 	}
779 
780 	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
781 			      qproc->reset_clk_count);
782 	if (ret) {
783 		dev_err(qproc->dev, "failed to enable reset clocks\n");
784 		goto disable_vdd;
785 	}
786 
787 	ret = q6v5_reset_deassert(qproc);
788 	if (ret) {
789 		dev_err(qproc->dev, "failed to deassert mss restart\n");
790 		goto disable_reset_clks;
791 	}
792 
793 	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
794 			      qproc->active_clk_count);
795 	if (ret) {
796 		dev_err(qproc->dev, "failed to enable clocks\n");
797 		goto assert_reset;
798 	}
799 
800 	/* Assign MBA image access in DDR to q6 */
801 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
802 				      qproc->mba_phys, qproc->mba_size);
803 	if (ret) {
804 		dev_err(qproc->dev,
805 			"assigning Q6 access to mba memory failed: %d\n", ret);
806 		goto disable_active_clks;
807 	}
808 
809 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
810 
811 	ret = q6v5proc_reset(qproc);
812 	if (ret)
813 		goto reclaim_mba;
814 
815 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
816 	if (ret == -ETIMEDOUT) {
817 		dev_err(qproc->dev, "MBA boot timed out\n");
818 		goto halt_axi_ports;
819 	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
820 		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
821 		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
822 		ret = -EINVAL;
823 		goto halt_axi_ports;
824 	}
825 
826 	qproc->dump_mba_loaded = true;
827 	return 0;
828 
829 halt_axi_ports:
830 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
831 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
832 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
833 
834 reclaim_mba:
835 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
836 						qproc->mba_phys,
837 						qproc->mba_size);
838 	if (xfermemop_ret) {
839 		dev_err(qproc->dev,
840 			"Failed to reclaim mba buffer, system may become unstable\n");
841 	}
842 
843 disable_active_clks:
844 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
845 			 qproc->active_clk_count);
846 assert_reset:
847 	q6v5_reset_assert(qproc);
848 disable_reset_clks:
849 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
850 			 qproc->reset_clk_count);
851 disable_vdd:
852 	q6v5_regulator_disable(qproc, qproc->active_regs,
853 			       qproc->active_reg_count);
854 disable_proxy_clk:
855 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
856 			 qproc->proxy_clk_count);
857 disable_proxy_reg:
858 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
859 			       qproc->proxy_reg_count);
860 disable_proxy_pds:
861 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
862 disable_active_pds:
863 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
864 disable_irqs:
865 	qcom_q6v5_unprepare(&qproc->q6v5);
866 
867 	return ret;
868 }
869 
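/*
 * Undo q6v5_mba_load(): halt the AXI ports, reclaim the MBA and MPSS regions
 * and drop the active resources. Proxy resources are dropped here only if
 * the MSA handover never happened, otherwise qcom_msa_handover() has already
 * released them.
 */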
870 static void q6v5_mba_reclaim(struct q6v5 *qproc)
871 {
872 	int ret;
873 	u32 val;
874 
875 	qproc->dump_mba_loaded = false;
876 
877 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
878 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
879 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
880 	if (qproc->version == MSS_MSM8996) {
		/*
		 * Assert the I/O, word line and QMC memory clamps to avoid
		 * high MX current during LPASS/MSS restart.
		 */
884 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
885 		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
886 			QDSP6v56_CLAMP_QMC_MEM;
887 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
888 	}
889 
890 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
891 				      false, qproc->mpss_phys,
892 				      qproc->mpss_size);
893 	WARN_ON(ret);
894 
895 	q6v5_reset_assert(qproc);
896 
897 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
898 			 qproc->reset_clk_count);
899 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
900 			 qproc->active_clk_count);
901 	q6v5_regulator_disable(qproc, qproc->active_regs,
902 			       qproc->active_reg_count);
903 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
904 
	/*
	 * In case of a failure or coredump scenario where reclaiming the MBA
	 * memory could not happen earlier, reclaim it here.
	 */
908 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
909 				      qproc->mba_phys,
910 				      qproc->mba_size);
911 	WARN_ON(ret);
912 
913 	ret = qcom_q6v5_unprepare(&qproc->q6v5);
914 	if (ret) {
915 		q6v5_pds_disable(qproc, qproc->proxy_pds,
916 				 qproc->proxy_pd_count);
917 		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
918 				 qproc->proxy_clk_count);
919 		q6v5_regulator_disable(qproc, qproc->proxy_regs,
920 				       qproc->proxy_reg_count);
921 	}
922 }
923 
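/*
 * Load the modem firmware: authenticate the MDT metadata, copy each valid
 * ELF segment into the MPSS region (fetching split .bNN files as needed),
 * hand the region over to the modem and wait for the MBA to authenticate it.
 */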
924 static int q6v5_mpss_load(struct q6v5 *qproc)
925 {
926 	const struct elf32_phdr *phdrs;
927 	const struct elf32_phdr *phdr;
928 	const struct firmware *seg_fw;
929 	const struct firmware *fw;
930 	struct elf32_hdr *ehdr;
931 	phys_addr_t mpss_reloc;
932 	phys_addr_t boot_addr;
933 	phys_addr_t min_addr = PHYS_ADDR_MAX;
934 	phys_addr_t max_addr = 0;
935 	bool relocate = false;
936 	char *fw_name;
937 	size_t fw_name_len;
938 	ssize_t offset;
939 	size_t size = 0;
940 	void *ptr;
941 	int ret;
942 	int i;
943 
944 	fw_name_len = strlen(qproc->hexagon_mdt_image);
945 	if (fw_name_len <= 4)
946 		return -EINVAL;
947 
948 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
949 	if (!fw_name)
950 		return -ENOMEM;
951 
952 	ret = request_firmware(&fw, fw_name, qproc->dev);
953 	if (ret < 0) {
954 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
955 		goto out;
956 	}
957 
958 	/* Initialize the RMB validator */
959 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
960 
961 	ret = q6v5_mpss_init_image(qproc, fw);
962 	if (ret)
963 		goto release_firmware;
964 
965 	ehdr = (struct elf32_hdr *)fw->data;
966 	phdrs = (struct elf32_phdr *)(ehdr + 1);
967 
968 	for (i = 0; i < ehdr->e_phnum; i++) {
969 		phdr = &phdrs[i];
970 
971 		if (!q6v5_phdr_valid(phdr))
972 			continue;
973 
974 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
975 			relocate = true;
976 
977 		if (phdr->p_paddr < min_addr)
978 			min_addr = phdr->p_paddr;
979 
980 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
981 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
982 	}
983 
984 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
985 	qproc->mpss_reloc = mpss_reloc;
986 	/* Load firmware segments */
987 	for (i = 0; i < ehdr->e_phnum; i++) {
988 		phdr = &phdrs[i];
989 
990 		if (!q6v5_phdr_valid(phdr))
991 			continue;
992 
993 		offset = phdr->p_paddr - mpss_reloc;
994 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
995 			dev_err(qproc->dev, "segment outside memory range\n");
996 			ret = -EINVAL;
997 			goto release_firmware;
998 		}
999 
1000 		ptr = qproc->mpss_region + offset;
1001 
1002 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1003 			/* Firmware is large enough to be non-split */
1004 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1005 				dev_err(qproc->dev,
1006 					"failed to load segment %d from truncated file %s\n",
1007 					i, fw_name);
1008 				ret = -EINVAL;
1009 				goto release_firmware;
1010 			}
1011 
1012 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1013 		} else if (phdr->p_filesz) {
1014 			/* Replace "xxx.xxx" with "xxx.bxx" */
1015 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1016 			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1017 			if (ret) {
1018 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1019 				goto release_firmware;
1020 			}
1021 
1022 			memcpy(ptr, seg_fw->data, seg_fw->size);
1023 
1024 			release_firmware(seg_fw);
1025 		}
1026 
1027 		if (phdr->p_memsz > phdr->p_filesz) {
1028 			memset(ptr + phdr->p_filesz, 0,
1029 			       phdr->p_memsz - phdr->p_filesz);
1030 		}
1031 		size += phdr->p_memsz;
1032 	}
1033 
1034 	/* Transfer ownership of modem ddr region to q6 */
1035 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
1036 				      qproc->mpss_phys, qproc->mpss_size);
1037 	if (ret) {
1038 		dev_err(qproc->dev,
1039 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1040 		ret = -EAGAIN;
1041 		goto release_firmware;
1042 	}
1043 
1044 	boot_addr = relocate ? qproc->mpss_phys : min_addr;
1045 	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1046 	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1047 	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1048 
1049 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1050 	if (ret == -ETIMEDOUT)
1051 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1052 	else if (ret < 0)
1053 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1054 
1055 release_firmware:
1056 	release_firmware(fw);
1057 out:
1058 	kfree(fw_name);
1059 
1060 	return ret < 0 ? ret : 0;
1061 }
1062 
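/*
 * Coredump helper: the MBA has to be (re)loaded to unlock the MPSS region
 * before segments can be copied, and it is reclaimed again once the last
 * registered segment has been dumped.
 */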
1063 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1064 				   struct rproc_dump_segment *segment,
1065 				   void *dest)
1066 {
1067 	int ret = 0;
1068 	struct q6v5 *qproc = rproc->priv;
1069 	unsigned long mask = BIT((unsigned long)segment->priv);
1070 	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
1071 
1072 	/* Unlock mba before copying segments */
1073 	if (!qproc->dump_mba_loaded)
1074 		ret = q6v5_mba_load(qproc);
1075 
1076 	if (!ptr || ret)
1077 		memset(dest, 0xff, segment->size);
1078 	else
1079 		memcpy(dest, ptr, segment->size);
1080 
1081 	qproc->dump_segment_mask |= mask;
1082 
1083 	/* Reclaim mba after copying segments */
1084 	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1085 		if (qproc->dump_mba_loaded)
1086 			q6v5_mba_reclaim(qproc);
1087 	}
1088 }
1089 
1090 static int q6v5_start(struct rproc *rproc)
1091 {
1092 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1093 	int xfermemop_ret;
1094 	int ret;
1095 
1096 	ret = q6v5_mba_load(qproc);
1097 	if (ret)
1098 		return ret;
1099 
1100 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
1101 
1102 	ret = q6v5_mpss_load(qproc);
1103 	if (ret)
1104 		goto reclaim_mpss;
1105 
1106 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1107 	if (ret == -ETIMEDOUT) {
1108 		dev_err(qproc->dev, "start timed out\n");
1109 		goto reclaim_mpss;
1110 	}
1111 
1112 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
1113 						qproc->mba_phys,
1114 						qproc->mba_size);
1115 	if (xfermemop_ret)
1116 		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
1118 
1119 	/* Reset Dump Segment Mask */
1120 	qproc->dump_segment_mask = 0;
1121 	qproc->running = true;
1122 
1123 	return 0;
1124 
1125 reclaim_mpss:
1126 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1127 						false, qproc->mpss_phys,
1128 						qproc->mpss_size);
1129 	WARN_ON(xfermemop_ret);
1130 	q6v5_mba_reclaim(qproc);
1131 
1132 	return ret;
1133 }
1134 
1135 static int q6v5_stop(struct rproc *rproc)
1136 {
1137 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1138 	int ret;
1139 
1140 	qproc->running = false;
1141 
1142 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1143 	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out waiting for stop acknowledgment\n");
1145 
1146 	q6v5_mba_reclaim(qproc);
1147 
1148 	return 0;
1149 }
1150 
1151 static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1152 {
1153 	struct q6v5 *qproc = rproc->priv;
1154 	int offset;
1155 
1156 	offset = da - qproc->mpss_reloc;
1157 	if (offset < 0 || offset + len > qproc->mpss_size)
1158 		return NULL;
1159 
1160 	return qproc->mpss_region + offset;
1161 }
1162 
1163 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1164 					    const struct firmware *mba_fw)
1165 {
1166 	const struct firmware *fw;
1167 	const struct elf32_phdr *phdrs;
1168 	const struct elf32_phdr *phdr;
1169 	const struct elf32_hdr *ehdr;
1170 	struct q6v5 *qproc = rproc->priv;
1171 	unsigned long i;
1172 	int ret;
1173 
1174 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1175 	if (ret < 0) {
1176 		dev_err(qproc->dev, "unable to load %s\n",
1177 			qproc->hexagon_mdt_image);
1178 		return ret;
1179 	}
1180 
1181 	ehdr = (struct elf32_hdr *)fw->data;
1182 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1183 	qproc->dump_complete_mask = 0;
1184 
1185 	for (i = 0; i < ehdr->e_phnum; i++) {
1186 		phdr = &phdrs[i];
1187 
1188 		if (!q6v5_phdr_valid(phdr))
1189 			continue;
1190 
1191 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1192 							phdr->p_memsz,
1193 							qcom_q6v5_dump_segment,
1194 							(void *)i);
1195 		if (ret)
1196 			break;
1197 
1198 		qproc->dump_complete_mask |= BIT(i);
1199 	}
1200 
1201 	release_firmware(fw);
1202 	return ret;
1203 }
1204 
1205 static const struct rproc_ops q6v5_ops = {
1206 	.start = q6v5_start,
1207 	.stop = q6v5_stop,
1208 	.da_to_va = q6v5_da_to_va,
1209 	.parse_fw = qcom_q6v5_register_dump_segments,
1210 	.load = q6v5_load,
1211 };
1212 
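/*
 * Called once the modem has signalled the MSA handover; the proxy votes
 * placed on its behalf during boot can now be dropped.
 */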
1213 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1214 {
1215 	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1216 
1217 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1218 			 qproc->proxy_clk_count);
1219 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
1220 			       qproc->proxy_reg_count);
1221 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1222 }
1223 
1224 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1225 {
1226 	struct of_phandle_args args;
1227 	struct resource *res;
1228 	int ret;
1229 
1230 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1231 	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1232 	if (IS_ERR(qproc->reg_base))
1233 		return PTR_ERR(qproc->reg_base);
1234 
1235 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1236 	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1237 	if (IS_ERR(qproc->rmb_base))
1238 		return PTR_ERR(qproc->rmb_base);
1239 
1240 	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1241 					       "qcom,halt-regs", 3, 0, &args);
1242 	if (ret < 0) {
1243 		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1244 		return -EINVAL;
1245 	}
1246 
1247 	qproc->halt_map = syscon_node_to_regmap(args.np);
1248 	of_node_put(args.np);
1249 	if (IS_ERR(qproc->halt_map))
1250 		return PTR_ERR(qproc->halt_map);
1251 
1252 	qproc->halt_q6 = args.args[0];
1253 	qproc->halt_modem = args.args[1];
1254 	qproc->halt_nc = args.args[2];
1255 
1256 	return 0;
1257 }
1258 
1259 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1260 		char **clk_names)
1261 {
1262 	int i;
1263 
1264 	if (!clk_names)
1265 		return 0;
1266 
1267 	for (i = 0; clk_names[i]; i++) {
1268 		clks[i] = devm_clk_get(dev, clk_names[i]);
1269 		if (IS_ERR(clks[i])) {
1270 			int rc = PTR_ERR(clks[i]);
1271 
1272 			if (rc != -EPROBE_DEFER)
1273 				dev_err(dev, "Failed to get %s clock\n",
1274 					clk_names[i]);
1275 			return rc;
1276 		}
1277 	}
1278 
1279 	return i;
1280 }
1281 
1282 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1283 			   char **pd_names)
1284 {
1285 	size_t num_pds = 0;
1286 	int ret;
1287 	int i;
1288 
1289 	if (!pd_names)
1290 		return 0;
1291 
1292 	while (pd_names[num_pds])
1293 		num_pds++;
1294 
1295 	for (i = 0; i < num_pds; i++) {
1296 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1297 		if (IS_ERR_OR_NULL(devs[i])) {
1298 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1299 			goto unroll_attach;
1300 		}
1301 	}
1302 
1303 	return num_pds;
1304 
1305 unroll_attach:
1306 	for (i--; i >= 0; i--)
1307 		dev_pm_domain_detach(devs[i], false);
1308 
1309 	return ret;
}
1311 
1312 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1313 			    size_t pd_count)
1314 {
1315 	int i;
1316 
1317 	for (i = 0; i < pd_count; i++)
1318 		dev_pm_domain_detach(pds[i], false);
1319 }
1320 
1321 static int q6v5_init_reset(struct q6v5 *qproc)
1322 {
1323 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1324 							      "mss_restart");
1325 	if (IS_ERR(qproc->mss_restart)) {
1326 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1327 		return PTR_ERR(qproc->mss_restart);
1328 	}
1329 
1330 	if (qproc->has_alt_reset) {
1331 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1332 								    "pdc_reset");
1333 		if (IS_ERR(qproc->pdc_reset)) {
1334 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1335 			return PTR_ERR(qproc->pdc_reset);
1336 		}
1337 	}
1338 
1339 	return 0;
1340 }
1341 
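/*
 * Resolve the "mba" and "mpss" reserved-memory regions from the device tree
 * and map them for the driver's own use.
 */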
1342 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1343 {
1344 	struct device_node *child;
1345 	struct device_node *node;
1346 	struct resource r;
1347 	int ret;
1348 
	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	of_node_put(child);
	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}
1357 
1358 	qproc->mba_phys = r.start;
1359 	qproc->mba_size = resource_size(&r);
1360 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1361 	if (!qproc->mba_region) {
1362 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1363 			&r.start, qproc->mba_size);
1364 		return -EBUSY;
1365 	}
1366 
	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	of_node_put(child);
	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}
1375 
1376 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1377 	qproc->mpss_size = resource_size(&r);
1378 	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1379 	if (!qproc->mpss_region) {
1380 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1381 			&r.start, qproc->mpss_size);
1382 		return -EBUSY;
1383 	}
1384 
1385 	return 0;
1386 }
1387 
1388 static int q6v5_probe(struct platform_device *pdev)
1389 {
1390 	const struct rproc_hexagon_res *desc;
1391 	struct q6v5 *qproc;
1392 	struct rproc *rproc;
1393 	const char *mba_image;
1394 	int ret;
1395 
1396 	desc = of_device_get_match_data(&pdev->dev);
1397 	if (!desc)
1398 		return -EINVAL;
1399 
1400 	if (desc->need_mem_protection && !qcom_scm_is_available())
1401 		return -EPROBE_DEFER;
1402 
1403 	mba_image = desc->hexagon_mba_image;
1404 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1405 					    0, &mba_image);
1406 	if (ret < 0 && ret != -EINVAL)
1407 		return ret;
1408 
1409 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1410 			    mba_image, sizeof(*qproc));
1411 	if (!rproc) {
1412 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1413 		return -ENOMEM;
1414 	}
1415 
1416 	rproc->auto_boot = false;
1417 
1418 	qproc = (struct q6v5 *)rproc->priv;
1419 	qproc->dev = &pdev->dev;
1420 	qproc->rproc = rproc;
1421 	qproc->hexagon_mdt_image = "modem.mdt";
1422 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1423 					    1, &qproc->hexagon_mdt_image);
1424 	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;
1426 
1427 	platform_set_drvdata(pdev, qproc);
1428 
1429 	ret = q6v5_init_mem(qproc, pdev);
1430 	if (ret)
1431 		goto free_rproc;
1432 
1433 	ret = q6v5_alloc_memory_region(qproc);
1434 	if (ret)
1435 		goto free_rproc;
1436 
1437 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1438 			       desc->proxy_clk_names);
1439 	if (ret < 0) {
1440 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1441 		goto free_rproc;
1442 	}
1443 	qproc->proxy_clk_count = ret;
1444 
1445 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1446 			       desc->reset_clk_names);
1447 	if (ret < 0) {
1448 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1449 		goto free_rproc;
1450 	}
1451 	qproc->reset_clk_count = ret;
1452 
1453 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1454 			       desc->active_clk_names);
1455 	if (ret < 0) {
1456 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
1457 		goto free_rproc;
1458 	}
1459 	qproc->active_clk_count = ret;
1460 
1461 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1462 				  desc->proxy_supply);
1463 	if (ret < 0) {
1464 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1465 		goto free_rproc;
1466 	}
1467 	qproc->proxy_reg_count = ret;
1468 
1469 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
1470 				  desc->active_supply);
1471 	if (ret < 0) {
1472 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
1473 		goto free_rproc;
1474 	}
1475 	qproc->active_reg_count = ret;
1476 
1477 	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1478 			      desc->active_pd_names);
1479 	if (ret < 0) {
1480 		dev_err(&pdev->dev, "Failed to attach active power domains\n");
1481 		goto free_rproc;
1482 	}
1483 	qproc->active_pd_count = ret;
1484 
1485 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1486 			      desc->proxy_pd_names);
1487 	if (ret < 0) {
1488 		dev_err(&pdev->dev, "Failed to init power domains\n");
1489 		goto detach_active_pds;
1490 	}
1491 	qproc->proxy_pd_count = ret;
1492 
1493 	qproc->has_alt_reset = desc->has_alt_reset;
1494 	ret = q6v5_init_reset(qproc);
1495 	if (ret)
1496 		goto detach_proxy_pds;
1497 
1498 	qproc->version = desc->version;
1499 	qproc->need_mem_protection = desc->need_mem_protection;
1500 
1501 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1502 			     qcom_msa_handover);
1503 	if (ret)
1504 		goto detach_proxy_pds;
1505 
1506 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1507 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1508 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
1509 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1510 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1511 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1512 	if (IS_ERR(qproc->sysmon)) {
1513 		ret = PTR_ERR(qproc->sysmon);
1514 		goto detach_proxy_pds;
1515 	}
1516 
1517 	ret = rproc_add(rproc);
1518 	if (ret)
1519 		goto detach_proxy_pds;
1520 
1521 	return 0;
1522 
1523 detach_proxy_pds:
1524 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1525 detach_active_pds:
1526 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1527 free_rproc:
1528 	rproc_free(rproc);
1529 
1530 	return ret;
1531 }
1532 
1533 static int q6v5_remove(struct platform_device *pdev)
1534 {
1535 	struct q6v5 *qproc = platform_get_drvdata(pdev);
1536 
1537 	rproc_del(qproc->rproc);
1538 
1539 	qcom_remove_sysmon_subdev(qproc->sysmon);
1540 	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
1541 	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
1542 	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
1543 
1544 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1545 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1546 
1547 	rproc_free(qproc->rproc);
1548 
1549 	return 0;
1550 }
1551 
1552 static const struct rproc_hexagon_res sdm845_mss = {
1553 	.hexagon_mba_image = "mba.mbn",
1554 	.proxy_clk_names = (char*[]){
1555 			"xo",
1556 			"prng",
1557 			NULL
1558 	},
1559 	.reset_clk_names = (char*[]){
1560 			"iface",
1561 			"snoc_axi",
1562 			NULL
1563 	},
1564 	.active_clk_names = (char*[]){
1565 			"bus",
1566 			"mem",
1567 			"gpll0_mss",
1568 			"mnoc_axi",
1569 			NULL
1570 	},
1571 	.active_pd_names = (char*[]){
1572 			"load_state",
1573 			NULL
1574 	},
1575 	.proxy_pd_names = (char*[]){
1576 			"cx",
1577 			"mx",
1578 			"mss",
1579 			NULL
1580 	},
1581 	.need_mem_protection = true,
1582 	.has_alt_reset = true,
1583 	.version = MSS_SDM845,
1584 };
1585 
1586 static const struct rproc_hexagon_res msm8998_mss = {
1587 	.hexagon_mba_image = "mba.mbn",
1588 	.proxy_clk_names = (char*[]){
1589 			"xo",
1590 			"qdss",
1591 			"mem",
1592 			NULL
1593 	},
1594 	.active_clk_names = (char*[]){
1595 			"iface",
1596 			"bus",
1597 			"mem",
1598 			"gpll0_mss",
1599 			"mnoc_axi",
1600 			"snoc_axi",
1601 			NULL
1602 	},
1603 	.proxy_pd_names = (char*[]){
1604 			"cx",
1605 			"mx",
1606 			NULL
1607 	},
1608 	.need_mem_protection = true,
1609 	.has_alt_reset = false,
1610 	.version = MSS_MSM8998,
1611 };
1612 
1613 static const struct rproc_hexagon_res msm8996_mss = {
1614 	.hexagon_mba_image = "mba.mbn",
1615 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1616 		{
1617 			.supply = "pll",
1618 			.uA = 100000,
1619 		},
1620 		{}
1621 	},
1622 	.proxy_clk_names = (char*[]){
1623 			"xo",
1624 			"pnoc",
1625 			"qdss",
1626 			NULL
1627 	},
1628 	.active_clk_names = (char*[]){
1629 			"iface",
1630 			"bus",
1631 			"mem",
1632 			"gpll0_mss",
1633 			"snoc_axi",
1634 			"mnoc_axi",
1635 			NULL
1636 	},
1637 	.need_mem_protection = true,
1638 	.has_alt_reset = false,
1639 	.version = MSS_MSM8996,
1640 };
1641 
1642 static const struct rproc_hexagon_res msm8916_mss = {
1643 	.hexagon_mba_image = "mba.mbn",
1644 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1645 		{
1646 			.supply = "mx",
1647 			.uV = 1050000,
1648 		},
1649 		{
1650 			.supply = "cx",
1651 			.uA = 100000,
1652 		},
1653 		{
1654 			.supply = "pll",
1655 			.uA = 100000,
1656 		},
1657 		{}
1658 	},
1659 	.proxy_clk_names = (char*[]){
1660 		"xo",
1661 		NULL
1662 	},
1663 	.active_clk_names = (char*[]){
1664 		"iface",
1665 		"bus",
1666 		"mem",
1667 		NULL
1668 	},
1669 	.need_mem_protection = false,
1670 	.has_alt_reset = false,
1671 	.version = MSS_MSM8916,
1672 };
1673 
1674 static const struct rproc_hexagon_res msm8974_mss = {
1675 	.hexagon_mba_image = "mba.b00",
1676 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1677 		{
1678 			.supply = "mx",
1679 			.uV = 1050000,
1680 		},
1681 		{
1682 			.supply = "cx",
1683 			.uA = 100000,
1684 		},
1685 		{
1686 			.supply = "pll",
1687 			.uA = 100000,
1688 		},
1689 		{}
1690 	},
1691 	.active_supply = (struct qcom_mss_reg_res[]) {
1692 		{
1693 			.supply = "mss",
1694 			.uV = 1050000,
1695 			.uA = 100000,
1696 		},
1697 		{}
1698 	},
1699 	.proxy_clk_names = (char*[]){
1700 		"xo",
1701 		NULL
1702 	},
1703 	.active_clk_names = (char*[]){
1704 		"iface",
1705 		"bus",
1706 		"mem",
1707 		NULL
1708 	},
1709 	.need_mem_protection = false,
1710 	.has_alt_reset = false,
1711 	.version = MSS_MSM8974,
1712 };
1713 
1714 static const struct of_device_id q6v5_of_match[] = {
1715 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1716 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1717 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1718 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1719 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
1720 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
1721 	{ },
1722 };
1723 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1724 
1725 static struct platform_driver q6v5_driver = {
1726 	.probe = q6v5_probe,
1727 	.remove = q6v5_remove,
1728 	.driver = {
1729 		.name = "qcom-q6v5-mss",
1730 		.of_match_table = q6v5_of_match,
1731 	},
1732 };
1733 module_platform_driver(q6v5_driver);
1734 
1735 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1736 MODULE_LICENSE("GPL v2");
1737