1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
26 #include <linux/reset.h>
27 #include <linux/soc/qcom/mdt_loader.h>
28 #include <linux/iopoll.h>
29 #include <linux/slab.h>
30 
31 #include "remoteproc_internal.h"
32 #include "qcom_common.h"
33 #include "qcom_q6v5.h"
34 
35 #include <linux/qcom_scm.h>
36 
37 #define MPSS_CRASH_REASON_SMEM		421
38 
39 /* RMB Status Register Values */
40 #define RMB_PBL_SUCCESS			0x1
41 
42 #define RMB_MBA_XPU_UNLOCKED		0x1
43 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
44 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
45 #define RMB_MBA_AUTH_COMPLETE		0x4
46 
47 /* PBL/MBA interface registers */
48 #define RMB_MBA_IMAGE_REG		0x00
49 #define RMB_PBL_STATUS_REG		0x04
50 #define RMB_MBA_COMMAND_REG		0x08
51 #define RMB_MBA_STATUS_REG		0x0C
52 #define RMB_PMI_META_DATA_REG		0x10
53 #define RMB_PMI_CODE_START_REG		0x14
54 #define RMB_PMI_CODE_LENGTH_REG		0x18
55 #define RMB_MBA_MSS_STATUS		0x40
56 #define RMB_MBA_ALT_RESET		0x44
57 
58 #define RMB_CMD_META_DATA_READY		0x1
59 #define RMB_CMD_LOAD_READY		0x2
60 
61 /* QDSP6SS Register Offsets */
62 #define QDSP6SS_RESET_REG		0x014
63 #define QDSP6SS_GFMUX_CTL_REG		0x020
64 #define QDSP6SS_PWR_CTL_REG		0x030
65 #define QDSP6SS_MEM_PWR_CTL		0x0B0
66 #define QDSP6V6SS_MEM_PWR_CTL		0x034
67 #define QDSP6SS_STRAP_ACC		0x110
68 
69 /* AXI Halt Register Offsets */
70 #define AXI_HALTREQ_REG			0x0
71 #define AXI_HALTACK_REG			0x4
72 #define AXI_IDLE_REG			0x8
73 #define AXI_GATING_VALID_OVERRIDE	BIT(0)
74 
75 #define HALT_ACK_TIMEOUT_US		100000
76 
77 /* QDSP6SS_RESET */
78 #define Q6SS_STOP_CORE			BIT(0)
79 #define Q6SS_CORE_ARES			BIT(1)
80 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
81 
82 /* QDSP6SS CBCR */
83 #define Q6SS_CBCR_CLKEN			BIT(0)
84 #define Q6SS_CBCR_CLKOFF		BIT(31)
85 #define Q6SS_CBCR_TIMEOUT_US		200
86 
87 /* QDSP6SS_GFMUX_CTL */
88 #define Q6SS_CLK_ENABLE			BIT(1)
89 
90 /* QDSP6SS_PWR_CTL */
91 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
92 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
93 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
94 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
95 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
96 #define Q6SS_L2DATA_STBY_N		BIT(18)
97 #define Q6SS_SLP_RET_N			BIT(19)
98 #define Q6SS_CLAMP_IO			BIT(20)
99 #define QDSS_BHS_ON			BIT(21)
100 #define QDSS_LDO_BYP			BIT(22)
101 
102 /* QDSP6v56 parameters */
103 #define QDSP6v56_LDO_BYP		BIT(25)
104 #define QDSP6v56_BHS_ON		BIT(24)
105 #define QDSP6v56_CLAMP_WL		BIT(21)
106 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
107 #define QDSP6SS_XO_CBCR		0x0038
108 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
109 
110 /* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define QDSP6SS_BOOT_STATUS		0x408
#define BOOT_STATUS_TIMEOUT_US		200
#define BOOT_FSM_TIMEOUT		10000
118 
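/*
 * Regulator bookkeeping: struct qcom_mss_reg_res describes a supply as listed
 * in the per-SoC match data below, while struct reg_info holds the acquired
 * regulator handle together with the requested voltage and load.
 */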
119 struct reg_info {
120 	struct regulator *reg;
121 	int uV;
122 	int uA;
123 };
124 
125 struct qcom_mss_reg_res {
126 	const char *supply;
127 	int uV;
128 	int uA;
129 };
130 
131 struct rproc_hexagon_res {
132 	const char *hexagon_mba_image;
133 	struct qcom_mss_reg_res *proxy_supply;
134 	struct qcom_mss_reg_res *active_supply;
135 	char **proxy_clk_names;
136 	char **reset_clk_names;
137 	char **active_clk_names;
138 	char **active_pd_names;
139 	char **proxy_pd_names;
140 	int version;
141 	bool need_mem_protection;
142 	bool has_alt_reset;
143 	bool has_spare_reg;
144 };
145 
146 struct q6v5 {
147 	struct device *dev;
148 	struct rproc *rproc;
149 
150 	void __iomem *reg_base;
151 	void __iomem *rmb_base;
152 
153 	struct regmap *halt_map;
154 	struct regmap *conn_map;
155 
156 	u32 halt_q6;
157 	u32 halt_modem;
158 	u32 halt_nc;
159 	u32 conn_box;
160 
161 	struct reset_control *mss_restart;
162 	struct reset_control *pdc_reset;
163 
164 	struct qcom_q6v5 q6v5;
165 
166 	struct clk *active_clks[8];
167 	struct clk *reset_clks[4];
168 	struct clk *proxy_clks[4];
169 	struct device *active_pds[1];
170 	struct device *proxy_pds[3];
171 	int active_clk_count;
172 	int reset_clk_count;
173 	int proxy_clk_count;
174 	int active_pd_count;
175 	int proxy_pd_count;
176 
177 	struct reg_info active_regs[1];
178 	struct reg_info proxy_regs[3];
179 	int active_reg_count;
180 	int proxy_reg_count;
181 
182 	bool running;
183 
184 	bool dump_mba_loaded;
185 	unsigned long dump_segment_mask;
186 	unsigned long dump_complete_mask;
187 
188 	phys_addr_t mba_phys;
189 	void *mba_region;
190 	size_t mba_size;
191 
192 	phys_addr_t mpss_phys;
193 	phys_addr_t mpss_reloc;
194 	size_t mpss_size;
195 
196 	struct qcom_rproc_glink glink_subdev;
197 	struct qcom_rproc_subdev smd_subdev;
198 	struct qcom_rproc_ssr ssr_subdev;
199 	struct qcom_rproc_ipa_notify ipa_notify_subdev;
200 	struct qcom_sysmon *sysmon;
201 	bool need_mem_protection;
202 	bool has_alt_reset;
203 	bool has_spare_reg;
204 	int mpss_perm;
205 	int mba_perm;
206 	const char *hexagon_mdt_image;
207 	int version;
208 };
209 
210 enum {
211 	MSS_MSM8916,
212 	MSS_MSM8974,
213 	MSS_MSM8996,
214 	MSS_MSM8998,
215 	MSS_SC7180,
216 	MSS_SDM845,
217 };
218 
219 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
220 			       const struct qcom_mss_reg_res *reg_res)
221 {
222 	int rc;
223 	int i;
224 
225 	if (!reg_res)
226 		return 0;
227 
228 	for (i = 0; reg_res[i].supply; i++) {
229 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
230 		if (IS_ERR(regs[i].reg)) {
231 			rc = PTR_ERR(regs[i].reg);
232 			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
235 			return rc;
236 		}
237 
238 		regs[i].uV = reg_res[i].uV;
239 		regs[i].uA = reg_res[i].uA;
240 	}
241 
242 	return i;
243 }
244 
245 static int q6v5_regulator_enable(struct q6v5 *qproc,
246 				 struct reg_info *regs, int count)
247 {
248 	int ret;
249 	int i;
250 
251 	for (i = 0; i < count; i++) {
252 		if (regs[i].uV > 0) {
253 			ret = regulator_set_voltage(regs[i].reg,
254 					regs[i].uV, INT_MAX);
255 			if (ret) {
256 				dev_err(qproc->dev,
257 					"Failed to request voltage for %d.\n",
258 						i);
259 				goto err;
260 			}
261 		}
262 
263 		if (regs[i].uA > 0) {
264 			ret = regulator_set_load(regs[i].reg,
265 						 regs[i].uA);
266 			if (ret < 0) {
267 				dev_err(qproc->dev,
268 					"Failed to set regulator mode\n");
269 				goto err;
270 			}
271 		}
272 
273 		ret = regulator_enable(regs[i].reg);
274 		if (ret) {
275 			dev_err(qproc->dev, "Regulator enable failed\n");
276 			goto err;
277 		}
278 	}
279 
280 	return 0;
281 err:
282 	for (; i >= 0; i--) {
283 		if (regs[i].uV > 0)
284 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
285 
286 		if (regs[i].uA > 0)
287 			regulator_set_load(regs[i].reg, 0);
288 
289 		regulator_disable(regs[i].reg);
290 	}
291 
292 	return ret;
293 }
294 
295 static void q6v5_regulator_disable(struct q6v5 *qproc,
296 				   struct reg_info *regs, int count)
297 {
298 	int i;
299 
300 	for (i = 0; i < count; i++) {
301 		if (regs[i].uV > 0)
302 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
303 
304 		if (regs[i].uA > 0)
305 			regulator_set_load(regs[i].reg, 0);
306 
307 		regulator_disable(regs[i].reg);
308 	}
309 }
310 
311 static int q6v5_clk_enable(struct device *dev,
312 			   struct clk **clks, int count)
313 {
314 	int rc;
315 	int i;
316 
317 	for (i = 0; i < count; i++) {
318 		rc = clk_prepare_enable(clks[i]);
319 		if (rc) {
320 			dev_err(dev, "Clock enable failed\n");
321 			goto err;
322 		}
323 	}
324 
325 	return 0;
326 err:
327 	for (i--; i >= 0; i--)
328 		clk_disable_unprepare(clks[i]);
329 
330 	return rc;
331 }
332 
333 static void q6v5_clk_disable(struct device *dev,
334 			     struct clk **clks, int count)
335 {
336 	int i;
337 
338 	for (i = 0; i < count; i++)
339 		clk_disable_unprepare(clks[i]);
340 }
341 
342 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
343 			   size_t pd_count)
344 {
345 	int ret;
346 	int i;
347 
348 	for (i = 0; i < pd_count; i++) {
349 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
350 		ret = pm_runtime_get_sync(pds[i]);
351 		if (ret < 0)
352 			goto unroll_pd_votes;
353 	}
354 
355 	return 0;
356 
357 unroll_pd_votes:
358 	for (i--; i >= 0; i--) {
359 		dev_pm_genpd_set_performance_state(pds[i], 0);
360 		pm_runtime_put(pds[i]);
361 	}
362 
363 	return ret;
364 }
365 
366 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
367 			     size_t pd_count)
368 {
369 	int i;
370 
371 	for (i = 0; i < pd_count; i++) {
372 		dev_pm_genpd_set_performance_state(pds[i], 0);
373 		pm_runtime_put(pds[i]);
374 	}
375 }
376 
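/*
 * Transfer ownership of a memory region between Linux (HLOS) and the modem
 * (MSS MSA) virtual machines via the secure monitor. "local" and "remote"
 * select which of the two should end up with access; *current_perm is updated
 * in place, so repeated calls with unchanged permissions are no-ops.
 */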
377 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
378 				   bool local, bool remote, phys_addr_t addr,
379 				   size_t size)
380 {
381 	struct qcom_scm_vmperm next[2];
382 	int perms = 0;
383 
384 	if (!qproc->need_mem_protection)
385 		return 0;
386 
387 	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
388 	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
389 		return 0;
390 
391 	if (local) {
392 		next[perms].vmid = QCOM_SCM_VMID_HLOS;
393 		next[perms].perm = QCOM_SCM_PERM_RWX;
394 		perms++;
395 	}
396 
397 	if (remote) {
398 		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
399 		next[perms].perm = QCOM_SCM_PERM_RW;
400 		perms++;
401 	}
402 
403 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
404 				   current_perm, next, perms);
405 }
406 
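/* rproc .load callback: copy the MBA firmware into its carved-out region */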
407 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
408 {
409 	struct q6v5 *qproc = rproc->priv;
410 
411 	memcpy(qproc->mba_region, fw->data, fw->size);
412 
413 	return 0;
414 }
415 
416 static int q6v5_reset_assert(struct q6v5 *qproc)
417 {
418 	int ret;
419 
420 	if (qproc->has_alt_reset) {
421 		reset_control_assert(qproc->pdc_reset);
422 		ret = reset_control_reset(qproc->mss_restart);
423 		reset_control_deassert(qproc->pdc_reset);
424 	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is reset while the Q6 modem is partly
		 * operational, the AXI valid signal can glitch, leading to
		 * spurious transactions and Q6 hangs. Work around this by
		 * asserting the AXI_GATING_VALID_OVERRIDE bit before
		 * triggering the Q6 MSS reset. AXI_GATING_VALID_OVERRIDE is
		 * withdrawn after the MSS assert, followed by an MSS deassert,
		 * while the PDC reset is held.
		 */
434 		reset_control_assert(qproc->pdc_reset);
435 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
436 				   AXI_GATING_VALID_OVERRIDE, 1);
437 		reset_control_assert(qproc->mss_restart);
438 		reset_control_deassert(qproc->pdc_reset);
439 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
440 				   AXI_GATING_VALID_OVERRIDE, 0);
441 		ret = reset_control_deassert(qproc->mss_restart);
442 	} else {
443 		ret = reset_control_assert(qproc->mss_restart);
444 	}
445 
446 	return ret;
447 }
448 
449 static int q6v5_reset_deassert(struct q6v5 *qproc)
450 {
451 	int ret;
452 
453 	if (qproc->has_alt_reset) {
454 		reset_control_assert(qproc->pdc_reset);
455 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
456 		ret = reset_control_reset(qproc->mss_restart);
457 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
458 		reset_control_deassert(qproc->pdc_reset);
459 	} else if (qproc->has_spare_reg) {
460 		ret = reset_control_reset(qproc->mss_restart);
461 	} else {
462 		ret = reset_control_deassert(qproc->mss_restart);
463 	}
464 
465 	return ret;
466 }
467 
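/*
 * Poll the PBL status register until the primary bootloader reports a result
 * or the timeout (in milliseconds) expires. Returns the raw PBL status value,
 * or -ETIMEDOUT.
 */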
468 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
469 {
470 	unsigned long timeout;
471 	s32 val;
472 
473 	timeout = jiffies + msecs_to_jiffies(ms);
474 	for (;;) {
475 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
476 		if (val)
477 			break;
478 
479 		if (time_after(jiffies, timeout))
480 			return -ETIMEDOUT;
481 
482 		msleep(1);
483 	}
484 
485 	return val;
486 }
487 
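/*
 * Poll the MBA status register until it reports the requested status (or any
 * non-zero value when "status" is zero). Values with the sign bit set are
 * treated as MBA errors and returned as-is; -ETIMEDOUT is returned on timeout.
 */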
488 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
489 {
491 	unsigned long timeout;
492 	s32 val;
493 
494 	timeout = jiffies + msecs_to_jiffies(ms);
495 	for (;;) {
496 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
497 		if (val < 0)
498 			break;
499 
500 		if (!status && val)
501 			break;
502 		else if (status && val == status)
503 			break;
504 
505 		if (time_after(jiffies, timeout))
506 			return -ETIMEDOUT;
507 
508 		msleep(1);
509 	}
510 
511 	return val;
512 }
513 
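/*
 * Bring the Hexagon core out of reset. The power-up sequence depends on the
 * QDSP6 version: SDM845 and SC7180 use the boot FSM, while the older versions
 * sequence the BHS, LDO, memory and clamp bits by hand. In all cases we finish
 * by waiting for the PBL to report its status.
 */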
514 static int q6v5proc_reset(struct q6v5 *qproc)
515 {
516 	u32 val;
517 	int ret;
518 	int i;
519 
520 	if (qproc->version == MSS_SDM845) {
521 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
522 		val |= Q6SS_CBCR_CLKEN;
523 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
524 
525 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
526 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
527 					 Q6SS_CBCR_TIMEOUT_US);
528 		if (ret) {
529 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
530 			return -ETIMEDOUT;
531 		}
532 
533 		/* De-assert QDSP6 stop core */
534 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
535 		/* Trigger boot FSM */
536 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
537 
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10,
					 BOOT_FSM_TIMEOUT);
540 		if (ret) {
541 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
542 			/* Reset the modem so that boot FSM is in reset state */
543 			q6v5_reset_deassert(qproc);
544 			return ret;
545 		}
546 
547 		goto pbl_wait;
548 	} else if (qproc->version == MSS_SC7180) {
549 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
550 		val |= Q6SS_CBCR_CLKEN;
551 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
552 
553 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
554 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
555 					 Q6SS_CBCR_TIMEOUT_US);
556 		if (ret) {
557 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
558 			return -ETIMEDOUT;
559 		}
560 
561 		/* Turn on the XO clock needed for PLL setup */
562 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
563 		val |= Q6SS_CBCR_CLKEN;
564 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
565 
566 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
567 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
568 					 Q6SS_CBCR_TIMEOUT_US);
569 		if (ret) {
570 			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
571 			return -ETIMEDOUT;
572 		}
573 
574 		/* Configure Q6 core CBCR to auto-enable after reset sequence */
575 		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
576 		val |= Q6SS_CBCR_CLKEN;
577 		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
578 
579 		/* De-assert the Q6 stop core signal */
580 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
581 
582 		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
583 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
584 
585 		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
586 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
587 					 val, (val & BIT(0)) != 0, 1,
588 					 BOOT_STATUS_TIMEOUT_US);
589 		if (ret) {
590 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
591 			/* Reset the modem so that boot FSM is in reset state */
592 			q6v5_reset_deassert(qproc);
593 			return ret;
594 		}
595 		goto pbl_wait;
596 	} else if (qproc->version == MSS_MSM8996 ||
597 		   qproc->version == MSS_MSM8998) {
598 		int mem_pwr_ctl;
599 
600 		/* Override the ACC value if required */
601 		writel(QDSP6SS_ACC_OVERRIDE_VAL,
602 		       qproc->reg_base + QDSP6SS_STRAP_ACC);
603 
604 		/* Assert resets, stop core */
605 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
606 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
607 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
608 
		/* BHS requires the XO CBCR to be enabled */
610 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
611 		val |= Q6SS_CBCR_CLKEN;
612 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
613 
		/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
615 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
616 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
617 					 Q6SS_CBCR_TIMEOUT_US);
618 		if (ret) {
619 			dev_err(qproc->dev,
620 				"xo cbcr enabling timed out (rc:%d)\n", ret);
621 			return ret;
622 		}
623 		/* Enable power block headswitch and wait for it to stabilize */
624 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
625 		val |= QDSP6v56_BHS_ON;
626 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
627 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
628 		udelay(1);
629 
630 		/* Put LDO in bypass mode */
631 		val |= QDSP6v56_LDO_BYP;
632 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
633 
634 		/* Deassert QDSP6 compiler memory clamp */
635 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
636 		val &= ~QDSP6v56_CLAMP_QMC_MEM;
637 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
638 
639 		/* Deassert memory peripheral sleep and L2 memory standby */
640 		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
641 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
642 
		/* Turn on L1, L2, ETB and JU memories one at a time */
644 		if (qproc->version == MSS_MSM8996) {
645 			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
646 			i = 19;
647 		} else {
648 			/* MSS_MSM8998 */
649 			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
650 			i = 28;
651 		}
652 		val = readl(qproc->reg_base + mem_pwr_ctl);
653 		for (; i >= 0; i--) {
654 			val |= BIT(i);
655 			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read the value back to ensure the write has
			 * completed, then wait 1 us for both the memory
			 * peripheral and the data array to turn on.
			 */
661 			val |= readl(qproc->reg_base + mem_pwr_ctl);
662 			udelay(1);
663 		}
664 		/* Remove word line clamp */
665 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
666 		val &= ~QDSP6v56_CLAMP_WL;
667 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
668 	} else {
669 		/* Assert resets, stop core */
670 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
671 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
672 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
673 
674 		/* Enable power block headswitch and wait for it to stabilize */
675 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
676 		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
677 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
678 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
679 		udelay(1);
680 		/*
681 		 * Turn on memories. L2 banks should be done individually
682 		 * to minimize inrush current.
683 		 */
684 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
685 		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
686 			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
687 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
688 		val |= Q6SS_L2DATA_SLP_NRET_N_2;
689 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
690 		val |= Q6SS_L2DATA_SLP_NRET_N_1;
691 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
692 		val |= Q6SS_L2DATA_SLP_NRET_N_0;
693 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
694 	}
695 	/* Remove IO clamp */
696 	val &= ~Q6SS_CLAMP_IO;
697 	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
698 
699 	/* Bring core out of reset */
700 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
701 	val &= ~Q6SS_CORE_ARES;
702 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
703 
704 	/* Turn on core clock */
705 	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
706 	val |= Q6SS_CLK_ENABLE;
707 	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
708 
709 	/* Start core execution */
710 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
711 	val &= ~Q6SS_STOP_CORE;
712 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
713 
714 pbl_wait:
715 	/* Wait for PBL status */
716 	ret = q6v5_rmb_pbl_wait(qproc, 1000);
717 	if (ret == -ETIMEDOUT) {
718 		dev_err(qproc->dev, "PBL boot timed out\n");
719 	} else if (ret != RMB_PBL_SUCCESS) {
720 		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
721 		ret = -EINVAL;
722 	} else {
723 		ret = 0;
724 	}
725 
726 	return ret;
727 }
728 
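/*
 * Halt one of the Q6/modem/NC AXI ports: request a halt through the halt-regs
 * syscon, wait for the acknowledgment and check the idle status, then drop
 * the request (the port stays halted until the next reset).
 */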
729 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
730 				   struct regmap *halt_map,
731 				   u32 offset)
732 {
733 	unsigned int val;
734 	int ret;
735 
736 	/* Check if we're already idle */
737 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
738 	if (!ret && val)
739 		return;
740 
741 	/* Assert halt request */
742 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
743 
744 	/* Wait for halt */
745 	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
746 				 val, 1000, HALT_ACK_TIMEOUT_US);
747 
748 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
749 	if (ret || !val)
750 		dev_err(qproc->dev, "port failed halt\n");
751 
752 	/* Clear halt request (port will remain halted until reset) */
753 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
754 }
755 
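/*
 * Hand the MPSS metadata read from the .mdt file to the MBA for
 * authentication: copy it into a DMA buffer, assign the buffer to the modem,
 * kick the RMB and wait for RMB_MBA_META_DATA_AUTH_SUCCESS, then reclaim the
 * buffer from the modem.
 */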
756 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
757 {
758 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
759 	dma_addr_t phys;
760 	void *metadata;
761 	int mdata_perm;
762 	int xferop_ret;
763 	size_t size;
764 	void *ptr;
765 	int ret;
766 
767 	metadata = qcom_mdt_read_metadata(fw, &size);
768 	if (IS_ERR(metadata))
769 		return PTR_ERR(metadata);
770 
771 	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
772 	if (!ptr) {
773 		kfree(metadata);
774 		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
775 		return -ENOMEM;
776 	}
777 
778 	memcpy(ptr, metadata, size);
779 
	/* Hypervisor mapping so the modem can access the metadata */
781 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
782 	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
783 				      phys, size);
784 	if (ret) {
785 		dev_err(qproc->dev,
786 			"assigning Q6 access to metadata failed: %d\n", ret);
787 		ret = -EAGAIN;
788 		goto free_dma_attrs;
789 	}
790 
791 	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
792 	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
793 
794 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
795 	if (ret == -ETIMEDOUT)
796 		dev_err(qproc->dev, "MPSS header authentication timed out\n");
797 	else if (ret < 0)
798 		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
799 
800 	/* Metadata authentication done, remove modem access */
801 	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
802 					     phys, size);
803 	if (xferop_ret)
804 		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed, system may become unstable\n");
806 
807 free_dma_attrs:
808 	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
809 	kfree(metadata);
810 
811 	return ret < 0 ? ret : 0;
812 }
813 
814 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
815 {
816 	if (phdr->p_type != PT_LOAD)
817 		return false;
818 
819 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
820 		return false;
821 
822 	if (!phdr->p_memsz)
823 		return false;
824 
825 	return true;
826 }
827 
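/*
 * Power up the subsystem and boot the MBA: enable the power domains, proxy
 * and active supplies and clocks, deassert the MSS reset, hand the MBA region
 * over to the modem, run the Q6 reset sequence and finally wait for the MBA
 * to report that the XPU has been unlocked.
 */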
828 static int q6v5_mba_load(struct q6v5 *qproc)
829 {
830 	int ret;
831 	int xfermemop_ret;
832 
833 	qcom_q6v5_prepare(&qproc->q6v5);
834 
835 	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
836 	if (ret < 0) {
837 		dev_err(qproc->dev, "failed to enable active power domains\n");
838 		goto disable_irqs;
839 	}
840 
841 	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
842 	if (ret < 0) {
843 		dev_err(qproc->dev, "failed to enable proxy power domains\n");
844 		goto disable_active_pds;
845 	}
846 
847 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
848 				    qproc->proxy_reg_count);
849 	if (ret) {
850 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
851 		goto disable_proxy_pds;
852 	}
853 
854 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
855 			      qproc->proxy_clk_count);
856 	if (ret) {
857 		dev_err(qproc->dev, "failed to enable proxy clocks\n");
858 		goto disable_proxy_reg;
859 	}
860 
861 	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
862 				    qproc->active_reg_count);
863 	if (ret) {
864 		dev_err(qproc->dev, "failed to enable supplies\n");
865 		goto disable_proxy_clk;
866 	}
867 
868 	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
869 			      qproc->reset_clk_count);
870 	if (ret) {
871 		dev_err(qproc->dev, "failed to enable reset clocks\n");
872 		goto disable_vdd;
873 	}
874 
875 	ret = q6v5_reset_deassert(qproc);
876 	if (ret) {
877 		dev_err(qproc->dev, "failed to deassert mss restart\n");
878 		goto disable_reset_clks;
879 	}
880 
881 	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
882 			      qproc->active_clk_count);
883 	if (ret) {
884 		dev_err(qproc->dev, "failed to enable clocks\n");
885 		goto assert_reset;
886 	}
887 
888 	/* Assign MBA image access in DDR to q6 */
889 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
890 				      qproc->mba_phys, qproc->mba_size);
891 	if (ret) {
892 		dev_err(qproc->dev,
893 			"assigning Q6 access to mba memory failed: %d\n", ret);
894 		goto disable_active_clks;
895 	}
896 
897 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
898 
899 	ret = q6v5proc_reset(qproc);
900 	if (ret)
901 		goto reclaim_mba;
902 
903 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
904 	if (ret == -ETIMEDOUT) {
905 		dev_err(qproc->dev, "MBA boot timed out\n");
906 		goto halt_axi_ports;
907 	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
908 		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
909 		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
910 		ret = -EINVAL;
911 		goto halt_axi_ports;
912 	}
913 
914 	qproc->dump_mba_loaded = true;
915 	return 0;
916 
917 halt_axi_ports:
918 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
919 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
920 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
921 
922 reclaim_mba:
923 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
924 						false, qproc->mba_phys,
925 						qproc->mba_size);
926 	if (xfermemop_ret) {
927 		dev_err(qproc->dev,
928 			"Failed to reclaim mba buffer, system may become unstable\n");
929 	}
930 
931 disable_active_clks:
932 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
933 			 qproc->active_clk_count);
934 assert_reset:
935 	q6v5_reset_assert(qproc);
936 disable_reset_clks:
937 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
938 			 qproc->reset_clk_count);
939 disable_vdd:
940 	q6v5_regulator_disable(qproc, qproc->active_regs,
941 			       qproc->active_reg_count);
942 disable_proxy_clk:
943 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
944 			 qproc->proxy_clk_count);
945 disable_proxy_reg:
946 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
947 			       qproc->proxy_reg_count);
948 disable_proxy_pds:
949 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
950 disable_active_pds:
951 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
952 disable_irqs:
953 	qcom_q6v5_unprepare(&qproc->q6v5);
954 
955 	return ret;
956 }
957 
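/*
 * Undo q6v5_mba_load(): halt the AXI ports, assert the resets, drop the
 * active clocks, regulators and power domains, and reclaim the MBA region
 * from the modem.
 */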
958 static void q6v5_mba_reclaim(struct q6v5 *qproc)
959 {
960 	int ret;
961 	u32 val;
962 
963 	qproc->dump_mba_loaded = false;
964 
965 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
966 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
967 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
968 	if (qproc->version == MSS_MSM8996) {
		/*
		 * Assert the I/O and memory clamps to avoid drawing high MX
		 * current during an LPASS/MSS restart.
		 */
972 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
973 		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
974 			QDSP6v56_CLAMP_QMC_MEM;
975 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
976 	}
977 
978 	q6v5_reset_assert(qproc);
979 
980 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
981 			 qproc->reset_clk_count);
982 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
983 			 qproc->active_clk_count);
984 	q6v5_regulator_disable(qproc, qproc->active_regs,
985 			       qproc->active_reg_count);
986 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
987 
	/*
	 * In case of a failure or coredump scenario where reclaiming the MBA
	 * memory could not happen, reclaim it here.
	 */
991 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
992 				      qproc->mba_phys,
993 				      qproc->mba_size);
994 	WARN_ON(ret);
995 
996 	ret = qcom_q6v5_unprepare(&qproc->q6v5);
997 	if (ret) {
998 		q6v5_pds_disable(qproc, qproc->proxy_pds,
999 				 qproc->proxy_pd_count);
1000 		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1001 				 qproc->proxy_clk_count);
1002 		q6v5_regulator_disable(qproc, qproc->proxy_regs,
1003 				       qproc->proxy_reg_count);
1004 	}
1005 }
1006 
1007 static int q6v5_reload_mba(struct rproc *rproc)
1008 {
1009 	struct q6v5 *qproc = rproc->priv;
1010 	const struct firmware *fw;
1011 	int ret;
1012 
1013 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1014 	if (ret < 0)
1015 		return ret;
1016 
1017 	q6v5_load(rproc, fw);
1018 	ret = q6v5_mba_load(qproc);
1019 	release_firmware(fw);
1020 
1021 	return ret;
1022 }
1023 
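/*
 * Load and authenticate the modem firmware proper: once the metadata has been
 * accepted, copy each valid ELF segment into the MPSS region, updating the
 * RMB code-length register so the MBA can authenticate the loaded code, then
 * hand the whole region over to the modem and wait for RMB_MBA_AUTH_COMPLETE.
 */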
1024 static int q6v5_mpss_load(struct q6v5 *qproc)
1025 {
1026 	const struct elf32_phdr *phdrs;
1027 	const struct elf32_phdr *phdr;
1028 	const struct firmware *seg_fw;
1029 	const struct firmware *fw;
1030 	struct elf32_hdr *ehdr;
1031 	phys_addr_t mpss_reloc;
1032 	phys_addr_t boot_addr;
1033 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1034 	phys_addr_t max_addr = 0;
1035 	u32 code_length;
1036 	bool relocate = false;
1037 	char *fw_name;
1038 	size_t fw_name_len;
1039 	ssize_t offset;
1040 	size_t size = 0;
1041 	void *ptr;
1042 	int ret;
1043 	int i;
1044 
1045 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1046 	if (fw_name_len <= 4)
1047 		return -EINVAL;
1048 
1049 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1050 	if (!fw_name)
1051 		return -ENOMEM;
1052 
1053 	ret = request_firmware(&fw, fw_name, qproc->dev);
1054 	if (ret < 0) {
1055 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1056 		goto out;
1057 	}
1058 
1059 	/* Initialize the RMB validator */
1060 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1061 
1062 	ret = q6v5_mpss_init_image(qproc, fw);
1063 	if (ret)
1064 		goto release_firmware;
1065 
1066 	ehdr = (struct elf32_hdr *)fw->data;
1067 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1068 
1069 	for (i = 0; i < ehdr->e_phnum; i++) {
1070 		phdr = &phdrs[i];
1071 
1072 		if (!q6v5_phdr_valid(phdr))
1073 			continue;
1074 
1075 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1076 			relocate = true;
1077 
1078 		if (phdr->p_paddr < min_addr)
1079 			min_addr = phdr->p_paddr;
1080 
1081 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1082 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1083 	}
1084 
	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after the MBA is loaded. For a modem
	 * cold boot this is a no-op.
	 */
1090 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1091 				qproc->mpss_phys, qproc->mpss_size);
1092 
1093 	/* Share ownership between Linux and MSS, during segment loading */
1094 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1095 				      qproc->mpss_phys, qproc->mpss_size);
1096 	if (ret) {
1097 		dev_err(qproc->dev,
1098 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1099 		ret = -EAGAIN;
1100 		goto release_firmware;
1101 	}
1102 
1103 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1104 	qproc->mpss_reloc = mpss_reloc;
1105 	/* Load firmware segments */
1106 	for (i = 0; i < ehdr->e_phnum; i++) {
1107 		phdr = &phdrs[i];
1108 
1109 		if (!q6v5_phdr_valid(phdr))
1110 			continue;
1111 
1112 		offset = phdr->p_paddr - mpss_reloc;
1113 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1114 			dev_err(qproc->dev, "segment outside memory range\n");
1115 			ret = -EINVAL;
1116 			goto release_firmware;
1117 		}
1118 
1119 		ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
		if (!ptr) {
			dev_err(qproc->dev,
				"unable to map memory region: %pa+%zx-%x\n",
				&qproc->mpss_phys, offset, phdr->p_memsz);
			ret = -EBUSY;
			goto release_firmware;
		}
1126 
1127 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1128 			/* Firmware is large enough to be non-split */
1129 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1130 				dev_err(qproc->dev,
1131 					"failed to load segment %d from truncated file %s\n",
1132 					i, fw_name);
1133 				ret = -EINVAL;
1134 				iounmap(ptr);
1135 				goto release_firmware;
1136 			}
1137 
1138 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1139 		} else if (phdr->p_filesz) {
1140 			/* Replace "xxx.xxx" with "xxx.bxx" */
1141 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1142 			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1143 			if (ret) {
1144 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1145 				iounmap(ptr);
1146 				goto release_firmware;
1147 			}
1148 
1149 			memcpy(ptr, seg_fw->data, seg_fw->size);
1150 
1151 			release_firmware(seg_fw);
1152 		}
1153 
1154 		if (phdr->p_memsz > phdr->p_filesz) {
1155 			memset(ptr + phdr->p_filesz, 0,
1156 			       phdr->p_memsz - phdr->p_filesz);
1157 		}
1158 		iounmap(ptr);
1159 		size += phdr->p_memsz;
1160 
1161 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1162 		if (!code_length) {
1163 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1164 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1165 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1166 		}
1167 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1168 
1169 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1170 		if (ret < 0) {
1171 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1172 				ret);
1173 			goto release_firmware;
1174 		}
1175 	}
1176 
1177 	/* Transfer ownership of modem ddr region to q6 */
1178 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1179 				      qproc->mpss_phys, qproc->mpss_size);
1180 	if (ret) {
1181 		dev_err(qproc->dev,
1182 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1183 		ret = -EAGAIN;
1184 		goto release_firmware;
1185 	}
1186 
1187 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1188 	if (ret == -ETIMEDOUT)
1189 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1190 	else if (ret < 0)
1191 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1192 
1193 release_firmware:
1194 	release_firmware(fw);
1195 out:
1196 	kfree(fw_name);
1197 
1198 	return ret < 0 ? ret : 0;
1199 }
1200 
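/*
 * Coredump helper: copy one MPSS segment into the dump buffer, first
 * reloading the MBA and reclaiming the memory from the modem if needed. Once
 * every registered segment has been dumped, ownership is handed back to the
 * modem and the MBA is torn down again.
 */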
1201 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1202 				   struct rproc_dump_segment *segment,
1203 				   void *dest)
1204 {
1205 	int ret = 0;
1206 	struct q6v5 *qproc = rproc->priv;
1207 	unsigned long mask = BIT((unsigned long)segment->priv);
1208 	int offset = segment->da - qproc->mpss_reloc;
1209 	void *ptr = NULL;
1210 
1211 	/* Unlock mba before copying segments */
1212 	if (!qproc->dump_mba_loaded) {
1213 		ret = q6v5_reload_mba(rproc);
1214 		if (!ret) {
1215 			/* Reset ownership back to Linux to copy segments */
1216 			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1217 						      true, false,
1218 						      qproc->mpss_phys,
1219 						      qproc->mpss_size);
1220 		}
1221 	}
1222 
1223 	if (!ret)
1224 		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
1225 
1226 	if (ptr) {
1227 		memcpy(dest, ptr, segment->size);
1228 		iounmap(ptr);
1229 	} else {
1230 		memset(dest, 0xff, segment->size);
1231 	}
1232 
1233 	qproc->dump_segment_mask |= mask;
1234 
1235 	/* Reclaim mba after copying segments */
1236 	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1237 		if (qproc->dump_mba_loaded) {
1238 			/* Try to reset ownership back to Q6 */
1239 			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1240 						false, true,
1241 						qproc->mpss_phys,
1242 						qproc->mpss_size);
1243 			q6v5_mba_reclaim(qproc);
1244 		}
1245 	}
1246 }
1247 
1248 static int q6v5_start(struct rproc *rproc)
1249 {
1250 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1251 	int xfermemop_ret;
1252 	int ret;
1253 
1254 	ret = q6v5_mba_load(qproc);
1255 	if (ret)
1256 		return ret;
1257 
1258 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
1259 
1260 	ret = q6v5_mpss_load(qproc);
1261 	if (ret)
1262 		goto reclaim_mpss;
1263 
1264 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1265 	if (ret == -ETIMEDOUT) {
1266 		dev_err(qproc->dev, "start timed out\n");
1267 		goto reclaim_mpss;
1268 	}
1269 
1270 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1271 						false, qproc->mba_phys,
1272 						qproc->mba_size);
1273 	if (xfermemop_ret)
1274 		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
1276 
1277 	/* Reset Dump Segment Mask */
1278 	qproc->dump_segment_mask = 0;
1279 	qproc->running = true;
1280 
1281 	return 0;
1282 
1283 reclaim_mpss:
1284 	q6v5_mba_reclaim(qproc);
1285 
1286 	return ret;
1287 }
1288 
1289 static int q6v5_stop(struct rproc *rproc)
1290 {
1291 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1292 	int ret;
1293 
1294 	qproc->running = false;
1295 
1296 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1297 	if (ret == -ETIMEDOUT)
1298 		dev_err(qproc->dev, "timed out on wait\n");
1299 
1300 	q6v5_mba_reclaim(qproc);
1301 
1302 	return 0;
1303 }
1304 
1305 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1306 					    const struct firmware *mba_fw)
1307 {
1308 	const struct firmware *fw;
1309 	const struct elf32_phdr *phdrs;
1310 	const struct elf32_phdr *phdr;
1311 	const struct elf32_hdr *ehdr;
1312 	struct q6v5 *qproc = rproc->priv;
1313 	unsigned long i;
1314 	int ret;
1315 
1316 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1317 	if (ret < 0) {
1318 		dev_err(qproc->dev, "unable to load %s\n",
1319 			qproc->hexagon_mdt_image);
1320 		return ret;
1321 	}
1322 
1323 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1324 
1325 	ehdr = (struct elf32_hdr *)fw->data;
1326 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1327 	qproc->dump_complete_mask = 0;
1328 
1329 	for (i = 0; i < ehdr->e_phnum; i++) {
1330 		phdr = &phdrs[i];
1331 
1332 		if (!q6v5_phdr_valid(phdr))
1333 			continue;
1334 
1335 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1336 							phdr->p_memsz,
1337 							qcom_q6v5_dump_segment,
1338 							(void *)i);
1339 		if (ret)
1340 			break;
1341 
1342 		qproc->dump_complete_mask |= BIT(i);
1343 	}
1344 
1345 	release_firmware(fw);
1346 	return ret;
1347 }
1348 
1349 static const struct rproc_ops q6v5_ops = {
1350 	.start = q6v5_start,
1351 	.stop = q6v5_stop,
1352 	.parse_fw = qcom_q6v5_register_dump_segments,
1353 	.load = q6v5_load,
1354 };
1355 
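/*
 * The proxy votes are only needed until the modem takes over its own
 * resources (MSA handover); drop them here.
 */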
1356 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1357 {
1358 	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1359 
1360 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1361 			 qproc->proxy_clk_count);
1362 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
1363 			       qproc->proxy_reg_count);
1364 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1365 }
1366 
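/*
 * Map the QDSP6 and RMB register banks and look up the halt (and, with
 * has_spare_reg, the spare/conn-box) syscon offsets. A rough sketch of the DT
 * properties consumed here, with illustrative (not real) phandles and
 * offsets:
 *
 *	qcom,halt-regs = <&tcsr_regs 0x3000 0x5000 0x4000>;
 *	qcom,spare-regs = <&tcsr_regs 0xb3e4>;
 */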
1367 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1368 {
1369 	struct of_phandle_args args;
1370 	struct resource *res;
1371 	int ret;
1372 
1373 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1374 	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1375 	if (IS_ERR(qproc->reg_base))
1376 		return PTR_ERR(qproc->reg_base);
1377 
1378 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1379 	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1380 	if (IS_ERR(qproc->rmb_base))
1381 		return PTR_ERR(qproc->rmb_base);
1382 
1383 	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1384 					       "qcom,halt-regs", 3, 0, &args);
1385 	if (ret < 0) {
1386 		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1387 		return -EINVAL;
1388 	}
1389 
1390 	qproc->halt_map = syscon_node_to_regmap(args.np);
1391 	of_node_put(args.np);
1392 	if (IS_ERR(qproc->halt_map))
1393 		return PTR_ERR(qproc->halt_map);
1394 
1395 	qproc->halt_q6 = args.args[0];
1396 	qproc->halt_modem = args.args[1];
1397 	qproc->halt_nc = args.args[2];
1398 
1399 	if (qproc->has_spare_reg) {
1400 		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1401 						       "qcom,spare-regs",
1402 						       1, 0, &args);
1403 		if (ret < 0) {
1404 			dev_err(&pdev->dev, "failed to parse spare-regs\n");
1405 			return -EINVAL;
1406 		}
1407 
1408 		qproc->conn_map = syscon_node_to_regmap(args.np);
1409 		of_node_put(args.np);
1410 		if (IS_ERR(qproc->conn_map))
1411 			return PTR_ERR(qproc->conn_map);
1412 
1413 		qproc->conn_box = args.args[0];
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1420 		char **clk_names)
1421 {
1422 	int i;
1423 
1424 	if (!clk_names)
1425 		return 0;
1426 
1427 	for (i = 0; clk_names[i]; i++) {
1428 		clks[i] = devm_clk_get(dev, clk_names[i]);
1429 		if (IS_ERR(clks[i])) {
1430 			int rc = PTR_ERR(clks[i]);
1431 
1432 			if (rc != -EPROBE_DEFER)
1433 				dev_err(dev, "Failed to get %s clock\n",
1434 					clk_names[i]);
1435 			return rc;
1436 		}
1437 	}
1438 
1439 	return i;
1440 }
1441 
1442 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1443 			   char **pd_names)
1444 {
1445 	size_t num_pds = 0;
1446 	int ret;
1447 	int i;
1448 
1449 	if (!pd_names)
1450 		return 0;
1451 
1452 	while (pd_names[num_pds])
1453 		num_pds++;
1454 
1455 	for (i = 0; i < num_pds; i++) {
1456 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1457 		if (IS_ERR_OR_NULL(devs[i])) {
1458 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1459 			goto unroll_attach;
1460 		}
1461 	}
1462 
1463 	return num_pds;
1464 
1465 unroll_attach:
1466 	for (i--; i >= 0; i--)
1467 		dev_pm_domain_detach(devs[i], false);
1468 
1469 	return ret;
1470 }
1471 
1472 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1473 			    size_t pd_count)
1474 {
1475 	int i;
1476 
1477 	for (i = 0; i < pd_count; i++)
1478 		dev_pm_domain_detach(pds[i], false);
1479 }
1480 
1481 static int q6v5_init_reset(struct q6v5 *qproc)
1482 {
1483 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1484 							      "mss_restart");
1485 	if (IS_ERR(qproc->mss_restart)) {
1486 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1487 		return PTR_ERR(qproc->mss_restart);
1488 	}
1489 
1490 	if (qproc->has_alt_reset || qproc->has_spare_reg) {
1491 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1492 								    "pdc_reset");
1493 		if (IS_ERR(qproc->pdc_reset)) {
1494 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1495 			return PTR_ERR(qproc->pdc_reset);
1496 		}
1497 	}
1498 
1499 	return 0;
1500 }
1501 
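/*
 * Resolve the MBA and MPSS reserved-memory regions, either from "mba"/"mpss"
 * child nodes or directly from the device's memory-region property. Only the
 * MBA region is mapped here; MPSS segments are ioremapped on demand.
 */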
1502 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1503 {
1504 	struct device_node *child;
1505 	struct device_node *node;
1506 	struct resource r;
1507 	int ret;
1508 
	/*
	 * In the absence of "mba"/"mpss" child nodes, extract the MBA and
	 * MPSS reserved memory regions from the device's memory-region
	 * property.
	 */
1513 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1514 	if (!child)
1515 		node = of_parse_phandle(qproc->dev->of_node,
1516 					"memory-region", 0);
1517 	else
1518 		node = of_parse_phandle(child, "memory-region", 0);
1519 
1520 	ret = of_address_to_resource(node, 0, &r);
1521 	if (ret) {
1522 		dev_err(qproc->dev, "unable to resolve mba region\n");
1523 		return ret;
1524 	}
1525 	of_node_put(node);
1526 
1527 	qproc->mba_phys = r.start;
1528 	qproc->mba_size = resource_size(&r);
1529 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1530 	if (!qproc->mba_region) {
1531 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1532 			&r.start, qproc->mba_size);
1533 		return -EBUSY;
1534 	}
1535 
1536 	if (!child) {
1537 		node = of_parse_phandle(qproc->dev->of_node,
1538 					"memory-region", 1);
1539 	} else {
1540 		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1541 		node = of_parse_phandle(child, "memory-region", 0);
1542 	}
1543 
1544 	ret = of_address_to_resource(node, 0, &r);
1545 	if (ret) {
1546 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1547 		return ret;
1548 	}
1549 	of_node_put(node);
1550 
1551 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1552 	qproc->mpss_size = resource_size(&r);
1553 
1554 	return 0;
1555 }
1556 
1557 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1558 
1559 /* Register IPA notification function */
1560 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
1561 			     void *data)
1562 {
1563 	struct qcom_rproc_ipa_notify *ipa_notify;
1564 	struct q6v5 *qproc = rproc->priv;
1565 
1566 	if (!notify)
1567 		return -EINVAL;
1568 
1569 	ipa_notify = &qproc->ipa_notify_subdev;
1570 	if (ipa_notify->notify)
1571 		return -EBUSY;
1572 
1573 	ipa_notify->notify = notify;
1574 	ipa_notify->data = data;
1575 
1576 	return 0;
1577 }
1578 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
1579 
1580 /* Deregister IPA notification function */
1581 void qcom_deregister_ipa_notify(struct rproc *rproc)
1582 {
1583 	struct q6v5 *qproc = rproc->priv;
1584 
1585 	qproc->ipa_notify_subdev.notify = NULL;
1586 }
1587 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1589 
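/*
 * Probe: gather the clocks, regulators, power domains and resets described by
 * the per-SoC match data, set up the glink/smd/ssr/ipa-notify/sysmon
 * subdevices and finally register the remoteproc.
 */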
1590 static int q6v5_probe(struct platform_device *pdev)
1591 {
1592 	const struct rproc_hexagon_res *desc;
1593 	struct q6v5 *qproc;
1594 	struct rproc *rproc;
1595 	const char *mba_image;
1596 	int ret;
1597 
1598 	desc = of_device_get_match_data(&pdev->dev);
1599 	if (!desc)
1600 		return -EINVAL;
1601 
1602 	if (desc->need_mem_protection && !qcom_scm_is_available())
1603 		return -EPROBE_DEFER;
1604 
1605 	mba_image = desc->hexagon_mba_image;
1606 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1607 					    0, &mba_image);
1608 	if (ret < 0 && ret != -EINVAL)
1609 		return ret;
1610 
1611 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1612 			    mba_image, sizeof(*qproc));
1613 	if (!rproc) {
1614 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1615 		return -ENOMEM;
1616 	}
1617 
1618 	rproc->auto_boot = false;
1619 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1620 
1621 	qproc = (struct q6v5 *)rproc->priv;
1622 	qproc->dev = &pdev->dev;
1623 	qproc->rproc = rproc;
1624 	qproc->hexagon_mdt_image = "modem.mdt";
1625 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1626 					    1, &qproc->hexagon_mdt_image);
1627 	if (ret < 0 && ret != -EINVAL)
1628 		goto free_rproc;
1629 
1630 	platform_set_drvdata(pdev, qproc);
1631 
1632 	qproc->has_spare_reg = desc->has_spare_reg;
1633 	ret = q6v5_init_mem(qproc, pdev);
1634 	if (ret)
1635 		goto free_rproc;
1636 
1637 	ret = q6v5_alloc_memory_region(qproc);
1638 	if (ret)
1639 		goto free_rproc;
1640 
1641 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1642 			       desc->proxy_clk_names);
1643 	if (ret < 0) {
1644 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1645 		goto free_rproc;
1646 	}
1647 	qproc->proxy_clk_count = ret;
1648 
1649 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1650 			       desc->reset_clk_names);
1651 	if (ret < 0) {
1652 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1653 		goto free_rproc;
1654 	}
1655 	qproc->reset_clk_count = ret;
1656 
1657 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1658 			       desc->active_clk_names);
1659 	if (ret < 0) {
1660 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
1661 		goto free_rproc;
1662 	}
1663 	qproc->active_clk_count = ret;
1664 
1665 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1666 				  desc->proxy_supply);
1667 	if (ret < 0) {
1668 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1669 		goto free_rproc;
1670 	}
1671 	qproc->proxy_reg_count = ret;
1672 
1673 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
1674 				  desc->active_supply);
1675 	if (ret < 0) {
1676 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
1677 		goto free_rproc;
1678 	}
1679 	qproc->active_reg_count = ret;
1680 
1681 	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1682 			      desc->active_pd_names);
1683 	if (ret < 0) {
1684 		dev_err(&pdev->dev, "Failed to attach active power domains\n");
1685 		goto free_rproc;
1686 	}
1687 	qproc->active_pd_count = ret;
1688 
1689 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1690 			      desc->proxy_pd_names);
1691 	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
1693 		goto detach_active_pds;
1694 	}
1695 	qproc->proxy_pd_count = ret;
1696 
1697 	qproc->has_alt_reset = desc->has_alt_reset;
1698 	ret = q6v5_init_reset(qproc);
1699 	if (ret)
1700 		goto detach_proxy_pds;
1701 
1702 	qproc->version = desc->version;
1703 	qproc->need_mem_protection = desc->need_mem_protection;
1704 
1705 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1706 			     qcom_msa_handover);
1707 	if (ret)
1708 		goto detach_proxy_pds;
1709 
1710 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1711 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1712 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
1713 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1714 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1715 	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
1716 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1717 	if (IS_ERR(qproc->sysmon)) {
1718 		ret = PTR_ERR(qproc->sysmon);
1719 		goto remove_subdevs;
1720 	}
1721 
1722 	ret = rproc_add(rproc);
1723 	if (ret)
1724 		goto remove_sysmon_subdev;
1725 
1726 	return 0;
1727 
1728 remove_sysmon_subdev:
1729 	qcom_remove_sysmon_subdev(qproc->sysmon);
1730 remove_subdevs:
1731 	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
1732 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1733 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1734 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
1735 detach_proxy_pds:
1736 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1737 detach_active_pds:
1738 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1739 free_rproc:
1740 	rproc_free(rproc);
1741 
1742 	return ret;
1743 }
1744 
1745 static int q6v5_remove(struct platform_device *pdev)
1746 {
1747 	struct q6v5 *qproc = platform_get_drvdata(pdev);
1748 	struct rproc *rproc = qproc->rproc;
1749 
1750 	rproc_del(rproc);
1751 
1752 	qcom_remove_sysmon_subdev(qproc->sysmon);
1753 	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
1754 	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
1755 	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
1756 	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
1757 
1758 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1759 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1760 
1761 	rproc_free(rproc);
1762 
1763 	return 0;
1764 }
1765 
1766 static const struct rproc_hexagon_res sc7180_mss = {
1767 	.hexagon_mba_image = "mba.mbn",
1768 	.proxy_clk_names = (char*[]){
1769 		"xo",
1770 		NULL
1771 	},
1772 	.reset_clk_names = (char*[]){
1773 		"iface",
1774 		"bus",
1775 		"snoc_axi",
1776 		NULL
1777 	},
1778 	.active_clk_names = (char*[]){
1779 		"mnoc_axi",
1780 		"nav",
1781 		NULL
1782 	},
1783 	.active_pd_names = (char*[]){
1784 		"load_state",
1785 		NULL
1786 	},
1787 	.proxy_pd_names = (char*[]){
1788 		"cx",
1789 		"mx",
1790 		"mss",
1791 		NULL
1792 	},
1793 	.need_mem_protection = true,
1794 	.has_alt_reset = false,
1795 	.has_spare_reg = true,
1796 	.version = MSS_SC7180,
1797 };
1798 
1799 static const struct rproc_hexagon_res sdm845_mss = {
1800 	.hexagon_mba_image = "mba.mbn",
1801 	.proxy_clk_names = (char*[]){
1802 			"xo",
1803 			"prng",
1804 			NULL
1805 	},
1806 	.reset_clk_names = (char*[]){
1807 			"iface",
1808 			"snoc_axi",
1809 			NULL
1810 	},
1811 	.active_clk_names = (char*[]){
1812 			"bus",
1813 			"mem",
1814 			"gpll0_mss",
1815 			"mnoc_axi",
1816 			NULL
1817 	},
1818 	.active_pd_names = (char*[]){
1819 			"load_state",
1820 			NULL
1821 	},
1822 	.proxy_pd_names = (char*[]){
1823 			"cx",
1824 			"mx",
1825 			"mss",
1826 			NULL
1827 	},
1828 	.need_mem_protection = true,
1829 	.has_alt_reset = true,
1830 	.has_spare_reg = false,
1831 	.version = MSS_SDM845,
1832 };
1833 
1834 static const struct rproc_hexagon_res msm8998_mss = {
1835 	.hexagon_mba_image = "mba.mbn",
1836 	.proxy_clk_names = (char*[]){
1837 			"xo",
1838 			"qdss",
1839 			"mem",
1840 			NULL
1841 	},
1842 	.active_clk_names = (char*[]){
1843 			"iface",
1844 			"bus",
1845 			"gpll0_mss",
1846 			"mnoc_axi",
1847 			"snoc_axi",
1848 			NULL
1849 	},
1850 	.proxy_pd_names = (char*[]){
1851 			"cx",
1852 			"mx",
1853 			NULL
1854 	},
1855 	.need_mem_protection = true,
1856 	.has_alt_reset = false,
1857 	.has_spare_reg = false,
1858 	.version = MSS_MSM8998,
1859 };
1860 
1861 static const struct rproc_hexagon_res msm8996_mss = {
1862 	.hexagon_mba_image = "mba.mbn",
1863 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1864 		{
1865 			.supply = "pll",
1866 			.uA = 100000,
1867 		},
1868 		{}
1869 	},
1870 	.proxy_clk_names = (char*[]){
1871 			"xo",
1872 			"pnoc",
1873 			"qdss",
1874 			NULL
1875 	},
1876 	.active_clk_names = (char*[]){
1877 			"iface",
1878 			"bus",
1879 			"mem",
1880 			"gpll0_mss",
1881 			"snoc_axi",
1882 			"mnoc_axi",
1883 			NULL
1884 	},
1885 	.need_mem_protection = true,
1886 	.has_alt_reset = false,
1887 	.has_spare_reg = false,
1888 	.version = MSS_MSM8996,
1889 };
1890 
1891 static const struct rproc_hexagon_res msm8916_mss = {
1892 	.hexagon_mba_image = "mba.mbn",
1893 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1894 		{
1895 			.supply = "mx",
1896 			.uV = 1050000,
1897 		},
1898 		{
1899 			.supply = "cx",
1900 			.uA = 100000,
1901 		},
1902 		{
1903 			.supply = "pll",
1904 			.uA = 100000,
1905 		},
1906 		{}
1907 	},
1908 	.proxy_clk_names = (char*[]){
1909 		"xo",
1910 		NULL
1911 	},
1912 	.active_clk_names = (char*[]){
1913 		"iface",
1914 		"bus",
1915 		"mem",
1916 		NULL
1917 	},
1918 	.need_mem_protection = false,
1919 	.has_alt_reset = false,
1920 	.has_spare_reg = false,
1921 	.version = MSS_MSM8916,
1922 };
1923 
1924 static const struct rproc_hexagon_res msm8974_mss = {
1925 	.hexagon_mba_image = "mba.b00",
1926 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1927 		{
1928 			.supply = "mx",
1929 			.uV = 1050000,
1930 		},
1931 		{
1932 			.supply = "cx",
1933 			.uA = 100000,
1934 		},
1935 		{
1936 			.supply = "pll",
1937 			.uA = 100000,
1938 		},
1939 		{}
1940 	},
1941 	.active_supply = (struct qcom_mss_reg_res[]) {
1942 		{
1943 			.supply = "mss",
1944 			.uV = 1050000,
1945 			.uA = 100000,
1946 		},
1947 		{}
1948 	},
1949 	.proxy_clk_names = (char*[]){
1950 		"xo",
1951 		NULL
1952 	},
1953 	.active_clk_names = (char*[]){
1954 		"iface",
1955 		"bus",
1956 		"mem",
1957 		NULL
1958 	},
1959 	.need_mem_protection = false,
1960 	.has_alt_reset = false,
1961 	.has_spare_reg = false,
1962 	.version = MSS_MSM8974,
1963 };
1964 
1965 static const struct of_device_id q6v5_of_match[] = {
1966 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1967 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1968 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1969 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1970 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
1971 	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
1972 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
1973 	{ },
1974 };
1975 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1976 
1977 static struct platform_driver q6v5_driver = {
1978 	.probe = q6v5_probe,
1979 	.remove = q6v5_remove,
1980 	.driver = {
1981 		.name = "qcom-q6v5-mss",
1982 		.of_match_table = q6v5_of_match,
1983 	},
1984 };
1985 module_platform_driver(q6v5_driver);
1986 
1987 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1988 MODULE_LICENSE("GPL v2");
1989