xref: /openbmc/linux/drivers/remoteproc/qcom_q6v5_mss.c (revision 7ac516d39dd35450596a2dbb9dedea2c85eab1ea)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
36 
37 #define MPSS_CRASH_REASON_SMEM		421
38 
39 /* RMB Status Register Values */
40 #define RMB_PBL_SUCCESS			0x1
41 
42 #define RMB_MBA_XPU_UNLOCKED		0x1
43 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
44 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
45 #define RMB_MBA_AUTH_COMPLETE		0x4
46 
47 /* PBL/MBA interface registers */
48 #define RMB_MBA_IMAGE_REG		0x00
49 #define RMB_PBL_STATUS_REG		0x04
50 #define RMB_MBA_COMMAND_REG		0x08
51 #define RMB_MBA_STATUS_REG		0x0C
52 #define RMB_PMI_META_DATA_REG		0x10
53 #define RMB_PMI_CODE_START_REG		0x14
54 #define RMB_PMI_CODE_LENGTH_REG		0x18
55 #define RMB_MBA_MSS_STATUS		0x40
56 #define RMB_MBA_ALT_RESET		0x44
57 
58 #define RMB_CMD_META_DATA_READY		0x1
59 #define RMB_CMD_LOAD_READY		0x2
60 
61 /* QDSP6SS Register Offsets */
62 #define QDSP6SS_RESET_REG		0x014
63 #define QDSP6SS_GFMUX_CTL_REG		0x020
64 #define QDSP6SS_PWR_CTL_REG		0x030
65 #define QDSP6SS_MEM_PWR_CTL		0x0B0
66 #define QDSP6V6SS_MEM_PWR_CTL		0x034
67 #define QDSP6SS_STRAP_ACC		0x110
68 
69 /* AXI Halt Register Offsets */
70 #define AXI_HALTREQ_REG			0x0
71 #define AXI_HALTACK_REG			0x4
72 #define AXI_IDLE_REG			0x8
73 #define AXI_GATING_VALID_OVERRIDE	BIT(0)
74 
75 #define HALT_ACK_TIMEOUT_US		100000
76 
77 /* QDSP6SS_RESET */
78 #define Q6SS_STOP_CORE			BIT(0)
79 #define Q6SS_CORE_ARES			BIT(1)
80 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
81 
82 /* QDSP6SS CBCR */
83 #define Q6SS_CBCR_CLKEN			BIT(0)
84 #define Q6SS_CBCR_CLKOFF		BIT(31)
85 #define Q6SS_CBCR_TIMEOUT_US		200
86 
87 /* QDSP6SS_GFMUX_CTL */
88 #define Q6SS_CLK_ENABLE			BIT(1)
89 
90 /* QDSP6SS_PWR_CTL */
91 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
92 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
93 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
94 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
95 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
96 #define Q6SS_L2DATA_STBY_N		BIT(18)
97 #define Q6SS_SLP_RET_N			BIT(19)
98 #define Q6SS_CLAMP_IO			BIT(20)
99 #define QDSS_BHS_ON			BIT(21)
100 #define QDSS_LDO_BYP			BIT(22)
101 
102 /* QDSP6v56 parameters */
103 #define QDSP6v56_LDO_BYP		BIT(25)
104 #define QDSP6v56_BHS_ON		BIT(24)
105 #define QDSP6v56_CLAMP_WL		BIT(21)
106 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
107 #define QDSP6SS_XO_CBCR		0x0038
108 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
109 
110 /* QDSP6v65 parameters */
111 #define QDSP6SS_CORE_CBCR		0x20
112 #define QDSP6SS_SLEEP                   0x3C
113 #define QDSP6SS_BOOT_CORE_START         0x400
114 #define QDSP6SS_BOOT_CMD                0x404
115 #define BOOT_FSM_TIMEOUT                10000
116 
117 struct reg_info {
118 	struct regulator *reg;
119 	int uV;
120 	int uA;
121 };
122 
123 struct qcom_mss_reg_res {
124 	const char *supply;
125 	int uV;
126 	int uA;
127 };
128 
129 struct rproc_hexagon_res {
130 	const char *hexagon_mba_image;
131 	struct qcom_mss_reg_res *proxy_supply;
132 	struct qcom_mss_reg_res *active_supply;
133 	char **proxy_clk_names;
134 	char **reset_clk_names;
135 	char **active_clk_names;
136 	char **active_pd_names;
137 	char **proxy_pd_names;
138 	int version;
139 	bool need_mem_protection;
140 	bool has_alt_reset;
141 	bool has_spare_reg;
142 };
143 
144 struct q6v5 {
145 	struct device *dev;
146 	struct rproc *rproc;
147 
148 	void __iomem *reg_base;
149 	void __iomem *rmb_base;
150 
151 	struct regmap *halt_map;
152 	struct regmap *conn_map;
153 
154 	u32 halt_q6;
155 	u32 halt_modem;
156 	u32 halt_nc;
157 	u32 conn_box;
158 
159 	struct reset_control *mss_restart;
160 	struct reset_control *pdc_reset;
161 
162 	struct qcom_q6v5 q6v5;
163 
164 	struct clk *active_clks[8];
165 	struct clk *reset_clks[4];
166 	struct clk *proxy_clks[4];
167 	struct device *active_pds[1];
168 	struct device *proxy_pds[3];
169 	int active_clk_count;
170 	int reset_clk_count;
171 	int proxy_clk_count;
172 	int active_pd_count;
173 	int proxy_pd_count;
174 
175 	struct reg_info active_regs[1];
176 	struct reg_info proxy_regs[3];
177 	int active_reg_count;
178 	int proxy_reg_count;
179 
180 	bool running;
181 
182 	bool dump_mba_loaded;
183 	size_t current_dump_size;
184 	size_t total_dump_size;
185 
186 	phys_addr_t mba_phys;
187 	void *mba_region;
188 	size_t mba_size;
189 
190 	phys_addr_t mpss_phys;
191 	phys_addr_t mpss_reloc;
192 	size_t mpss_size;
193 
194 	struct qcom_rproc_glink glink_subdev;
195 	struct qcom_rproc_subdev smd_subdev;
196 	struct qcom_rproc_ssr ssr_subdev;
197 	struct qcom_rproc_ipa_notify ipa_notify_subdev;
198 	struct qcom_sysmon *sysmon;
199 	bool need_mem_protection;
200 	bool has_alt_reset;
201 	bool has_spare_reg;
202 	int mpss_perm;
203 	int mba_perm;
204 	const char *hexagon_mdt_image;
205 	int version;
206 };
207 
208 enum {
209 	MSS_MSM8916,
210 	MSS_MSM8974,
211 	MSS_MSM8996,
212 	MSS_MSM8998,
213 	MSS_SC7180,
214 	MSS_SDM845,
215 };
216 
217 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
218 			       const struct qcom_mss_reg_res *reg_res)
219 {
220 	int rc;
221 	int i;
222 
223 	if (!reg_res)
224 		return 0;
225 
226 	for (i = 0; reg_res[i].supply; i++) {
227 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
228 		if (IS_ERR(regs[i].reg)) {
229 			rc = PTR_ERR(regs[i].reg);
230 			if (rc != -EPROBE_DEFER)
231 				dev_err(dev, "Failed to get %s\n regulator",
232 					reg_res[i].supply);
233 			return rc;
234 		}
235 
236 		regs[i].uV = reg_res[i].uV;
237 		regs[i].uA = reg_res[i].uA;
238 	}
239 
240 	return i;
241 }
242 
243 static int q6v5_regulator_enable(struct q6v5 *qproc,
244 				 struct reg_info *regs, int count)
245 {
246 	int ret;
247 	int i;
248 
249 	for (i = 0; i < count; i++) {
250 		if (regs[i].uV > 0) {
251 			ret = regulator_set_voltage(regs[i].reg,
252 					regs[i].uV, INT_MAX);
253 			if (ret) {
254 				dev_err(qproc->dev,
255 					"Failed to request voltage for %d.\n",
256 						i);
257 				goto err;
258 			}
259 		}
260 
261 		if (regs[i].uA > 0) {
262 			ret = regulator_set_load(regs[i].reg,
263 						 regs[i].uA);
264 			if (ret < 0) {
265 				dev_err(qproc->dev,
266 					"Failed to set regulator mode\n");
267 				goto err;
268 			}
269 		}
270 
271 		ret = regulator_enable(regs[i].reg);
272 		if (ret) {
273 			dev_err(qproc->dev, "Regulator enable failed\n");
274 			goto err;
275 		}
276 	}
277 
278 	return 0;
279 err:
280 	for (; i >= 0; i--) {
281 		if (regs[i].uV > 0)
282 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
283 
284 		if (regs[i].uA > 0)
285 			regulator_set_load(regs[i].reg, 0);
286 
287 		regulator_disable(regs[i].reg);
288 	}
289 
290 	return ret;
291 }
292 
293 static void q6v5_regulator_disable(struct q6v5 *qproc,
294 				   struct reg_info *regs, int count)
295 {
296 	int i;
297 
298 	for (i = 0; i < count; i++) {
299 		if (regs[i].uV > 0)
300 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
301 
302 		if (regs[i].uA > 0)
303 			regulator_set_load(regs[i].reg, 0);
304 
305 		regulator_disable(regs[i].reg);
306 	}
307 }
308 
309 static int q6v5_clk_enable(struct device *dev,
310 			   struct clk **clks, int count)
311 {
312 	int rc;
313 	int i;
314 
315 	for (i = 0; i < count; i++) {
316 		rc = clk_prepare_enable(clks[i]);
317 		if (rc) {
318 			dev_err(dev, "Clock enable failed\n");
319 			goto err;
320 		}
321 	}
322 
323 	return 0;
324 err:
325 	for (i--; i >= 0; i--)
326 		clk_disable_unprepare(clks[i]);
327 
328 	return rc;
329 }
330 
331 static void q6v5_clk_disable(struct device *dev,
332 			     struct clk **clks, int count)
333 {
334 	int i;
335 
336 	for (i = 0; i < count; i++)
337 		clk_disable_unprepare(clks[i]);
338 }
339 
340 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
341 			   size_t pd_count)
342 {
343 	int ret;
344 	int i;
345 
346 	for (i = 0; i < pd_count; i++) {
347 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
348 		ret = pm_runtime_get_sync(pds[i]);
349 		if (ret < 0)
350 			goto unroll_pd_votes;
351 	}
352 
353 	return 0;
354 
355 unroll_pd_votes:
356 	for (i--; i >= 0; i--) {
357 		dev_pm_genpd_set_performance_state(pds[i], 0);
358 		pm_runtime_put(pds[i]);
359 	}
360 
361 	return ret;
362 }
363 
364 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
365 			     size_t pd_count)
366 {
367 	int i;
368 
369 	for (i = 0; i < pd_count; i++) {
370 		dev_pm_genpd_set_performance_state(pds[i], 0);
371 		pm_runtime_put(pds[i]);
372 	}
373 }
374 
375 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
376 				   bool local, bool remote, phys_addr_t addr,
377 				   size_t size)
378 {
379 	struct qcom_scm_vmperm next[2];
380 	int perms = 0;
381 
382 	if (!qproc->need_mem_protection)
383 		return 0;
384 
385 	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
386 	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
387 		return 0;
388 
389 	if (local) {
390 		next[perms].vmid = QCOM_SCM_VMID_HLOS;
391 		next[perms].perm = QCOM_SCM_PERM_RWX;
392 		perms++;
393 	}
394 
395 	if (remote) {
396 		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
397 		next[perms].perm = QCOM_SCM_PERM_RW;
398 		perms++;
399 	}
400 
401 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
402 				   current_perm, next, perms);
403 }
404 
405 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
406 {
407 	struct q6v5 *qproc = rproc->priv;
408 
409 	memcpy(qproc->mba_region, fw->data, fw->size);
410 
411 	return 0;
412 }
413 
414 static int q6v5_reset_assert(struct q6v5 *qproc)
415 {
416 	int ret;
417 
418 	if (qproc->has_alt_reset) {
419 		reset_control_assert(qproc->pdc_reset);
420 		ret = reset_control_reset(qproc->mss_restart);
421 		reset_control_deassert(qproc->pdc_reset);
422 	} else if (qproc->has_spare_reg) {
423 		/*
424 		 * When the AXI pipeline is being reset with the Q6 modem partly
425 		 * operational there is possibility of AXI valid signal to
426 		 * glitch, leading to spurious transactions and Q6 hangs. A work
427 		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
428 		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
429 		 * is withdrawn post MSS assert followed by a MSS deassert,
430 		 * while holding the PDC reset.
431 		 */
432 		reset_control_assert(qproc->pdc_reset);
433 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
434 				   AXI_GATING_VALID_OVERRIDE, 1);
435 		reset_control_assert(qproc->mss_restart);
436 		reset_control_deassert(qproc->pdc_reset);
437 		regmap_update_bits(qproc->conn_map, qproc->conn_box,
438 				   AXI_GATING_VALID_OVERRIDE, 0);
439 		ret = reset_control_deassert(qproc->mss_restart);
440 	} else {
441 		ret = reset_control_assert(qproc->mss_restart);
442 	}
443 
444 	return ret;
445 }
446 
447 static int q6v5_reset_deassert(struct q6v5 *qproc)
448 {
449 	int ret;
450 
451 	if (qproc->has_alt_reset) {
452 		reset_control_assert(qproc->pdc_reset);
453 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
454 		ret = reset_control_reset(qproc->mss_restart);
455 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
456 		reset_control_deassert(qproc->pdc_reset);
457 	} else if (qproc->has_spare_reg) {
458 		ret = reset_control_reset(qproc->mss_restart);
459 	} else {
460 		ret = reset_control_deassert(qproc->mss_restart);
461 	}
462 
463 	return ret;
464 }
465 
466 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
467 {
468 	unsigned long timeout;
469 	s32 val;
470 
471 	timeout = jiffies + msecs_to_jiffies(ms);
472 	for (;;) {
473 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
474 		if (val)
475 			break;
476 
477 		if (time_after(jiffies, timeout))
478 			return -ETIMEDOUT;
479 
480 		msleep(1);
481 	}
482 
483 	return val;
484 }
485 
486 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
487 {
488 
489 	unsigned long timeout;
490 	s32 val;
491 
492 	timeout = jiffies + msecs_to_jiffies(ms);
493 	for (;;) {
494 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
495 		if (val < 0)
496 			break;
497 
498 		if (!status && val)
499 			break;
500 		else if (status && val == status)
501 			break;
502 
503 		if (time_after(jiffies, timeout))
504 			return -ETIMEDOUT;
505 
506 		msleep(1);
507 	}
508 
509 	return val;
510 }
511 
512 static int q6v5proc_reset(struct q6v5 *qproc)
513 {
514 	u32 val;
515 	int ret;
516 	int i;
517 
518 	if (qproc->version == MSS_SDM845) {
519 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
520 		val |= Q6SS_CBCR_CLKEN;
521 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
522 
523 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
524 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
525 					 Q6SS_CBCR_TIMEOUT_US);
526 		if (ret) {
527 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
528 			return -ETIMEDOUT;
529 		}
530 
531 		/* De-assert QDSP6 stop core */
532 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
533 		/* Trigger boot FSM */
534 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
535 
536 		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
537 				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
538 		if (ret) {
539 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
540 			/* Reset the modem so that boot FSM is in reset state */
541 			q6v5_reset_deassert(qproc);
542 			return ret;
543 		}
544 
545 		goto pbl_wait;
546 	} else if (qproc->version == MSS_SC7180) {
547 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
548 		val |= Q6SS_CBCR_CLKEN;
549 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
550 
551 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
552 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
553 					 Q6SS_CBCR_TIMEOUT_US);
554 		if (ret) {
555 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
556 			return -ETIMEDOUT;
557 		}
558 
559 		/* Turn on the XO clock needed for PLL setup */
560 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
561 		val |= Q6SS_CBCR_CLKEN;
562 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
563 
564 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
565 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
566 					 Q6SS_CBCR_TIMEOUT_US);
567 		if (ret) {
568 			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
569 			return -ETIMEDOUT;
570 		}
571 
572 		/* Configure Q6 core CBCR to auto-enable after reset sequence */
573 		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
574 		val |= Q6SS_CBCR_CLKEN;
575 		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
576 
577 		/* De-assert the Q6 stop core signal */
578 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
579 
580 		/* Wait for 10 us for any staggering logic to settle */
581 		usleep_range(10, 20);
582 
583 		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
584 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
585 
586 		/* Poll the MSS_STATUS for FSM completion */
587 		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
588 					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
589 		if (ret) {
590 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
591 			/* Reset the modem so that boot FSM is in reset state */
592 			q6v5_reset_deassert(qproc);
593 			return ret;
594 		}
595 		goto pbl_wait;
596 	} else if (qproc->version == MSS_MSM8996 ||
597 		   qproc->version == MSS_MSM8998) {
598 		int mem_pwr_ctl;
599 
600 		/* Override the ACC value if required */
601 		writel(QDSP6SS_ACC_OVERRIDE_VAL,
602 		       qproc->reg_base + QDSP6SS_STRAP_ACC);
603 
604 		/* Assert resets, stop core */
605 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
606 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
607 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
608 
609 		/* BHS require xo cbcr to be enabled */
610 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
611 		val |= Q6SS_CBCR_CLKEN;
612 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
613 
614 		/* Read CLKOFF bit to go low indicating CLK is enabled */
615 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
616 					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
617 					 Q6SS_CBCR_TIMEOUT_US);
618 		if (ret) {
619 			dev_err(qproc->dev,
620 				"xo cbcr enabling timed out (rc:%d)\n", ret);
621 			return ret;
622 		}
623 		/* Enable power block headswitch and wait for it to stabilize */
624 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
625 		val |= QDSP6v56_BHS_ON;
626 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
627 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
628 		udelay(1);
629 
630 		/* Put LDO in bypass mode */
631 		val |= QDSP6v56_LDO_BYP;
632 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
633 
634 		/* Deassert QDSP6 compiler memory clamp */
635 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
636 		val &= ~QDSP6v56_CLAMP_QMC_MEM;
637 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
638 
639 		/* Deassert memory peripheral sleep and L2 memory standby */
640 		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
641 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
642 
643 		/* Turn on L1, L2, ETB and JU memories 1 at a time */
644 		if (qproc->version == MSS_MSM8996) {
645 			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
646 			i = 19;
647 		} else {
648 			/* MSS_MSM8998 */
649 			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
650 			i = 28;
651 		}
652 		val = readl(qproc->reg_base + mem_pwr_ctl);
653 		for (; i >= 0; i--) {
654 			val |= BIT(i);
655 			writel(val, qproc->reg_base + mem_pwr_ctl);
656 			/*
657 			 * Read back value to ensure the write is done then
658 			 * wait for 1us for both memory peripheral and data
659 			 * array to turn on.
660 			 */
661 			val |= readl(qproc->reg_base + mem_pwr_ctl);
662 			udelay(1);
663 		}
664 		/* Remove word line clamp */
665 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
666 		val &= ~QDSP6v56_CLAMP_WL;
667 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
668 	} else {
669 		/* Assert resets, stop core */
670 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
671 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
672 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
673 
674 		/* Enable power block headswitch and wait for it to stabilize */
675 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
676 		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
677 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
678 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
679 		udelay(1);
680 		/*
681 		 * Turn on memories. L2 banks should be done individually
682 		 * to minimize inrush current.
683 		 */
684 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
685 		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
686 			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
687 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
688 		val |= Q6SS_L2DATA_SLP_NRET_N_2;
689 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
690 		val |= Q6SS_L2DATA_SLP_NRET_N_1;
691 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
692 		val |= Q6SS_L2DATA_SLP_NRET_N_0;
693 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
694 	}
695 	/* Remove IO clamp */
696 	val &= ~Q6SS_CLAMP_IO;
697 	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
698 
699 	/* Bring core out of reset */
700 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
701 	val &= ~Q6SS_CORE_ARES;
702 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
703 
704 	/* Turn on core clock */
705 	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
706 	val |= Q6SS_CLK_ENABLE;
707 	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
708 
709 	/* Start core execution */
710 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
711 	val &= ~Q6SS_STOP_CORE;
712 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
713 
714 pbl_wait:
715 	/* Wait for PBL status */
716 	ret = q6v5_rmb_pbl_wait(qproc, 1000);
717 	if (ret == -ETIMEDOUT) {
718 		dev_err(qproc->dev, "PBL boot timed out\n");
719 	} else if (ret != RMB_PBL_SUCCESS) {
720 		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
721 		ret = -EINVAL;
722 	} else {
723 		ret = 0;
724 	}
725 
726 	return ret;
727 }
728 
729 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
730 				   struct regmap *halt_map,
731 				   u32 offset)
732 {
733 	unsigned int val;
734 	int ret;
735 
736 	/* Check if we're already idle */
737 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
738 	if (!ret && val)
739 		return;
740 
741 	/* Assert halt request */
742 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
743 
744 	/* Wait for halt */
745 	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
746 				 val, 1000, HALT_ACK_TIMEOUT_US);
747 
748 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
749 	if (ret || !val)
750 		dev_err(qproc->dev, "port failed halt\n");
751 
752 	/* Clear halt request (port will remain halted until reset) */
753 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
754 }
755 
756 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
757 {
758 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
759 	dma_addr_t phys;
760 	void *metadata;
761 	int mdata_perm;
762 	int xferop_ret;
763 	size_t size;
764 	void *ptr;
765 	int ret;
766 
767 	metadata = qcom_mdt_read_metadata(fw, &size);
768 	if (IS_ERR(metadata))
769 		return PTR_ERR(metadata);
770 
771 	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
772 	if (!ptr) {
773 		kfree(metadata);
774 		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
775 		return -ENOMEM;
776 	}
777 
778 	memcpy(ptr, metadata, size);
779 
780 	/* Hypervisor mapping to access metadata by modem */
781 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
782 	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
783 				      phys, size);
784 	if (ret) {
785 		dev_err(qproc->dev,
786 			"assigning Q6 access to metadata failed: %d\n", ret);
787 		ret = -EAGAIN;
788 		goto free_dma_attrs;
789 	}
790 
791 	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
792 	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
793 
794 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
795 	if (ret == -ETIMEDOUT)
796 		dev_err(qproc->dev, "MPSS header authentication timed out\n");
797 	else if (ret < 0)
798 		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
799 
800 	/* Metadata authentication done, remove modem access */
801 	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
802 					     phys, size);
803 	if (xferop_ret)
804 		dev_warn(qproc->dev,
805 			 "mdt buffer not reclaimed system may become unstable\n");
806 
807 free_dma_attrs:
808 	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
809 	kfree(metadata);
810 
811 	return ret < 0 ? ret : 0;
812 }
813 
814 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
815 {
816 	if (phdr->p_type != PT_LOAD)
817 		return false;
818 
819 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
820 		return false;
821 
822 	if (!phdr->p_memsz)
823 		return false;
824 
825 	return true;
826 }
827 
828 static int q6v5_mba_load(struct q6v5 *qproc)
829 {
830 	int ret;
831 	int xfermemop_ret;
832 
833 	qcom_q6v5_prepare(&qproc->q6v5);
834 
835 	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
836 	if (ret < 0) {
837 		dev_err(qproc->dev, "failed to enable active power domains\n");
838 		goto disable_irqs;
839 	}
840 
841 	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
842 	if (ret < 0) {
843 		dev_err(qproc->dev, "failed to enable proxy power domains\n");
844 		goto disable_active_pds;
845 	}
846 
847 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
848 				    qproc->proxy_reg_count);
849 	if (ret) {
850 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
851 		goto disable_proxy_pds;
852 	}
853 
854 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
855 			      qproc->proxy_clk_count);
856 	if (ret) {
857 		dev_err(qproc->dev, "failed to enable proxy clocks\n");
858 		goto disable_proxy_reg;
859 	}
860 
861 	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
862 				    qproc->active_reg_count);
863 	if (ret) {
864 		dev_err(qproc->dev, "failed to enable supplies\n");
865 		goto disable_proxy_clk;
866 	}
867 
868 	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
869 			      qproc->reset_clk_count);
870 	if (ret) {
871 		dev_err(qproc->dev, "failed to enable reset clocks\n");
872 		goto disable_vdd;
873 	}
874 
875 	ret = q6v5_reset_deassert(qproc);
876 	if (ret) {
877 		dev_err(qproc->dev, "failed to deassert mss restart\n");
878 		goto disable_reset_clks;
879 	}
880 
881 	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
882 			      qproc->active_clk_count);
883 	if (ret) {
884 		dev_err(qproc->dev, "failed to enable clocks\n");
885 		goto assert_reset;
886 	}
887 
888 	/* Assign MBA image access in DDR to q6 */
889 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
890 				      qproc->mba_phys, qproc->mba_size);
891 	if (ret) {
892 		dev_err(qproc->dev,
893 			"assigning Q6 access to mba memory failed: %d\n", ret);
894 		goto disable_active_clks;
895 	}
896 
897 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
898 
899 	ret = q6v5proc_reset(qproc);
900 	if (ret)
901 		goto reclaim_mba;
902 
903 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
904 	if (ret == -ETIMEDOUT) {
905 		dev_err(qproc->dev, "MBA boot timed out\n");
906 		goto halt_axi_ports;
907 	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
908 		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
909 		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
910 		ret = -EINVAL;
911 		goto halt_axi_ports;
912 	}
913 
914 	qproc->dump_mba_loaded = true;
915 	return 0;
916 
917 halt_axi_ports:
918 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
919 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
920 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
921 
922 reclaim_mba:
923 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
924 						false, qproc->mba_phys,
925 						qproc->mba_size);
926 	if (xfermemop_ret) {
927 		dev_err(qproc->dev,
928 			"Failed to reclaim mba buffer, system may become unstable\n");
929 	}
930 
931 disable_active_clks:
932 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
933 			 qproc->active_clk_count);
934 assert_reset:
935 	q6v5_reset_assert(qproc);
936 disable_reset_clks:
937 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
938 			 qproc->reset_clk_count);
939 disable_vdd:
940 	q6v5_regulator_disable(qproc, qproc->active_regs,
941 			       qproc->active_reg_count);
942 disable_proxy_clk:
943 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
944 			 qproc->proxy_clk_count);
945 disable_proxy_reg:
946 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
947 			       qproc->proxy_reg_count);
948 disable_proxy_pds:
949 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
950 disable_active_pds:
951 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
952 disable_irqs:
953 	qcom_q6v5_unprepare(&qproc->q6v5);
954 
955 	return ret;
956 }
957 
958 static void q6v5_mba_reclaim(struct q6v5 *qproc)
959 {
960 	int ret;
961 	u32 val;
962 
963 	qproc->dump_mba_loaded = false;
964 
965 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
966 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
967 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
968 	if (qproc->version == MSS_MSM8996) {
969 		/*
970 		 * To avoid high MX current during LPASS/MSS restart.
971 		 */
972 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
973 		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
974 			QDSP6v56_CLAMP_QMC_MEM;
975 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
976 	}
977 
978 	q6v5_reset_assert(qproc);
979 
980 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
981 			 qproc->reset_clk_count);
982 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
983 			 qproc->active_clk_count);
984 	q6v5_regulator_disable(qproc, qproc->active_regs,
985 			       qproc->active_reg_count);
986 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
987 
988 	/* In case of failure or coredump scenario where reclaiming MBA memory
989 	 * could not happen reclaim it here.
990 	 */
991 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
992 				      qproc->mba_phys,
993 				      qproc->mba_size);
994 	WARN_ON(ret);
995 
996 	ret = qcom_q6v5_unprepare(&qproc->q6v5);
997 	if (ret) {
998 		q6v5_pds_disable(qproc, qproc->proxy_pds,
999 				 qproc->proxy_pd_count);
1000 		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1001 				 qproc->proxy_clk_count);
1002 		q6v5_regulator_disable(qproc, qproc->proxy_regs,
1003 				       qproc->proxy_reg_count);
1004 	}
1005 }
1006 
1007 static int q6v5_reload_mba(struct rproc *rproc)
1008 {
1009 	struct q6v5 *qproc = rproc->priv;
1010 	const struct firmware *fw;
1011 	int ret;
1012 
1013 	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
1014 	if (ret < 0)
1015 		return ret;
1016 
1017 	q6v5_load(rproc, fw);
1018 	ret = q6v5_mba_load(qproc);
1019 	release_firmware(fw);
1020 
1021 	return ret;
1022 }
1023 
1024 static int q6v5_mpss_load(struct q6v5 *qproc)
1025 {
1026 	const struct elf32_phdr *phdrs;
1027 	const struct elf32_phdr *phdr;
1028 	const struct firmware *seg_fw;
1029 	const struct firmware *fw;
1030 	struct elf32_hdr *ehdr;
1031 	phys_addr_t mpss_reloc;
1032 	phys_addr_t boot_addr;
1033 	phys_addr_t min_addr = PHYS_ADDR_MAX;
1034 	phys_addr_t max_addr = 0;
1035 	u32 code_length;
1036 	bool relocate = false;
1037 	char *fw_name;
1038 	size_t fw_name_len;
1039 	ssize_t offset;
1040 	size_t size = 0;
1041 	void *ptr;
1042 	int ret;
1043 	int i;
1044 
1045 	fw_name_len = strlen(qproc->hexagon_mdt_image);
1046 	if (fw_name_len <= 4)
1047 		return -EINVAL;
1048 
1049 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
1050 	if (!fw_name)
1051 		return -ENOMEM;
1052 
1053 	ret = request_firmware(&fw, fw_name, qproc->dev);
1054 	if (ret < 0) {
1055 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
1056 		goto out;
1057 	}
1058 
1059 	/* Initialize the RMB validator */
1060 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1061 
1062 	ret = q6v5_mpss_init_image(qproc, fw);
1063 	if (ret)
1064 		goto release_firmware;
1065 
1066 	ehdr = (struct elf32_hdr *)fw->data;
1067 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1068 
1069 	for (i = 0; i < ehdr->e_phnum; i++) {
1070 		phdr = &phdrs[i];
1071 
1072 		if (!q6v5_phdr_valid(phdr))
1073 			continue;
1074 
1075 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
1076 			relocate = true;
1077 
1078 		if (phdr->p_paddr < min_addr)
1079 			min_addr = phdr->p_paddr;
1080 
1081 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
1082 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
1083 	}
1084 
1085 	/**
1086 	 * In case of a modem subsystem restart on secure devices, the modem
1087 	 * memory can be reclaimed only after MBA is loaded. For modem cold
1088 	 * boot this will be a nop
1089 	 */
1090 	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
1091 				qproc->mpss_phys, qproc->mpss_size);
1092 
1093 	/* Share ownership between Linux and MSS, during segment loading */
1094 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
1095 				      qproc->mpss_phys, qproc->mpss_size);
1096 	if (ret) {
1097 		dev_err(qproc->dev,
1098 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1099 		ret = -EAGAIN;
1100 		goto release_firmware;
1101 	}
1102 
1103 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
1104 	qproc->mpss_reloc = mpss_reloc;
1105 	/* Load firmware segments */
1106 	for (i = 0; i < ehdr->e_phnum; i++) {
1107 		phdr = &phdrs[i];
1108 
1109 		if (!q6v5_phdr_valid(phdr))
1110 			continue;
1111 
1112 		offset = phdr->p_paddr - mpss_reloc;
1113 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
1114 			dev_err(qproc->dev, "segment outside memory range\n");
1115 			ret = -EINVAL;
1116 			goto release_firmware;
1117 		}
1118 
1119 		ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
1120 		if (!ptr) {
1121 			dev_err(qproc->dev,
1122 				"unable to map memory region: %pa+%zx-%x\n",
1123 				&qproc->mpss_phys, offset, phdr->p_memsz);
1124 			goto release_firmware;
1125 		}
1126 
1127 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
1128 			/* Firmware is large enough to be non-split */
1129 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
1130 				dev_err(qproc->dev,
1131 					"failed to load segment %d from truncated file %s\n",
1132 					i, fw_name);
1133 				ret = -EINVAL;
1134 				iounmap(ptr);
1135 				goto release_firmware;
1136 			}
1137 
1138 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1139 		} else if (phdr->p_filesz) {
1140 			/* Replace "xxx.xxx" with "xxx.bxx" */
1141 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1142 			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1143 			if (ret) {
1144 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1145 				iounmap(ptr);
1146 				goto release_firmware;
1147 			}
1148 
1149 			memcpy(ptr, seg_fw->data, seg_fw->size);
1150 
1151 			release_firmware(seg_fw);
1152 		}
1153 
1154 		if (phdr->p_memsz > phdr->p_filesz) {
1155 			memset(ptr + phdr->p_filesz, 0,
1156 			       phdr->p_memsz - phdr->p_filesz);
1157 		}
1158 		iounmap(ptr);
1159 		size += phdr->p_memsz;
1160 
1161 		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1162 		if (!code_length) {
1163 			boot_addr = relocate ? qproc->mpss_phys : min_addr;
1164 			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1165 			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1166 		}
1167 		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1168 
1169 		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
1170 		if (ret < 0) {
1171 			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
1172 				ret);
1173 			goto release_firmware;
1174 		}
1175 	}
1176 
1177 	/* Transfer ownership of modem ddr region to q6 */
1178 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
1179 				      qproc->mpss_phys, qproc->mpss_size);
1180 	if (ret) {
1181 		dev_err(qproc->dev,
1182 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1183 		ret = -EAGAIN;
1184 		goto release_firmware;
1185 	}
1186 
1187 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1188 	if (ret == -ETIMEDOUT)
1189 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1190 	else if (ret < 0)
1191 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1192 
1193 	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
1194 
1195 release_firmware:
1196 	release_firmware(fw);
1197 out:
1198 	kfree(fw_name);
1199 
1200 	return ret < 0 ? ret : 0;
1201 }
1202 
/*
 * Copy one registered coredump segment out of modem memory into @dest.
 *
 * Invoked once per segment by the remoteproc coredump core.  If the MBA
 * is not loaded (modem crashed), it is first reloaded and ownership of
 * the MPSS region is returned to Linux so the memory can be read.  Once
 * the final segment has been copied, ownership is handed back to Q6 and
 * the MBA is reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);

	if (ptr) {
		memcpy(dest, ptr, segment->size);
		iounmap(ptr);
	} else {
		/* Segment unreadable: pad the dump rather than failing it */
		memset(dest, 0xff, segment->size);
	}

	qproc->current_dump_size += segment->size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
1248 
1249 static int q6v5_start(struct rproc *rproc)
1250 {
1251 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1252 	int xfermemop_ret;
1253 	int ret;
1254 
1255 	ret = q6v5_mba_load(qproc);
1256 	if (ret)
1257 		return ret;
1258 
1259 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
1260 
1261 	ret = q6v5_mpss_load(qproc);
1262 	if (ret)
1263 		goto reclaim_mpss;
1264 
1265 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1266 	if (ret == -ETIMEDOUT) {
1267 		dev_err(qproc->dev, "start timed out\n");
1268 		goto reclaim_mpss;
1269 	}
1270 
1271 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
1272 						false, qproc->mba_phys,
1273 						qproc->mba_size);
1274 	if (xfermemop_ret)
1275 		dev_err(qproc->dev,
1276 			"Failed to reclaim mba buffer system may become unstable\n");
1277 
1278 	/* Reset Dump Segment Mask */
1279 	qproc->current_dump_size = 0;
1280 	qproc->running = true;
1281 
1282 	return 0;
1283 
1284 reclaim_mpss:
1285 	q6v5_mba_reclaim(qproc);
1286 
1287 	return ret;
1288 }
1289 
1290 static int q6v5_stop(struct rproc *rproc)
1291 {
1292 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1293 	int ret;
1294 
1295 	qproc->running = false;
1296 
1297 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1298 	if (ret == -ETIMEDOUT)
1299 		dev_err(qproc->dev, "timed out on wait\n");
1300 
1301 	q6v5_mba_reclaim(qproc);
1302 
1303 	return 0;
1304 }
1305 
1306 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1307 					    const struct firmware *mba_fw)
1308 {
1309 	const struct firmware *fw;
1310 	const struct elf32_phdr *phdrs;
1311 	const struct elf32_phdr *phdr;
1312 	const struct elf32_hdr *ehdr;
1313 	struct q6v5 *qproc = rproc->priv;
1314 	unsigned long i;
1315 	int ret;
1316 
1317 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1318 	if (ret < 0) {
1319 		dev_err(qproc->dev, "unable to load %s\n",
1320 			qproc->hexagon_mdt_image);
1321 		return ret;
1322 	}
1323 
1324 	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
1325 
1326 	ehdr = (struct elf32_hdr *)fw->data;
1327 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1328 	qproc->total_dump_size = 0;
1329 
1330 	for (i = 0; i < ehdr->e_phnum; i++) {
1331 		phdr = &phdrs[i];
1332 
1333 		if (!q6v5_phdr_valid(phdr))
1334 			continue;
1335 
1336 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1337 							phdr->p_memsz,
1338 							qcom_q6v5_dump_segment,
1339 							NULL);
1340 		if (ret)
1341 			break;
1342 
1343 		qproc->total_dump_size += phdr->p_memsz;
1344 	}
1345 
1346 	release_firmware(fw);
1347 	return ret;
1348 }
1349 
/* remoteproc operations: MBA-mediated boot/shutdown of the modem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,	/* builds coredump segment list */
	.load = q6v5_load,
};
1356 
/*
 * Called by the common q6v5 helper once the modem signals the proxy-vote
 * handover: resources needed only to carry the modem through early boot
 * can now be released.  Order: clocks, regulators, then power domains.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
1367 
1368 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1369 {
1370 	struct of_phandle_args args;
1371 	struct resource *res;
1372 	int ret;
1373 
1374 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1375 	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1376 	if (IS_ERR(qproc->reg_base))
1377 		return PTR_ERR(qproc->reg_base);
1378 
1379 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1380 	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1381 	if (IS_ERR(qproc->rmb_base))
1382 		return PTR_ERR(qproc->rmb_base);
1383 
1384 	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1385 					       "qcom,halt-regs", 3, 0, &args);
1386 	if (ret < 0) {
1387 		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1388 		return -EINVAL;
1389 	}
1390 
1391 	qproc->halt_map = syscon_node_to_regmap(args.np);
1392 	of_node_put(args.np);
1393 	if (IS_ERR(qproc->halt_map))
1394 		return PTR_ERR(qproc->halt_map);
1395 
1396 	qproc->halt_q6 = args.args[0];
1397 	qproc->halt_modem = args.args[1];
1398 	qproc->halt_nc = args.args[2];
1399 
1400 	if (qproc->has_spare_reg) {
1401 		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1402 						       "qcom,spare-regs",
1403 						       1, 0, &args);
1404 		if (ret < 0) {
1405 			dev_err(&pdev->dev, "failed to parse spare-regs\n");
1406 			return -EINVAL;
1407 		}
1408 
1409 		qproc->conn_map = syscon_node_to_regmap(args.np);
1410 		of_node_put(args.np);
1411 		if (IS_ERR(qproc->conn_map))
1412 			return PTR_ERR(qproc->conn_map);
1413 
1414 		qproc->conn_box = args.args[0];
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1421 		char **clk_names)
1422 {
1423 	int i;
1424 
1425 	if (!clk_names)
1426 		return 0;
1427 
1428 	for (i = 0; clk_names[i]; i++) {
1429 		clks[i] = devm_clk_get(dev, clk_names[i]);
1430 		if (IS_ERR(clks[i])) {
1431 			int rc = PTR_ERR(clks[i]);
1432 
1433 			if (rc != -EPROBE_DEFER)
1434 				dev_err(dev, "Failed to get %s clock\n",
1435 					clk_names[i]);
1436 			return rc;
1437 		}
1438 	}
1439 
1440 	return i;
1441 }
1442 
1443 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1444 			   char **pd_names)
1445 {
1446 	size_t num_pds = 0;
1447 	int ret;
1448 	int i;
1449 
1450 	if (!pd_names)
1451 		return 0;
1452 
1453 	while (pd_names[num_pds])
1454 		num_pds++;
1455 
1456 	for (i = 0; i < num_pds; i++) {
1457 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1458 		if (IS_ERR_OR_NULL(devs[i])) {
1459 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1460 			goto unroll_attach;
1461 		}
1462 	}
1463 
1464 	return num_pds;
1465 
1466 unroll_attach:
1467 	for (i--; i >= 0; i--)
1468 		dev_pm_domain_detach(devs[i], false);
1469 
1470 	return ret;
1471 }
1472 
1473 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1474 			    size_t pd_count)
1475 {
1476 	int i;
1477 
1478 	for (i = 0; i < pd_count; i++)
1479 		dev_pm_domain_detach(pds[i], false);
1480 }
1481 
1482 static int q6v5_init_reset(struct q6v5 *qproc)
1483 {
1484 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1485 							      "mss_restart");
1486 	if (IS_ERR(qproc->mss_restart)) {
1487 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1488 		return PTR_ERR(qproc->mss_restart);
1489 	}
1490 
1491 	if (qproc->has_alt_reset || qproc->has_spare_reg) {
1492 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1493 								    "pdc_reset");
1494 		if (IS_ERR(qproc->pdc_reset)) {
1495 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1496 			return PTR_ERR(qproc->pdc_reset);
1497 		}
1498 	}
1499 
1500 	return 0;
1501 }
1502 
1503 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1504 {
1505 	struct device_node *child;
1506 	struct device_node *node;
1507 	struct resource r;
1508 	int ret;
1509 
1510 	/*
1511 	 * In the absence of mba/mpss sub-child, extract the mba and mpss
1512 	 * reserved memory regions from device's memory-region property.
1513 	 */
1514 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1515 	if (!child)
1516 		node = of_parse_phandle(qproc->dev->of_node,
1517 					"memory-region", 0);
1518 	else
1519 		node = of_parse_phandle(child, "memory-region", 0);
1520 
1521 	ret = of_address_to_resource(node, 0, &r);
1522 	if (ret) {
1523 		dev_err(qproc->dev, "unable to resolve mba region\n");
1524 		return ret;
1525 	}
1526 	of_node_put(node);
1527 
1528 	qproc->mba_phys = r.start;
1529 	qproc->mba_size = resource_size(&r);
1530 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1531 	if (!qproc->mba_region) {
1532 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1533 			&r.start, qproc->mba_size);
1534 		return -EBUSY;
1535 	}
1536 
1537 	if (!child) {
1538 		node = of_parse_phandle(qproc->dev->of_node,
1539 					"memory-region", 1);
1540 	} else {
1541 		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1542 		node = of_parse_phandle(child, "memory-region", 0);
1543 	}
1544 
1545 	ret = of_address_to_resource(node, 0, &r);
1546 	if (ret) {
1547 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1548 		return ret;
1549 	}
1550 	of_node_put(node);
1551 
1552 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1553 	qproc->mpss_size = resource_size(&r);
1554 
1555 	return 0;
1556 }
1557 
1558 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
1559 
1560 /* Register IPA notification function */
1561 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
1562 			     void *data)
1563 {
1564 	struct qcom_rproc_ipa_notify *ipa_notify;
1565 	struct q6v5 *qproc = rproc->priv;
1566 
1567 	if (!notify)
1568 		return -EINVAL;
1569 
1570 	ipa_notify = &qproc->ipa_notify_subdev;
1571 	if (ipa_notify->notify)
1572 		return -EBUSY;
1573 
1574 	ipa_notify->notify = notify;
1575 	ipa_notify->data = data;
1576 
1577 	return 0;
1578 }
1579 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
1580 
1581 /* Deregister IPA notification function */
1582 void qcom_deregister_ipa_notify(struct rproc *rproc)
1583 {
1584 	struct q6v5 *qproc = rproc->priv;
1585 
1586 	qproc->ipa_notify_subdev.notify = NULL;
1587 }
1588 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
1589 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
1590 
/*
 * Probe: allocate the rproc, parse DT resources (registers, memory
 * regions, clocks, regulators, power domains, resets), register the
 * Qualcomm subdevices and finally add the rproc.  Error paths unwind in
 * strict reverse order of acquisition via the labels at the bottom.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* Secure devices need SCM for memory-ownership transfers */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* "firmware-name"[0] overrides the per-SoC default MBA image */
	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	/* "firmware-name"[1] overrides the default modem metadata image */
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	/* Clock/regulator/PD init helpers return the acquired count */
	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}
1745 
/*
 * Remove: tear down in the inverse order of q6v5_probe() — unregister
 * the rproc, remove subdevices, detach power domains, free the rproc.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}
1766 
/* Per-SoC resource table: SC7180 modem subsystem */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};
1799 
/* Per-SoC resource table: SDM845 modem subsystem */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"prng",
			NULL
	},
	.reset_clk_names = (char*[]){
			"iface",
			"snoc_axi",
			NULL
	},
	.active_clk_names = (char*[]){
			"bus",
			"mem",
			"gpll0_mss",
			"mnoc_axi",
			NULL
	},
	.active_pd_names = (char*[]){
			"load_state",
			NULL
	},
	.proxy_pd_names = (char*[]){
			"cx",
			"mx",
			"mss",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};
1834 
/* Per-SoC resource table: MSM8998 modem subsystem */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"qdss",
			"mem",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"gpll0_mss",
			"mnoc_axi",
			"snoc_axi",
			NULL
	},
	.proxy_pd_names = (char*[]){
			"cx",
			"mx",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};
1861 
/* Per-SoC resource table: MSM8996 modem subsystem */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			"qdss",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss",
			"snoc_axi",
			"mnoc_axi",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};
1891 
/* Per-SoC resource table: MSM8916 modem subsystem (no mem protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};
1924 
/* Per-SoC resource table: MSM8974 modem subsystem (split .b00 MBA image) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};
1965 
1966 static const struct of_device_id q6v5_of_match[] = {
1967 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1968 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1969 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1970 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1971 	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
1972 	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
1973 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
1974 	{ },
1975 };
1976 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1977 
/* Platform driver glue and module boilerplate */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");
1990