// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>

#include "arm-smmu.h"
#include "arm-smmu-qcom.h"

#define QCOM_DUMMY_VAL -1

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

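/*
 * Trigger a TLB sync by writing a dummy value to the sync register, then
 * poll the status register: spin TLB_SPIN_COUNT times per step, backing
 * off with exponentially growing udelay()s up to TLB_LOOP_TIMEOUT. If the
 * sync never completes, qcom_smmu_tlb_sync_debug() reports whatever
 * implementation-defined status registers the platform provides.
 */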
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
			       int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}

	qcom_smmu_tlb_sync_debug(smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
					 u32 reg)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	if (qsmmu->stall_enabled & BIT(idx))
		reg |= ARM_SMMU_SCTLR_CFCFG;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

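/*
 * Snapshot the fault state of a context bank for the GPU driver, which
 * reaches this through the adreno_smmu_priv interface wired up in
 * qcom_adreno_smmu_init_context() below.
 */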
static void qcom_adreno_smmu_get_fault_info(const void *cookie,
					    struct adreno_smmu_fault_info *info)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
	info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
	info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
	info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
	info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
	info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);

	if (enabled)
		qsmmu->stall_enabled |= BIT(cfg->cbndx);
	else
		qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
}

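/*
 * Kick a context bank that is stalled on a fault (see the CFCFG handling
 * above): writing ARM_SMMU_CB_RESUME with the TERMINATE bit set aborts
 * the stalled transaction, while a plain write retries it.
 */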
static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 reg = 0;

	if (terminate)
		reg |= ARM_SMMU_RESUME_TERMINATE;

	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

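/*
 * Usage sketch (an assumption about the caller, not part of this file):
 * the GPU driver is expected to reach the two hooks above through struct
 * adreno_smmu_priv, roughly as follows:
 *
 *	const struct io_pgtable_cfg *cfg = priv->get_ttbr1_cfg(priv->cookie);
 *	... build a pagetable compatible with *cfg ...
 *	priv->set_ttbr0_cfg(priv->cookie, &new_cfg);	// install per-instance table
 *	priv->set_ttbr0_cfg(priv->cookie, NULL);	// revert to the default TTBR0
 */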
static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
		return false;

	return true;
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->resume_translation = qcom_adreno_smmu_resume_translation;

	return 0;
}

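/*
 * Clients matched below get an IDENTITY (bypass) default domain via
 * qcom_smmu_def_domain_type(), typically because firmware or the
 * bootloader has left the hardware running with live mappings (e.g. a
 * display splash screen) by the time Linux takes over.
 */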
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-gmu" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc7280-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sc8280xp-mdss" },
	{ .compatible = "qcom,sdm670-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ .compatible = "qcom,sm6350-mdss" },
	{ .compatible = "qcom,sm6375-mdss" },
	{ .compatible = "qcom,sm8150-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{ }
};

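/*
 * Prefer a single ASID-based invalidation (TLBIASID) over a stream of
 * per-VA invalidations when the core flushes a table walk; this is
 * assumed to be the cheaper operation on these implementations.
 */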
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	return 0;
}

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * MSM8998 LPASS SMMU reports 13 context banks, but accessing
	 * the last context bank crashes the system.
	 */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
	    smmu->num_context_banks == 13) {
		smmu->num_context_banks = 12;
	} else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
		if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
			smmu->num_context_banks = 7;
		else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
			smmu->num_context_banks = 13;
	}

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
{
	/* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
	smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;

	/* The last few context banks are protected by TZ; hide them from Linux */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
	    smmu->num_context_banks == 5)
		smmu->num_context_banks = 2;

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it into writing FAULT by asking for
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	arm_mmu500_reset(smmu);

	/*
	 * To address performance degradation in non-real-time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845-based boards,
	 * such as MTP and db845, whose firmware implements secure monitor
	 * call handlers to turn the wait-for-safe logic on and off.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

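/*
 * Implementation hook tables. The plain qcom variants share the
 * init/probe/S2CR/TLB handling above; the adreno variants additionally
 * reserve context bank 0 for the GPU and add the SCTLR/stall hooks used
 * for per-process pagetables.
 */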
static const struct arm_smmu_impl qcom_smmu_v2_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.cfg_probe = qcom_adreno_smmuv2_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

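/*
 * Grow the core-allocated struct arm_smmu_device in place into the larger
 * struct qcom_smmu. This relies on smmu being the first member of struct
 * qcom_smmu (see arm-smmu-qcom.h), so the existing contents land in the
 * right place and the returned &qsmmu->smmu stays usable by the core.
 */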
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct qcom_smmu_match_data *data)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct arm_smmu_impl *impl;
	struct qcom_smmu *qsmmu;

	if (!data)
		return ERR_PTR(-EINVAL);

	if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
		impl = data->adreno_impl;
	else
		impl = data->impl;

	if (!impl)
		return smmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->cfg = data->cfg;

	return &qsmmu->smmu;
}

/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
	[QCOM_SMMU_TBU_PWR_STATUS]		= 0x2204,
	[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]	= 0x25dc,
	[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]	= 0x2670,
};

static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
	.reg_offset = qcom_smmu_impl0_reg_offset,
};

/*
 * It is not yet possible to use the MDP SMMU with the bypass quirk on
 * msm8996; there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
	.impl = NULL,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
	.impl = &qcom_smmu_v2_impl,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
	.impl = &sdm845_smmu_500_impl,
	/*
	 * No need for an adreno impl here. On sdm845 the Adreno SMMU is
	 * handled by the separate sdm845-smmu-v2 device.
	 */
	/* Also no debug configuration. */
};

static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.cfg = &qcom_smmu_impl0_cfg,
};

/*
 * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
 * special handling and cannot be covered by the qcom,smmu-500 entry.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ }
};

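/*
 * OEM ID / OEM table ID pairs matched against the ACPI IORT table header
 * to identify Qualcomm SMMU-500 implementations on ACPI-boot platforms.
 */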
#ifdef CONFIG_ACPI
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct of_device_id *match;

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
	}
#endif

	match = of_match_node(qcom_smmu_impl_of_match, np);
	if (match)
		return qcom_smmu_create(smmu, match->data);

	/*
	 * If you hit this WARN() you are missing an entry in the
	 * qcom_smmu_impl_of_match[] table, and GPU per-process page-
	 * tables will be broken.
	 */
	WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
	     "Missing qcom_smmu_impl_of_match entry for: %s",
	     dev_name(smmu->dev));

	return smmu;
}