// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>

#include "arm-smmu.h"
#include "arm-smmu-qcom.h"

#define QCOM_DUMMY_VAL -1

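/* The core arm_smmu_device is embedded in struct qcom_smmu */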
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

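/*
 * Poll the TLB status register until the global sync completes, spinning
 * with an exponential backoff between rounds; if the sync never completes,
 * dump the implementation-defined debug state via qcom_smmu_tlb_sync_debug().
 */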
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}

	qcom_smmu_tlb_sync_debug(smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	if (qsmmu->stall_enabled & BIT(idx))
		reg |= ARM_SMMU_SCTLR_CFCFG;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

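/*
 * Snapshot the fault-related context bank registers so the GPU driver
 * can include them in its own fault reporting.
 */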
static void qcom_adreno_smmu_get_fault_info(const void *cookie,
		struct adreno_smmu_fault_info *info)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
	info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
	info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
	info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
	info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
	info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

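/*
 * Track whether the GPU driver wants faulting transactions on this context
 * bank to stall; qcom_adreno_smmu_write_sctlr() applies the choice via
 * SCTLR.CFCFG on the next context bank SCTLR write.
 */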
static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);

	if (enabled)
		qsmmu->stall_enabled |= BIT(cfg->cbndx);
	else
		qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
}

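/*
 * Retire a stalled transaction, either resuming or terminating it, once
 * the GPU driver has finished processing the fault.
 */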
static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 reg = 0;

	if (terminate)
		reg |= ARM_SMMU_RESUME_TERMINATE;

	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

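/*
 * Hand the GPU driver the io_pgtable config of the current (TTBR1) domain
 * so it can build per-instance TTBR0 pagetables with matching parameters.
 */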
static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

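/* Every target except msm8996 supports the split (TTBR1) pagetable setup */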
static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
		return false;

	return true;
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */

	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->resume_translation = qcom_adreno_smmu_resume_translation;

	return 0;
}

static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-gmu" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc7280-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sc8280xp-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ .compatible = "qcom,sm6350-mdss" },
	{ .compatible = "qcom,sm6375-mdss" },
	{ .compatible = "qcom,sm8150-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{ }
};

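/* Non-Adreno contexts only need the TLBIASID flush-walk preference */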
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	return 0;
}

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

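/*
 * Devices on the client match list above (GPU, display, modem remoteprocs)
 * default to identity domains; everything else keeps the core default.
 */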
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	arm_mmu500_reset(smmu);

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static const struct arm_smmu_impl qcom_smmu_v2_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

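/*
 * Select the implementation ops for this instance and grow the
 * arm_smmu_device allocation into a struct qcom_smmu with devm_krealloc()
 * so the Qualcomm-specific state lives alongside the core device.
 */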
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct qcom_smmu_match_data *data)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct arm_smmu_impl *impl;
	struct qcom_smmu *qsmmu;

	if (!data)
		return ERR_PTR(-EINVAL);

	if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
		impl = data->adreno_impl;
	else
		impl = data->impl;

	if (!impl)
		return smmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->cfg = data->cfg;

	return &qsmmu->smmu;
}

/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
	[QCOM_SMMU_TBU_PWR_STATUS]		= 0x2204,
	[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]	= 0x25dc,
	[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]	= 0x2670,
};

static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
	.reg_offset = qcom_smmu_impl0_reg_offset,
};

/*
 * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996,
 * there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
	.impl = NULL,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
	.impl = &qcom_smmu_v2_impl,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
	.impl = &sdm845_smmu_500_impl,
	/*
	 * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
	 * by the separate sdm845-smmu-v2 device.
	 */
	/* Also no debug configuration. */
};

static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.cfg = &qcom_smmu_impl0_cfg,
};

/*
 * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
 * special handling and can not be covered by the qcom,smmu-500 entry.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ }
};

#ifdef CONFIG_ACPI
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif

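/*
 * Entry point from the core arm-smmu probe: match the instance against
 * the ACPI or DT tables above and attach the Qualcomm implementation.
 */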
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct of_device_id *match;

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
	}
#endif

	match = of_match_node(qcom_smmu_impl_of_match, np);
	if (match)
		return qcom_smmu_create(smmu, match->data);

	/*
	 * If you hit this WARN_ON() you are missing an entry in the
	 * qcom_smmu_impl_of_match[] table, and GPU per-process page-
	 * tables will be broken.
	 */
	WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
	     "Missing qcom_smmu_impl_of_match entry for: %s",
	     dev_name(smmu->dev));

	return smmu;
}