xref: /openbmc/linux/drivers/gpu/drm/msm/msm_iommu.c (revision ccb01374)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

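/*
 * IOMMU fault callback: forward the fault to the handler installed on the
 * msm_mmu (if any, e.g. by the GPU code); otherwise just log a rate-limited
 * warning with the faulting IOVA and flags.
 */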
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);
	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
	return 0;
}

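/*
 * Attach the device to the IOMMU domain, holding a runtime PM reference so
 * the hardware is powered while iommu_attach_device() runs.  The names/cnt
 * arguments are unused by this implementation.
 */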
static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
			    int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	int ret;

	pm_runtime_get_sync(mmu->dev);
	ret = iommu_attach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);

	return ret;
}

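/* Detach from the IOMMU domain, again bracketed by runtime PM get/put. */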
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
			     int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_detach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);
}

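/*
 * Map a scatterlist into the IOMMU domain at the given IOVA.  Note that,
 * unlike the other callbacks, the runtime PM calls are left commented out
 * here, presumably because the map path only runs while the device is
 * already powered.  iommu_map_sg() returns the number of bytes mapped, so
 * anything other than 'len' is treated as a failure.
 */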
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

//	pm_runtime_get_sync(mmu->dev);
	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
//	pm_runtime_put_sync(mmu->dev);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

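/* Unmap 'len' bytes at 'iova' from the domain, with the device powered. */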
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_unmap(iommu->domain, iova, len);
	pm_runtime_put_sync(mmu->dev);

	return 0;
}

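/* Free the IOMMU domain along with the wrapper itself. */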
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

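/* msm_mmu function table for the IOMMU-backed implementation. */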
static const struct msm_mmu_funcs funcs = {
		.attach = msm_iommu_attach,
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
};

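/*
 * Wrap an iommu_domain in a msm_mmu.  The caller allocates the domain (e.g.
 * with iommu_domain_alloc()); ownership passes to the wrapper, which frees it
 * in msm_iommu_destroy().  The per-domain fault handler is pointed at
 * msm_fault_handler() so faults can be forwarded to mmu->handler.
 */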
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	return &iommu->base;
}

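/*
 * Minimal usage sketch for msm_iommu_new(), assuming a platform device
 * 'pdev' (illustrative only):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	struct msm_mmu *mmu;
 *
 *	if (!domain)
 *		return ERR_PTR(-ENODEV);
 *
 *	mmu = msm_iommu_new(&pdev->dev, domain);
 *	if (IS_ERR(mmu)) {
 *		iommu_domain_free(domain);
 *		return ERR_CAST(mmu);
 *	}
 */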