xref: /openbmc/linux/drivers/gpu/drm/msm/msm_iommu.c (revision fcc8487d)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_mmu.h"

/* An msm_mmu implementation backed by the kernel IOMMU API. */
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

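/*
 * Per-domain fault handler.  If the owner of this MMU installed its own
 * handler (msm_mmu::handler), defer to it; otherwise just rate-limit a
 * warning.  Returning 0 reports the fault as handled to the IOMMU core.
 */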
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);
	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
	return 0;
}

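/*
 * Attach the GPU device to the IOMMU domain.  The 'names'/'cnt' arguments
 * are not used by this implementation.  A runtime PM reference is held
 * across the attach/detach calls, presumably so the IOMMU hardware is
 * powered up while the domain is (re)programmed.
 */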
static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
			    int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	int ret;

	pm_runtime_get_sync(mmu->dev);
	ret = iommu_attach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);

	return ret;
}

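/* Counterpart to msm_iommu_attach(); 'names'/'cnt' are likewise unused. */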
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
			     int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_detach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);
}

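/*
 * Map a buffer's scatterlist into the domain at 'iova'.  iommu_map_sg()
 * returns the number of bytes mapped (0 on failure), so anything short of
 * 'len' is treated as an error.  The runtime PM calls are left commented
 * out in this revision, presumably because callers on the map path are
 * expected to already hold a runtime PM reference.
 */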
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

//	pm_runtime_get_sync(mmu->dev);
	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
//	pm_runtime_put_sync(mmu->dev);
	/* iommu_map_sg() returns 0 on failure; ret is unsigned, never negative */
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

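/*
 * Tear down the mapping at 'iova'.  iommu_unmap() returns the number of
 * bytes unmapped, but the result is not checked here.
 */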
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_unmap(iommu->domain, iova, len);
	pm_runtime_put_sync(mmu->dev);

	return 0;
}

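/*
 * Release the wrapper.  Note that the iommu_domain passed to
 * msm_iommu_new() is owned by this object and freed here as well.
 */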
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.attach = msm_iommu_attach,
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
};

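/*
 * Wrap an existing iommu_domain in an msm_mmu.  On success the returned
 * object takes ownership of the domain; on failure the caller keeps it.
 *
 * Usage sketch (hypothetical caller, with 'dev' standing in for the GPU's
 * struct device):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	struct msm_mmu *mmu;
 *
 *	if (domain) {
 *		mmu = msm_iommu_new(dev, domain);
 *		if (IS_ERR(mmu))
 *			iommu_domain_free(domain);
 *	}
 */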
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	return &iommu->base;
}