xref: /openbmc/linux/drivers/iommu/tegra-gart.c (revision e58e871b)
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

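/*
 * Register offsets below are given relative to the memory controller; the
 * register window handed to this driver starts at GART_CONFIG, so
 * GART_REG_BASE rebases the offsets to the ioremapped region (GART_CONFIG
 * becomes offset 0).
 */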
#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

struct gart_domain {
	struct iommu_domain domain;		/* generic domain handle */
	struct gart_device *gart;		/* link to gart device   */
};

static struct gart_device *gart_handle; /* unique for a system */

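/*
 * Build a GART PTE: the valid bit plus the page frame number. PAGE_SHIFT is
 * guaranteed to equal GART_PAGE_SHIFT by the BUILD_BUG_ON() in
 * tegra_gart_probe().
 */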
#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

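/* Walk every page-sized IOVA slot in the GART's remappable aperture. */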
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

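/*
 * The page table is not mapped directly: a PTE is accessed by writing the
 * target IOVA to GART_ENTRY_ADDR and then the entry value to
 * GART_ENTRY_DATA.
 */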
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

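/* Read a PTE back through the same indirect address/data register pair. */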
static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

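/*
 * Program every PTE, either restoring from @data or clearing the whole
 * table when @data is NULL, then enable the GART.
 */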
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

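/* Check that [iova, iova + bytes) lies entirely within the GART aperture. */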
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

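/* Track the attaching device on the client list, rejecting duplicates. */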
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

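/* Drop the device's tracking entry from the client list. */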
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find %s to detach\n", dev_name(dev));
out:
	spin_unlock(&gart->client_lock);
}

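/* Only unmanaged domains are supported; every domain maps the single GART. */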
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}

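/*
 * Release any remaining clients before freeing the domain. The entries are
 * removed inline with the _safe iterator rather than by calling
 * gart_iommu_detach_dev(), which would try to re-acquire client_lock and
 * deadlock.
 */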
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		struct gart_client *c, *tmp;

		spin_lock(&gart->client_lock);
		list_for_each_entry_safe(c, tmp, &gart->client, list) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}

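/*
 * Map a single page. pgsize_bitmap advertises only 4K pages, so the IOMMU
 * core splits larger requests into GART_PAGE_SIZE-sized calls.
 */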
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

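/*
 * Unmap a single page by clearing its PTE. Returning the unmapped size
 * tells the IOMMU core the operation succeeded; a return of 0 is treated
 * as failure.
 */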
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}

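/* Translate an IOVA back to a physical address by reading its PTE. */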
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			 (unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.map_sg		= default_iommu_map_sg,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

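/*
 * The GART loses its state across suspend, so snapshot every PTE into
 * savedata here and rewrite the whole table on resume.
 */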
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

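/*
 * Resource 0 is the GART register window; resource 1 is the remappable
 * IOVA aperture that the GART translates.
 */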
static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}

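/* Disable translation and release the save area on driver removal. */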
static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	writel(0, gart->regs + GART_CONFIG);
	vfree(gart->savedata);
	gart_handle = NULL;
	return 0;
}


static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
	},
};

static int tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");