// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem *regs;
	u32 *savedata;
	unsigned long iovmm_base;	/* offset to vmm_area start */
	unsigned long iovmm_end;	/* offset to vmm_area end */
	spinlock_t pte_lock;		/* for pagetable */
	spinlock_t dom_lock;		/* for active domain */
	unsigned int active_devices;	/* number of active devices */
	struct iommu_domain *active_domain;	/* current active domain */
	struct iommu_device iommu;	/* IOMMU Core handle */
	struct device *dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

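/*
 * The GART page table is not memory mapped directly; entries are
 * accessed indirectly by writing the IOVA to GART_ENTRY_ADDR and then
 * reading or writing the PTE value through GART_ENTRY_DATA.
 */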
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

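/*
 * Program every PTE in the aperture, either from a saved snapshot or
 * cleared to zero, then enable address translation via GART_CONFIG.
 */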
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

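/*
 * Mappings must lie within the GART aperture and are handled strictly
 * one GART page (4KiB) at a time.
 */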
static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

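/*
 * The GART provides a single translation aperture, so only one domain
 * can be active at a time; attaching a device while a different domain
 * is active fails with -EINVAL.
 */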
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EINVAL;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

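/*
 * Returning a device to platform DMA ops drops its domain reference;
 * the active domain is released once the last attached device is gone.
 */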
static void gart_iommu_set_platform_dma(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

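/*
 * Only unmanaged domains are supported; the reported geometry matches
 * the hardware aperture and force_aperture keeps all mappings inside it.
 */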
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}

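/*
 * With gart_debug enabled, map and unmap additionally validate the
 * current PTE state to catch double mappings and stray unmappings.
 */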
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

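/*
 * Only devices that reference the GART through their firmware node
 * (i.e. that have an IOMMU fwspec) are taken over by this driver.
 */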
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

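/*
 * Per the read-back requirement documented at FLUSH_GART_REGS, a
 * register read is needed to make sure queued PTE writes have landed
 * before the new mappings are used.
 */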
static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
				size_t size)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	size_t length = gather->end - gather->start + 1;

	gart_iommu_sync_map(domain, gather->start, length);
}

static const struct iommu_ops gart_iommu_ops = {
	.domain_alloc	= gart_iommu_domain_alloc,
	.probe_device	= gart_iommu_probe_device,
	.device_group	= generic_device_group,
	.set_platform_dma_ops = gart_iommu_set_platform_dma,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= gart_iommu_attach_dev,
		.map		= gart_iommu_map,
		.unmap		= gart_iommu_unmap,
		.iova_to_phys	= gart_iommu_iova_to_phys,
		.iotlb_sync_map	= gart_iommu_sync_map,
		.iotlb_sync	= gart_iommu_sync,
		.free		= gart_iommu_domain_free,
	}
};

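/*
 * Across suspend the PTEs are snapshotted into gart->savedata (one u32
 * per aperture page) and written back wholesale on resume.
 */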
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

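/*
 * The GART registers live inside the memory controller's register
 * space (at GART_REG_BASE), so probing is driven by the Tegra MC
 * driver, which passes in its register mapping; the translation
 * aperture comes from the device's second memory resource.
 */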
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");