// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU only flushes when a mapping is reused
 * (the optimized, lazy strategy). If it is enabled the GART is flushed
 * for every mapping. The problem is that the lazy flush seems to
 * trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with at least QLogic).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
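/*
 * Worked example (added, illustrative numbers): the 40-bit physical
 * address 0x1_1234_5000 encodes to 0x12345013 -- bits [31:12] stay in
 * place, phys bits [39:32] move to GPTE bits [11:4], and
 * GPTE_VALID | GPTE_COHERENT fill bits [1:0]. GPTE_DECODE() reverses
 * this by shifting bits [11:4] back up to [39:32].
 */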

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
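/*
 * Note (added; a sketch of the apparent intent): bumping next_bit past
 * a freed range keeps those entries out of circulation until the
 * allocator wraps, and the wrap sets need_flush -- so a stale GART TLB
 * entry is flushed before the slot can be handed out again.
 */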

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
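/*
 * Typical call pattern (added note): map one or more areas first and
 * call flush_gart() once afterwards, so a whole batch of new GART
 * entries costs a single TLB flush -- see gart_map_page() and
 * gart_map_sg() below.
 */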

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL, KERN_ERR);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
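/*
 * Worked example (added, illustrative numbers): mapping size 0x2000 at
 * phys 0x1_0000_0800 spans iommu_num_pages(0x800, 0x2000, 4K) = 3 GART
 * pages; the returned bus address preserves the intra-page offset 0x800.
 */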
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -ENOMEM;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start, ret;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return -EINVAL;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous, not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				ret = dma_map_cont(dev, start_sg, i - start,
						   sgmap, pages, need);
				if (ret < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
	if (ret < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged, try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	return ret;
}
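/*
 * Merge example (added, illustrative): two 4KB chunks where the first
 * ends on a page boundary and the second starts at offset 0 come back
 * as a single output segment with an 8KB dma_length; a predecessor
 * that does not end page-aligned forces a new segment instead.
 */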

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}
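/*
 * Driver-side usage sketch (added; not part of this file): the two
 * helpers above are reached through the generic DMA API, e.g.:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, SZ_64K,
 *				      &dma_handle, GFP_KERNEL);
 *	...
 *	dma_free_coherent(&pdev->dev, SZ_64K, cpu_addr, dma_handle);
 */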

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}
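/*
 * Sizing example (added, illustrative): with a 128MB aperture and AGP
 * in use, roughly half (64MB, minus any 2MB alignment trim) goes to
 * the IOMMU; only a result below 64MB triggers the warning above.
 */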

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
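/*
 * Decoding example (added): the order field selects an aperture of
 * 32MB << order for order 0..7, i.e. 32MB up to 4GB, and the base
 * register counts in 32MB units -- hence the << 25 above.
 */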

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

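	/*
	 * One 32-bit GATT entry per aperture page (added note): e.g. a
	 * 64MB aperture needs 16384 entries, a 64KB table (illustrative).
	 */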
	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

nommu:
	/* Should not happen anymore */
	pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
	return -1;
}

static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mmap				= dma_common_mmap,
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
	.get_required_mask		= dma_direct_get_required_mask,
	.alloc_pages			= dma_direct_alloc_pages,
	.free_pages			= dma_direct_free_pages,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warn("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
				    PAGE_KERNEL);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);
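	/*
	 * Layout note (added, illustrative): the IOMMU claims the top
	 * iommu_size bytes of the aperture, e.g. the upper 64MB of a
	 * 128MB aperture, leaving the lower part to AGP.
	 */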

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	x86_swiotlb_enable = false;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}