xref: /openbmc/linux/arch/x86/kernel/pci-dma.c (revision bbecb07f)
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

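/*
 * Non-zero when 64-bit PCI DAC addressing must not be advertised to
 * drivers: set to 1 by "iommu=nodac" (or by the VIA bridge quirk at the
 * bottom of this file), and to -1 by "iommu=usedac" so that quirks
 * cannot forbid it again.
 */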
static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

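/*
 * Walk the linker-generated IOMMU detection table (sorted so that
 * dependencies run first), mark every entry whose ->detect() hook finds
 * hardware as IOMMU_DETECTED and run its early init.  An entry flagged
 * IOMMU_FINISH_IF_DETECTED terminates the walk once it has matched.
 */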
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
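
/*
 * Generic coherent allocation: try CMA first when the caller may sleep,
 * otherwise (or if the CMA pages do not fit the device's coherent mask)
 * fall back to the plain page allocator, retrying from ZONE_DMA if the
 * first attempt lands above the mask.
 */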
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page) {
			addr = phys_to_dma(dev, page_to_phys(page));
			if (addr + size > dma_mask) {
				dma_release_from_contiguous(dev, page, count);
				page = NULL;
			}
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = phys_to_dma(dev, page_to_phys(page));
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

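/*
 * Undo dma_generic_alloc_coherent(): give CMA pages back to the
 * contiguous allocator; anything else goes back to the page allocator.
 */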
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

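/*
 * Normalize arguments before a coherent allocation: substitute the
 * fallback device for a NULL dev, constrain the zone-related GFP flags
 * to the device's coherent DMA mask, and report whether the device is
 * DMA-capable at all.
 */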
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

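	/*
	 * The option string is a comma-separated list; each token is
	 * matched against every keyword below and also handed to
	 * gart_parse_options() for GART-specific flags.
	 */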
	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

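/*
 * Validate a driver-requested DMA mask: refuse 64-bit (DAC) masks when
 * DAC has been forbidden, reject masks below the 24-bit ISA limit, and
 * return 0 for large masks when "iommu=forcesac" is set so that drivers
 * fall back to 32-bit single address cycle addressing.
 */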
int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}

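/*
 * Late IOMMU initialization: set up DMA-API debugging, call the
 * platform's iommu_init hook and then run the ->late_init() hook of
 * every detection-table entry that was marked during pci_iommu_alloc().
 */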
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
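/*
 * Final fixup for every VIA device whose class matches PCI-to-PCI
 * bridge; with a class shift of 8 the programming-interface byte of the
 * class code is ignored when matching.
 */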
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif