xref: /openbmc/linux/arch/x86/kernel/pci-dma.c (revision 615c36f5)
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
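
/*
 * Usage note (sketch): in this kernel generation the x86 dma_alloc_coherent()
 * wrapper substitutes this device when a caller passes dev == NULL, so legacy
 * ISA-style callers still get a 24-bit coherent DMA mask. An illustrative
 * call, assuming that NULL-dev convention:
 *
 *	buf = dma_alloc_coherent(NULL, PAGE_SIZE, &handle, GFP_KERNEL);
 */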

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
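
/*
 * Example (sketch, not part of this file): a PCI driver typically negotiates
 * its mask at probe time, falling back from 64-bit to 32-bit addressing;
 * pdev here stands for the driver's struct pci_dev:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */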

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
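
/*
 * The table walked above is populated at link time by the IOMMU_INIT_*
 * macros from <asm/iommu_table.h>, which drop a struct iommu_table_entry
 * into a dedicated section between __iommu_table and __iommu_table_end.
 * A sketch of a registration (all names below are illustrative, not real
 * kernel symbols):
 *
 *	static int __init my_iommu_detect(void)
 *	{
 *		return my_hw_present() ? 1 : 0;
 *	}
 *	IOMMU_INIT_FINISH(my_iommu_detect, NULL,
 *			  my_iommu_early_init, my_iommu_late_init);
 */
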
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
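
/*
 * Worked example of the retry above (illustrative numbers): a device with
 * a 30-bit coherent mask asks for one page. If alloc_pages_node() returns
 * a page above 1GB, the "addr + size > dma_mask" check trips, the page is
 * freed, and since the mask is below DMA_BIT_MASK(32) the allocation is
 * retried with GFP_DMA, taking the page from the low 16MB ISA zone instead.
 */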

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
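
/*
 * Example command lines accepted by the parser above (sketch):
 *
 *	iommu=off            do not use any IOMMU
 *	iommu=force,merge    force IOMMU usage and enable sg merging
 *	iommu=soft           use the software bounce buffers (swiotlb)
 *	iommu=pt             pass-through: skip DMA translation for host
 *	                     drivers, keeping the IOMMU for KVM assignment
 *	iommu=nodac          forbid 64-bit double-address-cycle addressing
 */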

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.
	 * The caller just has to use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem with this is that if we overflow the IOMMU area
	 * and return DAC as the fallback address, the device may not
	 * handle it correctly.
	 *
	 * As a special case, some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx). Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this
	 * type. Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
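
/*
 * Terminology note: SAC (single address cycle) is ordinary 32-bit PCI
 * addressing; DAC (double address cycle) transfers a 64-bit address in
 * two bus cycles. dma_supported() is normally reached through
 * dma_set_mask() above, so with "iommu=nodac" a 64-bit mask request
 * fails and a driver is expected to fall back to a 32-bit mask.
 */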

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif