#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

/*
 * Group multi-function PCI devices into a single device group for the
 * iommu_device_group interface. This tells the iommu driver to pretend
 * it cannot distinguish between functions of a device, exposing only one
 * group for the device. Useful for disallowing use of individual PCI
 * functions from userspace drivers.
 */
int iommu_group_mf __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
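/*
 * Entries in __iommu_table are emitted into a dedicated link section by the
 * IOMMU_INIT* registration macros from <asm/iommu_table.h>, roughly of the
 * form below (the hook names are placeholders, not real symbols):
 *
 *	IOMMU_INIT_FINISH(detect_fn, depend_fn, early_init_fn, late_init_fn);
 *
 * pci_iommu_alloc() walks these entries at early boot to run the detect and
 * early_init hooks; pci_iommu_init() later runs the late_init hooks of
 * whatever was detected.
 */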
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

/*
 * Walk the IOMMU detection table at early boot: every entry whose detect
 * hook reports success is marked IOMMU_DETECTED and early-initialized; the
 * walk stops early if a detected entry asks to finish the scan.
 */
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

/*
 * Generic coherent allocation: grab zeroed pages on the device's node and,
 * if they land above the device's coherent DMA mask, retry from ZONE_DMA
 * (for sub-32-bit masks) before failing.
 */
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
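/*
 * Illustrative sketch of how a driver exercises the helpers above; the
 * function and variable names are hypothetical and the block is kept out of
 * the build.  dma_set_mask() validates the requested mask through
 * dma_supported(); dma_alloc_coherent() reaches dma_generic_alloc_coherent()
 * via the nommu ops when no hardware IOMMU driver installs its own
 * allocator.
 */
#if 0	/* example only */
static int example_dma_setup(struct pci_dev *pdev)
{
	dma_addr_t handle;
	void *buf;

	/* Refuse to probe if 32-bit DMA addressing cannot be supported. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* One page of coherent (uncached/consistent) DMA memory. */
	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with "handle", touch "buf" from the CPU ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
	return 0;
}
#endif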
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		if (!strncmp(p, "group_mf", 8))
			iommu_group_mf = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
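/*
 * Illustrative "iommu=" command lines built from the options parsed by
 * iommu_setup() above (multiple options are comma separated; see
 * Documentation/x86/x86_64/boot-options.txt for the authoritative list):
 *
 *	iommu=off                disable IOMMU usage altogether
 *	iommu=pt                 pass-through: no DMA translation for devices
 *	iommu=soft               use software bounce buffering (swiotlb)
 *	iommu=noforce,nomerge    don't force IOMMU usage or sg merging
 *	iommu=nodac              forbid double-address-cycle (64-bit) DMA
 */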
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif