1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> 7 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> 8 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com> 9 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. 10 * IP32 changes by Ilya. 11 * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on 12 * the kernels original. 13 */ 14 #include <linux/types.h> 15 #include <linux/mm.h> 16 #include <linux/module.h> 17 #include <linux/string.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/platform_device.h> 20 #include <linux/scatterlist.h> 21 22 #include <linux/cache.h> 23 #include <linux/io.h> 24 25 #include <asm/octeon/octeon.h> 26 #include <asm/octeon/cvmx-npi-defs.h> 27 #include <asm/octeon/cvmx-pci-defs.h> 28 29 #include <dma-coherence.h> 30 31 #ifdef CONFIG_PCI 32 #include <asm/octeon/pci-octeon.h> 33 #endif 34 35 #define BAR2_PCI_ADDRESS 0x8000000000ul 36 37 struct bar1_index_state { 38 int16_t ref_count; /* Number of PCI mappings using this index */ 39 uint16_t address_bits; /* Upper bits of physical address. This is 40 shifted 22 bits */ 41 }; 42 43 #ifdef CONFIG_PCI 44 static DEFINE_SPINLOCK(bar1_lock); 45 static struct bar1_index_state bar1_state[32]; 46 #endif 47 48 dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size) 49 { 50 #ifndef CONFIG_PCI 51 /* Without PCI/PCIe this function can be called for Octeon internal 52 devices such as USB. These devices all support 64bit addressing */ 53 mb(); 54 return virt_to_phys(ptr); 55 #else 56 unsigned long flags; 57 uint64_t dma_mask; 58 int64_t start_index; 59 dma_addr_t result = -1; 60 uint64_t physical = virt_to_phys(ptr); 61 int64_t index; 62 63 mb(); 64 /* 65 * Use the DMA masks to determine the allowed memory 66 * region. For us it doesn't limit the actual memory, just the 67 * address visible over PCI. Devices with limits need to use 68 * lower indexed Bar1 entries. 69 */ 70 if (dev) { 71 dma_mask = dev->coherent_dma_mask; 72 if (dev->dma_mask) 73 dma_mask = *dev->dma_mask; 74 } else { 75 dma_mask = 0xfffffffful; 76 } 77 78 /* 79 * Platform devices, such as the internal USB, skip all 80 * translation and use Octeon physical addresses directly. 81 */ 82 if (!dev || dev->bus == &platform_bus_type) 83 return physical; 84 85 switch (octeon_dma_bar_type) { 86 case OCTEON_DMA_BAR_TYPE_PCIE: 87 if (unlikely(physical < (16ul << 10))) 88 panic("dma_map_single: Not allowed to map first 16KB." 89 " It interferes with BAR0 special area\n"); 90 else if ((physical + size >= (256ul << 20)) && 91 (physical < (512ul << 20))) 92 panic("dma_map_single: Not allowed to map bootbus\n"); 93 else if ((physical + size >= 0x400000000ull) && 94 physical < 0x410000000ull) 95 panic("dma_map_single: " 96 "Attempt to map illegal memory address 0x%llx\n", 97 physical); 98 else if (physical >= 0x420000000ull) 99 panic("dma_map_single: " 100 "Attempt to map illegal memory address 0x%llx\n", 101 physical); 102 else if ((physical + size >= 103 (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20)) 104 && physical < (4ull<<30)) 105 pr_warning("dma_map_single: Warning: " 106 "Mapping memory address that might " 107 "conflict with devices 0x%llx-0x%llx\n", 108 physical, physical+size-1); 109 /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */ 110 if ((physical >= 0x410000000ull) && physical < 0x420000000ull) 111 result = physical - 0x400000000ull; 112 else 113 result = physical; 114 if (((result+size-1) & dma_mask) != result+size-1) 115 panic("dma_map_single: Attempt to map address " 116 "0x%llx-0x%llx, which can't be accessed " 117 "according to the dma mask 0x%llx\n", 118 physical, physical+size-1, dma_mask); 119 goto done; 120 121 case OCTEON_DMA_BAR_TYPE_BIG: 122 #ifdef CONFIG_64BIT 123 /* If the device supports 64bit addressing, then use BAR2 */ 124 if (dma_mask > BAR2_PCI_ADDRESS) { 125 result = physical + BAR2_PCI_ADDRESS; 126 goto done; 127 } 128 #endif 129 if (unlikely(physical < (4ul << 10))) { 130 panic("dma_map_single: Not allowed to map first 4KB. " 131 "It interferes with BAR0 special area\n"); 132 } else if (physical < (256ul << 20)) { 133 if (unlikely(physical + size > (256ul << 20))) 134 panic("dma_map_single: Requested memory spans " 135 "Bar0 0:256MB and bootbus\n"); 136 result = physical; 137 goto done; 138 } else if (unlikely(physical < (512ul << 20))) { 139 panic("dma_map_single: Not allowed to map bootbus\n"); 140 } else if (physical < (2ul << 30)) { 141 if (unlikely(physical + size > (2ul << 30))) 142 panic("dma_map_single: Requested memory spans " 143 "Bar0 512MB:2GB and BAR1\n"); 144 result = physical; 145 goto done; 146 } else if (physical < (2ul << 30) + (128 << 20)) { 147 /* Fall through */ 148 } else if (physical < 149 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) { 150 if (unlikely 151 (physical + size > 152 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))) 153 panic("dma_map_single: Requested memory " 154 "extends past Bar1 (4GB-%luMB)\n", 155 OCTEON_PCI_BAR1_HOLE_SIZE); 156 result = physical; 157 goto done; 158 } else if ((physical >= 0x410000000ull) && 159 (physical < 0x420000000ull)) { 160 if (unlikely(physical + size > 0x420000000ull)) 161 panic("dma_map_single: Requested memory spans " 162 "non existant memory\n"); 163 /* BAR0 fixed mapping 256MB:512MB -> 164 * 16GB+256MB:16GB+512MB */ 165 result = physical - 0x400000000ull; 166 goto done; 167 } else { 168 /* Continued below switch statement */ 169 } 170 break; 171 172 case OCTEON_DMA_BAR_TYPE_SMALL: 173 #ifdef CONFIG_64BIT 174 /* If the device supports 64bit addressing, then use BAR2 */ 175 if (dma_mask > BAR2_PCI_ADDRESS) { 176 result = physical + BAR2_PCI_ADDRESS; 177 goto done; 178 } 179 #endif 180 /* Continued below switch statement */ 181 break; 182 183 default: 184 panic("dma_map_single: Invalid octeon_dma_bar_type\n"); 185 } 186 187 /* Don't allow mapping to span multiple Bar entries. The hardware guys 188 won't guarantee that DMA across boards work */ 189 if (unlikely((physical >> 22) != ((physical + size - 1) >> 22))) 190 panic("dma_map_single: " 191 "Requested memory spans more than one Bar1 entry\n"); 192 193 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) 194 start_index = 31; 195 else if (unlikely(dma_mask < (1ul << 27))) 196 start_index = (dma_mask >> 22); 197 else 198 start_index = 31; 199 200 /* Only one processor can access the Bar register at once */ 201 spin_lock_irqsave(&bar1_lock, flags); 202 203 /* Look through Bar1 for existing mapping that will work */ 204 for (index = start_index; index >= 0; index--) { 205 if ((bar1_state[index].address_bits == physical >> 22) && 206 (bar1_state[index].ref_count)) { 207 /* An existing mapping will work, use it */ 208 bar1_state[index].ref_count++; 209 if (unlikely(bar1_state[index].ref_count < 0)) 210 panic("dma_map_single: " 211 "Bar1[%d] reference count overflowed\n", 212 (int) index); 213 result = (index << 22) | (physical & ((1 << 22) - 1)); 214 /* Large BAR1 is offset at 2GB */ 215 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) 216 result += 2ul << 30; 217 goto done_unlock; 218 } 219 } 220 221 /* No existing mappings, look for a free entry */ 222 for (index = start_index; index >= 0; index--) { 223 if (unlikely(bar1_state[index].ref_count == 0)) { 224 union cvmx_pci_bar1_indexx bar1_index; 225 /* We have a free entry, use it */ 226 bar1_state[index].ref_count = 1; 227 bar1_state[index].address_bits = physical >> 22; 228 bar1_index.u32 = 0; 229 /* Address bits[35:22] sent to L2C */ 230 bar1_index.s.addr_idx = physical >> 22; 231 /* Don't put PCI accesses in L2. */ 232 bar1_index.s.ca = 1; 233 /* Endian Swap Mode */ 234 bar1_index.s.end_swp = 1; 235 /* Set '1' when the selected address range is valid. */ 236 bar1_index.s.addr_v = 1; 237 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 238 bar1_index.u32); 239 /* An existing mapping will work, use it */ 240 result = (index << 22) | (physical & ((1 << 22) - 1)); 241 /* Large BAR1 is offset at 2GB */ 242 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) 243 result += 2ul << 30; 244 goto done_unlock; 245 } 246 } 247 248 pr_err("dma_map_single: " 249 "Can't find empty BAR1 index for physical mapping 0x%llx\n", 250 (unsigned long long) physical); 251 252 done_unlock: 253 spin_unlock_irqrestore(&bar1_lock, flags); 254 done: 255 pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result); 256 return result; 257 #endif 258 } 259 260 void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) 261 { 262 #ifndef CONFIG_PCI 263 /* 264 * Without PCI/PCIe this function can be called for Octeon internal 265 * devices such as USB. These devices all support 64bit addressing. 266 */ 267 return; 268 #else 269 unsigned long flags; 270 uint64_t index; 271 272 /* 273 * Platform devices, such as the internal USB, skip all 274 * translation and use Octeon physical addresses directly. 275 */ 276 if (dev->bus == &platform_bus_type) 277 return; 278 279 switch (octeon_dma_bar_type) { 280 case OCTEON_DMA_BAR_TYPE_PCIE: 281 /* Nothing to do, all mappings are static */ 282 goto done; 283 284 case OCTEON_DMA_BAR_TYPE_BIG: 285 #ifdef CONFIG_64BIT 286 /* Nothing to do for addresses using BAR2 */ 287 if (dma_addr >= BAR2_PCI_ADDRESS) 288 goto done; 289 #endif 290 if (unlikely(dma_addr < (4ul << 10))) 291 panic("dma_unmap_single: Unexpect DMA address 0x%llx\n", 292 dma_addr); 293 else if (dma_addr < (2ul << 30)) 294 /* Nothing to do for addresses using BAR0 */ 295 goto done; 296 else if (dma_addr < (2ul << 30) + (128ul << 20)) 297 /* Need to unmap, fall through */ 298 index = (dma_addr - (2ul << 30)) >> 22; 299 else if (dma_addr < 300 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) 301 goto done; /* Nothing to do for the rest of BAR1 */ 302 else 303 panic("dma_unmap_single: Unexpect DMA address 0x%llx\n", 304 dma_addr); 305 /* Continued below switch statement */ 306 break; 307 308 case OCTEON_DMA_BAR_TYPE_SMALL: 309 #ifdef CONFIG_64BIT 310 /* Nothing to do for addresses using BAR2 */ 311 if (dma_addr >= BAR2_PCI_ADDRESS) 312 goto done; 313 #endif 314 index = dma_addr >> 22; 315 /* Continued below switch statement */ 316 break; 317 318 default: 319 panic("dma_unmap_single: Invalid octeon_dma_bar_type\n"); 320 } 321 322 if (unlikely(index > 31)) 323 panic("dma_unmap_single: " 324 "Attempt to unmap an invalid address (0x%llx)\n", 325 dma_addr); 326 327 spin_lock_irqsave(&bar1_lock, flags); 328 bar1_state[index].ref_count--; 329 if (bar1_state[index].ref_count == 0) 330 octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0); 331 else if (unlikely(bar1_state[index].ref_count < 0)) 332 panic("dma_unmap_single: Bar1[%u] reference count < 0\n", 333 (int) index); 334 spin_unlock_irqrestore(&bar1_lock, flags); 335 done: 336 pr_debug("dma_unmap_single 0x%llx\n", dma_addr); 337 return; 338 #endif 339 } 340