1a88b5ba8SSam Ravnborg /* iommu.c: Generic sparc64 IOMMU support. 2a88b5ba8SSam Ravnborg * 3a88b5ba8SSam Ravnborg * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net) 4a88b5ba8SSam Ravnborg * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com) 5a88b5ba8SSam Ravnborg */ 6a88b5ba8SSam Ravnborg 7a88b5ba8SSam Ravnborg #include <linux/kernel.h> 8a88b5ba8SSam Ravnborg #include <linux/module.h> 95a0e3ad6STejun Heo #include <linux/slab.h> 10a88b5ba8SSam Ravnborg #include <linux/delay.h> 11a88b5ba8SSam Ravnborg #include <linux/device.h> 12a88b5ba8SSam Ravnborg #include <linux/dma-mapping.h> 13a88b5ba8SSam Ravnborg #include <linux/errno.h> 14a88b5ba8SSam Ravnborg #include <linux/iommu-helper.h> 15a66022c4SAkinobu Mita #include <linux/bitmap.h> 16a88b5ba8SSam Ravnborg 17a88b5ba8SSam Ravnborg #ifdef CONFIG_PCI 18a88b5ba8SSam Ravnborg #include <linux/pci.h> 19a88b5ba8SSam Ravnborg #endif 20a88b5ba8SSam Ravnborg 21a88b5ba8SSam Ravnborg #include <asm/iommu.h> 22a88b5ba8SSam Ravnborg 23a88b5ba8SSam Ravnborg #include "iommu_common.h" 24a88b5ba8SSam Ravnborg 25a88b5ba8SSam Ravnborg #define STC_CTXMATCH_ADDR(STC, CTX) \ 26a88b5ba8SSam Ravnborg ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) 27a88b5ba8SSam Ravnborg #define STC_FLUSHFLAG_INIT(STC) \ 28a88b5ba8SSam Ravnborg (*((STC)->strbuf_flushflag) = 0UL) 29a88b5ba8SSam Ravnborg #define STC_FLUSHFLAG_SET(STC) \ 30a88b5ba8SSam Ravnborg (*((STC)->strbuf_flushflag) != 0UL) 31a88b5ba8SSam Ravnborg 32a88b5ba8SSam Ravnborg #define iommu_read(__reg) \ 33a88b5ba8SSam Ravnborg ({ u64 __ret; \ 34a88b5ba8SSam Ravnborg __asm__ __volatile__("ldxa [%1] %2, %0" \ 35a88b5ba8SSam Ravnborg : "=r" (__ret) \ 36a88b5ba8SSam Ravnborg : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ 37a88b5ba8SSam Ravnborg : "memory"); \ 38a88b5ba8SSam Ravnborg __ret; \ 39a88b5ba8SSam Ravnborg }) 40a88b5ba8SSam Ravnborg #define iommu_write(__reg, __val) \ 41a88b5ba8SSam Ravnborg __asm__ __volatile__("stxa %0, [%1] %2" \ 42a88b5ba8SSam Ravnborg : /* no 
outputs */ \ 43a88b5ba8SSam Ravnborg : "r" (__val), "r" (__reg), \ 44a88b5ba8SSam Ravnborg "i" (ASI_PHYS_BYPASS_EC_E)) 45a88b5ba8SSam Ravnborg 46a88b5ba8SSam Ravnborg /* Must be invoked under the IOMMU lock. */ 47a88b5ba8SSam Ravnborg static void iommu_flushall(struct iommu *iommu) 48a88b5ba8SSam Ravnborg { 49a88b5ba8SSam Ravnborg if (iommu->iommu_flushinv) { 50a88b5ba8SSam Ravnborg iommu_write(iommu->iommu_flushinv, ~(u64)0); 51a88b5ba8SSam Ravnborg } else { 52a88b5ba8SSam Ravnborg unsigned long tag; 53a88b5ba8SSam Ravnborg int entry; 54a88b5ba8SSam Ravnborg 55a88b5ba8SSam Ravnborg tag = iommu->iommu_tags; 56a88b5ba8SSam Ravnborg for (entry = 0; entry < 16; entry++) { 57a88b5ba8SSam Ravnborg iommu_write(tag, 0); 58a88b5ba8SSam Ravnborg tag += 8; 59a88b5ba8SSam Ravnborg } 60a88b5ba8SSam Ravnborg 61a88b5ba8SSam Ravnborg /* Ensure completion of previous PIO writes. */ 62a88b5ba8SSam Ravnborg (void) iommu_read(iommu->write_complete_reg); 63a88b5ba8SSam Ravnborg } 64a88b5ba8SSam Ravnborg } 65a88b5ba8SSam Ravnborg 66a88b5ba8SSam Ravnborg #define IOPTE_CONSISTENT(CTX) \ 67a88b5ba8SSam Ravnborg (IOPTE_VALID | IOPTE_CACHE | \ 68a88b5ba8SSam Ravnborg (((CTX) << 47) & IOPTE_CONTEXT)) 69a88b5ba8SSam Ravnborg 70a88b5ba8SSam Ravnborg #define IOPTE_STREAMING(CTX) \ 71a88b5ba8SSam Ravnborg (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF) 72a88b5ba8SSam Ravnborg 73a88b5ba8SSam Ravnborg /* Existing mappings are never marked invalid, instead they 74a88b5ba8SSam Ravnborg * are pointed to a dummy page. 
75a88b5ba8SSam Ravnborg */ 76a88b5ba8SSam Ravnborg #define IOPTE_IS_DUMMY(iommu, iopte) \ 77a88b5ba8SSam Ravnborg ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa) 78a88b5ba8SSam Ravnborg 79a88b5ba8SSam Ravnborg static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) 80a88b5ba8SSam Ravnborg { 81a88b5ba8SSam Ravnborg unsigned long val = iopte_val(*iopte); 82a88b5ba8SSam Ravnborg 83a88b5ba8SSam Ravnborg val &= ~IOPTE_PAGE; 84a88b5ba8SSam Ravnborg val |= iommu->dummy_page_pa; 85a88b5ba8SSam Ravnborg 86a88b5ba8SSam Ravnborg iopte_val(*iopte) = val; 87a88b5ba8SSam Ravnborg } 88a88b5ba8SSam Ravnborg 89a88b5ba8SSam Ravnborg /* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle' 90a88b5ba8SSam Ravnborg * facility it must all be done in one pass while under the iommu lock. 91a88b5ba8SSam Ravnborg * 92a88b5ba8SSam Ravnborg * On sun4u platforms, we only flush the IOMMU once every time we've passed 93a88b5ba8SSam Ravnborg * over the entire page table doing allocations. Therefore we only ever advance 94a88b5ba8SSam Ravnborg * the hint and cannot backtrack it. 
95a88b5ba8SSam Ravnborg */ 96a88b5ba8SSam Ravnborg unsigned long iommu_range_alloc(struct device *dev, 97a88b5ba8SSam Ravnborg struct iommu *iommu, 98a88b5ba8SSam Ravnborg unsigned long npages, 99a88b5ba8SSam Ravnborg unsigned long *handle) 100a88b5ba8SSam Ravnborg { 101a88b5ba8SSam Ravnborg unsigned long n, end, start, limit, boundary_size; 102a88b5ba8SSam Ravnborg struct iommu_arena *arena = &iommu->arena; 103a88b5ba8SSam Ravnborg int pass = 0; 104a88b5ba8SSam Ravnborg 105a88b5ba8SSam Ravnborg /* This allocator was derived from x86_64's bit string search */ 106a88b5ba8SSam Ravnborg 107a88b5ba8SSam Ravnborg /* Sanity check */ 108a88b5ba8SSam Ravnborg if (unlikely(npages == 0)) { 109a88b5ba8SSam Ravnborg if (printk_ratelimit()) 110a88b5ba8SSam Ravnborg WARN_ON(1); 111a88b5ba8SSam Ravnborg return DMA_ERROR_CODE; 112a88b5ba8SSam Ravnborg } 113a88b5ba8SSam Ravnborg 114a88b5ba8SSam Ravnborg if (handle && *handle) 115a88b5ba8SSam Ravnborg start = *handle; 116a88b5ba8SSam Ravnborg else 117a88b5ba8SSam Ravnborg start = arena->hint; 118a88b5ba8SSam Ravnborg 119a88b5ba8SSam Ravnborg limit = arena->limit; 120a88b5ba8SSam Ravnborg 121a88b5ba8SSam Ravnborg /* The case below can happen if we have a small segment appended 122a88b5ba8SSam Ravnborg * to a large, or when the previous alloc was at the very end of 123a88b5ba8SSam Ravnborg * the available space. If so, go back to the beginning and flush. 
124a88b5ba8SSam Ravnborg */ 125a88b5ba8SSam Ravnborg if (start >= limit) { 126a88b5ba8SSam Ravnborg start = 0; 127a88b5ba8SSam Ravnborg if (iommu->flush_all) 128a88b5ba8SSam Ravnborg iommu->flush_all(iommu); 129a88b5ba8SSam Ravnborg } 130a88b5ba8SSam Ravnborg 131a88b5ba8SSam Ravnborg again: 132a88b5ba8SSam Ravnborg 133a88b5ba8SSam Ravnborg if (dev) 134a88b5ba8SSam Ravnborg boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 135a88b5ba8SSam Ravnborg 1 << IO_PAGE_SHIFT); 136a88b5ba8SSam Ravnborg else 137a88b5ba8SSam Ravnborg boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT); 138a88b5ba8SSam Ravnborg 139a88b5ba8SSam Ravnborg n = iommu_area_alloc(arena->map, limit, start, npages, 140a88b5ba8SSam Ravnborg iommu->page_table_map_base >> IO_PAGE_SHIFT, 141a88b5ba8SSam Ravnborg boundary_size >> IO_PAGE_SHIFT, 0); 142a88b5ba8SSam Ravnborg if (n == -1) { 143a88b5ba8SSam Ravnborg if (likely(pass < 1)) { 144a88b5ba8SSam Ravnborg /* First failure, rescan from the beginning. */ 145a88b5ba8SSam Ravnborg start = 0; 146a88b5ba8SSam Ravnborg if (iommu->flush_all) 147a88b5ba8SSam Ravnborg iommu->flush_all(iommu); 148a88b5ba8SSam Ravnborg pass++; 149a88b5ba8SSam Ravnborg goto again; 150a88b5ba8SSam Ravnborg } else { 151a88b5ba8SSam Ravnborg /* Second failure, give up */ 152a88b5ba8SSam Ravnborg return DMA_ERROR_CODE; 153a88b5ba8SSam Ravnborg } 154a88b5ba8SSam Ravnborg } 155a88b5ba8SSam Ravnborg 156a88b5ba8SSam Ravnborg end = n + npages; 157a88b5ba8SSam Ravnborg 158a88b5ba8SSam Ravnborg arena->hint = end; 159a88b5ba8SSam Ravnborg 160a88b5ba8SSam Ravnborg /* Update handle for SG allocations */ 161a88b5ba8SSam Ravnborg if (handle) 162a88b5ba8SSam Ravnborg *handle = end; 163a88b5ba8SSam Ravnborg 164a88b5ba8SSam Ravnborg return n; 165a88b5ba8SSam Ravnborg } 166a88b5ba8SSam Ravnborg 167a88b5ba8SSam Ravnborg void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages) 168a88b5ba8SSam Ravnborg { 169a88b5ba8SSam Ravnborg struct iommu_arena *arena = &iommu->arena; 
170a88b5ba8SSam Ravnborg unsigned long entry; 171a88b5ba8SSam Ravnborg 172a88b5ba8SSam Ravnborg entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; 173a88b5ba8SSam Ravnborg 174a66022c4SAkinobu Mita bitmap_clear(arena->map, entry, npages); 175a88b5ba8SSam Ravnborg } 176a88b5ba8SSam Ravnborg 177a88b5ba8SSam Ravnborg int iommu_table_init(struct iommu *iommu, int tsbsize, 178a88b5ba8SSam Ravnborg u32 dma_offset, u32 dma_addr_mask, 179a88b5ba8SSam Ravnborg int numa_node) 180a88b5ba8SSam Ravnborg { 181a88b5ba8SSam Ravnborg unsigned long i, order, sz, num_tsb_entries; 182a88b5ba8SSam Ravnborg struct page *page; 183a88b5ba8SSam Ravnborg 184a88b5ba8SSam Ravnborg num_tsb_entries = tsbsize / sizeof(iopte_t); 185a88b5ba8SSam Ravnborg 186a88b5ba8SSam Ravnborg /* Setup initial software IOMMU state. */ 187a88b5ba8SSam Ravnborg spin_lock_init(&iommu->lock); 188a88b5ba8SSam Ravnborg iommu->ctx_lowest_free = 1; 189a88b5ba8SSam Ravnborg iommu->page_table_map_base = dma_offset; 190a88b5ba8SSam Ravnborg iommu->dma_addr_mask = dma_addr_mask; 191a88b5ba8SSam Ravnborg 192a88b5ba8SSam Ravnborg /* Allocate and initialize the free area map. */ 193a88b5ba8SSam Ravnborg sz = num_tsb_entries / 8; 194a88b5ba8SSam Ravnborg sz = (sz + 7UL) & ~7UL; 195a88b5ba8SSam Ravnborg iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node); 196a88b5ba8SSam Ravnborg if (!iommu->arena.map) { 197a88b5ba8SSam Ravnborg printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n"); 198a88b5ba8SSam Ravnborg return -ENOMEM; 199a88b5ba8SSam Ravnborg } 200a88b5ba8SSam Ravnborg memset(iommu->arena.map, 0, sz); 201a88b5ba8SSam Ravnborg iommu->arena.limit = num_tsb_entries; 202a88b5ba8SSam Ravnborg 203a88b5ba8SSam Ravnborg if (tlb_type != hypervisor) 204a88b5ba8SSam Ravnborg iommu->flush_all = iommu_flushall; 205a88b5ba8SSam Ravnborg 206a88b5ba8SSam Ravnborg /* Allocate and initialize the dummy page which we 207a88b5ba8SSam Ravnborg * set inactive IO PTEs to point to. 
208a88b5ba8SSam Ravnborg */ 209a88b5ba8SSam Ravnborg page = alloc_pages_node(numa_node, GFP_KERNEL, 0); 210a88b5ba8SSam Ravnborg if (!page) { 211a88b5ba8SSam Ravnborg printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n"); 212a88b5ba8SSam Ravnborg goto out_free_map; 213a88b5ba8SSam Ravnborg } 214a88b5ba8SSam Ravnborg iommu->dummy_page = (unsigned long) page_address(page); 215a88b5ba8SSam Ravnborg memset((void *)iommu->dummy_page, 0, PAGE_SIZE); 216a88b5ba8SSam Ravnborg iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); 217a88b5ba8SSam Ravnborg 218a88b5ba8SSam Ravnborg /* Now allocate and setup the IOMMU page table itself. */ 219a88b5ba8SSam Ravnborg order = get_order(tsbsize); 220a88b5ba8SSam Ravnborg page = alloc_pages_node(numa_node, GFP_KERNEL, order); 221a88b5ba8SSam Ravnborg if (!page) { 222a88b5ba8SSam Ravnborg printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); 223a88b5ba8SSam Ravnborg goto out_free_dummy_page; 224a88b5ba8SSam Ravnborg } 225a88b5ba8SSam Ravnborg iommu->page_table = (iopte_t *)page_address(page); 226a88b5ba8SSam Ravnborg 227a88b5ba8SSam Ravnborg for (i = 0; i < num_tsb_entries; i++) 228a88b5ba8SSam Ravnborg iopte_make_dummy(iommu, &iommu->page_table[i]); 229a88b5ba8SSam Ravnborg 230a88b5ba8SSam Ravnborg return 0; 231a88b5ba8SSam Ravnborg 232a88b5ba8SSam Ravnborg out_free_dummy_page: 233a88b5ba8SSam Ravnborg free_page(iommu->dummy_page); 234a88b5ba8SSam Ravnborg iommu->dummy_page = 0UL; 235a88b5ba8SSam Ravnborg 236a88b5ba8SSam Ravnborg out_free_map: 237a88b5ba8SSam Ravnborg kfree(iommu->arena.map); 238a88b5ba8SSam Ravnborg iommu->arena.map = NULL; 239a88b5ba8SSam Ravnborg 240a88b5ba8SSam Ravnborg return -ENOMEM; 241a88b5ba8SSam Ravnborg } 242a88b5ba8SSam Ravnborg 243a88b5ba8SSam Ravnborg static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, 244a88b5ba8SSam Ravnborg unsigned long npages) 245a88b5ba8SSam Ravnborg { 246a88b5ba8SSam Ravnborg unsigned long entry; 247a88b5ba8SSam Ravnborg 248a88b5ba8SSam 
Ravnborg entry = iommu_range_alloc(dev, iommu, npages, NULL); 249a88b5ba8SSam Ravnborg if (unlikely(entry == DMA_ERROR_CODE)) 250a88b5ba8SSam Ravnborg return NULL; 251a88b5ba8SSam Ravnborg 252a88b5ba8SSam Ravnborg return iommu->page_table + entry; 253a88b5ba8SSam Ravnborg } 254a88b5ba8SSam Ravnborg 255a88b5ba8SSam Ravnborg static int iommu_alloc_ctx(struct iommu *iommu) 256a88b5ba8SSam Ravnborg { 257a88b5ba8SSam Ravnborg int lowest = iommu->ctx_lowest_free; 258a88b5ba8SSam Ravnborg int sz = IOMMU_NUM_CTXS - lowest; 259a88b5ba8SSam Ravnborg int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); 260a88b5ba8SSam Ravnborg 261a88b5ba8SSam Ravnborg if (unlikely(n == sz)) { 262a88b5ba8SSam Ravnborg n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); 263a88b5ba8SSam Ravnborg if (unlikely(n == lowest)) { 264a88b5ba8SSam Ravnborg printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); 265a88b5ba8SSam Ravnborg n = 0; 266a88b5ba8SSam Ravnborg } 267a88b5ba8SSam Ravnborg } 268a88b5ba8SSam Ravnborg if (n) 269a88b5ba8SSam Ravnborg __set_bit(n, iommu->ctx_bitmap); 270a88b5ba8SSam Ravnborg 271a88b5ba8SSam Ravnborg return n; 272a88b5ba8SSam Ravnborg } 273a88b5ba8SSam Ravnborg 274a88b5ba8SSam Ravnborg static inline void iommu_free_ctx(struct iommu *iommu, int ctx) 275a88b5ba8SSam Ravnborg { 276a88b5ba8SSam Ravnborg if (likely(ctx)) { 277a88b5ba8SSam Ravnborg __clear_bit(ctx, iommu->ctx_bitmap); 278a88b5ba8SSam Ravnborg if (ctx < iommu->ctx_lowest_free) 279a88b5ba8SSam Ravnborg iommu->ctx_lowest_free = ctx; 280a88b5ba8SSam Ravnborg } 281a88b5ba8SSam Ravnborg } 282a88b5ba8SSam Ravnborg 283a88b5ba8SSam Ravnborg static void *dma_4u_alloc_coherent(struct device *dev, size_t size, 284a88b5ba8SSam Ravnborg dma_addr_t *dma_addrp, gfp_t gfp) 285a88b5ba8SSam Ravnborg { 286a88b5ba8SSam Ravnborg unsigned long flags, order, first_page; 287a88b5ba8SSam Ravnborg struct iommu *iommu; 288a88b5ba8SSam Ravnborg struct page *page; 289a88b5ba8SSam Ravnborg int npages, nid; 290a88b5ba8SSam Ravnborg 
iopte_t *iopte; 291a88b5ba8SSam Ravnborg void *ret; 292a88b5ba8SSam Ravnborg 293a88b5ba8SSam Ravnborg size = IO_PAGE_ALIGN(size); 294a88b5ba8SSam Ravnborg order = get_order(size); 295a88b5ba8SSam Ravnborg if (order >= 10) 296a88b5ba8SSam Ravnborg return NULL; 297a88b5ba8SSam Ravnborg 298a88b5ba8SSam Ravnborg nid = dev->archdata.numa_node; 299a88b5ba8SSam Ravnborg page = alloc_pages_node(nid, gfp, order); 300a88b5ba8SSam Ravnborg if (unlikely(!page)) 301a88b5ba8SSam Ravnborg return NULL; 302a88b5ba8SSam Ravnborg 303a88b5ba8SSam Ravnborg first_page = (unsigned long) page_address(page); 304a88b5ba8SSam Ravnborg memset((char *)first_page, 0, PAGE_SIZE << order); 305a88b5ba8SSam Ravnborg 306a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 307a88b5ba8SSam Ravnborg 308a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 309a88b5ba8SSam Ravnborg iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); 310a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 311a88b5ba8SSam Ravnborg 312a88b5ba8SSam Ravnborg if (unlikely(iopte == NULL)) { 313a88b5ba8SSam Ravnborg free_pages(first_page, order); 314a88b5ba8SSam Ravnborg return NULL; 315a88b5ba8SSam Ravnborg } 316a88b5ba8SSam Ravnborg 317a88b5ba8SSam Ravnborg *dma_addrp = (iommu->page_table_map_base + 318a88b5ba8SSam Ravnborg ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); 319a88b5ba8SSam Ravnborg ret = (void *) first_page; 320a88b5ba8SSam Ravnborg npages = size >> IO_PAGE_SHIFT; 321a88b5ba8SSam Ravnborg first_page = __pa(first_page); 322a88b5ba8SSam Ravnborg while (npages--) { 323a88b5ba8SSam Ravnborg iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) | 324a88b5ba8SSam Ravnborg IOPTE_WRITE | 325a88b5ba8SSam Ravnborg (first_page & IOPTE_PAGE)); 326a88b5ba8SSam Ravnborg iopte++; 327a88b5ba8SSam Ravnborg first_page += IO_PAGE_SIZE; 328a88b5ba8SSam Ravnborg } 329a88b5ba8SSam Ravnborg 330a88b5ba8SSam Ravnborg return ret; 331a88b5ba8SSam Ravnborg } 332a88b5ba8SSam Ravnborg 333a88b5ba8SSam Ravnborg static void 
dma_4u_free_coherent(struct device *dev, size_t size, 334a88b5ba8SSam Ravnborg void *cpu, dma_addr_t dvma) 335a88b5ba8SSam Ravnborg { 336a88b5ba8SSam Ravnborg struct iommu *iommu; 337a88b5ba8SSam Ravnborg iopte_t *iopte; 338a88b5ba8SSam Ravnborg unsigned long flags, order, npages; 339a88b5ba8SSam Ravnborg 340a88b5ba8SSam Ravnborg npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 341a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 342a88b5ba8SSam Ravnborg iopte = iommu->page_table + 343a88b5ba8SSam Ravnborg ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 344a88b5ba8SSam Ravnborg 345a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 346a88b5ba8SSam Ravnborg 347a88b5ba8SSam Ravnborg iommu_range_free(iommu, dvma, npages); 348a88b5ba8SSam Ravnborg 349a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 350a88b5ba8SSam Ravnborg 351a88b5ba8SSam Ravnborg order = get_order(size); 352a88b5ba8SSam Ravnborg if (order < 10) 353a88b5ba8SSam Ravnborg free_pages((unsigned long)cpu, order); 354a88b5ba8SSam Ravnborg } 355a88b5ba8SSam Ravnborg 356797a7568SFUJITA Tomonori static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, 357797a7568SFUJITA Tomonori unsigned long offset, size_t sz, 358bc0a14f1SFUJITA Tomonori enum dma_data_direction direction, 359bc0a14f1SFUJITA Tomonori struct dma_attrs *attrs) 360a88b5ba8SSam Ravnborg { 361a88b5ba8SSam Ravnborg struct iommu *iommu; 362a88b5ba8SSam Ravnborg struct strbuf *strbuf; 363a88b5ba8SSam Ravnborg iopte_t *base; 364a88b5ba8SSam Ravnborg unsigned long flags, npages, oaddr; 365a88b5ba8SSam Ravnborg unsigned long i, base_paddr, ctx; 366a88b5ba8SSam Ravnborg u32 bus_addr, ret; 367a88b5ba8SSam Ravnborg unsigned long iopte_protection; 368a88b5ba8SSam Ravnborg 369a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 370a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 371a88b5ba8SSam Ravnborg 372a88b5ba8SSam Ravnborg if (unlikely(direction == DMA_NONE)) 373a88b5ba8SSam Ravnborg goto bad_no_ctx; 
374a88b5ba8SSam Ravnborg 375797a7568SFUJITA Tomonori oaddr = (unsigned long)(page_address(page) + offset); 376a88b5ba8SSam Ravnborg npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 377a88b5ba8SSam Ravnborg npages >>= IO_PAGE_SHIFT; 378a88b5ba8SSam Ravnborg 379a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 380a88b5ba8SSam Ravnborg base = alloc_npages(dev, iommu, npages); 381a88b5ba8SSam Ravnborg ctx = 0; 382a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush) 383a88b5ba8SSam Ravnborg ctx = iommu_alloc_ctx(iommu); 384a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 385a88b5ba8SSam Ravnborg 386a88b5ba8SSam Ravnborg if (unlikely(!base)) 387a88b5ba8SSam Ravnborg goto bad; 388a88b5ba8SSam Ravnborg 389a88b5ba8SSam Ravnborg bus_addr = (iommu->page_table_map_base + 390a88b5ba8SSam Ravnborg ((base - iommu->page_table) << IO_PAGE_SHIFT)); 391a88b5ba8SSam Ravnborg ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 392a88b5ba8SSam Ravnborg base_paddr = __pa(oaddr & IO_PAGE_MASK); 393a88b5ba8SSam Ravnborg if (strbuf->strbuf_enabled) 394a88b5ba8SSam Ravnborg iopte_protection = IOPTE_STREAMING(ctx); 395a88b5ba8SSam Ravnborg else 396a88b5ba8SSam Ravnborg iopte_protection = IOPTE_CONSISTENT(ctx); 397a88b5ba8SSam Ravnborg if (direction != DMA_TO_DEVICE) 398a88b5ba8SSam Ravnborg iopte_protection |= IOPTE_WRITE; 399a88b5ba8SSam Ravnborg 400a88b5ba8SSam Ravnborg for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) 401a88b5ba8SSam Ravnborg iopte_val(*base) = iopte_protection | base_paddr; 402a88b5ba8SSam Ravnborg 403a88b5ba8SSam Ravnborg return ret; 404a88b5ba8SSam Ravnborg 405a88b5ba8SSam Ravnborg bad: 406a88b5ba8SSam Ravnborg iommu_free_ctx(iommu, ctx); 407a88b5ba8SSam Ravnborg bad_no_ctx: 408a88b5ba8SSam Ravnborg if (printk_ratelimit()) 409a88b5ba8SSam Ravnborg WARN_ON(1); 410a88b5ba8SSam Ravnborg return DMA_ERROR_CODE; 411a88b5ba8SSam Ravnborg } 412a88b5ba8SSam Ravnborg 413a88b5ba8SSam Ravnborg static void strbuf_flush(struct strbuf 
*strbuf, struct iommu *iommu, 414a88b5ba8SSam Ravnborg u32 vaddr, unsigned long ctx, unsigned long npages, 415a88b5ba8SSam Ravnborg enum dma_data_direction direction) 416a88b5ba8SSam Ravnborg { 417a88b5ba8SSam Ravnborg int limit; 418a88b5ba8SSam Ravnborg 419a88b5ba8SSam Ravnborg if (strbuf->strbuf_ctxflush && 420a88b5ba8SSam Ravnborg iommu->iommu_ctxflush) { 421a88b5ba8SSam Ravnborg unsigned long matchreg, flushreg; 422a88b5ba8SSam Ravnborg u64 val; 423a88b5ba8SSam Ravnborg 424a88b5ba8SSam Ravnborg flushreg = strbuf->strbuf_ctxflush; 425a88b5ba8SSam Ravnborg matchreg = STC_CTXMATCH_ADDR(strbuf, ctx); 426a88b5ba8SSam Ravnborg 427a88b5ba8SSam Ravnborg iommu_write(flushreg, ctx); 428a88b5ba8SSam Ravnborg val = iommu_read(matchreg); 429a88b5ba8SSam Ravnborg val &= 0xffff; 430a88b5ba8SSam Ravnborg if (!val) 431a88b5ba8SSam Ravnborg goto do_flush_sync; 432a88b5ba8SSam Ravnborg 433a88b5ba8SSam Ravnborg while (val) { 434a88b5ba8SSam Ravnborg if (val & 0x1) 435a88b5ba8SSam Ravnborg iommu_write(flushreg, ctx); 436a88b5ba8SSam Ravnborg val >>= 1; 437a88b5ba8SSam Ravnborg } 438a88b5ba8SSam Ravnborg val = iommu_read(matchreg); 439a88b5ba8SSam Ravnborg if (unlikely(val)) { 440a88b5ba8SSam Ravnborg printk(KERN_WARNING "strbuf_flush: ctx flush " 44190181136SSam Ravnborg "timeout matchreg[%llx] ctx[%lx]\n", 442a88b5ba8SSam Ravnborg val, ctx); 443a88b5ba8SSam Ravnborg goto do_page_flush; 444a88b5ba8SSam Ravnborg } 445a88b5ba8SSam Ravnborg } else { 446a88b5ba8SSam Ravnborg unsigned long i; 447a88b5ba8SSam Ravnborg 448a88b5ba8SSam Ravnborg do_page_flush: 449a88b5ba8SSam Ravnborg for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) 450a88b5ba8SSam Ravnborg iommu_write(strbuf->strbuf_pflush, vaddr); 451a88b5ba8SSam Ravnborg } 452a88b5ba8SSam Ravnborg 453a88b5ba8SSam Ravnborg do_flush_sync: 454a88b5ba8SSam Ravnborg /* If the device could not have possibly put dirty data into 455a88b5ba8SSam Ravnborg * the streaming cache, no flush-flag synchronization needs 456a88b5ba8SSam Ravnborg * to 
be performed. 457a88b5ba8SSam Ravnborg */ 458a88b5ba8SSam Ravnborg if (direction == DMA_TO_DEVICE) 459a88b5ba8SSam Ravnborg return; 460a88b5ba8SSam Ravnborg 461a88b5ba8SSam Ravnborg STC_FLUSHFLAG_INIT(strbuf); 462a88b5ba8SSam Ravnborg iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa); 463a88b5ba8SSam Ravnborg (void) iommu_read(iommu->write_complete_reg); 464a88b5ba8SSam Ravnborg 465a88b5ba8SSam Ravnborg limit = 100000; 466a88b5ba8SSam Ravnborg while (!STC_FLUSHFLAG_SET(strbuf)) { 467a88b5ba8SSam Ravnborg limit--; 468a88b5ba8SSam Ravnborg if (!limit) 469a88b5ba8SSam Ravnborg break; 470a88b5ba8SSam Ravnborg udelay(1); 471a88b5ba8SSam Ravnborg rmb(); 472a88b5ba8SSam Ravnborg } 473a88b5ba8SSam Ravnborg if (!limit) 474a88b5ba8SSam Ravnborg printk(KERN_WARNING "strbuf_flush: flushflag timeout " 475a88b5ba8SSam Ravnborg "vaddr[%08x] ctx[%lx] npages[%ld]\n", 476a88b5ba8SSam Ravnborg vaddr, ctx, npages); 477a88b5ba8SSam Ravnborg } 478a88b5ba8SSam Ravnborg 479797a7568SFUJITA Tomonori static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, 480bc0a14f1SFUJITA Tomonori size_t sz, enum dma_data_direction direction, 481bc0a14f1SFUJITA Tomonori struct dma_attrs *attrs) 482a88b5ba8SSam Ravnborg { 483a88b5ba8SSam Ravnborg struct iommu *iommu; 484a88b5ba8SSam Ravnborg struct strbuf *strbuf; 485a88b5ba8SSam Ravnborg iopte_t *base; 486a88b5ba8SSam Ravnborg unsigned long flags, npages, ctx, i; 487a88b5ba8SSam Ravnborg 488a88b5ba8SSam Ravnborg if (unlikely(direction == DMA_NONE)) { 489a88b5ba8SSam Ravnborg if (printk_ratelimit()) 490a88b5ba8SSam Ravnborg WARN_ON(1); 491a88b5ba8SSam Ravnborg return; 492a88b5ba8SSam Ravnborg } 493a88b5ba8SSam Ravnborg 494a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 495a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 496a88b5ba8SSam Ravnborg 497a88b5ba8SSam Ravnborg npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 498a88b5ba8SSam Ravnborg npages >>= IO_PAGE_SHIFT; 499a88b5ba8SSam Ravnborg base = 
iommu->page_table + 500a88b5ba8SSam Ravnborg ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 501a88b5ba8SSam Ravnborg bus_addr &= IO_PAGE_MASK; 502a88b5ba8SSam Ravnborg 503a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 504a88b5ba8SSam Ravnborg 505a88b5ba8SSam Ravnborg /* Record the context, if any. */ 506a88b5ba8SSam Ravnborg ctx = 0; 507a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush) 508a88b5ba8SSam Ravnborg ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; 509a88b5ba8SSam Ravnborg 510a88b5ba8SSam Ravnborg /* Step 1: Kick data out of streaming buffers if necessary. */ 511a88b5ba8SSam Ravnborg if (strbuf->strbuf_enabled) 512a88b5ba8SSam Ravnborg strbuf_flush(strbuf, iommu, bus_addr, ctx, 513a88b5ba8SSam Ravnborg npages, direction); 514a88b5ba8SSam Ravnborg 515a88b5ba8SSam Ravnborg /* Step 2: Clear out TSB entries. */ 516a88b5ba8SSam Ravnborg for (i = 0; i < npages; i++) 517a88b5ba8SSam Ravnborg iopte_make_dummy(iommu, base + i); 518a88b5ba8SSam Ravnborg 519a88b5ba8SSam Ravnborg iommu_range_free(iommu, bus_addr, npages); 520a88b5ba8SSam Ravnborg 521a88b5ba8SSam Ravnborg iommu_free_ctx(iommu, ctx); 522a88b5ba8SSam Ravnborg 523a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 524a88b5ba8SSam Ravnborg } 525a88b5ba8SSam Ravnborg 526a88b5ba8SSam Ravnborg static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 527bc0a14f1SFUJITA Tomonori int nelems, enum dma_data_direction direction, 528bc0a14f1SFUJITA Tomonori struct dma_attrs *attrs) 529a88b5ba8SSam Ravnborg { 530a88b5ba8SSam Ravnborg struct scatterlist *s, *outs, *segstart; 531a88b5ba8SSam Ravnborg unsigned long flags, handle, prot, ctx; 532a88b5ba8SSam Ravnborg dma_addr_t dma_next = 0, dma_addr; 533a88b5ba8SSam Ravnborg unsigned int max_seg_size; 534a88b5ba8SSam Ravnborg unsigned long seg_boundary_size; 535a88b5ba8SSam Ravnborg int outcount, incount, i; 536a88b5ba8SSam Ravnborg struct strbuf *strbuf; 537a88b5ba8SSam Ravnborg struct iommu *iommu; 
538a88b5ba8SSam Ravnborg unsigned long base_shift; 539a88b5ba8SSam Ravnborg 540a88b5ba8SSam Ravnborg BUG_ON(direction == DMA_NONE); 541a88b5ba8SSam Ravnborg 542a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 543a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 544a88b5ba8SSam Ravnborg if (nelems == 0 || !iommu) 545a88b5ba8SSam Ravnborg return 0; 546a88b5ba8SSam Ravnborg 547a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 548a88b5ba8SSam Ravnborg 549a88b5ba8SSam Ravnborg ctx = 0; 550a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush) 551a88b5ba8SSam Ravnborg ctx = iommu_alloc_ctx(iommu); 552a88b5ba8SSam Ravnborg 553a88b5ba8SSam Ravnborg if (strbuf->strbuf_enabled) 554a88b5ba8SSam Ravnborg prot = IOPTE_STREAMING(ctx); 555a88b5ba8SSam Ravnborg else 556a88b5ba8SSam Ravnborg prot = IOPTE_CONSISTENT(ctx); 557a88b5ba8SSam Ravnborg if (direction != DMA_TO_DEVICE) 558a88b5ba8SSam Ravnborg prot |= IOPTE_WRITE; 559a88b5ba8SSam Ravnborg 560a88b5ba8SSam Ravnborg outs = s = segstart = &sglist[0]; 561a88b5ba8SSam Ravnborg outcount = 1; 562a88b5ba8SSam Ravnborg incount = nelems; 563a88b5ba8SSam Ravnborg handle = 0; 564a88b5ba8SSam Ravnborg 565a88b5ba8SSam Ravnborg /* Init first segment length for backout at failure */ 566a88b5ba8SSam Ravnborg outs->dma_length = 0; 567a88b5ba8SSam Ravnborg 568a88b5ba8SSam Ravnborg max_seg_size = dma_get_max_seg_size(dev); 569a88b5ba8SSam Ravnborg seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 570a88b5ba8SSam Ravnborg IO_PAGE_SIZE) >> IO_PAGE_SHIFT; 571a88b5ba8SSam Ravnborg base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT; 572a88b5ba8SSam Ravnborg for_each_sg(sglist, s, nelems, i) { 573a88b5ba8SSam Ravnborg unsigned long paddr, npages, entry, out_entry = 0, slen; 574a88b5ba8SSam Ravnborg iopte_t *base; 575a88b5ba8SSam Ravnborg 576a88b5ba8SSam Ravnborg slen = s->length; 577a88b5ba8SSam Ravnborg /* Sanity check */ 578a88b5ba8SSam Ravnborg if (slen == 0) { 579a88b5ba8SSam Ravnborg dma_next = 0; 580a88b5ba8SSam Ravnborg 
continue; 581a88b5ba8SSam Ravnborg } 582a88b5ba8SSam Ravnborg /* Allocate iommu entries for that segment */ 583a88b5ba8SSam Ravnborg paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); 584a88b5ba8SSam Ravnborg npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); 585a88b5ba8SSam Ravnborg entry = iommu_range_alloc(dev, iommu, npages, &handle); 586a88b5ba8SSam Ravnborg 587a88b5ba8SSam Ravnborg /* Handle failure */ 588a88b5ba8SSam Ravnborg if (unlikely(entry == DMA_ERROR_CODE)) { 589a88b5ba8SSam Ravnborg if (printk_ratelimit()) 590a88b5ba8SSam Ravnborg printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 591a88b5ba8SSam Ravnborg " npages %lx\n", iommu, paddr, npages); 592a88b5ba8SSam Ravnborg goto iommu_map_failed; 593a88b5ba8SSam Ravnborg } 594a88b5ba8SSam Ravnborg 595a88b5ba8SSam Ravnborg base = iommu->page_table + entry; 596a88b5ba8SSam Ravnborg 597a88b5ba8SSam Ravnborg /* Convert entry to a dma_addr_t */ 598a88b5ba8SSam Ravnborg dma_addr = iommu->page_table_map_base + 599a88b5ba8SSam Ravnborg (entry << IO_PAGE_SHIFT); 600a88b5ba8SSam Ravnborg dma_addr |= (s->offset & ~IO_PAGE_MASK); 601a88b5ba8SSam Ravnborg 602a88b5ba8SSam Ravnborg /* Insert into HW table */ 603a88b5ba8SSam Ravnborg paddr &= IO_PAGE_MASK; 604a88b5ba8SSam Ravnborg while (npages--) { 605a88b5ba8SSam Ravnborg iopte_val(*base) = prot | paddr; 606a88b5ba8SSam Ravnborg base++; 607a88b5ba8SSam Ravnborg paddr += IO_PAGE_SIZE; 608a88b5ba8SSam Ravnborg } 609a88b5ba8SSam Ravnborg 610a88b5ba8SSam Ravnborg /* If we are in an open segment, try merging */ 611a88b5ba8SSam Ravnborg if (segstart != s) { 612a88b5ba8SSam Ravnborg /* We cannot merge if: 613a88b5ba8SSam Ravnborg * - allocated dma_addr isn't contiguous to previous allocation 614a88b5ba8SSam Ravnborg */ 615a88b5ba8SSam Ravnborg if ((dma_addr != dma_next) || 616a88b5ba8SSam Ravnborg (outs->dma_length + s->length > max_seg_size) || 617a88b5ba8SSam Ravnborg (is_span_boundary(out_entry, base_shift, 618a88b5ba8SSam Ravnborg seg_boundary_size, outs, s))) { 
619a88b5ba8SSam Ravnborg /* Can't merge: create a new segment */ 620a88b5ba8SSam Ravnborg segstart = s; 621a88b5ba8SSam Ravnborg outcount++; 622a88b5ba8SSam Ravnborg outs = sg_next(outs); 623a88b5ba8SSam Ravnborg } else { 624a88b5ba8SSam Ravnborg outs->dma_length += s->length; 625a88b5ba8SSam Ravnborg } 626a88b5ba8SSam Ravnborg } 627a88b5ba8SSam Ravnborg 628a88b5ba8SSam Ravnborg if (segstart == s) { 629a88b5ba8SSam Ravnborg /* This is a new segment, fill entries */ 630a88b5ba8SSam Ravnborg outs->dma_address = dma_addr; 631a88b5ba8SSam Ravnborg outs->dma_length = slen; 632a88b5ba8SSam Ravnborg out_entry = entry; 633a88b5ba8SSam Ravnborg } 634a88b5ba8SSam Ravnborg 635a88b5ba8SSam Ravnborg /* Calculate next page pointer for contiguous check */ 636a88b5ba8SSam Ravnborg dma_next = dma_addr + slen; 637a88b5ba8SSam Ravnborg } 638a88b5ba8SSam Ravnborg 639a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 640a88b5ba8SSam Ravnborg 641a88b5ba8SSam Ravnborg if (outcount < incount) { 642a88b5ba8SSam Ravnborg outs = sg_next(outs); 643a88b5ba8SSam Ravnborg outs->dma_address = DMA_ERROR_CODE; 644a88b5ba8SSam Ravnborg outs->dma_length = 0; 645a88b5ba8SSam Ravnborg } 646a88b5ba8SSam Ravnborg 647a88b5ba8SSam Ravnborg return outcount; 648a88b5ba8SSam Ravnborg 649a88b5ba8SSam Ravnborg iommu_map_failed: 650a88b5ba8SSam Ravnborg for_each_sg(sglist, s, nelems, i) { 651a88b5ba8SSam Ravnborg if (s->dma_length != 0) { 652a88b5ba8SSam Ravnborg unsigned long vaddr, npages, entry, j; 653a88b5ba8SSam Ravnborg iopte_t *base; 654a88b5ba8SSam Ravnborg 655a88b5ba8SSam Ravnborg vaddr = s->dma_address & IO_PAGE_MASK; 656a88b5ba8SSam Ravnborg npages = iommu_num_pages(s->dma_address, s->dma_length, 657a88b5ba8SSam Ravnborg IO_PAGE_SIZE); 658a88b5ba8SSam Ravnborg iommu_range_free(iommu, vaddr, npages); 659a88b5ba8SSam Ravnborg 660a88b5ba8SSam Ravnborg entry = (vaddr - iommu->page_table_map_base) 661a88b5ba8SSam Ravnborg >> IO_PAGE_SHIFT; 662a88b5ba8SSam Ravnborg base = iommu->page_table + 
entry; 663a88b5ba8SSam Ravnborg 664a88b5ba8SSam Ravnborg for (j = 0; j < npages; j++) 665a88b5ba8SSam Ravnborg iopte_make_dummy(iommu, base + j); 666a88b5ba8SSam Ravnborg 667a88b5ba8SSam Ravnborg s->dma_address = DMA_ERROR_CODE; 668a88b5ba8SSam Ravnborg s->dma_length = 0; 669a88b5ba8SSam Ravnborg } 670a88b5ba8SSam Ravnborg if (s == outs) 671a88b5ba8SSam Ravnborg break; 672a88b5ba8SSam Ravnborg } 673a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 674a88b5ba8SSam Ravnborg 675a88b5ba8SSam Ravnborg return 0; 676a88b5ba8SSam Ravnborg } 677a88b5ba8SSam Ravnborg 678a88b5ba8SSam Ravnborg /* If contexts are being used, they are the same in all of the mappings 679a88b5ba8SSam Ravnborg * we make for a particular SG. 680a88b5ba8SSam Ravnborg */ 681a88b5ba8SSam Ravnborg static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) 682a88b5ba8SSam Ravnborg { 683a88b5ba8SSam Ravnborg unsigned long ctx = 0; 684a88b5ba8SSam Ravnborg 685a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush) { 686a88b5ba8SSam Ravnborg iopte_t *base; 687a88b5ba8SSam Ravnborg u32 bus_addr; 688a88b5ba8SSam Ravnborg 689a88b5ba8SSam Ravnborg bus_addr = sg->dma_address & IO_PAGE_MASK; 690a88b5ba8SSam Ravnborg base = iommu->page_table + 691a88b5ba8SSam Ravnborg ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 692a88b5ba8SSam Ravnborg 693a88b5ba8SSam Ravnborg ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL; 694a88b5ba8SSam Ravnborg } 695a88b5ba8SSam Ravnborg return ctx; 696a88b5ba8SSam Ravnborg } 697a88b5ba8SSam Ravnborg 698a88b5ba8SSam Ravnborg static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 699bc0a14f1SFUJITA Tomonori int nelems, enum dma_data_direction direction, 700bc0a14f1SFUJITA Tomonori struct dma_attrs *attrs) 701a88b5ba8SSam Ravnborg { 702a88b5ba8SSam Ravnborg unsigned long flags, ctx; 703a88b5ba8SSam Ravnborg struct scatterlist *sg; 704a88b5ba8SSam Ravnborg struct strbuf *strbuf; 705a88b5ba8SSam Ravnborg struct iommu *iommu; 
/* Body of dma_4u_unmap_sg().  Under the IOMMU spinlock, walk the mapped
 * scatterlist (stopping at the first zero-length entry, which map_sg used
 * as the terminator): for each segment, free its range in the allocator,
 * flush the streaming buffer for that range if enabled (must happen before
 * the IOPTEs are redirected), then point every IOPTE at the dummy page.
 * Finally release the context obtained via fetch_sg_ctx() and drop the
 * lock.  Note DMA_NONE is rejected with BUG_ON, matching the other unmap
 * paths.  The last fused fragment here is the signature opening of
 * dma_4u_sync_single_for_cpu() (its body continues on the next dump line).
 */
706a88b5ba8SSam Ravnborg 707a88b5ba8SSam Ravnborg BUG_ON(direction == DMA_NONE); 708a88b5ba8SSam Ravnborg 709a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 710a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 711a88b5ba8SSam Ravnborg 712a88b5ba8SSam Ravnborg ctx = fetch_sg_ctx(iommu, sglist); 713a88b5ba8SSam Ravnborg 714a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 715a88b5ba8SSam Ravnborg 716a88b5ba8SSam Ravnborg sg = sglist; 717a88b5ba8SSam Ravnborg while (nelems--) { 718a88b5ba8SSam Ravnborg dma_addr_t dma_handle = sg->dma_address; 719a88b5ba8SSam Ravnborg unsigned int len = sg->dma_length; 720a88b5ba8SSam Ravnborg unsigned long npages, entry; 721a88b5ba8SSam Ravnborg iopte_t *base; 722a88b5ba8SSam Ravnborg int i; 723a88b5ba8SSam Ravnborg 724a88b5ba8SSam Ravnborg if (!len) 725a88b5ba8SSam Ravnborg break; 726a88b5ba8SSam Ravnborg npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); 727a88b5ba8SSam Ravnborg iommu_range_free(iommu, dma_handle, npages); 728a88b5ba8SSam Ravnborg 729a88b5ba8SSam Ravnborg entry = ((dma_handle - iommu->page_table_map_base) 730a88b5ba8SSam Ravnborg >> IO_PAGE_SHIFT); 731a88b5ba8SSam Ravnborg base = iommu->page_table + entry; 732a88b5ba8SSam Ravnborg 733a88b5ba8SSam Ravnborg dma_handle &= IO_PAGE_MASK; 734a88b5ba8SSam Ravnborg if (strbuf->strbuf_enabled) 735a88b5ba8SSam Ravnborg strbuf_flush(strbuf, iommu, dma_handle, ctx, 736a88b5ba8SSam Ravnborg npages, direction); 737a88b5ba8SSam Ravnborg 738a88b5ba8SSam Ravnborg for (i = 0; i < npages; i++) 739a88b5ba8SSam Ravnborg iopte_make_dummy(iommu, base + i); 740a88b5ba8SSam Ravnborg 741a88b5ba8SSam Ravnborg sg = sg_next(sg); 742a88b5ba8SSam Ravnborg } 743a88b5ba8SSam Ravnborg 744a88b5ba8SSam Ravnborg iommu_free_ctx(iommu, ctx); 745a88b5ba8SSam Ravnborg 746a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 747a88b5ba8SSam Ravnborg } 748a88b5ba8SSam Ravnborg 749a88b5ba8SSam Ravnborg static void dma_4u_sync_single_for_cpu(struct device *dev, 750a88b5ba8SSam
/* Bulk of dma_4u_sync_single_for_cpu() (signature began on the previous
 * dump line; its final strbuf_flush + unlock follow on the next).
 * No-op unless the streaming buffer is enabled.  Under the IOMMU lock:
 * round the (bus_addr, sz) range out to IO_PAGE granularity, recover the
 * context number from the mapping's IOPTE when both the IOMMU and strbuf
 * support context flushes (same >> 47 IOPTE_CONTEXT extraction as
 * fetch_sg_ctx), then flush the streaming buffer so the CPU sees data the
 * device wrote.
 */
Ravnborg dma_addr_t bus_addr, size_t sz, 751a88b5ba8SSam Ravnborg enum dma_data_direction direction) 752a88b5ba8SSam Ravnborg { 753a88b5ba8SSam Ravnborg struct iommu *iommu; 754a88b5ba8SSam Ravnborg struct strbuf *strbuf; 755a88b5ba8SSam Ravnborg unsigned long flags, ctx, npages; 756a88b5ba8SSam Ravnborg 757a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 758a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 759a88b5ba8SSam Ravnborg 760a88b5ba8SSam Ravnborg if (!strbuf->strbuf_enabled) 761a88b5ba8SSam Ravnborg return; 762a88b5ba8SSam Ravnborg 763a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 764a88b5ba8SSam Ravnborg 765a88b5ba8SSam Ravnborg npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 766a88b5ba8SSam Ravnborg npages >>= IO_PAGE_SHIFT; 767a88b5ba8SSam Ravnborg bus_addr &= IO_PAGE_MASK; 768a88b5ba8SSam Ravnborg 769a88b5ba8SSam Ravnborg /* Step 1: Record the context, if any. */ 770a88b5ba8SSam Ravnborg ctx = 0; 771a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush && 772a88b5ba8SSam Ravnborg strbuf->strbuf_ctxflush) { 773a88b5ba8SSam Ravnborg iopte_t *iopte; 774a88b5ba8SSam Ravnborg 775a88b5ba8SSam Ravnborg iopte = iommu->page_table + 776a88b5ba8SSam Ravnborg ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT); 777a88b5ba8SSam Ravnborg ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; 778a88b5ba8SSam Ravnborg } 779a88b5ba8SSam Ravnborg 780a88b5ba8SSam Ravnborg /* Step 2: Kick data out of streaming buffers.
/* Two pieces on this dump line:
 * 1) The closing strbuf_flush + unlock of dma_4u_sync_single_for_cpu().
 * 2) First half of dma_4u_sync_sg_for_cpu(): also a no-op without an
 *    enabled streaming buffer; under the lock it recovers the context
 *    number from the FIRST sg entry's IOPTE (contexts are shared across
 *    the SG list, see fetch_sg_ctx's comment).  The flush-range
 *    computation follows on the next dump line.
 */
*/ 781a88b5ba8SSam Ravnborg strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 782a88b5ba8SSam Ravnborg 783a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 784a88b5ba8SSam Ravnborg } 785a88b5ba8SSam Ravnborg 786a88b5ba8SSam Ravnborg static void dma_4u_sync_sg_for_cpu(struct device *dev, 787a88b5ba8SSam Ravnborg struct scatterlist *sglist, int nelems, 788a88b5ba8SSam Ravnborg enum dma_data_direction direction) 789a88b5ba8SSam Ravnborg { 790a88b5ba8SSam Ravnborg struct iommu *iommu; 791a88b5ba8SSam Ravnborg struct strbuf *strbuf; 792a88b5ba8SSam Ravnborg unsigned long flags, ctx, npages, i; 793a88b5ba8SSam Ravnborg struct scatterlist *sg, *sgprv; 794a88b5ba8SSam Ravnborg u32 bus_addr; 795a88b5ba8SSam Ravnborg 796a88b5ba8SSam Ravnborg iommu = dev->archdata.iommu; 797a88b5ba8SSam Ravnborg strbuf = dev->archdata.stc; 798a88b5ba8SSam Ravnborg 799a88b5ba8SSam Ravnborg if (!strbuf->strbuf_enabled) 800a88b5ba8SSam Ravnborg return; 801a88b5ba8SSam Ravnborg 802a88b5ba8SSam Ravnborg spin_lock_irqsave(&iommu->lock, flags); 803a88b5ba8SSam Ravnborg 804a88b5ba8SSam Ravnborg /* Step 1: Record the context, if any. */ 805a88b5ba8SSam Ravnborg ctx = 0; 806a88b5ba8SSam Ravnborg if (iommu->iommu_ctxflush && 807a88b5ba8SSam Ravnborg strbuf->strbuf_ctxflush) { 808a88b5ba8SSam Ravnborg iopte_t *iopte; 809a88b5ba8SSam Ravnborg 810a88b5ba8SSam Ravnborg iopte = iommu->page_table + 811a88b5ba8SSam Ravnborg ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT); 812a88b5ba8SSam Ravnborg ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; 813a88b5ba8SSam Ravnborg 814a88b5ba8SSam Ravnborg 815a88b5ba8SSam Ravnborg /* Step 2: Kick data out of streaming buffers.
/* Remaining pieces on this dump line:
 * 1) Tail of dma_4u_sync_sg_for_cpu(): find the last sg entry with a
 *    non-zero dma_length (sgprv), compute one flush spanning from the
 *    first entry's page-aligned bus address through the end of sgprv,
 *    and flush the streaming buffer.  NOTE(review): if the very first
 *    entry had dma_length == 0, sgprv would stay NULL and the
 *    sgprv->dma_address deref would oops — presumably callers never pass
 *    such a list; confirm against the map_sg contract.
 * 2) sun4u_dma_ops: the dma_map_ops table wiring these dma_4u_* handlers
 *    (no sync_*_for_device hooks are installed).
 * 3) dma_ops: exported global defaulting to the sun4u table.
 * 4) Start of dma_supported(), plus the extern for the PCI fallback
 *    pci64_dma_supported(); masks >= 2^32 are rejected outright.
 */
*/ 816a88b5ba8SSam Ravnborg bus_addr = sglist[0].dma_address & IO_PAGE_MASK; 817a88b5ba8SSam Ravnborg sgprv = NULL; 818a88b5ba8SSam Ravnborg for_each_sg(sglist, sg, nelems, i) { 819a88b5ba8SSam Ravnborg if (sg->dma_length == 0) 820a88b5ba8SSam Ravnborg break; 821a88b5ba8SSam Ravnborg sgprv = sg; 822a88b5ba8SSam Ravnborg } 823a88b5ba8SSam Ravnborg 824a88b5ba8SSam Ravnborg npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) 825a88b5ba8SSam Ravnborg - bus_addr) >> IO_PAGE_SHIFT; 826a88b5ba8SSam Ravnborg strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); 827a88b5ba8SSam Ravnborg 828a88b5ba8SSam Ravnborg spin_unlock_irqrestore(&iommu->lock, flags); 829a88b5ba8SSam Ravnborg } 830a88b5ba8SSam Ravnborg 83102f7a189SFUJITA Tomonori static struct dma_map_ops sun4u_dma_ops = { 832a88b5ba8SSam Ravnborg .alloc_coherent = dma_4u_alloc_coherent, 833a88b5ba8SSam Ravnborg .free_coherent = dma_4u_free_coherent, 834797a7568SFUJITA Tomonori .map_page = dma_4u_map_page, 835797a7568SFUJITA Tomonori .unmap_page = dma_4u_unmap_page, 836a88b5ba8SSam Ravnborg .map_sg = dma_4u_map_sg, 837a88b5ba8SSam Ravnborg .unmap_sg = dma_4u_unmap_sg, 838a88b5ba8SSam Ravnborg .sync_single_for_cpu = dma_4u_sync_single_for_cpu, 839a88b5ba8SSam Ravnborg .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, 840a88b5ba8SSam Ravnborg }; 841a88b5ba8SSam Ravnborg 84202f7a189SFUJITA Tomonori struct dma_map_ops *dma_ops = &sun4u_dma_ops; 843a88b5ba8SSam Ravnborg EXPORT_SYMBOL(dma_ops); 844a88b5ba8SSam Ravnborg 845ee664a92SFUJITA Tomonori extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); 846ee664a92SFUJITA Tomonori 847a88b5ba8SSam Ravnborg int dma_supported(struct device *dev, u64 device_mask) 848a88b5ba8SSam Ravnborg { 849a88b5ba8SSam Ravnborg struct iommu *iommu = dev->archdata.iommu; 850a88b5ba8SSam Ravnborg u64 dma_addr_mask = iommu->dma_addr_mask; 851a88b5ba8SSam Ravnborg 852a88b5ba8SSam Ravnborg if (device_mask >= (1UL << 32UL)) 853a88b5ba8SSam Ravnborg return 0;
/* Tail of dma_supported(): accept the mask when it covers the IOMMU's own
 * DMA address mask; otherwise, for PCI devices, defer to the 64-bit
 * bypass check in pci64_dma_supported(); anything else is unsupported.
 */
854a88b5ba8SSam Ravnborg 855a88b5ba8SSam Ravnborg if ((device_mask & dma_addr_mask) == dma_addr_mask) 856a88b5ba8SSam Ravnborg return 1; 857a88b5ba8SSam Ravnborg 858a88b5ba8SSam Ravnborg #ifdef CONFIG_PCI 859a88b5ba8SSam Ravnborg if (dev->bus == &pci_bus_type) 860ee664a92SFUJITA Tomonori return pci64_dma_supported(to_pci_dev(dev), device_mask); 861a88b5ba8SSam Ravnborg #endif 862a88b5ba8SSam Ravnborg 863a88b5ba8SSam Ravnborg return 0; 864a88b5ba8SSam Ravnborg } 865a88b5ba8SSam Ravnborg EXPORT_SYMBOL(dma_supported); 866