// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4. Replaced with a significantly more primitive version
 * similar to the sun3 code. the old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK		0x03ffe000
#define IOMMU_CACHE_INHIBIT	0x00000040
#define IOMMU_FULL_BLOCK	0x00000020
#define IOMMU_MODIFIED		0x00000010
#define IOMMU_USED		0x00000008
#define IOMMU_WRITE_PROTECT	0x00000004
#define IOMMU_DT_MASK		0x00000003
#define IOMMU_DT_INVALID	0x00000000
#define IOMMU_DT_VALID		0x00000001
#define IOMMU_DT_BAD		0x00000002


static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;


#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) | \
					 (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] =		\
					 (addr & IOMMU_ADDR_MASK) |	\
					 IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] =		\
					 (addr & IOMMU_ADDR_MASK) |	\
					 IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^	\
					 ((addr & 0x03c00000) >>	\
					  (DVMA_PAGE_SHIFT+4)))

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{

	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
			unsigned long vaddr, int len)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	do {
		pmd_t *pmd;
		unsigned long end2;

		if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while(vaddr < end3);

		} while(vaddr < end2);

	} while(vaddr < end);

	flush_tlb_all();

out:
	return ret;
}


/* fill in the IOMMU page table so that the bus address range starting
   at baddr reaches the physical pages backing kaddr */
inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
			  int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	if(len & ~DVMA_PAGE_MASK)
		end++;

	for(; index < end ; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;
//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;

}

/* invalidate the IOMMU page table entries covering a bus address range */
void dvma_unmap_iommu(unsigned long baddr, int len)
{

	int index, end;


	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

	for(; index < end ; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}

}
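
/*
 * Usage note: the helpers above are only one half of the DVMA setup; the
 * shared sun3 DVMA allocator picks a bus address and then calls
 * dvma_map_iommu() and dvma_map_cpu() back to back.  The function below is
 * a minimal sketch of that pairing, assuming dvma_btov() from asm/dvma.h
 * converts a bus address into its CPU-visible DVMA virtual address; the
 * dvma_example_map() name and the error-handling shape are illustrative
 * only and are not part of this file's interface.
 */
#if 0
static int dvma_example_map(unsigned long kaddr, unsigned long baddr, int len)
{
	/* CPU-visible DVMA virtual address for this bus address */
	unsigned long vaddr = dvma_btov(baddr);
	int ret;

	/* point the IOMMU entries for the bus range at the physical pages */
	ret = dvma_map_iommu(kaddr, baddr, len);
	if (ret < 0)
		return ret;

	/* then give the kernel a CPU mapping of the same DVMA range */
	ret = dvma_map_cpu(kaddr, vaddr, len);
	if (ret < 0)
		dvma_unmap_iommu(baddr, len);

	return ret;
}
#endif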