#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack.  For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop). */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;
		DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_virt(startsg), startsg->length);

		/*
		** Look for the start of a new DMA stream
		*/

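		/*
		** iommu_coalesce_chunks() marks the head of each coalesced
		** chunk by setting PIDE_FLAG in sg_dma_address(); the
		** remaining bits carry the allocated pdir index and the
		** offset into the first IO page.
		*/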
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

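			/* Pass one stashed the coalesced stream length in
			 * this head entry; save it for the consistency
			 * check above and rebuild it entry by entry. */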
			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
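			/* Prefetch for write: the do/while loop below
			 * fills these pdir entries in place. */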
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
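		/* The head entry's offset into its first IO page widens
		 * the span that must be mapped. */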
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. A future enhancement could make one pass through
** the sglist do both. (A usage sketch at the end of this file shows
** how the two passes chain together.)
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
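	/* Never coalesce beyond the device's DMA segment size limit. */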
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					    IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			if (startsg->length + dma_len > max_seg_size)
				break;

			/*
			** Next see if we can append the next chunk: it must
			** start exactly where the previous entry ended, and
			** that shared boundary must be page aligned (e.g. a
			** chunk ending at 0x2000 followed by one starting at
			** 0x2000 coalesces; one ending at 0x1800 does not,
			** since the join falls mid-page).
			*/
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
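		/*
		** Pack PIDE_FLAG, the allocated pdir index, and the offset
		** into the first IO page into sg_dma_address() for
		** iommu_fill_pdir() to decode in pass two.
		*/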
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
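
/*
** A minimal usage sketch, not part of the original helpers: how a driver's
** map_sg path might chain the two passes above. The function and callback
** names here are hypothetical stand-ins; real users (e.g. sba, ccio) pass
** in their own range allocator and pdir-entry writer.
*/
static inline int
example_iommu_map_sg(struct ioc *ioc, struct device *dev,
		     struct scatterlist *sglist, int nents,
		     int (*alloc_range)(struct ioc *, struct device *, size_t),
		     void (*io_pdir_entry)(u64 *, space_t, unsigned long,
					   unsigned long))
{
	int coalesced, filled;

	/* Pass 1: merge virtually contiguous entries, allocate pdir space. */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, alloc_range);

	/* Pass 2: write a pdir entry for every IO page of every chunk. */
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, io_pdir_entry);

	/* Both passes walk the same list and must agree on the chunk count. */
	BUG_ON(coalesced != filled);

	return filled;
}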