/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"

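/* defined in si.c */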
u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

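	/* engine is not flagged for reset, so it is not hung;
	 * just refresh the lockup tracking information
	 */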
	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_set_page - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_page(struct radeon_device *rdev,
			struct radeon_ib *ib,
			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (flags & RADEON_VM_PAGE_SYSTEM) {
		while (count) {
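			/* each 64-bit PTE takes two dwords; one write
			 * packet carries at most 0xFFFFE dwords
			 */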
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			if (flags & RADEON_VM_PAGE_VALID)
				value = addr;
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			pe += ndw * 4;
			addr += (ndw / 2) * incr;
			count -= ndw / 2;
		}
	}
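	/* pad the IB to a multiple of 8 dwords with NOPs */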
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

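/**
 * si_dma_vm_flush - flush the VM TLB using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit the flush on
 * @vm: radeon_vm pointer
 *
 * Update the page table base address for @vm, flush the HDP cache,
 * and request a TLB invalidate for the VM context (SI).
 */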
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

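	/* write the page table base address for this VM context */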
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm->id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* invalidate the TLB for this VM context (one request bit per context) */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id);
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int si_copy_dma(struct radeon_device *rdev,
		uint64_t src_offset, uint64_t dst_offset,
		unsigned num_gpu_pages,
		struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
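	/* each COPY packet below takes 5 dwords; the extra dwords cover
	 * the optional semaphore sync and the fence emission
	 */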
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

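	/* if the previous fence is on another ring, make this copy
	 * wait on it via a semaphore; otherwise the semaphore is unused
	 */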
	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

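	/* a single COPY packet can transfer at most 0xFFFFF bytes */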
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, dst_offset & 0xffffffff);
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

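	/* emit a fence so the caller can wait for the copy to finish */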
	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}