Lines Matching +full:sync +full:-update +full:-mask
32 * si_dma_is_lockup - Check if the DMA engine is locked up
43 u32 mask; in si_dma_is_lockup() local
45 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in si_dma_is_lockup()
46 mask = RADEON_RESET_DMA; in si_dma_is_lockup()
48 mask = RADEON_RESET_DMA1; in si_dma_is_lockup()
50 if (!(reset_mask & mask)) { in si_dma_is_lockup()
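
The fragments above are the reset-mask check inside the lockup handler. A minimal sketch of how they plausibly fit together, assuming the mask comes from si_gpu_check_soft_reset() and the usual radeon lockup helpers handle the two outcomes:

        bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
        {
                u32 reset_mask = si_gpu_check_soft_reset(rdev); /* assumed source of reset_mask */
                u32 mask;

                /* each DMA engine has its own soft-reset bit */
                if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                        mask = RADEON_RESET_DMA;
                else
                        mask = RADEON_RESET_DMA1;

                if (!(reset_mask & mask)) {
                        radeon_ring_lockup_update(rdev, ring); /* engine alive, refresh lockup tracker */
                        return false;
                }
                return radeon_ring_test_lockup(rdev, ring);
        }
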
58 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
64 * @count: number of page entries to update
66 * Update PTEs by copying them from the GART using the DMA (SI).
78 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, in si_dma_vm_copy_pages()
80 ib->ptr[ib->length_dw++] = lower_32_bits(pe); in si_dma_vm_copy_pages()
81 ib->ptr[ib->length_dw++] = lower_32_bits(src); in si_dma_vm_copy_pages()
82 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; in si_dma_vm_copy_pages()
83 ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; in si_dma_vm_copy_pages()
87 count -= bytes / 8; in si_dma_vm_copy_pages()
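
The copy-pages fragments emit one DMA COPY packet per chunk, sourcing the PTEs from the GART table. A sketch of the surrounding loop; the 0xFFFF8-byte per-packet cap and the second DMA_PACKET argument are assumptions here:

        while (count) {
                unsigned bytes = count * 8;             /* 8 bytes per PTE */

                if (bytes > 0xFFFF8)                    /* assumed per-packet byte limit */
                        bytes = 0xFFFF8;

                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                                      1, 0, 0, bytes);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

                pe += bytes;
                src += bytes;
                count -= bytes / 8;
        }
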
92 * si_dma_vm_write_pages - update PTEs by writing them manually
98 * @count: number of page entries to update
102 * Update PTEs by writing them manually using the DMA (SI).
118 /* for non-physically contiguous pages (system) */ in si_dma_vm_write_pages()
119 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); in si_dma_vm_write_pages()
120 ib->ptr[ib->length_dw++] = pe; in si_dma_vm_write_pages()
121 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; in si_dma_vm_write_pages()
122 for (; ndw > 0; ndw -= 2, --count, pe += 8) { in si_dma_vm_write_pages()
132 ib->ptr[ib->length_dw++] = value; in si_dma_vm_write_pages()
133 ib->ptr[ib->length_dw++] = upper_32_bits(value); in si_dma_vm_write_pages()
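
The write-pages fragments show a DMA WRITE packet header followed by an inner loop that appends one 64-bit PTE per iteration. A sketch of how that loop body plausibly derives each value; the R600_PTE_* flags and the radeon_vm_map_gart() lookup are assumptions based on the other radeon page-table writers:

        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                if (flags & R600_PTE_SYSTEM) {
                        /* system pages are resolved through the GART mapping */
                        value = radeon_vm_map_gart(rdev, addr);
                } else if (flags & R600_PTE_VALID) {
                        /* VRAM pages map directly */
                        value = addr;
                } else {
                        value = 0;
                }
                addr += incr;
                value |= flags;
                ib->ptr[ib->length_dw++] = value;
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
        }
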
139 * si_dma_vm_set_pages - update the page tables using the DMA
145 * @count: number of page entries to update
149 * Update the page tables using the DMA (SI).
171 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); in si_dma_vm_set_pages()
172 ib->ptr[ib->length_dw++] = pe; /* dst addr */ in si_dma_vm_set_pages()
173 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; in si_dma_vm_set_pages()
174 ib->ptr[ib->length_dw++] = flags; /* mask */ in si_dma_vm_set_pages()
175 ib->ptr[ib->length_dw++] = 0; in si_dma_vm_set_pages()
176 ib->ptr[ib->length_dw++] = value; /* value */ in si_dma_vm_set_pages()
177 ib->ptr[ib->length_dw++] = upper_32_bits(value); in si_dma_vm_set_pages()
178 ib->ptr[ib->length_dw++] = incr; /* increment size */ in si_dma_vm_set_pages()
179 ib->ptr[ib->length_dw++] = 0; in si_dma_vm_set_pages()
182 count -= ndw / 2; in si_dma_vm_set_pages()
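
The set-pages fragments use the PTE_PDE packet, which lets the DMA engine generate a run of entries from a base value, an increment and a mask instead of writing every entry explicitly. A sketch of the loop around the nine packet dwords matched above; the 0xFFFFE dword cap and the value selection are assumptions:

        while (count) {
                ndw = count * 2;                        /* two dwords per entry */
                if (ndw > 0xFFFFE)                      /* assumed per-packet dword limit */
                        ndw = 0xFFFFE;

                if (flags & R600_PTE_VALID)             /* physically contiguous (vram) pages */
                        value = addr;
                else
                        value = 0;

                /* ... the nine DMA_PTE_PDE_PACKET dwords matched above go here ... */

                pe += ndw * 4;
                addr += (ndw / 2) * incr;
                count -= ndw / 2;
        }
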
194 …radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> … in si_dma_vm_flush()
203 /* bits 0-7 are the VM contexts 0-7 */ in si_dma_vm_flush()
212 radeon_ring_write(ring, 1 << vm_id); /* mask */ in si_dma_vm_flush()
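
The flush fragments program the page-directory base register for the context (contexts 8-15 live at VM_CONTEXT8_PAGE_TABLE_BASE_ADDR) and then invalidate that context's TLB. A sketch of the invalidate sequence, assuming the SRBM_WRITE and POLL_REG_MEM packets used on the SI DMA ring; the retry and poll-interval encodings are assumptions:

        /* bits 0-7 are the VM contexts 0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);

        /* wait for the invalidate to complete before new work uses the VM */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
        radeon_ring_write(ring, 0xff << 16);            /* retry count (assumed) */
        radeon_ring_write(ring, 1 << vm_id);            /* mask */
        radeon_ring_write(ring, 0);                     /* value */
        radeon_ring_write(ring, (0 << 28) | 0x20);      /* func(always) | poll interval (assumed) */
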
218 * si_copy_dma - copy pages using the DMA engine
224 * @resv: reservation object to sync to
236 struct radeon_sync sync; in si_copy_dma() local
237 int ring_index = rdev->asic->copy.dma_ring_index; in si_copy_dma()
238 struct radeon_ring *ring = &rdev->ring[ring_index]; in si_copy_dma()
243 radeon_sync_create(&sync); in si_copy_dma()
250 radeon_sync_free(rdev, &sync, NULL); in si_copy_dma()
254 radeon_sync_resv(rdev, &sync, resv, false); in si_copy_dma()
255 radeon_sync_rings(rdev, &sync, ring->idx); in si_copy_dma()
261 size_in_bytes -= cur_size_in_bytes; in si_copy_dma()
271 r = radeon_fence_emit(rdev, &fence, ring->idx); in si_copy_dma()
274 radeon_sync_free(rdev, &sync, NULL); in si_copy_dma()
279 radeon_sync_free(rdev, &sync, fence); in si_copy_dma()
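
The si_copy_dma fragments trace the sync-object lifecycle: create it, pull the fences out of the reservation object, sync the DMA ring against them, then release the object with NULL on the error paths or with the freshly emitted fence on success. A sketch of how those pieces plausibly connect, using the usual si_copy_dma parameters (num_gpu_pages, resv); the per-packet copy limit and ring dword budget are assumptions:

        struct radeon_fence *fence;
        struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops, r;

        radeon_sync_create(&sync);

        size_in_bytes = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);       /* assumed per-packet limit */
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);   /* assumed dword budget */
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_sync_free(rdev, &sync, NULL);            /* nothing emitted yet */
                return ERR_PTR(r);
        }

        /* wait for fences already attached to the reservation object */
        radeon_sync_resv(rdev, &sync, resv, false);
        radeon_sync_rings(rdev, &sync, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0xFFFFF)
                        cur_size_in_bytes = 0xFFFFF;
                size_in_bytes -= cur_size_in_bytes;
                /* ... emit one DMA_PACKET_COPY for this chunk ... */
        }

        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_sync_free(rdev, &sync, NULL);            /* error path: no fence */
                return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_sync_free(rdev, &sync, fence);                   /* success: fence guards the semaphores */

        return fence;
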