Lines matching refs: ring (from amdgpu_ring_mux.c, grouped by function)

amdgpu_ring_mux_sw_entry():
    44                                              struct amdgpu_ring *ring)
    46          return ring->entry_index < mux->ring_entry_size ?
    47                  &mux->ring_entry[ring->entry_index] : NULL;

amdgpu_ring_mux_copy_pkt_from_sw_ring():
    52                                                 struct amdgpu_ring *ring,
    58          start = s_start & ring->buf_mask;
    59          end = s_end & ring->buf_mask;
    66                  amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
    67                  amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
    68                                             (ring->ring_size >> 2) - start);
    69                  amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
    72                  amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
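The copy helper above depends on the ring size being a power of two: masking s_start/s_end with buf_mask yields wrap-around dword indices (ring_size >> 2 converts the byte size to dwords), and a wrapped span (start > end) is written out in two pieces, tail first, then head. A minimal standalone sketch of the same technique, with illustrative names that are not part of the kernel API:

    #include <stdint.h>
    #include <string.h>

    /* Copy the dwords [s_start, s_end) out of a power-of-two ring
     * buffer, handling wrap-around the same way the kernel helper
     * does. Illustrative sketch only; not the kernel API. */
    static void copy_span(uint32_t *dst, const uint32_t *ring,
                          uint32_t size_dw, uint64_t s_start, uint64_t s_end)
    {
        uint32_t mask = size_dw - 1;        /* buf_mask analogue */
        uint32_t start = s_start & mask;
        uint32_t end = s_end & mask;

        if (start > end) {
            /* Span wraps past the end of the buffer: tail, then head. */
            memcpy(dst, &ring[start], (size_dw - start) * sizeof(*ring));
            memcpy(dst + (size_dw - start), ring, end * sizeof(*ring));
        } else {
            memcpy(dst, &ring[start], (end - start) * sizeof(*ring));
        }
    }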
amdgpu_mux_resubmit_chunks():
    88          if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
    99          last_seq = atomic_read(&e->ring->fence_drv.last_seq);
   105                  amdgpu_fence_update_start_timestamp(e->ring,
   109                          le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
   110                          if (chunk->cntl_offset <= e->ring->buf_mask)
   111                                  amdgpu_ring_patch_cntl(e->ring,
   113                          if (chunk->ce_offset <= e->ring->buf_mask)
   114                                  amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
   115                          if (chunk->de_offset <= e->ring->buf_mask)
   116                                  amdgpu_ring_patch_de(e->ring, chunk->de_offset);
   118                  amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,

amdgpu_ring_mux_init():
   149  int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
   152          mux->real_ring = ring;

amdgpu_ring_mux_add_sw_ring():
   196  int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   206          ring->entry_index = mux->num_ring_entries;
   207          e->ring = ring;

amdgpu_ring_mux_set_wptr():
   214  void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
   220          if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
   223          e = amdgpu_ring_mux_sw_entry(mux, ring);
   231          if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
   238          if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
   244          if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
   245                  amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);

amdgpu_ring_mux_get_wptr():
   254  u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   258          e = amdgpu_ring_mux_sw_entry(mux, ring);

amdgpu_ring_mux_get_rptr():
   283  u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   288          e = amdgpu_ring_mux_sw_entry(mux, ring);
   306          e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
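Line 306 is the same modular arithmetic seen from the reader's side: the software rptr is the copy pointer advanced by however far the real ring's rptr has moved, wrapped with buf_mask. A quick self-checking example, assuming a hypothetical 256-dword ring:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 256-dword ring: buf_mask = 256 - 1 = 0xff. A copy pointer at
         * index 250 advanced by 10 dwords wraps to (250 + 10) & 0xff = 4. */
        const uint64_t buf_mask = 0xff;
        uint64_t sw_cptr = 250, offset = 10;

        assert(((sw_cptr + offset) & buf_mask) == 4);
        return 0;
    }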
amdgpu_sw_ring_get_rptr_gfx():
   317  u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
   319          struct amdgpu_device *adev = ring->adev;
   322          WARN_ON(!ring->is_sw_ring);
   323          return amdgpu_ring_mux_get_rptr(mux, ring);

amdgpu_sw_ring_get_wptr_gfx():
   326  u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
   328          struct amdgpu_device *adev = ring->adev;
   331          WARN_ON(!ring->is_sw_ring);
   332          return amdgpu_ring_mux_get_wptr(mux, ring);

amdgpu_sw_ring_set_wptr_gfx():
   335  void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
   337          struct amdgpu_device *adev = ring->adev;
   340          WARN_ON(!ring->is_sw_ring);
   341          amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);

amdgpu_sw_ring_insert_nop():
   345  void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
   347          WARN_ON(!ring->is_sw_ring);

amdgpu_mcbp_scan():
   365          struct amdgpu_ring *ring;
   370                  ring = mux->ring_entry[i].ring;
   371                  if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
   372                      amdgpu_fence_count_emitted(ring) > 0)
   374                  if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
   375                      amdgpu_fence_last_unsignaled_time_us(ring) >
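The scan above walks every muxed ring to decide whether mid-command-buffer preemption (MCBP) is worth triggering. The visible conditions pair high-priority rings with fences still outstanding against low-priority rings whose oldest unsignaled fence has waited too long; the branch bodies are elided in this listing, so the following is only an assumed reading of that decision, with made-up names and an assumed threshold parameter:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring_state {
        bool high_prio;          /* hw_prio > AMDGPU_RING_PRIO_DEFAULT */
        unsigned fences_emitted; /* amdgpu_fence_count_emitted() */
        uint64_t unsignaled_us;  /* amdgpu_fence_last_unsignaled_time_us() */
    };

    /* Assumed reading of the scan: no preemption while high-priority
     * work is already in flight; preempt if a low-priority fence has
     * stalled past the threshold. */
    static bool should_preempt(const struct ring_state *r, int n,
                               uint64_t threshold_us)
    {
        bool need_preempt = false;

        for (int i = 0; i < n; i++) {
            if (r[i].high_prio && r[i].fences_emitted > 0)
                return false;
            if (!r[i].high_prio && r[i].unsignaled_us > threshold_us)
                need_preempt = true;
        }
        return need_preempt;
    }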
amdgpu_sw_ring_ib_begin():
   394  void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
   396          struct amdgpu_device *adev = ring->adev;
   399          WARN_ON(!ring->is_sw_ring);
   400          if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
   406          amdgpu_ring_mux_start_ib(mux, ring);

amdgpu_sw_ring_ib_end():
   409  void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
   411          struct amdgpu_device *adev = ring->adev;
   414          WARN_ON(!ring->is_sw_ring);
   415          if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
   417          amdgpu_ring_mux_end_ib(mux, ring);

amdgpu_sw_ring_ib_mark_offset():
   420  void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
   422          struct amdgpu_device *adev = ring->adev;
   426          if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
   429          offset = ring->wptr & ring->buf_mask;
   431          amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);

amdgpu_ring_mux_start_ib():
   434  void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   443          e = amdgpu_ring_mux_sw_entry(mux, ring);
   455          chunk->start = ring->wptr;
   457          chunk->cntl_offset = ring->buf_mask + 1;
   458          chunk->de_offset = ring->buf_mask + 1;
   459          chunk->ce_offset = ring->buf_mask + 1;
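Lines 457-459 initialize the chunk's patch offsets to buf_mask + 1. Every recorded offset is masked with buf_mask (see line 429), so buf_mask + 1 can never occur naturally and serves as a "no offset recorded" sentinel: the offset <= buf_mask guards in the resubmit path (file lines 110-115 above) then skip patching for chunks that never marked that offset. A small self-check of the invariant, again assuming a 256-dword ring:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t buf_mask = 0xff;          /* 256-dword ring */
        const uint64_t sentinel = buf_mask + 1;  /* chunk init value */
        uint64_t wptr = 0xdeadbeef;              /* any write pointer */

        /* Any masked offset passes the "<= buf_mask" guard... */
        assert((wptr & buf_mask) <= buf_mask);
        /* ...while the sentinel never does, so unset offsets are skipped. */
        assert(!(sentinel <= buf_mask));
        return 0;
    }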
scan_and_remove_signaled_chunk():
   463  static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   469          e = amdgpu_ring_mux_sw_entry(mux, ring);
   475          last_seq = atomic_read(&ring->fence_drv.last_seq);

amdgpu_ring_mux_ib_mark_offset():
   486                                          struct amdgpu_ring *ring, u64 offset,
   492          e = amdgpu_ring_mux_sw_entry(mux, ring);

amdgpu_ring_mux_end_ib():
   520  void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
   525          e = amdgpu_ring_mux_sw_entry(mux, ring);
   537          chunk->end = ring->wptr;
   538          chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
   540          scan_and_remove_signaled_chunk(mux, ring);

amdgpu_mcbp_handle_trailing_fence_irq():
   546          struct amdgpu_ring *ring = NULL;
   557                  if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
   558                          ring = e->ring;
   563          if (!ring) {
   568          amdgpu_fence_process(ring);
   569          if (amdgpu_fence_count_emitted(ring) > 0) {
   571                  mux->seqno_to_resubmit = ring->fence_drv.sync_seq;