/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_RING_H
#define INTEL_RING_H

#include "i915_gem.h" /* GEM_BUG_ON */
#include "i915_request.h"
#include "intel_ring_types.h"

struct intel_engine_cs;

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
int intel_ring_cacheline_align(struct i915_request *rq);

unsigned int intel_ring_update_space(struct intel_ring *ring);

int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);

void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline int intel_ring_direction(const struct intel_ring *ring,
				       u32 next, u32 prev)
{
	typecheck(typeof(ring->size), next);
	typecheck(typeof(ring->size), prev);
	return (next - prev) << ring->wrap;
}

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (== 0) as that indicates a wrap */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}
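/*
 * A sketch of the intended calling pattern for intel_ring_begin() and
 * intel_ring_advance() above; the dword count and the MI_NOOP opcode are
 * illustrative only, not taken from this header:
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(rq, cs);
 *
 * intel_ring_advance() then asserts (under GEM debug builds) that exactly
 * the reserved number of dwords was written.
 */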
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual
	 * RING_HEAD; it may have advanced since, but in the worst case it
	 * is still at ring->head, and so we must never program RING_TAIL
	 * to advance into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

#endif /* INTEL_RING_H */
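/*
 * Worked example for __intel_ring_space() above, with hypothetical numbers
 * and assuming CACHELINE_BYTES == 64: for a 4096 byte ring with head == 128
 * and tail == 192, the reported free space is
 *
 *	(128 - 192 - 64) & (4096 - 1) == 3968
 *
 * i.e. one cacheline less than the 4032 bytes naively available, so that
 * RING_TAIL is never advanced into the same cacheline as RING_HEAD, per
 * the restriction quoted in the comment.
 */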