/*
 * Copyright (c) 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include <linux/atomic.h>
#include <linux/kref.h>

#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"

struct drm_i915_private;

enum fb_op_origin {
	ORIGIN_CPU = 0,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
	ORIGIN_CURSOR_UPDATE,
};

struct intel_frontbuffer {
	struct kref ref;
	atomic_t bits;
	struct i915_active write;
	struct drm_i915_gem_object *obj;
	struct rcu_head rcu;
};

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);

void intel_frontbuffer_put(struct intel_frontbuffer *front);

static inline struct intel_frontbuffer *
__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}
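/*
 * Usage sketch (illustrative only, not an additional API): a caller that
 * wants to act on an object's frontbuffer tracking would typically pair
 * __intel_frontbuffer_get() with intel_frontbuffer_put(), along the lines
 * of:
 *
 *	struct intel_frontbuffer *front;
 *
 *	front = __intel_frontbuffer_get(obj);
 *	if (front) {
 *		intel_frontbuffer_flush(front, ORIGIN_CPU);
 *		intel_frontbuffer_put(front);
 *	}
 *
 * The retry loop in __intel_frontbuffer_get() re-checks obj->frontbuffer
 * after taking a reference with kref_get_unless_zero(), so a concurrent
 * final put or replacement of the frontbuffer is handled while holding
 * only the RCU read lock.
 */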
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: frontbuffer to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts
 * and frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh)
 * must be invalidated. For ORIGIN_CS any subsequent invalidation will be
 * delayed until the rendering completes or a flip on this frontbuffer plane
 * is scheduled.
 */
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: frontbuffer to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_flush(front, origin, frontbuffer_bits);
}

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);

#endif /* __INTEL_FRONTBUFFER_H__ */
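/*
 * Usage sketch (illustrative only; the surrounding caller is assumed, not
 * defined by this header): CPU rendering to a tracked object is normally
 * bracketed by an invalidate/flush pair so that frontbuffer power-saving
 * features (FBC, PSR, DRRS) stay disabled while the contents are stale:
 *
 *	intel_frontbuffer_invalidate(front, ORIGIN_CPU);
 *	... CPU writes to the object ...
 *	intel_frontbuffer_flush(front, ORIGIN_CPU);
 *
 * When a plane switches buffers, intel_frontbuffer_track(old, new, bits)
 * moves the plane's frontbuffer bit from the old frontbuffer to the new
 * one so that later invalidate/flush calls affect the right planes.
 */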