/*
 * Copyright (c) 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/kref.h>

#include "i915_active_types.h"

struct drm_i915_gem_object;
struct drm_i915_private;

enum fb_op_origin {
	ORIGIN_CPU = 0,		/* direct CPU access (e.g. mmap/pwrite) */
	ORIGIN_CS,		/* GPU rendering via the command streamer */
	ORIGIN_FLIP,		/* page flip */
	ORIGIN_DIRTYFB,		/* userspace DIRTYFB ioctl */
	ORIGIN_CURSOR_UPDATE,	/* cursor plane update */
};

struct intel_frontbuffer {
	struct kref ref;			/* reference count */
	atomic_t bits;				/* INTEL_FRONTBUFFER_* bits currently assigned */
	struct i915_active write;		/* tracks pending (ORIGIN_CS) GPU writes */
	struct drm_i915_gem_object *obj;	/* backing GEM object */
	struct rcu_head rcu;			/* for RCU-deferred release */
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
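/*
 * For illustration, assuming the usual enumerator values PIPE_A == 0,
 * PIPE_B == 1 and PLANE_PRIMARY == 0 (those enums are defined elsewhere in
 * the driver, not in this header), the macros above expand as follows:
 *
 *	INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY) == BIT(0)         == 0x01
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_A)        == BIT(7)         == 0x80
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_A)       == GENMASK(7, 0)  == 0xff
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B)       == GENMASK(15, 8) == 0xff00
 */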

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);
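
/*
 * A minimal sketch of how the flip hooks pair up around an asynchronous
 * plane update (the surrounding commit code is illustrative, not a specific
 * call site):
 *
 *	intel_frontbuffer_flip_prepare(i915, frontbuffer_bits);
 *	... arm the flip in hardware and wait for it to complete ...
 *	intel_frontbuffer_flip_complete(i915, frontbuffer_bits);
 *
 * intel_frontbuffer_flip() is the single-call variant for updates that
 * complete synchronously and therefore need no separate prepare/complete
 * pair.
 */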

void intel_frontbuffer_put(struct intel_frontbuffer *front);

struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: frontbuffer to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: frontbuffer to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_flush(front, origin, frontbuffer_bits);
}
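
/*
 * A minimal usage sketch for the invalidate/flush pair (the CPU write in the
 * middle is illustrative; "front" would typically come from
 * intel_frontbuffer_get()):
 *
 *	if (intel_frontbuffer_invalidate(front, ORIGIN_CPU)) {
 *		... CPU writes to the object's backing pages ...
 *	}
 *	intel_frontbuffer_flush(front, ORIGIN_CPU);
 *
 * Both helpers tolerate a NULL @front and an object with no frontbuffer bits,
 * so callers need not check either condition themselves.
 */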

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);
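
/*
 * A minimal sketch of moving the tracking bits from one buffer to another
 * during a plane update (old_front, new_front, pipe and plane_id are
 * illustrative placeholders, not names from this header):
 *
 *	intel_frontbuffer_track(old_front, new_front,
 *				INTEL_FRONTBUFFER(pipe, plane_id));
 *
 * Passing NULL for @old covers enabling a previously disabled plane, and
 * NULL for @new covers disabling it.
 */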

#endif /* __INTEL_FRONTBUFFER_H__ */