// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string.h>

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

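/*
 * Global object states are reference counted. The final reference is
 * dropped through the owning object's ->atomic_destroy_state() hook.
 */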
static void __intel_atomic_global_state_free(struct kref *kref)
{
	struct intel_global_state *obj_state =
		container_of(kref, struct intel_global_state, ref);
	struct intel_global_obj *obj = obj_state->obj;

	obj->funcs->atomic_destroy_state(obj, obj_state);
}

static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
{
	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
}

static struct intel_global_state *
intel_atomic_global_state_get(struct intel_global_state *obj_state)
{
	kref_get(&obj_state->ref);

	return obj_state;
}

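/*
 * Register a global object and its initial (committed) state with the
 * device. The object is added to dev_priv->global_obj_list and torn
 * down again via intel_atomic_global_obj_cleanup().
 */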
void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
				  struct intel_global_obj *obj,
				  struct intel_global_state *state,
				  const struct intel_global_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	state->obj = obj;

	kref_init(&state->ref);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev_priv->global_obj_list);
}

void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
{
	struct intel_global_obj *obj, *next;

	list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
		list_del(&obj->head);

		drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
		intel_atomic_global_state_put(obj->state);
	}
}

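/*
 * Locking convention for global state: write access requires holding
 * all crtc modeset locks, read access requires holding at least one.
 */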
static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}

static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
				 struct drm_modeset_lock *lock)
{
	struct drm_modeset_lock *l;

	list_for_each_entry(l, &ctx->locked, head) {
		if (lock == l)
			return true;
	}

	return false;
}

static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
	struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (modeset_lock_is_held(ctx, &crtc->base.mutex))
			return;
	}

	drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}

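/*
 * Add the global object's state to the atomic state, duplicating the
 * current committed state on first access. Subsequent calls for the
 * same object return the already duplicated state.
 */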
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
				  struct intel_global_obj *obj)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	int index, num_objs, i;
	size_t size;
	struct __intel_global_objs_state *arr;
	struct intel_global_state *obj_state;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].state;

	assert_global_state_read_locked(state);

	num_objs = state->num_global_objs + 1;
	size = sizeof(*state->global_objs) * num_objs;
	arr = krealloc(state->global_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->global_objs = arr;
	index = state->num_global_objs;
	memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	obj_state->obj = obj;
	obj_state->changed = false;

	kref_init(&obj_state->ref);

	state->global_objs[index].state = obj_state;
	state->global_objs[index].old_state =
		intel_atomic_global_state_get(obj->state);
	state->global_objs[index].new_state = obj_state;
	state->global_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_global_objs = num_objs;

	drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
		       obj, obj_state, state);

	return obj_state;
}

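/*
 * Look up the old (committed) or new global object state in the atomic
 * state. Both lookups return NULL if the object has not been added to
 * the atomic state.
 */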
struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].old_state;

	return NULL;
}

struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].new_state;

	return NULL;
}

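/*
 * Swap the new global object states into place during commit: for each
 * object whose state was actually changed (and thus write locked),
 * obj->state is replaced with the new state, while the old committed
 * state stays referenced by the atomic state until it is cleared.
 */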
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *old_obj_state, *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);

		/*
		 * If the new state wasn't modified (and properly
		 * locked for write access) we throw it away.
		 */
		if (!new_obj_state->changed)
			continue;

		assert_global_state_write_locked(dev_priv);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->global_objs[i].state = old_obj_state;

		intel_atomic_global_state_put(obj->state);
		obj->state = intel_atomic_global_state_get(new_obj_state);
	}
}

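/*
 * Drop the references the atomic state holds on all global object
 * states and forget about them.
 */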
void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++) {
		intel_atomic_global_state_put(state->global_objs[i].old_state);
		intel_atomic_global_state_put(state->global_objs[i].new_state);

		state->global_objs[i].ptr = NULL;
		state->global_objs[i].state = NULL;
		state->global_objs[i].old_state = NULL;
		state->global_objs[i].new_state = NULL;
	}
	state->num_global_objs = 0;
}

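/*
 * Grab write access to the global object's state by taking all crtc
 * modeset locks.
 */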
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
	struct intel_atomic_state *state = obj_state->state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		int ret;

		ret = drm_modeset_lock(&crtc->base.mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;
	}

	obj_state->changed = true;

	return 0;
}

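/*
 * Serialize changes to the global object's state by pulling every
 * crtc's state into this commit, so that it is ordered against any
 * other commit touching a crtc.
 */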
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
	struct intel_atomic_state *state = obj_state->state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	obj_state->changed = true;

	return 0;
}