// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

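/*
 * Create a dumb buffer whose pitch is aligned to the bus width of the
 * display processor, so the allocation satisfies the hardware's scanout
 * alignment requirement.
 */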
static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

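/*
 * Top-level IRQ handler: ask the chip backend to decode the pending
 * events, then let every CRTC handle the events that belong to it.
 */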
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	komeda_print_events(&evts);

	/* Notify the CRTCs to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose			= drm_fb_helper_lastclose,
	.gem_free_object_unlocked	= drm_gem_cma_free_object,
	.gem_vm_ops			= &drm_gem_cma_vm_ops,
	.dumb_create			= komeda_gem_cma_dumb_create,
	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table		= drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table	= drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap			= drm_gem_cma_prime_vmap,
	.gem_prime_vunmap		= drm_gem_cma_prime_vunmap,
	.gem_prime_mmap			= drm_gem_cma_prime_mmap,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

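/*
 * Komeda's commit tail: commit the modeset disables, planes and enables,
 * then wait for the flip to complete before signalling hw_done and
 * cleaning up the planes.
 */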
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

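/*
 * Insert a plane state into zorder_list, keeping the list sorted by
 * increasing zpos. Two planes sharing the same zpos are rejected, since
 * komeda does not support that configuration.
 */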
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted by increasing zpos, so if the list is empty or
	 * the zpos of the new node is bigger than the last node's, there is
	 * no need to loop: just insert the new node at the tail of the list.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Keep the list sorted by increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support assigning the same zpos to
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

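/*
 * Re-calculate the normalized zpos of all planes on this CRTC: pull every
 * affected plane state into the atomic state, sort the planes by zpos and
 * assign consecutive normalized values. A layer-split plane consumes two
 * slots (left/right layer), and the highest normalized zpos used by a
 * slave plane is recorded in the CRTC state.
 */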
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane is handled by two
		 * separate komeda layers (left/right), which need two zorders:
		 * - zorder: for the left layer of the left display part.
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* Track the maximum zorder used by a slave plane */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

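/*
 * Atomic check: run the core modeset checks, add every plane of the
 * affected CRTCs to the state, normalize the zpos values, then run the
 * plane checks.
 */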
static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate its resource assignment on every
	 * commit, so all affected planes (even unchanged ones) must be added
	 * to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create		= komeda_fb_create,
	.atomic_check		= komeda_kms_check,
	.atomic_commit		= drm_atomic_helper_commit,
};

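/*
 * Initialize the mode configuration: set up the CRTC topology from the
 * komeda device and install the komeda mode-config funcs and helpers.
 */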
static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* These limits should come from the device; use fixed defaults for now */
	config->min_width	= 0;
	config->min_height	= 0;
	config->max_width	= 4096;
	config->max_height	= 4096;
	config->allow_fb_modifiers = true;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

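/*
 * Create and register the DRM device for a komeda device: set up the mode
 * configuration, private objects, planes, CRTCs and writeback connectors,
 * bind the component drivers, request and enable the IRQ, and finally
 * register the device. On failure, everything already set up is unwound
 * in reverse order through the error labels.
 */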
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	err = mdev->funcs->enable_irq(mdev);
	if (err)
		goto free_component_binding;

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}

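/*
 * Tear down the KMS device: unregister it first so no new userspace access
 * can come in, then shut down the hardware and release the resources in
 * the reverse order of komeda_kms_attach().
 */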
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	drm_dev_put(drm);
}