// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!new_state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush: basically just to ensure
 * that vblank accounting is switched on, so we get a valid seqn/timestamp
 * on pageflip events (if requested).
 */

static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	int crtc_index;
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		crtc_index = drm_crtc_index(crtc);
		mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
	}
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

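/*
 * Complete a deferred (async) update for a single crtc.  This runs from the
 * pending-timer kthread worker, which is queued shortly before vsync: if the
 * crtc still has an update pending, clear it from pending_crtc_mask and
 * flush it to the hardware, holding vblank references so the pageflip event
 * gets a valid seqn/timestamp.
 */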
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
	struct msm_pending_timer *timer = container_of(t,
			struct msm_pending_timer, timer);

	kthread_queue_work(timer->worker, &timer->work);

	return HRTIMER_NORESTART;
}

static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = msm_atomic_pending_timer;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);
		timer->worker = NULL;
		return ret;
	}
	sched_set_fifo(timer->worker->task);
	kthread_init_work(&timer->work, msm_atomic_pending_work);

	return 0;
}

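/*
 * Counterpart to msm_atomic_init_pending_timer(): tear down the kthread
 * worker used to run deferred async commits for this crtc.
 */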
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}

static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with the for_each_crtc_mask() iterator to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			hrtimer_start(&timer->timer, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);

		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there are any async flushes pending on the updated crtcs,
	 * fold them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}