// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

/*
 * Plane .prepare_fb hook: set up the new framebuffer before the commit
 * touches hardware.  Hands the fb to the GEM fb helper (which handles
 * implicit-fence bookkeeping) and then pins/maps the backing buffers
 * into the kms address space via msm_framebuffer_prepare().
 *
 * Returns 0 on success (including the no-fb case) or a negative errno
 * from msm_framebuffer_prepare().
 */
int msm_atomic_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* Nothing to prepare when the plane is being disabled: */
	if (!new_state->fb)
		return 0;

	drm_gem_fb_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */

/* Take a vblank reference on each active crtc in the mask: */
static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		/* vblank refs are only meaningful on active crtcs: */
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

/* Drop the references taken by vblank_get() (same mask, same filter): */
static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}

/*
 * Take the per-crtc commit locks for every crtc in the mask.  Locks are
 * always acquired in crtc-index order (and released in reverse order by
 * unlock_crtcs()) so that concurrent commits touching overlapping crtc
 * sets cannot deadlock.
 */
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask)
		mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

/* Release the commit locks in the reverse of the acquisition order: */
static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

/*
 * Perform a deferred ("async") flush for a single crtc: flush the hw
 * updates that msm_atomic_commit_tail() queued earlier, and wait for
 * the flush to land.  Runs from the per-crtc kthread worker, kicked by
 * the pending-timer shortly before vblank.
 *
 * If the pending bit for this crtc was already cleared (a subsequent
 * non-async commit folded the flush into its own - see commit_tail),
 * there is nothing left to do.
 */
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	/* Raced with a commit that already flushed this crtc: */
	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

/*
 * hrtimer callback for a pending async commit.  The actual flush work
 * sleeps (commit locks are mutexes), so it cannot run here; just punt
 * it to the per-crtc kthread worker.
 */
static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
{
	struct msm_pending_timer *timer = container_of(t,
			struct msm_pending_timer, timer);

	kthread_queue_work(timer->worker, &timer->work);

	return HRTIMER_NORESTART;
}

/* kthread_work body: run the deferred flush for this timer's crtc: */
static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

/*
 * Set up the async-commit machinery for one crtc: the hrtimer that
 * fires shortly before vblank, and the dedicated FIFO-priority kthread
 * worker that performs the flush.
 *
 * Returns 0 on success or a negative errno if the worker could not be
 * created (timer->worker is left NULL in that case, which
 * msm_atomic_destroy_pending_timer() tolerates).
 */
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = msm_atomic_pending_timer;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);
		/* Leave a clean NULL so destroy() can be called safely: */
		timer->worker = NULL;
		return ret;
	}
	/* Flush work is deadline-sensitive (must land before vblank): */
	sched_set_fifo(timer->worker->task);
	kthread_init_work(&timer->work, msm_atomic_pending_work);

	return 0;
}

/* Tear down the worker created by msm_atomic_init_pending_timer(): */
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}

/*
 * Decide whether this atomic update may take the async path: only
 * cursor/async plane updates, touching no connectors, no modesets, and
 * at most one crtc.  On success *async_crtc is set to that single crtc.
 */
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change, means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		/* async updates are limited to a single crtc: */
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with for_each_crtc_mask() iterator, to iterate
 * effected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}

/*
 * Commit-tail implementation: push the atomic state to hardware and
 * either flush immediately (sync path), or - for simple single-crtc
 * cursor/async updates when the kms backend provides vsync_time -
 * defer the flush to a timer that fires ~1ms before vblank, so that
 * multiple cursor updates within one frame are coalesced into a
 * single flush.
 */
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	/* async path requires backend support for vsync timestamps: */
	bool async = kms->funcs->vsync_time &&
			can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;

			/* Wake ~1ms before vblank to flush in time: */
			vsync_time = kms->funcs->vsync_time(kms, async_crtc);
			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			hrtimer_start(&timer->timer, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);
		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}