1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2c8b75bcaSEric Anholt /* 3c8b75bcaSEric Anholt * Copyright (C) 2015 Broadcom 4c8b75bcaSEric Anholt */ 5c8b75bcaSEric Anholt 6c8b75bcaSEric Anholt /** 7c8b75bcaSEric Anholt * DOC: VC4 KMS 8c8b75bcaSEric Anholt * 9c8b75bcaSEric Anholt * This is the general code for implementing KMS mode setting that 10c8b75bcaSEric Anholt * doesn't clearly associate with any of the other objects (plane, 11c8b75bcaSEric Anholt * crtc, HDMI encoder). 12c8b75bcaSEric Anholt */ 13c8b75bcaSEric Anholt 14d7d96c00SMaxime Ripard #include <linux/clk.h> 15d7d96c00SMaxime Ripard 16b7e8e25bSMasahiro Yamada #include <drm/drm_atomic.h> 17b7e8e25bSMasahiro Yamada #include <drm/drm_atomic_helper.h> 18fd6d6d80SSam Ravnborg #include <drm/drm_crtc.h> 199762477cSNoralf Trønnes #include <drm/drm_gem_framebuffer_helper.h> 20fcd70cd3SDaniel Vetter #include <drm/drm_plane_helper.h> 21fcd70cd3SDaniel Vetter #include <drm/drm_probe_helper.h> 22fd6d6d80SSam Ravnborg #include <drm/drm_vblank.h> 23fd6d6d80SSam Ravnborg 24c8b75bcaSEric Anholt #include "vc4_drv.h" 25766cc6b1SStefan Schake #include "vc4_regs.h" 26766cc6b1SStefan Schake 27a9661f27SMaxime Ripard #define HVS_NUM_CHANNELS 3 28a9661f27SMaxime Ripard 29766cc6b1SStefan Schake struct vc4_ctm_state { 30766cc6b1SStefan Schake struct drm_private_state base; 31766cc6b1SStefan Schake struct drm_color_ctm *ctm; 32766cc6b1SStefan Schake int fifo; 33766cc6b1SStefan Schake }; 34766cc6b1SStefan Schake 35766cc6b1SStefan Schake static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) 36766cc6b1SStefan Schake { 37766cc6b1SStefan Schake return container_of(priv, struct vc4_ctm_state, base); 38766cc6b1SStefan Schake } 39766cc6b1SStefan Schake 40f2df84e0SMaxime Ripard struct vc4_hvs_state { 41f2df84e0SMaxime Ripard struct drm_private_state base; 42f2df84e0SMaxime Ripard unsigned int unassigned_channels; 43f2df84e0SMaxime Ripard }; 44f2df84e0SMaxime Ripard 45f2df84e0SMaxime Ripard 
static struct vc4_hvs_state * 46f2df84e0SMaxime Ripard to_vc4_hvs_state(struct drm_private_state *priv) 47f2df84e0SMaxime Ripard { 48f2df84e0SMaxime Ripard return container_of(priv, struct vc4_hvs_state, base); 49f2df84e0SMaxime Ripard } 50f2df84e0SMaxime Ripard 514686da83SBoris Brezillon struct vc4_load_tracker_state { 524686da83SBoris Brezillon struct drm_private_state base; 534686da83SBoris Brezillon u64 hvs_load; 544686da83SBoris Brezillon u64 membus_load; 554686da83SBoris Brezillon }; 564686da83SBoris Brezillon 574686da83SBoris Brezillon static struct vc4_load_tracker_state * 584686da83SBoris Brezillon to_vc4_load_tracker_state(struct drm_private_state *priv) 594686da83SBoris Brezillon { 604686da83SBoris Brezillon return container_of(priv, struct vc4_load_tracker_state, base); 614686da83SBoris Brezillon } 624686da83SBoris Brezillon 63766cc6b1SStefan Schake static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, 64766cc6b1SStefan Schake struct drm_private_obj *manager) 65766cc6b1SStefan Schake { 66766cc6b1SStefan Schake struct drm_device *dev = state->dev; 6788e08589SMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(dev); 68766cc6b1SStefan Schake struct drm_private_state *priv_state; 69766cc6b1SStefan Schake int ret; 70766cc6b1SStefan Schake 71766cc6b1SStefan Schake ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx); 72766cc6b1SStefan Schake if (ret) 73766cc6b1SStefan Schake return ERR_PTR(ret); 74766cc6b1SStefan Schake 75766cc6b1SStefan Schake priv_state = drm_atomic_get_private_obj_state(state, manager); 76766cc6b1SStefan Schake if (IS_ERR(priv_state)) 77766cc6b1SStefan Schake return ERR_CAST(priv_state); 78766cc6b1SStefan Schake 79766cc6b1SStefan Schake return to_vc4_ctm_state(priv_state); 80766cc6b1SStefan Schake } 81766cc6b1SStefan Schake 82766cc6b1SStefan Schake static struct drm_private_state * 83766cc6b1SStefan Schake vc4_ctm_duplicate_state(struct drm_private_obj *obj) 84766cc6b1SStefan Schake { 85766cc6b1SStefan 
Schake struct vc4_ctm_state *state; 86766cc6b1SStefan Schake 87766cc6b1SStefan Schake state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 88766cc6b1SStefan Schake if (!state) 89766cc6b1SStefan Schake return NULL; 90766cc6b1SStefan Schake 91766cc6b1SStefan Schake __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 92766cc6b1SStefan Schake 93766cc6b1SStefan Schake return &state->base; 94766cc6b1SStefan Schake } 95766cc6b1SStefan Schake 96766cc6b1SStefan Schake static void vc4_ctm_destroy_state(struct drm_private_obj *obj, 97766cc6b1SStefan Schake struct drm_private_state *state) 98766cc6b1SStefan Schake { 99766cc6b1SStefan Schake struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state); 100766cc6b1SStefan Schake 101766cc6b1SStefan Schake kfree(ctm_state); 102766cc6b1SStefan Schake } 103766cc6b1SStefan Schake 104766cc6b1SStefan Schake static const struct drm_private_state_funcs vc4_ctm_state_funcs = { 105766cc6b1SStefan Schake .atomic_duplicate_state = vc4_ctm_duplicate_state, 106766cc6b1SStefan Schake .atomic_destroy_state = vc4_ctm_destroy_state, 107766cc6b1SStefan Schake }; 108766cc6b1SStefan Schake 109dcda7c28SMaxime Ripard static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused) 110dcda7c28SMaxime Ripard { 111dcda7c28SMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(dev); 112dcda7c28SMaxime Ripard 113dcda7c28SMaxime Ripard drm_atomic_private_obj_fini(&vc4->ctm_manager); 114dcda7c28SMaxime Ripard } 115dcda7c28SMaxime Ripard 116dcda7c28SMaxime Ripard static int vc4_ctm_obj_init(struct vc4_dev *vc4) 117dcda7c28SMaxime Ripard { 118dcda7c28SMaxime Ripard struct vc4_ctm_state *ctm_state; 119dcda7c28SMaxime Ripard 120dcda7c28SMaxime Ripard drm_modeset_lock_init(&vc4->ctm_state_lock); 121dcda7c28SMaxime Ripard 122dcda7c28SMaxime Ripard ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); 123dcda7c28SMaxime Ripard if (!ctm_state) 124dcda7c28SMaxime Ripard return -ENOMEM; 125dcda7c28SMaxime Ripard 126dcda7c28SMaxime Ripard 
drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base, 127dcda7c28SMaxime Ripard &vc4_ctm_state_funcs); 128dcda7c28SMaxime Ripard 1293c354ed1SMaxime Ripard return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL); 130dcda7c28SMaxime Ripard } 131dcda7c28SMaxime Ripard 132766cc6b1SStefan Schake /* Converts a DRM S31.32 value to the HW S0.9 format. */ 133766cc6b1SStefan Schake static u16 vc4_ctm_s31_32_to_s0_9(u64 in) 134766cc6b1SStefan Schake { 135766cc6b1SStefan Schake u16 r; 136766cc6b1SStefan Schake 137766cc6b1SStefan Schake /* Sign bit. */ 138766cc6b1SStefan Schake r = in & BIT_ULL(63) ? BIT(9) : 0; 139766cc6b1SStefan Schake 140766cc6b1SStefan Schake if ((in & GENMASK_ULL(62, 32)) > 0) { 141766cc6b1SStefan Schake /* We have zero integer bits so we can only saturate here. */ 142766cc6b1SStefan Schake r |= GENMASK(8, 0); 143766cc6b1SStefan Schake } else { 144766cc6b1SStefan Schake /* Otherwise take the 9 most important fractional bits. */ 145766cc6b1SStefan Schake r |= (in >> 23) & GENMASK(8, 0); 146766cc6b1SStefan Schake } 147766cc6b1SStefan Schake 148766cc6b1SStefan Schake return r; 149766cc6b1SStefan Schake } 150766cc6b1SStefan Schake 151766cc6b1SStefan Schake static void 152766cc6b1SStefan Schake vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) 153766cc6b1SStefan Schake { 154766cc6b1SStefan Schake struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state); 155766cc6b1SStefan Schake struct drm_color_ctm *ctm = ctm_state->ctm; 156766cc6b1SStefan Schake 157766cc6b1SStefan Schake if (ctm_state->fifo) { 158766cc6b1SStefan Schake HVS_WRITE(SCALER_OLEDCOEF2, 159766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]), 160766cc6b1SStefan Schake SCALER_OLEDCOEF2_R_TO_R) | 161766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]), 162766cc6b1SStefan Schake SCALER_OLEDCOEF2_R_TO_G) | 163766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]), 
164766cc6b1SStefan Schake SCALER_OLEDCOEF2_R_TO_B)); 165766cc6b1SStefan Schake HVS_WRITE(SCALER_OLEDCOEF1, 166766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]), 167766cc6b1SStefan Schake SCALER_OLEDCOEF1_G_TO_R) | 168766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]), 169766cc6b1SStefan Schake SCALER_OLEDCOEF1_G_TO_G) | 170766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]), 171766cc6b1SStefan Schake SCALER_OLEDCOEF1_G_TO_B)); 172766cc6b1SStefan Schake HVS_WRITE(SCALER_OLEDCOEF0, 173766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]), 174766cc6b1SStefan Schake SCALER_OLEDCOEF0_B_TO_R) | 175766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]), 176766cc6b1SStefan Schake SCALER_OLEDCOEF0_B_TO_G) | 177766cc6b1SStefan Schake VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]), 178766cc6b1SStefan Schake SCALER_OLEDCOEF0_B_TO_B)); 179766cc6b1SStefan Schake } 180766cc6b1SStefan Schake 181766cc6b1SStefan Schake HVS_WRITE(SCALER_OLEDOFFS, 182766cc6b1SStefan Schake VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); 183766cc6b1SStefan Schake } 184c8b75bcaSEric Anholt 185f2df84e0SMaxime Ripard static struct vc4_hvs_state * 186f2df84e0SMaxime Ripard vc4_hvs_get_global_state(struct drm_atomic_state *state) 187f2df84e0SMaxime Ripard { 188f2df84e0SMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(state->dev); 189f2df84e0SMaxime Ripard struct drm_private_state *priv_state; 190f2df84e0SMaxime Ripard 191f2df84e0SMaxime Ripard priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels); 192f2df84e0SMaxime Ripard if (IS_ERR(priv_state)) 193f2df84e0SMaxime Ripard return ERR_CAST(priv_state); 194f2df84e0SMaxime Ripard 195f2df84e0SMaxime Ripard return to_vc4_hvs_state(priv_state); 196f2df84e0SMaxime Ripard } 197f2df84e0SMaxime Ripard 19887ebcd42SMaxime Ripard static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, 19987ebcd42SMaxime Ripard struct 
drm_atomic_state *state) 20087ebcd42SMaxime Ripard { 20187ebcd42SMaxime Ripard struct drm_crtc_state *crtc_state; 20287ebcd42SMaxime Ripard struct drm_crtc *crtc; 20387ebcd42SMaxime Ripard unsigned int i; 20487ebcd42SMaxime Ripard 20587ebcd42SMaxime Ripard for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 20687ebcd42SMaxime Ripard struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); 20787ebcd42SMaxime Ripard u32 dispctrl; 20887ebcd42SMaxime Ripard u32 dsp3_mux; 20987ebcd42SMaxime Ripard 21087ebcd42SMaxime Ripard if (!crtc_state->active) 21187ebcd42SMaxime Ripard continue; 21287ebcd42SMaxime Ripard 21387ebcd42SMaxime Ripard if (vc4_state->assigned_channel != 2) 21487ebcd42SMaxime Ripard continue; 21587ebcd42SMaxime Ripard 21687ebcd42SMaxime Ripard /* 21787ebcd42SMaxime Ripard * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to 21887ebcd42SMaxime Ripard * FIFO X'. 21987ebcd42SMaxime Ripard * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. 22087ebcd42SMaxime Ripard * 22187ebcd42SMaxime Ripard * DSP3 is connected to FIFO2 unless the transposer is 22287ebcd42SMaxime Ripard * enabled. In this case, FIFO 2 is directly accessed by the 22387ebcd42SMaxime Ripard * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 22487ebcd42SMaxime Ripard * route. 
22587ebcd42SMaxime Ripard */ 22687ebcd42SMaxime Ripard if (vc4_state->feed_txp) 22787ebcd42SMaxime Ripard dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); 22887ebcd42SMaxime Ripard else 22987ebcd42SMaxime Ripard dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); 23087ebcd42SMaxime Ripard 23187ebcd42SMaxime Ripard dispctrl = HVS_READ(SCALER_DISPCTRL) & 23287ebcd42SMaxime Ripard ~SCALER_DISPCTRL_DSP3_MUX_MASK; 23387ebcd42SMaxime Ripard HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); 23487ebcd42SMaxime Ripard } 23587ebcd42SMaxime Ripard } 23687ebcd42SMaxime Ripard 23787ebcd42SMaxime Ripard static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, 23887ebcd42SMaxime Ripard struct drm_atomic_state *state) 23987ebcd42SMaxime Ripard { 24087ebcd42SMaxime Ripard struct drm_crtc_state *crtc_state; 24187ebcd42SMaxime Ripard struct drm_crtc *crtc; 2422820526dSMaxime Ripard unsigned char mux; 24387ebcd42SMaxime Ripard unsigned int i; 24487ebcd42SMaxime Ripard u32 reg; 24587ebcd42SMaxime Ripard 24687ebcd42SMaxime Ripard for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 24787ebcd42SMaxime Ripard struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); 24887ebcd42SMaxime Ripard struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 24987ebcd42SMaxime Ripard 2502820526dSMaxime Ripard if (!vc4_state->update_muxing) 25187ebcd42SMaxime Ripard continue; 25287ebcd42SMaxime Ripard 25387ebcd42SMaxime Ripard switch (vc4_crtc->data->hvs_output) { 25487ebcd42SMaxime Ripard case 2: 2552820526dSMaxime Ripard mux = (vc4_state->assigned_channel == 2) ? 
0 : 1; 2562820526dSMaxime Ripard reg = HVS_READ(SCALER_DISPECTRL); 2572820526dSMaxime Ripard HVS_WRITE(SCALER_DISPECTRL, 2582820526dSMaxime Ripard (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | 2592820526dSMaxime Ripard VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX)); 26087ebcd42SMaxime Ripard break; 26187ebcd42SMaxime Ripard 26287ebcd42SMaxime Ripard case 3: 2632820526dSMaxime Ripard if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) 2642820526dSMaxime Ripard mux = 3; 2652820526dSMaxime Ripard else 2662820526dSMaxime Ripard mux = vc4_state->assigned_channel; 2672820526dSMaxime Ripard 2682820526dSMaxime Ripard reg = HVS_READ(SCALER_DISPCTRL); 2692820526dSMaxime Ripard HVS_WRITE(SCALER_DISPCTRL, 2702820526dSMaxime Ripard (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | 2712820526dSMaxime Ripard VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX)); 27287ebcd42SMaxime Ripard break; 27387ebcd42SMaxime Ripard 27487ebcd42SMaxime Ripard case 4: 2752820526dSMaxime Ripard if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) 2762820526dSMaxime Ripard mux = 3; 2772820526dSMaxime Ripard else 2782820526dSMaxime Ripard mux = vc4_state->assigned_channel; 2792820526dSMaxime Ripard 2802820526dSMaxime Ripard reg = HVS_READ(SCALER_DISPEOLN); 2812820526dSMaxime Ripard HVS_WRITE(SCALER_DISPEOLN, 2822820526dSMaxime Ripard (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | 2832820526dSMaxime Ripard VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX)); 2842820526dSMaxime Ripard 28587ebcd42SMaxime Ripard break; 28687ebcd42SMaxime Ripard 28787ebcd42SMaxime Ripard case 5: 2882820526dSMaxime Ripard if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) 2892820526dSMaxime Ripard mux = 3; 2902820526dSMaxime Ripard else 2912820526dSMaxime Ripard mux = vc4_state->assigned_channel; 2922820526dSMaxime Ripard 2932820526dSMaxime Ripard reg = HVS_READ(SCALER_DISPDITHER); 2942820526dSMaxime Ripard HVS_WRITE(SCALER_DISPDITHER, 2952820526dSMaxime Ripard (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | 2962820526dSMaxime 
Ripard VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX)); 29787ebcd42SMaxime Ripard break; 29887ebcd42SMaxime Ripard 29987ebcd42SMaxime Ripard default: 30087ebcd42SMaxime Ripard break; 30187ebcd42SMaxime Ripard } 30287ebcd42SMaxime Ripard } 30387ebcd42SMaxime Ripard } 30487ebcd42SMaxime Ripard 305b501baccSEric Anholt static void 306cf1b372eSEric Anholt vc4_atomic_complete_commit(struct drm_atomic_state *state) 307b501baccSEric Anholt { 308b501baccSEric Anholt struct drm_device *dev = state->dev; 309b501baccSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 310d7d96c00SMaxime Ripard struct vc4_hvs *hvs = vc4->hvs; 31159635667SMaxime Ripard struct drm_crtc_state *new_crtc_state; 31259635667SMaxime Ripard struct drm_crtc *crtc; 313531a1b62SBoris Brezillon int i; 314531a1b62SBoris Brezillon 31559635667SMaxime Ripard for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 31687ebcd42SMaxime Ripard struct vc4_crtc_state *vc4_crtc_state; 31759635667SMaxime Ripard 31859635667SMaxime Ripard if (!new_crtc_state->commit) 319531a1b62SBoris Brezillon continue; 320531a1b62SBoris Brezillon 32187ebcd42SMaxime Ripard vc4_crtc_state = to_vc4_crtc_state(new_crtc_state); 32287ebcd42SMaxime Ripard vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); 323531a1b62SBoris Brezillon } 324b501baccSEric Anholt 325d7d96c00SMaxime Ripard if (vc4->hvs->hvs5) 326d7d96c00SMaxime Ripard clk_set_min_rate(hvs->core_clk, 500000000); 327d7d96c00SMaxime Ripard 32834c8ea40SBoris Brezillon drm_atomic_helper_wait_for_fences(dev, state, false); 32934c8ea40SBoris Brezillon 33034c8ea40SBoris Brezillon drm_atomic_helper_wait_for_dependencies(state); 33134c8ea40SBoris Brezillon 332b501baccSEric Anholt drm_atomic_helper_commit_modeset_disables(dev, state); 333b501baccSEric Anholt 334766cc6b1SStefan Schake vc4_ctm_commit(vc4, state); 335766cc6b1SStefan Schake 33687ebcd42SMaxime Ripard if (vc4->hvs->hvs5) 33787ebcd42SMaxime Ripard vc5_hvs_pv_muxing_commit(vc4, state); 33887ebcd42SMaxime Ripard else 
33987ebcd42SMaxime Ripard vc4_hvs_pv_muxing_commit(vc4, state); 34087ebcd42SMaxime Ripard 3412b58e98dSLiu Ying drm_atomic_helper_commit_planes(dev, state, 0); 342b501baccSEric Anholt 343b501baccSEric Anholt drm_atomic_helper_commit_modeset_enables(dev, state); 344b501baccSEric Anholt 3451ebe99a7SBoris Brezillon drm_atomic_helper_fake_vblank(state); 3461ebe99a7SBoris Brezillon 34734c8ea40SBoris Brezillon drm_atomic_helper_commit_hw_done(state); 34834c8ea40SBoris Brezillon 349184d3cf4SBoris Brezillon drm_atomic_helper_wait_for_flip_done(dev, state); 350b501baccSEric Anholt 351b501baccSEric Anholt drm_atomic_helper_cleanup_planes(dev, state); 352b501baccSEric Anholt 35334c8ea40SBoris Brezillon drm_atomic_helper_commit_cleanup_done(state); 35434c8ea40SBoris Brezillon 355d7d96c00SMaxime Ripard if (vc4->hvs->hvs5) 356d7d96c00SMaxime Ripard clk_set_min_rate(hvs->core_clk, 0); 357d7d96c00SMaxime Ripard 3580853695cSChris Wilson drm_atomic_state_put(state); 359b501baccSEric Anholt 360b501baccSEric Anholt up(&vc4->async_modeset); 361b501baccSEric Anholt } 362b501baccSEric Anholt 363cf1b372eSEric Anholt static void commit_work(struct work_struct *work) 364b501baccSEric Anholt { 365cf1b372eSEric Anholt struct drm_atomic_state *state = container_of(work, 366cf1b372eSEric Anholt struct drm_atomic_state, 367cf1b372eSEric Anholt commit_work); 368cf1b372eSEric Anholt vc4_atomic_complete_commit(state); 369b501baccSEric Anholt } 370b501baccSEric Anholt 371b501baccSEric Anholt /** 372b501baccSEric Anholt * vc4_atomic_commit - commit validated state object 373b501baccSEric Anholt * @dev: DRM device 374b501baccSEric Anholt * @state: the driver state object 375eb63961bSMaarten Lankhorst * @nonblock: nonblocking commit 376b501baccSEric Anholt * 377b501baccSEric Anholt * This function commits a with drm_atomic_helper_check() pre-validated state 378b501baccSEric Anholt * object. This can still fail when e.g. the framebuffer reservation fails. 
For 379b501baccSEric Anholt * now this doesn't implement asynchronous commits. 380b501baccSEric Anholt * 381b501baccSEric Anholt * RETURNS 382b501baccSEric Anholt * Zero for success or -errno. 383b501baccSEric Anholt */ 384b501baccSEric Anholt static int vc4_atomic_commit(struct drm_device *dev, 385b501baccSEric Anholt struct drm_atomic_state *state, 386eb63961bSMaarten Lankhorst bool nonblock) 387b501baccSEric Anholt { 388b501baccSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 389b501baccSEric Anholt int ret; 390b501baccSEric Anholt 391539c320bSGustavo Padovan if (state->async_update) { 392539c320bSGustavo Padovan ret = down_interruptible(&vc4->async_modeset); 393539c320bSGustavo Padovan if (ret) 394539c320bSGustavo Padovan return ret; 395539c320bSGustavo Padovan 396539c320bSGustavo Padovan ret = drm_atomic_helper_prepare_planes(dev, state); 397539c320bSGustavo Padovan if (ret) { 398539c320bSGustavo Padovan up(&vc4->async_modeset); 399539c320bSGustavo Padovan return ret; 400539c320bSGustavo Padovan } 401539c320bSGustavo Padovan 402539c320bSGustavo Padovan drm_atomic_helper_async_commit(dev, state); 403539c320bSGustavo Padovan 404539c320bSGustavo Padovan drm_atomic_helper_cleanup_planes(dev, state); 405539c320bSGustavo Padovan 406539c320bSGustavo Padovan up(&vc4->async_modeset); 407539c320bSGustavo Padovan 408539c320bSGustavo Padovan return 0; 409539c320bSGustavo Padovan } 410539c320bSGustavo Padovan 411fcc86cb4SBoris Brezillon /* We know for sure we don't want an async update here. Set 412fcc86cb4SBoris Brezillon * state->legacy_cursor_update to false to prevent 413fcc86cb4SBoris Brezillon * drm_atomic_helper_setup_commit() from auto-completing 414fcc86cb4SBoris Brezillon * commit->flip_done. 
415fcc86cb4SBoris Brezillon */ 416fcc86cb4SBoris Brezillon state->legacy_cursor_update = false; 41734c8ea40SBoris Brezillon ret = drm_atomic_helper_setup_commit(state, nonblock); 41834c8ea40SBoris Brezillon if (ret) 41934c8ea40SBoris Brezillon return ret; 42026fc78f6SDerek Foreman 421cf1b372eSEric Anholt INIT_WORK(&state->commit_work, commit_work); 422cf1b372eSEric Anholt 423b501baccSEric Anholt ret = down_interruptible(&vc4->async_modeset); 424cf1b372eSEric Anholt if (ret) 425b501baccSEric Anholt return ret; 426b501baccSEric Anholt 427b501baccSEric Anholt ret = drm_atomic_helper_prepare_planes(dev, state); 428b501baccSEric Anholt if (ret) { 429b501baccSEric Anholt up(&vc4->async_modeset); 430b501baccSEric Anholt return ret; 431b501baccSEric Anholt } 432b501baccSEric Anholt 43353ad0694SEric Anholt if (!nonblock) { 43453ad0694SEric Anholt ret = drm_atomic_helper_wait_for_fences(dev, state, true); 43553ad0694SEric Anholt if (ret) { 43653ad0694SEric Anholt drm_atomic_helper_cleanup_planes(dev, state); 43753ad0694SEric Anholt up(&vc4->async_modeset); 43853ad0694SEric Anholt return ret; 43953ad0694SEric Anholt } 44053ad0694SEric Anholt } 44153ad0694SEric Anholt 442b501baccSEric Anholt /* 443b501baccSEric Anholt * This is the point of no return - everything below never fails except 444b501baccSEric Anholt * when the hw goes bonghits. Which means we can commit the new state on 445b501baccSEric Anholt * the software side now. 
446b501baccSEric Anholt */ 447b501baccSEric Anholt 448d68bc0e7SMaarten Lankhorst BUG_ON(drm_atomic_helper_swap_state(state, false) < 0); 449b501baccSEric Anholt 450b501baccSEric Anholt /* 451b501baccSEric Anholt * Everything below can be run asynchronously without the need to grab 452b501baccSEric Anholt * any modeset locks at all under one condition: It must be guaranteed 453b501baccSEric Anholt * that the asynchronous work has either been cancelled (if the driver 454b501baccSEric Anholt * supports it, which at least requires that the framebuffers get 455b501baccSEric Anholt * cleaned up with drm_atomic_helper_cleanup_planes()) or completed 456b501baccSEric Anholt * before the new state gets committed on the software side with 457b501baccSEric Anholt * drm_atomic_helper_swap_state(). 458b501baccSEric Anholt * 459b501baccSEric Anholt * This scheme allows new atomic state updates to be prepared and 460b501baccSEric Anholt * checked in parallel to the asynchronous completion of the previous 461b501baccSEric Anholt * update. Which is important since compositors need to figure out the 462b501baccSEric Anholt * composition of the next frame right after having submitted the 463b501baccSEric Anholt * current layout. 
464b501baccSEric Anholt */ 465b501baccSEric Anholt 4660853695cSChris Wilson drm_atomic_state_get(state); 467cf1b372eSEric Anholt if (nonblock) 468cf1b372eSEric Anholt queue_work(system_unbound_wq, &state->commit_work); 469cf1b372eSEric Anholt else 470cf1b372eSEric Anholt vc4_atomic_complete_commit(state); 471b501baccSEric Anholt 472b501baccSEric Anholt return 0; 473b501baccSEric Anholt } 474b501baccSEric Anholt 47583753117SEric Anholt static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, 47683753117SEric Anholt struct drm_file *file_priv, 47783753117SEric Anholt const struct drm_mode_fb_cmd2 *mode_cmd) 47883753117SEric Anholt { 47983753117SEric Anholt struct drm_mode_fb_cmd2 mode_cmd_local; 48083753117SEric Anholt 48183753117SEric Anholt /* If the user didn't specify a modifier, use the 48283753117SEric Anholt * vc4_set_tiling_ioctl() state for the BO. 48383753117SEric Anholt */ 48483753117SEric Anholt if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) { 48583753117SEric Anholt struct drm_gem_object *gem_obj; 48683753117SEric Anholt struct vc4_bo *bo; 48783753117SEric Anholt 48883753117SEric Anholt gem_obj = drm_gem_object_lookup(file_priv, 48983753117SEric Anholt mode_cmd->handles[0]); 49083753117SEric Anholt if (!gem_obj) { 491fb95992aSEric Anholt DRM_DEBUG("Failed to look up GEM BO %d\n", 49283753117SEric Anholt mode_cmd->handles[0]); 49383753117SEric Anholt return ERR_PTR(-ENOENT); 49483753117SEric Anholt } 49583753117SEric Anholt bo = to_vc4_bo(gem_obj); 49683753117SEric Anholt 49783753117SEric Anholt mode_cmd_local = *mode_cmd; 49883753117SEric Anholt 49983753117SEric Anholt if (bo->t_format) { 50083753117SEric Anholt mode_cmd_local.modifier[0] = 50183753117SEric Anholt DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED; 50283753117SEric Anholt } else { 50383753117SEric Anholt mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; 50483753117SEric Anholt } 50583753117SEric Anholt 506f7a8cd30SEmil Velikov drm_gem_object_put(gem_obj); 50783753117SEric Anholt 
50883753117SEric Anholt mode_cmd = &mode_cmd_local; 50983753117SEric Anholt } 51083753117SEric Anholt 5119762477cSNoralf Trønnes return drm_gem_fb_create(dev, file_priv, mode_cmd); 51283753117SEric Anholt } 51383753117SEric Anholt 514766cc6b1SStefan Schake /* Our CTM has some peculiar limitations: we can only enable it for one CRTC 515766cc6b1SStefan Schake * at a time and the HW only supports S0.9 scalars. To account for the latter, 516766cc6b1SStefan Schake * we don't allow userland to set a CTM that we have no hope of approximating. 517766cc6b1SStefan Schake */ 518766cc6b1SStefan Schake static int 519766cc6b1SStefan Schake vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) 520766cc6b1SStefan Schake { 521766cc6b1SStefan Schake struct vc4_dev *vc4 = to_vc4_dev(dev); 522766cc6b1SStefan Schake struct vc4_ctm_state *ctm_state = NULL; 523766cc6b1SStefan Schake struct drm_crtc *crtc; 524766cc6b1SStefan Schake struct drm_crtc_state *old_crtc_state, *new_crtc_state; 525766cc6b1SStefan Schake struct drm_color_ctm *ctm; 526766cc6b1SStefan Schake int i; 527766cc6b1SStefan Schake 528766cc6b1SStefan Schake for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 529766cc6b1SStefan Schake /* CTM is being disabled. 
*/ 530766cc6b1SStefan Schake if (!new_crtc_state->ctm && old_crtc_state->ctm) { 531766cc6b1SStefan Schake ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager); 532766cc6b1SStefan Schake if (IS_ERR(ctm_state)) 533766cc6b1SStefan Schake return PTR_ERR(ctm_state); 534766cc6b1SStefan Schake ctm_state->fifo = 0; 535766cc6b1SStefan Schake } 536766cc6b1SStefan Schake } 537766cc6b1SStefan Schake 538766cc6b1SStefan Schake for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 539766cc6b1SStefan Schake if (new_crtc_state->ctm == old_crtc_state->ctm) 540766cc6b1SStefan Schake continue; 541766cc6b1SStefan Schake 542766cc6b1SStefan Schake if (!ctm_state) { 543766cc6b1SStefan Schake ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager); 544766cc6b1SStefan Schake if (IS_ERR(ctm_state)) 545766cc6b1SStefan Schake return PTR_ERR(ctm_state); 546766cc6b1SStefan Schake } 547766cc6b1SStefan Schake 548766cc6b1SStefan Schake /* CTM is being enabled or the matrix changed. */ 549766cc6b1SStefan Schake if (new_crtc_state->ctm) { 55087ebcd42SMaxime Ripard struct vc4_crtc_state *vc4_crtc_state = 55187ebcd42SMaxime Ripard to_vc4_crtc_state(new_crtc_state); 55287ebcd42SMaxime Ripard 553766cc6b1SStefan Schake /* fifo is 1-based since 0 disables CTM. */ 55487ebcd42SMaxime Ripard int fifo = vc4_crtc_state->assigned_channel + 1; 555766cc6b1SStefan Schake 556766cc6b1SStefan Schake /* Check userland isn't trying to turn on CTM for more 557766cc6b1SStefan Schake * than one CRTC at a time. 558766cc6b1SStefan Schake */ 559766cc6b1SStefan Schake if (ctm_state->fifo && ctm_state->fifo != fifo) { 560766cc6b1SStefan Schake DRM_DEBUG_DRIVER("Too many CTM configured\n"); 561766cc6b1SStefan Schake return -EINVAL; 562766cc6b1SStefan Schake } 563766cc6b1SStefan Schake 564766cc6b1SStefan Schake /* Check we can approximate the specified CTM. 565766cc6b1SStefan Schake * We disallow scalars |c| > 1.0 since the HW has 566766cc6b1SStefan Schake * no integer bits. 
567766cc6b1SStefan Schake */ 568766cc6b1SStefan Schake ctm = new_crtc_state->ctm->data; 569766cc6b1SStefan Schake for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) { 570766cc6b1SStefan Schake u64 val = ctm->matrix[i]; 571766cc6b1SStefan Schake 572766cc6b1SStefan Schake val &= ~BIT_ULL(63); 573766cc6b1SStefan Schake if (val > BIT_ULL(32)) 574766cc6b1SStefan Schake return -EINVAL; 575766cc6b1SStefan Schake } 576766cc6b1SStefan Schake 577766cc6b1SStefan Schake ctm_state->fifo = fifo; 578766cc6b1SStefan Schake ctm_state->ctm = ctm; 579766cc6b1SStefan Schake } 580766cc6b1SStefan Schake } 581766cc6b1SStefan Schake 582766cc6b1SStefan Schake return 0; 583766cc6b1SStefan Schake } 584766cc6b1SStefan Schake 5854686da83SBoris Brezillon static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) 5864686da83SBoris Brezillon { 5874686da83SBoris Brezillon struct drm_plane_state *old_plane_state, *new_plane_state; 5884686da83SBoris Brezillon struct vc4_dev *vc4 = to_vc4_dev(state->dev); 5894686da83SBoris Brezillon struct vc4_load_tracker_state *load_state; 5904686da83SBoris Brezillon struct drm_private_state *priv_state; 5914686da83SBoris Brezillon struct drm_plane *plane; 5924686da83SBoris Brezillon int i; 5934686da83SBoris Brezillon 594f437bc1eSMaxime Ripard if (!vc4->load_tracker_available) 595f437bc1eSMaxime Ripard return 0; 596f437bc1eSMaxime Ripard 5974686da83SBoris Brezillon priv_state = drm_atomic_get_private_obj_state(state, 5984686da83SBoris Brezillon &vc4->load_tracker); 5994686da83SBoris Brezillon if (IS_ERR(priv_state)) 6004686da83SBoris Brezillon return PTR_ERR(priv_state); 6014686da83SBoris Brezillon 6024686da83SBoris Brezillon load_state = to_vc4_load_tracker_state(priv_state); 6034686da83SBoris Brezillon for_each_oldnew_plane_in_state(state, plane, old_plane_state, 6044686da83SBoris Brezillon new_plane_state, i) { 6054686da83SBoris Brezillon struct vc4_plane_state *vc4_plane_state; 6064686da83SBoris Brezillon 6074686da83SBoris Brezillon if 
(old_plane_state->fb && old_plane_state->crtc) { 6084686da83SBoris Brezillon vc4_plane_state = to_vc4_plane_state(old_plane_state); 6094686da83SBoris Brezillon load_state->membus_load -= vc4_plane_state->membus_load; 6104686da83SBoris Brezillon load_state->hvs_load -= vc4_plane_state->hvs_load; 6114686da83SBoris Brezillon } 6124686da83SBoris Brezillon 6134686da83SBoris Brezillon if (new_plane_state->fb && new_plane_state->crtc) { 6144686da83SBoris Brezillon vc4_plane_state = to_vc4_plane_state(new_plane_state); 6154686da83SBoris Brezillon load_state->membus_load += vc4_plane_state->membus_load; 6164686da83SBoris Brezillon load_state->hvs_load += vc4_plane_state->hvs_load; 6174686da83SBoris Brezillon } 6184686da83SBoris Brezillon } 6194686da83SBoris Brezillon 6206b5c029dSPaul Kocialkowski /* Don't check the load when the tracker is disabled. */ 6216b5c029dSPaul Kocialkowski if (!vc4->load_tracker_enabled) 6226b5c029dSPaul Kocialkowski return 0; 6236b5c029dSPaul Kocialkowski 6244686da83SBoris Brezillon /* The absolute limit is 2Gbyte/sec, but let's take a margin to let 6254686da83SBoris Brezillon * the system work when other blocks are accessing the memory. 6264686da83SBoris Brezillon */ 6274686da83SBoris Brezillon if (load_state->membus_load > SZ_1G + SZ_512M) 6284686da83SBoris Brezillon return -ENOSPC; 6294686da83SBoris Brezillon 6304686da83SBoris Brezillon /* HVS clock is supposed to run @ 250Mhz, let's take a margin and 6314686da83SBoris Brezillon * consider the maximum number of cycles is 240M. 
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

/*
 * Duplicate the load tracker private state for a new atomic state.
 * kmemdup() copies the accumulated hvs_load/membus_load figures so the
 * new state starts from the currently-committed load.
 */
static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

/* Free a load tracker private state (no owned sub-allocations). */
static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

/*
 * drmm action: tear down the load tracker private object on device
 * release. A no-op on platforms where the tracker was never created
 * (load_tracker_available unset), mirroring vc4_load_tracker_obj_init().
 */
static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

/*
 * Create the load tracker private object and register its cleanup as a
 * DRM-managed action. Returns 0 if the tracker is unavailable on this
 * platform, or a negative errno on allocation/registration failure.
 */
static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

/*
 * Duplicate the HVS channels private state, carrying over the mask of
 * FIFO channels not currently assigned to any CRTC.
 */
static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	state->unassigned_channels = old_state->unassigned_channels;

	return &state->base;
}
/* Free an HVS channels private state (no owned sub-allocations). */
static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

/* drmm action: tear down the HVS channels private object on device release. */
static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

/*
 * Create the HVS channels private object, with all HVS_NUM_CHANNELS
 * FIFO channels initially unassigned, and register its cleanup as a
 * DRM-managed action. Returns 0 or a negative errno.
 */
static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* All channels are free until CRTCs claim them at enable time. */
	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (!hvs_new_state)
		return -EINVAL;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to
		 * change the muxing between FIFOs and outputs in
		 * the HVS accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify a bit the problem.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign in the ascending crtc index order the
		 * FIFOs, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (matching_channels) {
			/* Claim the lowest-numbered channel this CRTC can use. */
			unsigned int channel = ffs(matching_channels) - 1;

			new_vc4_crtc_state->assigned_channel = channel;
			hvs_new_state->unassigned_channels &= ~BIT(channel);
		} else {
			/* No free FIFO can reach this CRTC: reject the commit. */
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Top-level atomic check: run the driver-specific checks (PV/FIFO
 * muxing, then CTM) before the generic helper check, and finish with
 * the memory-bandwidth load tracker, which relies on per-plane loads
 * computed during drm_atomic_helper_check().
 */
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

/*
 * One-time KMS setup: configure mode_config limits and hooks, init
 * vblank support, and create the CTM, load tracker and HVS channels
 * private objects. Returns 0 or a negative errno; the private objects
 * registered before a failure are cleaned up by their drmm actions.
 */
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	/* BCM2711 (VC5) has a larger HVS and no load tracker. */
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	if (!is_vc5) {
		vc4->load_tracker_available = true;

		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}