/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_KMS_H__
#define __MSM_KMS_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"

#define MAX_PLANE	4

/* As there are different display controller blocks depending on the
 * snapdragon version, the kms support is split out and the appropriate
 * implementation is loaded at runtime. The kms module is responsible
 * for constructing the appropriate planes/crtcs/encoders/connectors.
 */
struct msm_kms_funcs {
	/* hw initialization: */
	int (*hw_init)(struct msm_kms *kms);
	/* irq handling: */
	void (*irq_preinstall)(struct msm_kms *kms);
	int (*irq_postinstall)(struct msm_kms *kms);
	void (*irq_uninstall)(struct msm_kms *kms);
	irqreturn_t (*irq)(struct msm_kms *kms);
	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);

	/*
	 * Atomic commit handling:
	 *
	 * Note that in the case of async commits, the funcs which take
	 * a crtc_mask (ie. ->flush_commit() and ->complete_commit())
	 * might not be evenly balanced with ->prepare_commit(); however
	 * each crtc affected by a ->prepare_commit() (potentially
	 * multiple times) will eventually (at the end of the vsync period)
	 * be flushed and completed.
	 *
	 * This has some implications for tracking of cleanup state,
	 * for example SMP blocks to release after commit completes. Ie.
	 * cleanup state should also be duplicated in the various
	 * duplicate_state() methods, as the current cleanup state at
	 * ->complete_commit() time may have accumulated cleanup work
	 * from multiple commits.
	 */

	/**
	 * Enable/disable power/clks needed for hw access done in other
	 * commit-related methods.
	 *
	 * If mdp4 is migrated to runpm, we could probably drop these
	 * and use runpm directly.
	 */
	void (*enable_commit)(struct msm_kms *kms);
	void (*disable_commit)(struct msm_kms *kms);

	/**
	 * If the kms backend supports async commit, it should implement
	 * this method to return the time of the next vsync. This is
	 * used to determine a time slightly before vsync, at which the
	 * async commit timer runs to complete an async commit.
	 */
	ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);

	/**
	 * Prepare for atomic commit. This is called after any previous
	 * (async or otherwise) commit has completed.
	 */
	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);

	/**
	 * Flush an atomic commit. This is called after the hardware
	 * updates have already been pushed down to the affected planes/
	 * crtcs/encoders/connectors.
	 */
	void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Wait for any in-progress flush to complete on the specified
	 * crtcs. This should not block if there is no in-progress
	 * commit (ie. don't just wait for a vblank), as it will also
	 * be called before ->prepare_commit() to ensure any potential
	 * "async" commit has completed.
	 */
	void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);

	/**
	 * Clean up after commit is completed. This is called after
	 * ->wait_flush(), to give the backend a chance to do any
	 * post-commit cleanup.
	 */
	void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
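	/*
	 * A rough sketch (based only on the comments above, not on the
	 * actual atomic commit code) of the order in which the core is
	 * expected to call the commit-related hooks for one, possibly
	 * async, commit touching the crtcs in crtc_mask:
	 *
	 *	kms->funcs->enable_commit(kms);
	 *	// flush out any still-pending async commit first:
	 *	kms->funcs->wait_flush(kms, crtc_mask);
	 *	kms->funcs->prepare_commit(kms, state);
	 *	// ... push new state to the affected planes/crtcs/...
	 *	kms->funcs->flush_commit(kms, crtc_mask);
	 *	kms->funcs->wait_flush(kms, crtc_mask);
	 *	kms->funcs->complete_commit(kms, crtc_mask);
	 *	kms->funcs->disable_commit(kms);
	 */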

	/*
	 * Format handling:
	 */

	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
	const struct msm_format *(*get_format)(struct msm_kms *kms,
			const uint32_t format,
			const uint64_t modifiers);
	/* do format checking on format modified through fb_cmd2 modifiers */
	int (*check_modified_format)(const struct msm_kms *kms,
			const struct msm_format *msm_fmt,
			const struct drm_mode_fb_cmd2 *cmd,
			struct drm_gem_object **bos);

	/* misc: */
	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
			struct drm_encoder *encoder);
	int (*set_split_display)(struct msm_kms *kms,
			struct drm_encoder *encoder,
			struct drm_encoder *slave_encoder,
			bool is_cmd_mode);

	/* cleanup: */
	void (*destroy)(struct msm_kms *kms);

	/* snapshot: */
	void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);

#ifdef CONFIG_DEBUG_FS
	/* debugfs: */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
#endif
};

struct msm_kms;

/*
 * A per-crtc timer for pending async atomic flushes. Scheduled to expire
 * shortly before vblank to flush pending async updates.
 */
struct msm_pending_timer {
	struct msm_hrtimer_work work;
	struct kthread_worker *worker;
	struct msm_kms *kms;
	unsigned crtc_idx;
};

struct msm_kms {
	const struct msm_kms_funcs *funcs;
	struct drm_device *dev;

	/* irq number to be passed on to msm_irq_install */
	int irq;

	/* mapper-id used to request GEM buffer mapped for scanout: */
	struct msm_gem_address_space *aspace;

	/* disp snapshot support */
	struct kthread_worker *dump_worker;
	struct kthread_work dump_work;
	struct mutex dump_mutex;

	/*
	 * For async commit, where ->flush_commit() and later stages happen
	 * from the crtc's pending_timer close to the end of the frame:
	 */
	struct mutex commit_lock[MAX_CRTCS];
	unsigned pending_crtc_mask;
	struct msm_pending_timer pending_timers[MAX_CRTCS];
};

static inline int msm_kms_init(struct msm_kms *kms,
		const struct msm_kms_funcs *funcs)
{
	unsigned i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
		mutex_init(&kms->commit_lock[i]);

	kms->funcs = funcs;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
		ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
		if (ret)
			return ret;
	}

	return 0;
}

static inline void msm_kms_destroy(struct msm_kms *kms)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
		msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
}
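/*
 * A KMS backend typically embeds struct msm_kms in its own kms struct and
 * sets it up with msm_kms_init() from its *_kms_init() entry point. A
 * minimal hypothetical sketch; the "foo" names and foo_kms_funcs below are
 * illustrative only, not part of this interface:
 *
 *	struct foo_kms {
 *		struct msm_kms base;
 *		// backend-specific state ...
 *	};
 *
 *	struct msm_kms *foo_kms_init(struct drm_device *dev)
 *	{
 *		struct foo_kms *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		int ret;
 *
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = msm_kms_init(&foo->base, &foo_kms_funcs);
 *		if (ret) {
 *			kfree(foo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return &foo->base;
 *	}
 */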
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);

extern const struct of_device_id dpu_dt_match[];
extern const struct of_device_id mdp5_dt_match[];

struct msm_mdss_funcs {
	int (*enable)(struct msm_mdss *mdss);
	int (*disable)(struct msm_mdss *mdss);
	void (*destroy)(struct msm_mdss *mdss);
};

struct msm_mdss {
	struct device *dev;
	const struct msm_mdss_funcs *funcs;
};

int mdp5_mdss_init(struct platform_device *dev);
int dpu_mdss_init(struct platform_device *dev);

#define for_each_crtc_mask(dev, crtc, crtc_mask) \
	drm_for_each_crtc(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
	drm_for_each_crtc_reverse(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

#endif /* __MSM_KMS_H__ */