// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gpu_trace.h"

#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>

/*
 * Power Management:
 */

static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	struct dev_pm_opp *opp;

	/*
	 * Note that devfreq_recommended_opp() can modify the freq
	 * to something that actually is in the opp table:
	 */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/*
	 * If the GPU is idle, devfreq is not aware, so just ignore
	 * its requests
	 */
	if (gpu->devfreq.idle_freq) {
		gpu->devfreq.idle_freq = *freq;
		dev_pm_opp_put(opp);
		return 0;
	}

	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, opp);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

/*
 * While the GPU is idle, report the frequency that was saved at idle time
 * rather than querying the hardware.
 */
static unsigned long get_freq(struct msm_gpu *gpu)
{
	if (gpu->devfreq.idle_freq)
		return gpu->devfreq.idle_freq;

	if (gpu->funcs->gpu_get_freq)
		return gpu->funcs->gpu_get_freq(gpu);

	return clk_get_rate(gpu->core_clk);
}

static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	ktime_t time;

	status->current_frequency = get_freq(gpu);
	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	*freq = get_freq(dev_to_gpu(dev));

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need busy-time reporting (gpu_busy) to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the
	 * table from the OPP table.  After a deferred probe, these may have
	 * been left at non-zero values, so set them back to zero before
	 * creating the devfreq device.
	 */
	msm_devfreq_profile.freq_table = NULL;
	msm_devfreq_profile.max_state = 0;

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
		return;
	}

	/*
	 * Keep devfreq suspended until the GPU is powered up; see
	 * msm_devfreq_resume().
	 */
	devfreq_suspend_device(gpu->devfreq.devfreq);

	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
			gpu->devfreq.devfreq);
	if (IS_ERR(gpu->cooling)) {
		DRM_DEV_ERROR(&gpu->pdev->dev,
				"Couldn't register GPU cooling device\n");
		gpu->cooling = NULL;
	}
}

void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
	/*
	 * The devfreq device itself is devm-managed; only the cooling
	 * device needs explicit cleanup.
	 */
	devfreq_cooling_unregister(gpu->cooling);
}

void msm_devfreq_resume(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

void msm_devfreq_suspend(struct msm_gpu *gpu)
{
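	/*
	 * Stop the devfreq monitor (and its delayed polling timer) while the
	 * GPU is powered down, so the governor does not try to sample a
	 * powered-off GPU.
	 */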
	devfreq_suspend_device(gpu->devfreq.devfreq);
}

void msm_devfreq_active(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	struct devfreq_dev_status status;
	unsigned int idle_time;
	unsigned long target_freq = df->idle_freq;

	if (!df->devfreq)
		return;

	/*
	 * Hold devfreq lock to synchronize with get_dev_status()/
	 * target() callbacks
	 */
	mutex_lock(&df->devfreq->lock);

	idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));

	/*
	 * If we've been idle for a significant fraction of a polling
	 * interval, then we won't meet the threshold of busyness for
	 * the governor to ramp up the freq.. so give some boost
	 */
	if (idle_time > msm_devfreq_profile.polling_ms / 2)
		target_freq *= 2;

	df->idle_freq = 0;

	msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);

	/*
	 * Reset the polling interval so we aren't inconsistent
	 * about freq vs busy/total cycles
	 */
	msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);

	mutex_unlock(&df->devfreq->lock);
}

void msm_devfreq_idle(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	unsigned long idle_freq, target_freq = 0;

	if (!df->devfreq)
		return;

	/*
	 * Hold devfreq lock to synchronize with get_dev_status()/
	 * target() callbacks
	 */
	mutex_lock(&df->devfreq->lock);

	/*
	 * Drop to the lowest OPP while idle, but remember the frequency we
	 * were running at so that msm_devfreq_active() can restore (and
	 * possibly boost) it when the GPU becomes busy again.
	 */
	idle_freq = get_freq(gpu);

	msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);

	df->idle_time = ktime_get();
	df->idle_freq = idle_freq;

	mutex_unlock(&df->devfreq->lock);
}
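/*
 * Usage sketch (illustrative only, not part of this file): how the rest of
 * the driver is expected to drive these hooks.  The example_*() names are
 * hypothetical; only the msm_devfreq_*() calls come from this file.
 *
 *	static void example_gpu_submit(struct msm_gpu *gpu)
 *	{
 *		// First job after an idle period: restore (and possibly
 *		// boost) the pre-idle frequency before queueing work.
 *		msm_devfreq_active(gpu);
 *		// ... write commands to the ringbuffer ...
 *	}
 *
 *	static void example_gpu_retire(struct msm_gpu *gpu)
 *	{
 *		// Last outstanding job has completed: drop to the lowest
 *		// OPP and remember the active frequency.
 *		msm_devfreq_idle(gpu);
 *	}
 *
 * Power-collapse paths are expected to bracket this with
 * msm_devfreq_suspend() before powering the GPU off and
 * msm_devfreq_resume() after powering it back on.
 */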