1 /* 2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 * DEALINGS IN THE SOFTWARE. 
 */
#include "priv.h"

#include <subdev/clk.h>
#include <subdev/timer.h>
#include <subdev/volt.h>

/*
 * PMU performance-counter slots sampled by the DVFS governor: BUSY_SLOT
 * accumulates GPU-busy time, CLK_SLOT accumulates total elapsed time.
 * NOTE(review): slot meanings inferred from their busy/total use in
 * gk20a_pmu_dvfs_work and the counter programming in gk20a_pmu_init —
 * confirm against PMU counter documentation.
 */
#define BUSY_SLOT 0
#define CLK_SLOT 7

/* Tunables and state for the load-based DVFS governor. */
struct gk20a_pmu_dvfs_data {
	int p_load_target;	/* load (%) the governor steers toward */
	int p_load_max;		/* load (%) above which we jump several levels up */
	int p_smooth;		/* smoothing factor for the load moving average */
	unsigned int avg_load;	/* smoothed load (%), updated every DVFS tick */
};

/* GK20A PMU instance: base PMU plus the periodic DVFS alarm and tunables. */
struct gk20a_pmu {
	struct nvkm_pmu base;
	struct nvkm_alarm alarm;		/* periodic timer driving gk20a_pmu_dvfs_work */
	struct gk20a_pmu_dvfs_data *data;	/* points at gk20a_dvfs_data */
};

/* Snapshot of the PMU performance counters for one sampling period. */
struct gk20a_pmu_dvfs_dev_status {
	unsigned long total;	/* total time counted in the period (CLK_SLOT) */
	unsigned long busy;	/* busy time counted in the period (BUSY_SLOT) */
	int cur_state;		/* not written by the sampling path below */
};

/*
 * Ask the clock subdevice to switch to performance state *state.
 * Returns the nvkm_clk_astate() result.
 */
static int
gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
{
	struct nvkm_clk *clk = nvkm_clk(pmu);

	return nvkm_clk_astate(clk, *state, 0, false);
}

/*
 * Read the current performance state from the clock subdevice into *state.
 * Always returns 0.
 */
static int
gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
{
	struct nvkm_clk *clk = nvkm_clk(pmu);

	*state = clk->pstate;
	return 0;
}

/*
 * Choose the next performance level for the given load (%) and store it
 * in *state.  Returns 1 when the chosen level differs from the current
 * one (i.e. a pstate change is needed), 0 otherwise.
 */
static int
gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
				int *state, int load)
{
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct nvkm_clk *clk = nvkm_clk(pmu);
	int cur_level, level;

	/* For GK20A, the performance level is directly mapped to pstate */
	level = cur_level = clk->pstate;

	if (load > data->p_load_max) {
		/* Heavily loaded: jump up by a third of the level range. */
		level = min(clk->state_nr - 1, level + (clk->state_nr / 3));
	} else {
		/*
		 * Proportional step toward p_load_target: step size scales
		 * with the relative deviation of load from the target,
		 * then the result is clamped to [0, state_nr - 1].
		 */
		level += ((load - data->p_load_target) * 10 /
			  data->p_load_target) / 2;
		level = max(0, level);
		level = min(clk->state_nr - 1, level);
	}

	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
		   cur_level, level);

	*state = level;

	if (level == cur_level)
		return 0;
	else
		return 1;
}

/*
 * Sample the busy/total PMU counters into *status.  Always returns 0.
 * NOTE(review): 0x10a508 + slot*0x10 appears to be each slot's counter
 * value register, matching the 0x10a504/0x10a50c setup in
 * gk20a_pmu_init — confirm against PMU register documentation.
 */
static int
gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
			      struct gk20a_pmu_dvfs_dev_status *status)
{
	struct nvkm_device *device = pmu->base.subdev.device;

	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
	status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
	return 0;
}
/*
 * Restart the busy/total counters for the next sampling period.
 * NOTE(review): writing 0x80000000 to the counter register appears to
 * reset/re-arm it — confirm against PMU counter documentation.
 */
static void
gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
{
	struct nvkm_device *device = pmu->base.subdev.device;

	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
}

/*
 * Periodic DVFS tick (alarm callback).
 *
 * Samples the busy/total counters, computes the utilization percentage,
 * folds it into the smoothed average, and requests a new pstate when the
 * governor decides a change is needed.  On every path (including early
 * bail-outs) the counters are reset and the alarm re-armed 100 ms
 * (100000000 ns) out.
 */
static void
gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
{
	struct gk20a_pmu *pmu =
		container_of(alarm, struct gk20a_pmu, alarm);
	struct gk20a_pmu_dvfs_data *data = pmu->data;
	struct gk20a_pmu_dvfs_dev_status status;
	struct nvkm_subdev *subdev = &pmu->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_clk *clk = device->clk;
	struct nvkm_volt *volt = device->volt;
	u32 utilization = 0;
	int state, ret;

	/*
	 * The PMU is initialized before CLK and VOLT, so we have to make sure the
	 * CLK and VOLT are ready here.
	 */
	if (!clk || !volt)
		goto resched;

	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
	if (ret) {
		nvkm_warn(subdev, "failed to get device status\n");
		goto resched;
	}

	/* 64-bit divide: busy * 100 may overflow 32 bits. */
	if (status.total)
		utilization = div_u64((u64)status.busy * 100, status.total);

	/* Exponential moving average over p_smooth + 1 samples. */
	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
	data->avg_load /= data->p_smooth + 1;
	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
		   utilization, data->avg_load);

	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
	if (ret) {
		nvkm_warn(subdev, "failed to get current state\n");
		goto resched;
	}

	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
		nvkm_trace(subdev, "set new state to %d\n", state);
		/* Best-effort: a failed pstate switch is retried next tick. */
		gk20a_pmu_dvfs_target(pmu, &state);
	}

resched:
	gk20a_pmu_dvfs_reset_dev_status(pmu);
	nvkm_timer_alarm(pmu, 100000000, alarm);
}

/* Suspend/teardown: cancel the DVFS alarm, then run the base PMU fini. */
static int
gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
{
	struct gk20a_pmu *pmu = (void *)object;

	nvkm_timer_alarm_cancel(pmu, &pmu->alarm);

	return nvkm_subdev_fini_old(&pmu->base.subdev, suspend);
}

/*
 * Init: run the base subdev init, program the PMU performance counters
 * used by the governor, and schedule the first DVFS tick 2 s
 * (2000000000 ns) out.
 */
static int
gk20a_pmu_init(struct nvkm_object *object)
{
	struct gk20a_pmu *pmu = (void *)object;
	struct nvkm_device *device = pmu->base.subdev.device;
	int ret;

	ret = nvkm_subdev_init_old(&pmu->base.subdev);
	if (ret)
		return ret;

	pmu->base.pgob = nvkm_pmu_pgob;

	/* init pwr perf counter */
	/*
	 * NOTE(review): 0x10a504 looks like the counter control register
	 * and 0x10a50c the signal-select register for each slot — confirm
	 * against PMU register documentation.
	 */
	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);

	nvkm_timer_alarm(pmu, 2000000000, &pmu->alarm);
	return ret;
}

/* Default governor tunables: steer toward 70% load, fast-ramp above 90%. */
static struct gk20a_pmu_dvfs_data
gk20a_dvfs_data = {
	.p_load_target = 70,
	.p_load_max = 90,
	.p_smooth = 1,
};

/* Constructor: create the base PMU, attach the tunables and DVFS alarm. */
static int
gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct gk20a_pmu *pmu;
	int ret;

	ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
	*pobject = nv_object(pmu);
	if (ret)
		return ret;

	pmu->data = &gk20a_dvfs_data;

	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
	return 0;
}

/* GK20A PMU subdev class definition. */
struct nvkm_oclass *
gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
	.base.handle = NV_SUBDEV(PMU, 0xea),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk20a_pmu_ctor,
		.dtor = _nvkm_pmu_dtor,
		.init = gk20a_pmu_init,
		.fini = gk20a_pmu_fini,
	},
}.base;