/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/* Return true if any engine of this vGPU still has queued workloads. */
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	/* no target to schedule */
	if (!scheduler->next_vgpu)
		return;

	gvt_dbg_sched("try to schedule next vgpu %d\n",
			scheduler->next_vgpu->id);

	/*
	 * after the flag is set, workload dispatch thread will
	 * stop dispatching workload for current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i]) {
			gvt_dbg_sched("still have running workload\n");
			return;
		}
	}

	gvt_dbg_sched("switch to next vgpu %d\n",
			scheduler->next_vgpu->id);

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}
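
/*
 * Time-based scheduler (TBS): vGPUs with scheduling started are linked
 * on a runqueue, and a self-rearming delayed work periodically picks the
 * next vGPU with pending workloads, walking the runqueue in round-robin
 * order starting from the entry after the currently scheduled vGPU.
 */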
struct tbs_vgpu_data {
	struct list_head list;
	struct intel_vgpu *vgpu;
	/* put some per-vgpu sched stats here */
};

struct tbs_sched_data {
	struct intel_gvt *gvt;
	struct delayed_work work;
	unsigned long period;
	struct list_head runq_head;
};

/* 1 ms time slice in jiffies (rounds down to 0 when HZ < 1000) */
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)

static void tbs_sched_func(struct work_struct *work)
{
	struct tbs_sched_data *sched_data = container_of(work,
			struct tbs_sched_data, work.work);
	struct tbs_vgpu_data *vgpu_data;

	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	struct intel_vgpu *vgpu = NULL;
	struct list_head *pos, *head;

	mutex_lock(&gvt->lock);

	/* no vgpu or has already had a target */
	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		goto out;

	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		head = &vgpu_data->list;
	} else {
		gvt_dbg_sched("no current vgpu search from q head\n");
		head = &sched_data->runq_head;
	}

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {
		/* skip the list head itself when wrapping around */
		if (pos == &sched_data->runq_head)
			continue;

		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		vgpu = vgpu_data->vgpu;
		break;
	}

	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
	}
out:
	if (scheduler->next_vgpu) {
		gvt_dbg_sched("try to schedule next vgpu %d\n",
				scheduler->next_vgpu->id);
		try_to_schedule_next_vgpu(gvt);
	}

	/*
	 * still have vgpu on runq
	 * or last schedule hasn't finished due to running workload
	 */
	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		schedule_delayed_work(&sched_data->work, sched_data->period);

	mutex_unlock(&gvt->lock);
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct tbs_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->runq_head);
	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;
	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct tbs_sched_data *data = scheduler->sched_data;

	/*
	 * the work re-arms itself, so wait for any running instance to
	 * finish before freeing its data
	 */
	cancel_delayed_work_sync(&data->work);
	kfree(data);
	scheduler->sched_data = NULL;
}
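
/*
 * Per-vGPU hooks below: init_vgpu/clean_vgpu allocate and free the
 * per-vGPU scheduler payload, while start_schedule/stop_schedule move
 * the vGPU on and off the runqueue. stop_schedule is expected before
 * clean_vgpu, otherwise the runqueue would point at freed memory.
 */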
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->vgpu = vgpu;
	/* an empty node marks the vgpu as not yet on the runqueue */
	INIT_LIST_HEAD(&data->list);

	vgpu->sched_data = data;
	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	/* already on the runqueue */
	if (!list_empty(&vgpu_data->list))
		return;

	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
	schedule_delayed_work(&sched_data->work, sched_data->period);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	/* list_del_init() keeps the node reusable for a later restart */
	list_del_init(&vgpu_data->list);
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}
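
/*
 * Typical lifecycle, for orientation only (the actual call sites live
 * elsewhere in gvt):
 *
 *	intel_gvt_init_sched_policy(gvt);	device init
 *	intel_vgpu_init_sched_policy(vgpu);	vgpu creation
 *	intel_vgpu_start_schedule(vgpu);	vgpu becomes runnable
 *	...
 *	intel_vgpu_stop_schedule(vgpu);		vgpu goes idle/away
 *	intel_vgpu_clean_sched_policy(vgpu);	vgpu destruction
 *	intel_gvt_clean_sched_policy(gvt);	device teardown
 */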