scheduler.c: ca797d29cd63e7b71b4eea29aff3b1cefd1ecb59 -> 59a716c6477c2a095adf274e8f76b9889af7bc7b
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the

--- 140 unchanged lines hidden (view full) ---

                                unsigned long action, void *data)
{
        struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
                                shadow_ctx_notifier_block[req->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        enum intel_engine_id ring_id = req->engine->id;
        struct intel_vgpu_workload *workload;
-       unsigned long flags;

        if (!is_gvt_request(req)) {
-               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
+               spin_lock_bh(&scheduler->mmio_context_lock);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              NULL, ring_id);
                        scheduler->engine_owner[ring_id] = NULL;
                }
-               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
+               spin_unlock_bh(&scheduler->mmio_context_lock);

                return NOTIFY_OK;
        }

        workload = scheduler->current_workload[ring_id];
        if (unlikely(!workload))
                return NOTIFY_OK;

        switch (action) {
        case INTEL_CONTEXT_SCHEDULE_IN:
-               spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
+               spin_lock_bh(&scheduler->mmio_context_lock);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
                                              workload->vgpu, ring_id);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                                      ring_id, workload->vgpu->id);
-               spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
+               spin_unlock_bh(&scheduler->mmio_context_lock);
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
                save_ring_hw_state(workload->vgpu, ring_id);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        default:

--- 44 unchanged lines hidden (view full) ---

                       workload->rb_len);

        cs += workload->rb_len / sizeof(u32);
        intel_ring_advance(workload->req, cs);

        return 0;
}

-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        if (!wa_ctx->indirect_ctx.obj)
                return;

        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}

--- 1066 unchanged lines hidden (view full) ---

        if (ret && (vgpu_is_vm_unhealthy(ret))) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }

        return workload;
}
+
+/**
+ * intel_vgpu_queue_workload - Queue a vGPU workload
+ * @workload: the workload to queue
+ */
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
+{
+       list_add_tail(&workload->list,
+               workload_q_head(workload->vgpu, workload->ring_id));
+       wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+}
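The substantive change in the first hunk is the locking style on mmio_context_lock: spin_lock_irqsave()/spin_unlock_irqrestore() become spin_lock_bh()/spin_unlock_bh(), which also lets the local flags variable go away. The sketch below is illustrative only and not part of the patch; the lock name owner_lock and the claim_owner_*() helpers are made up here to contrast the two idioms.

#include <linux/spinlock.h>

/* Hypothetical stand-ins for mmio_context_lock and engine_owner[]. */
static DEFINE_SPINLOCK(owner_lock);
static void *owner;

/* Old idiom: disables local interrupts, so a flags word must be carried. */
static void claim_owner_irqsave(void *new_owner)
{
        unsigned long flags;

        spin_lock_irqsave(&owner_lock, flags);
        owner = new_owner;
        spin_unlock_irqrestore(&owner_lock, flags);
}

/* New idiom: disables only softirq (bottom-half) processing; no flags needed. */
static void claim_owner_bh(void *new_owner)
{
        spin_lock_bh(&owner_lock);
        owner = new_owner;
        spin_unlock_bh(&owner_lock);
}

spin_lock_bh() is the lighter of the two; it is sufficient as long as the lock is never taken from hard-IRQ context, which appears to be the assumption this patch makes for the context-status notifier path.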