// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. It is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
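 *
 * As a hedged example (the authoritative decoding of the parameter lives
 * elsewhere in the uc code): enable_guc is a bitmask, with bit 0 selecting
 * GuC submission and bit 1 selecting HuC loading, so i915.enable_guc=2
 * loads the GuC firmware only to authenticate the HuC.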
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
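
/*
 * These helpers ring the host-to-GuC doorbell: a write to the interrupt
 * trigger register (any value on gen11) tells the firmware that a new
 * request is waiting in the shared scratch registers or in the CTB.
 */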
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}
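
/*
 * The send registers are a contiguous bank of 32-bit scratch registers,
 * so message dword i lives at base + 4 * i.
 */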
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}
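
/*
 * On gen9 the GuC shares the GT PM interrupt registers (GEN8_GT_IIR(2)),
 * so (un)masking goes through the gen6 PM IRQ helpers; gen11+ instead has
 * dedicated GUC_SG interrupt enable/mask registers.
 */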
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
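		/* GuC-to-host events live in the engine1 half of the register */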
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}
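
/*
 * The i915 log level is translated into the GuC firmware's own verbosity
 * encoding; non-verbose levels simply disable the firmware log.
 */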
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
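		/* The CTXNUM field counts stage descriptors in units of 16 */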
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;
	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the SOFT_SCRATCH registers for the
 * firmware to read on startup. Once the firmware has been started, these
 * values cannot be changed.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
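
	/*
	 * SOFT_SCRATCH(0) doubles as send register 0 for the MMIO host-to-GuC
	 * protocol, so it is cleared here while the parameter block itself
	 * occupies SOFT_SCRATCH(1) onwards.
	 */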
	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
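 * The request is written to the send (scratch) registers, the GuC is then
 * notified via the doorbell, and the call polls send register 0 until the
 * firmware replaces the action code with a RESPONSE message.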
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 select the Render and Media domains respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA signature offset of the HuC vma, relative to the GGTT base
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send() INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function
 * is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC but we do need to enable the
	 * GuC communication on resume (above).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or to other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC address space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part [ggtt_pin_bias, GUC_GGTT_TOP) is mapped to
 * DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to stay pinned for its whole
 * lifetime, so we allocate both some backing storage and a range inside the
 * Global GTT. We must pin it in the GGTT somewhere other than
 * [0, GuC ggtt_pin_bias) because that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}
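
/*
 * A minimal usage sketch for the helper above (hedged: error handling is
 * trimmed and the buffer is a hypothetical example object, but both calls
 * exist with these signatures in this codebase):
 *
 *	struct i915_vma *vma;
 *
 *	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	... use the buffer through vma->obj ...
 *
 *	i915_vma_unpin_and_release(&vma, 0);
 */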