xref: /openbmc/linux/drivers/gpu/drm/i915/intel_pcode.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
14dd4375bSJani Nikula // SPDX-License-Identifier: MIT
24dd4375bSJani Nikula /*
34dd4375bSJani Nikula  * Copyright © 2013-2021 Intel Corporation
44dd4375bSJani Nikula  */
54dd4375bSJani Nikula 
64dd4375bSJani Nikula #include "i915_drv.h"
7ce2fce25SMatt Roper #include "i915_reg.h"
84dd4375bSJani Nikula #include "intel_pcode.h"
94dd4375bSJani Nikula 
/*
 * Translate the gen6-style error field of a completed pcode mailbox
 * command into a negative errno (0 on success). Unknown codes are
 * flagged via MISSING_CASE() and treated as success.
 */
static int gen6_check_mailbox_status(u32 mbox)
{
	u32 err = mbox & GEN6_PCODE_ERROR_MASK;

	switch (err) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(err);
		return 0;
	}
}
294dd4375bSJani Nikula 
/*
 * Translate the gen7+-style error field of a completed pcode mailbox
 * command into a negative errno (0 on success). Unknown codes are
 * flagged via MISSING_CASE() and treated as success.
 */
static int gen7_check_mailbox_status(u32 mbox)
{
	u32 err = mbox & GEN6_PCODE_ERROR_MASK;

	switch (err) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(err);
		return 0;
	}
}
544dd4375bSJani Nikula 
/*
 * Perform one pcode mailbox transaction (read or write).
 *
 * @uncore: uncore for register access; its i915->sb_lock must be held
 * @mbox: mailbox command; GEN6_PCODE_READY is OR'd in by this function
 * @val: in: DATA payload to send; out (reads): DATA reply
 * @val1: optional second payload/reply via DATA1; may be NULL
 * @fast_timeout_us/@slow_timeout_ms: completion poll budget passed to
 *	__intel_wait_for_register_fw()
 * @is_read: when true, read back DATA (and DATA1 if @val1) on completion
 *
 * Returns 0 on success, -EAGAIN if the mailbox is still busy from a
 * previous command, -ETIMEDOUT if pcode never clears READY, or the
 * errno decoded from the mailbox status field.
 */
static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	/* Caller serializes all mailbox traffic via sb_lock. */
	lockdep_assert_held(&uncore->i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, we can use
	 * intel_uncore_read/write_fw variants to reduce the amount of work
	 * required when reading/writing.
	 */

	/* A previous command still owns the mailbox; let the caller retry. */
	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	/* Payload first, then the command with READY set to kick pcode. */
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* Pcode clears READY when done; final mailbox value lands in mbox. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	/* Decode the status field using the generation-appropriate table. */
	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}
944dd4375bSJani Nikula 
/*
 * Read one (optionally two) dwords from pcode mailbox @mbox under
 * sb_lock, logging a debug message on failure. Returns 0 or a
 * negative errno.
 */
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	int ret;

	mutex_lock(&uncore->i915->sb_lock);
	ret = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
	mutex_unlock(&uncore->i915->sb_lock);

	if (ret)
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), ret);

	return ret;
}
1114dd4375bSJani Nikula 
/*
 * Write @val to pcode mailbox @mbox under sb_lock with caller-supplied
 * completion timeouts, logging a debug message on failure. Returns 0
 * or a negative errno.
 */
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int fast_timeout_us, int slow_timeout_ms)
{
	int ret;

	mutex_lock(&uncore->i915->sb_lock);
	ret = __snb_pcode_rw(uncore, mbox, &val, NULL,
			     fast_timeout_us, slow_timeout_ms, false);
	mutex_unlock(&uncore->i915->sb_lock);

	if (ret)
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), ret);

	return ret;
}
1304dd4375bSJani Nikula 
skl_pcode_try_request(struct intel_uncore * uncore,u32 mbox,u32 request,u32 reply_mask,u32 reply,u32 * status)131ee421bb4SAshutosh Dixit static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
1324dd4375bSJani Nikula 				  u32 request, u32 reply_mask, u32 reply,
1334dd4375bSJani Nikula 				  u32 *status)
1344dd4375bSJani Nikula {
135ee421bb4SAshutosh Dixit 	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);
1364dd4375bSJani Nikula 
1379e0a1c3cSStanislav Lisovskiy 	return (*status == 0) && ((request & reply_mask) == reply);
1384dd4375bSJani Nikula }
1394dd4375bSJani Nikula 
/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

/* One request/ack attempt; also records the transaction result in status. */
#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	/* A mailbox-level error (status) takes precedence over a poll timeout. */
	return status ? status : ret;
#undef COND
}
2064dd4375bSJani Nikula 
/*
 * Wait up to @timeout_ms for pcode to become usable: first for any
 * in-flight mailbox command to retire (READY clear), then for the DG1
 * pcode to report that uncore init has completed.
 *
 * Returns -EPROBE_DEFER if the mailbox never goes idle (so probe can
 * be retried later), otherwise the result of the init-status request.
 */
static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
{
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 500, timeout_ms,
					 NULL))
		return -EPROBE_DEFER;

	return skl_pcode_request(uncore,
				 DG1_PCODE_STATUS,
				 DG1_UNCORE_GET_INIT_STATUS,
				 DG1_UNCORE_INIT_STATUS_COMPLETE,
				 DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
}
222*fec37500SAravind Iddamsetty 
intel_pcode_init(struct intel_uncore * uncore)223ee421bb4SAshutosh Dixit int intel_pcode_init(struct intel_uncore *uncore)
2244dd4375bSJani Nikula {
225*fec37500SAravind Iddamsetty 	int err;
226*fec37500SAravind Iddamsetty 
227ee421bb4SAshutosh Dixit 	if (!IS_DGFX(uncore->i915))
228ee421bb4SAshutosh Dixit 		return 0;
2294dd4375bSJani Nikula 
230*fec37500SAravind Iddamsetty 	/*
231*fec37500SAravind Iddamsetty 	 * Wait 10 seconds so that the punit to settle and complete
232*fec37500SAravind Iddamsetty 	 * any outstanding transactions upon module load
233*fec37500SAravind Iddamsetty 	 */
234*fec37500SAravind Iddamsetty 	err = pcode_init_wait(uncore, 10000);
235*fec37500SAravind Iddamsetty 
236*fec37500SAravind Iddamsetty 	if (err) {
237*fec37500SAravind Iddamsetty 		drm_notice(&uncore->i915->drm,
238*fec37500SAravind Iddamsetty 			   "Waiting for HW initialisation...\n");
239*fec37500SAravind Iddamsetty 		err = pcode_init_wait(uncore, 180000);
240*fec37500SAravind Iddamsetty 	}
241*fec37500SAravind Iddamsetty 
242*fec37500SAravind Iddamsetty 	return err;
2434dd4375bSJani Nikula }
2445f38c3fbSDale B Stimson 
/*
 * Parameterized pcode read: pack @mbcmd/@p1/@p2 into the mailbox
 * command word and read the reply into *val, holding a runtime PM
 * wakeref for the duration. Returns 0 or a negative errno.
 */
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
	const u32 mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd) |
			 REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1) |
			 REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
	intel_wakeref_t wakeref;
	int err;

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_read(uncore, mbox, val, NULL);

	return err;
}
2605f38c3fbSDale B Stimson 
/*
 * Parameterized pcode write: pack @mbcmd/@p1/@p2 into the mailbox
 * command word and send @val, holding a runtime PM wakeref for the
 * duration. Returns 0 or a negative errno.
 */
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
	const u32 mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd) |
			 REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1) |
			 REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
	intel_wakeref_t wakeref;
	int err;

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_write(uncore, mbox, val);

	return err;
}
276