1*837d542aSEvan Quan /*
2*837d542aSEvan Quan * Copyright 2013 Advanced Micro Devices, Inc.
3*837d542aSEvan Quan *
4*837d542aSEvan Quan * Permission is hereby granted, free of charge, to any person obtaining a
5*837d542aSEvan Quan * copy of this software and associated documentation files (the "Software"),
6*837d542aSEvan Quan * to deal in the Software without restriction, including without limitation
7*837d542aSEvan Quan * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*837d542aSEvan Quan * and/or sell copies of the Software, and to permit persons to whom the
9*837d542aSEvan Quan * Software is furnished to do so, subject to the following conditions:
10*837d542aSEvan Quan *
11*837d542aSEvan Quan * The above copyright notice and this permission notice shall be included in
12*837d542aSEvan Quan * all copies or substantial portions of the Software.
13*837d542aSEvan Quan *
14*837d542aSEvan Quan * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*837d542aSEvan Quan * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*837d542aSEvan Quan * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17*837d542aSEvan Quan * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*837d542aSEvan Quan * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*837d542aSEvan Quan * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*837d542aSEvan Quan * OTHER DEALINGS IN THE SOFTWARE.
21*837d542aSEvan Quan *
22*837d542aSEvan Quan * Authors: Alex Deucher
23*837d542aSEvan Quan */
24*837d542aSEvan Quan
25*837d542aSEvan Quan #include "amdgpu.h"
26*837d542aSEvan Quan #include "cikd.h"
27*837d542aSEvan Quan #include "kv_dpm.h"
28*837d542aSEvan Quan
29*837d542aSEvan Quan #include "smu/smu_7_0_0_d.h"
30*837d542aSEvan Quan #include "smu/smu_7_0_0_sh_mask.h"
31*837d542aSEvan Quan
/**
 * amdgpu_kv_notify_message_to_smu - post a message to the SMC and await ack
 * @adev: amdgpu device handle
 * @id: PPSMC message id to send
 *
 * Writes @id to SMC_MESSAGE_0, then polls SMC_RESP_0 until the firmware
 * raises a non-zero response or the device timeout expires.  Firmware
 * responses 0xFF and 0xFE indicate failure and map to -EINVAL; every
 * other response value (including 0 after a poll timeout) is treated as
 * success.
 */
int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
{
	u32 resp;
	u32 i;

	WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);

	/* Busy-wait for the SMC to post any non-zero response. */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK)
			break;
		udelay(1);
	}

	resp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;
	if (resp == 0xFF || resp == 0xFE)
		return -EINVAL;

	return 0;
}
55*837d542aSEvan Quan
/**
 * amdgpu_kv_dpm_get_enable_mask - query the SCLK DPM enabled-levels mask
 * @adev: amdgpu device handle
 * @enable_mask: filled with the firmware-reported mask on success
 *
 * Asks the SMC for the currently enabled SCLK DPM level mask and, when
 * the message succeeds, fetches the result from the SMC argument
 * register.  Returns 0 on success or a negative error code.
 */
int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
{
	int ret = amdgpu_kv_notify_message_to_smu(adev,
						  PPSMC_MSG_SCLKDPM_GetEnabledMask);

	if (!ret)
		*enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);

	return ret;
}
67*837d542aSEvan Quan
/**
 * amdgpu_kv_send_msg_to_smc_with_parameter - send an SMC message with an argument
 * @adev: amdgpu device handle
 * @msg: PPSMC message id to send
 * @parameter: 32-bit argument for the message
 *
 * Loads the argument register and then posts the message, returning the
 * result of the notify call.
 */
int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
					     PPSMC_Msg msg, u32 parameter)
{
	/* The argument register must be set up before the message is posted. */
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return amdgpu_kv_notify_message_to_smu(adev, msg);
}
76*837d542aSEvan Quan
/**
 * kv_set_smc_sram_address - program the SMC indirect-access index register
 * @adev: amdgpu device handle
 * @smc_address: byte offset into SMC SRAM; must be dword aligned
 * @limit: first byte offset past the accessible window
 *
 * Validates the address and points the indirect index register at it so
 * a following data-register access hits that dword.  Returns 0 on
 * success or -EINVAL for a misaligned or out-of-range address.
 */
static int kv_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	/* Accesses are dword-wide: require alignment and room for 4 bytes. */
	if ((smc_address & 3) || (smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmSMC_IND_INDEX_0, smc_address);
	/* Clear auto-increment so repeated data accesses hit the same dword. */
	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
		 ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);

	return 0;
}
91*837d542aSEvan Quan
/**
 * amdgpu_kv_read_smc_sram_dword - read one dword from SMC SRAM
 * @adev: amdgpu device handle
 * @smc_address: dword-aligned byte offset in SMC SRAM
 * @value: filled with the dword read on success
 * @limit: first byte offset past the accessible window
 *
 * Programs the indirect index and reads the data register.  Returns 0
 * on success or the error from the address setup.
 */
int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit)
{
	int ret = kv_set_smc_sram_address(adev, smc_address, limit);

	if (!ret)
		*value = RREG32(mmSMC_IND_DATA_0);

	return ret;
}
104*837d542aSEvan Quan
/**
 * amdgpu_kv_smc_dpm_enable - enable or disable DPM in the SMC
 * @adev: amdgpu device handle
 * @enable: true to enable DPM, false to disable it
 *
 * Sends the matching PPSMC message and returns its result.
 */
int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_DPM_Enable :
					       PPSMC_MSG_DPM_Disable);
}
112*837d542aSEvan Quan
/**
 * amdgpu_kv_smc_bapm_enable - enable or disable BAPM in the SMC
 * @adev: amdgpu device handle
 * @enable: true to enable BAPM, false to disable it
 *
 * Sends the matching PPSMC message and returns its result.
 */
int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableBAPM :
					       PPSMC_MSG_DisableBAPM);
}
120*837d542aSEvan Quan
/**
 * amdgpu_kv_copy_bytes_to_smc - copy a byte buffer into SMC SRAM
 * @adev: amdgpu device handle
 * @smc_start_address: destination byte offset in SMC SRAM
 * @src: source byte buffer
 * @byte_count: number of bytes to copy
 * @limit: first byte offset past the end of the writable window
 *
 * SMC SRAM is only accessible as aligned big-endian dwords, so an
 * unaligned head and a short tail are handled with read-modify-write
 * cycles that preserve the neighboring bytes.  Returns 0 on success or
 * a negative error code from range/alignment checks.
 */
int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	int ret;
	u32 data, original_data, addr, extra_shift, t_byte, count, mask;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;
	t_byte = addr & 3;	/* misalignment of the first byte in its dword */

	/* RMW for the initial bytes */
	if (t_byte != 0) {
		addr -= t_byte;	/* back up to the dword boundary */

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		/*
		 * Assemble the dword MSB-first (SMC is big-endian):
		 * the t_byte leading bytes are preserved (their bits are
		 * set in mask), then new bytes from src; if src runs out
		 * within this dword, the trailing bytes are preserved too.
		 */
		data = 0;
		mask = 0;
		count = 4;
		while (count > 0) {
			if (t_byte > 0) {
				mask = (mask << 8) | 0xff;
				t_byte--;
			} else if (byte_count > 0) {
				data = (data << 8) + *src++;
				byte_count--;
				mask <<= 8;
			} else {
				data <<= 8;
				mask = (mask << 8) | 0xff;
			}
			count--;
		}

		/* Merge the preserved original bytes back in. */
		data |= original_data & mask;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		addr += 4;
	}

	/* Bulk copy of whole, aligned dwords. */
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		/* Bits below extra_shift belong to the preserved tail bytes. */
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		/* Left-justify the new bytes within the dword... */
		data <<= extra_shift;

		/* ...and keep the original low-order bytes we are not writing. */
		data |= (original_data & ~((~0UL) << extra_shift));

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);
	}
	return 0;
}
218*837d542aSEvan Quan
219