1d99605eaSHawking Zhang /*
2d99605eaSHawking Zhang * Copyright 2018 Advanced Micro Devices, Inc.
3d99605eaSHawking Zhang *
4d99605eaSHawking Zhang * Permission is hereby granted, free of charge, to any person obtaining a
5d99605eaSHawking Zhang * copy of this software and associated documentation files (the "Software"),
6d99605eaSHawking Zhang * to deal in the Software without restriction, including without limitation
7d99605eaSHawking Zhang * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8d99605eaSHawking Zhang * and/or sell copies of the Software, and to permit persons to whom the
9d99605eaSHawking Zhang * Software is furnished to do so, subject to the following conditions:
10d99605eaSHawking Zhang *
11d99605eaSHawking Zhang * The above copyright notice and this permission notice shall be included in
12d99605eaSHawking Zhang * all copies or substantial portions of the Software.
13d99605eaSHawking Zhang *
14d99605eaSHawking Zhang * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15d99605eaSHawking Zhang * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16d99605eaSHawking Zhang * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17d99605eaSHawking Zhang * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18d99605eaSHawking Zhang * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19d99605eaSHawking Zhang * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20d99605eaSHawking Zhang * OTHER DEALINGS IN THE SOFTWARE.
21d99605eaSHawking Zhang *
22d99605eaSHawking Zhang */
23d99605eaSHawking Zhang #include "amdgpu.h"
24d99605eaSHawking Zhang #include "df_v1_7.h"
25d99605eaSHawking Zhang
26d99605eaSHawking Zhang #include "df/df_1_7_default.h"
27d99605eaSHawking Zhang #include "df/df_1_7_offset.h"
28d99605eaSHawking Zhang #include "df/df_1_7_sh_mask.h"
29d99605eaSHawking Zhang
/*
 * Lookup table translating the IntLvNumChan field encoding read from
 * DF_CS_AON0_DramBaseAddress0 into an actual HBM channel count.
 * Indexed by the raw field value; see df_v1_7_get_hbm_channel_number().
 */
static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
31d99605eaSHawking Zhang
/*
 * Software init for DF v1.7: this DF revision has no address hashing,
 * so mark all hash granularities as disabled.
 */
static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
	adev->df.hash_status.hash_64k = false;
	adev->df.hash_status.hash_2m = false;
	adev->df.hash_status.hash_1g = false;
}
38d99605eaSHawking Zhang
/* Software teardown counterpart of df_v1_7_sw_init(); nothing to release. */
static void df_v1_7_sw_fini(struct amdgpu_device *adev)
{
}
42f1d59e00SJack Zhang
/*
 * Enable or disable DF config-register broadcast mode.
 *
 * When enabling, clear CfgRegInstAccEn so that config register writes are
 * broadcast to all DF instances; when disabling, restore the access control
 * register to its hardware default (per-instance access).
 */
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
					  bool enable)
{
	u32 tmp;

	if (enable) {
		tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
		tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
	} else {
		/* Kernel coding style: brace all arms when any arm is braced. */
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
			     mmFabricConfigAccessControl_DEFAULT);
	}
}
56d99605eaSHawking Zhang
df_v1_7_get_fb_channel_number(struct amdgpu_device * adev)57d99605eaSHawking Zhang static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev)
58d99605eaSHawking Zhang {
59d99605eaSHawking Zhang u32 tmp;
60d99605eaSHawking Zhang
61d99605eaSHawking Zhang tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
62d99605eaSHawking Zhang tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
63d99605eaSHawking Zhang tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
64d99605eaSHawking Zhang
65d99605eaSHawking Zhang return tmp;
66d99605eaSHawking Zhang }
67d99605eaSHawking Zhang
df_v1_7_get_hbm_channel_number(struct amdgpu_device * adev)68d99605eaSHawking Zhang static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
69d99605eaSHawking Zhang {
70d99605eaSHawking Zhang int fb_channel_number;
71d99605eaSHawking Zhang
72bdf84a80SJoseph Greathouse fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
73*f9267972SMa Jun if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
74*f9267972SMa Jun fb_channel_number = 0;
75d99605eaSHawking Zhang
76d99605eaSHawking Zhang return df_v1_7_channel_number[fb_channel_number];
77d99605eaSHawking Zhang }
78d99605eaSHawking Zhang
/*
 * Enable or disable DF medium-grain clock gating (MGCG).
 *
 * The MGCGMode field of DF_PIE_AON0_DfGlobalClkGater is programmed with a
 * 15-cycle-delay enable value when MGCG is requested and supported
 * (AMD_CG_SUPPORT_DF_MGCG in adev->cg_flags), or with the disable value
 * otherwise.  The register is written in broadcast mode so all DF
 * instances are updated.
 */
static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	u32 tmp;

	/* Put DF on broadcast mode */
	adev->df.funcs->enable_broadcast_mode(adev, true);

	/*
	 * Single read-modify-write: both the enable and disable paths
	 * clear MGCGMode and differ only in the value OR-ed back in.
	 */
	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
	tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG))
		tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY;
	else
		tmp |= DF_V1_7_MGCG_DISABLE;
	WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);

	/* Exit broadcast mode */
	adev->df.funcs->enable_broadcast_mode(adev, false);
}
102d99605eaSHawking Zhang
/*
 * Report the current DF clock-gating state by OR-ing the active feature
 * flags into *flags (caller-provided accumulator).
 */
static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
					  u64 *flags)
{
	u32 gater;

	/* AMD_CG_SUPPORT_DF_MGCG */
	gater = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
	if (gater & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
113d99605eaSHawking Zhang
/*
 * Set or clear the ForceParWrRMW field of
 * DF_CS_AON0_CoherentSlaveModeCtrlA0 (forces partial writes to be handled
 * as read-modify-write for ECC purposes).
 */
static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
		       ForceParWrRMW, enable);
}
1201ca2393bSAlex Deucher
/* DF v1.7 callback table plugged into adev->df.funcs by the SoC setup code. */
const struct amdgpu_df_funcs df_v1_7_funcs = {
	.sw_init = df_v1_7_sw_init,
	.sw_fini = df_v1_7_sw_fini,
	.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
	.get_fb_channel_number = df_v1_7_get_fb_channel_number,
	.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
	.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
	.get_clockgating_state = df_v1_7_get_clockgating_state,
	.enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw,
};
131