/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"

/* Invoke the per-IP callback that matches the requested transition state. */
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

/* Drive every IP block of one partition through a transition; stop on the
 * first error.
 */
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

/* (Re)populate per-partition IP details after a partition layout change. */
static int __amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps)
{
	struct amdgpu_xcp_ip ip;
	int i, j, ret;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}
	}

	xcp_mgr->num_xcps = num_xcps;

	return 0;
}

int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int ret, num_xcps = 0;

	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret)
		goto out;

	if (!num_xcps || num_xcps > MAX_XCP) {
		ret = -EINVAL;
		goto out;
	}

	xcp_mgr->mode = mode;
	__amdgpu_xcp_init(xcp_mgr, num_xcps);
out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	int mode;

	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
	if (mode != xcp_mgr->mode)
		dev_WARN(xcp_mgr->adev->dev,
			 "Cached partition mode %d not matching with device mode %d",
			 xcp_mgr->mode, mode);

	mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		__amdgpu_xcp_init(xcp_mgr, init_num_xcps);

	adev->xcp_mgr = xcp_mgr;

	return 0;
}

/* Return a bitmask of the partition ids that own the given IP instance,
 * or a negative errno.
 */
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}
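
/*
 * Usage sketch (illustrative only): minimal wiring of the XCP manager from
 * an ASIC-specific init path. The my_* callbacks and new_mode below are
 * hypothetical placeholders, with signatures inferred from the call sites
 * in this file; only the amdgpu_xcp_* entry points above are real.
 *
 *	static int my_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
 *					    int mode, int *num_xcps);
 *	static int my_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr);
 *	static int my_get_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
 *				     enum AMDGPU_XCP_IP_BLOCK ip_id,
 *				     struct amdgpu_xcp_ip *ip);
 *
 *	static struct amdgpu_xcp_mgr_funcs my_xcp_funcs = {
 *		.switch_partition_mode = my_switch_partition_mode,
 *		.query_partition_mode = my_query_partition_mode,
 *		.get_ip_details = my_get_ip_details,
 *	};
 *
 *	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_XCP_MODE_NONE, 1, &my_xcp_funcs);
 *	if (!ret)
 *		ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, new_mode);
 */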