/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
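
/*
 * Mailbox control byte layout, as inferred from this file's register
 * accesses (the authoritative definitions live in the
 * BIF_BX_PF0_MAILBOX_CONTROL register spec):
 *
 *   TRN byte: bit 0 = TRN_MSG_VALID (VF raises it to publish a message)
 *             bit 1 = TRN_MSG_ACK   (host's acknowledgement, read-only here)
 *   RCV byte: bit 1 = RCV_MSG_ACK   (writing 2 acks the host's message)
 */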

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * a correct value, since RCV_DW0 is only meaningful while the host has
 * RCV_MSG_VALID set.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n",
	       AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}
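
/*
 * VF-to-PF transmit handshake, as implemented by
 * xgpu_ai_mailbox_trans_msg() below (a sketch of the sequence this file
 * drives, not a full protocol spec):
 *
 *   1. Drop TRN_MSG_VALID and wait for the host to drop its ack, so a
 *      stale TRN_MSG_ACK cannot satisfy the poll in step 4.
 *   2. Write the request id into the MSGBUF_DATA field of TRN_DW0 and
 *      the payload into TRN_DW1..3.
 *   3. Raise TRN_MSG_VALID to publish the message to the host.
 *   4. Poll for TRN_MSG_ACK, then drop TRN_MSG_VALID again.
 */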

static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first: that makes the host clear its
	 * RCV_MSG_ACK, and with the host's RCV_MSG_ACK cleared the
	 * hardware clears the VF's TRN_MSG_ACK in turn.  Otherwise the
	 * xgpu_ai_poll_ack() below would return immediately on a stale ack.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted; waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing anyway\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* for access requests, wait for the host's ready message */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, giving up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS ||
		    req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
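
/*
 * FLR (function-level reset) flow: the host sends IDH_FLR_NOTIFICATION
 * when it is about to reset this VF, and IDH_FLR_NOTIFICATION_CMPL once
 * the reset has finished.  The work item below bridges the two, holding
 * the reset lock for the duration so that normal GPU recovery does not
 * race with the host-driven reset.
 */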

static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block GPU recovery until the FLR COMPLETE message is received,
	 * otherwise the mailbox msg will be ruined/reset by the VF FLR.
	 *
	 * We unlock lock_reset to allow "amdgpu_job_timedout" to run
	 * gpu_recover() once FLR_NOTIFICATION_CMPL is received, which
	 * means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev) &&
	    adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}
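
/*
 * Receive-side dispatch.  Calling peek_msg here is safe: this runs in
 * the IRQ path, where RCV_MSG_VALID is known to be set by the host (see
 * the comment on xgpu_ai_mailbox_peek_msg() above).
 */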
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here: the polling thread will handle it.  Other
	 * messages, such as FLR complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};
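
/*
 * Usage sketch (for illustration only; the actual call site lives
 * outside this file, in the SoC early-init path): an SR-IOV VF installs
 * these ops so that core amdgpu code, e.g. amdgpu_virt_request_full_gpu(),
 * routes through the mailbox helpers above:
 *
 *	if (amdgpu_sriov_vf(adev))
 *		adev->virt.ops = &xgpu_ai_virt_ops;
 */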