/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"

int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
			     struct amdgpu_reset_handler *handler)
{
	/* TODO: Check if handler exists? */
	list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
	return 0;
}

int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	/* Default reset level mask; higher reset levels are opt-in */
	adev->amdgpu_reset_level_mask = 0x1;

	/* Pick the ASIC-specific reset implementation from the MP1 (SMU) IP version */
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	/* Mode-2 reset must be enabled in the level mask and not skipped for this request */
	if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
		return -ENOSYS;

	if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
		return -ENOSYS;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
		return -ENOSYS;

	if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
		return -ENOSYS;

	/* Check get_reset_handler too, matching amdgpu_reset_prepare_hwcontext() above */
	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	/* Reset succeeded; restore the hardware context on the freshly reset ASIC */
	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}
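
/**
 * amdgpu_reset_destroy_reset_domain - kref release callback for a reset domain
 * @ref: the refcount embedded in the domain being released
 *
 * Runs when the last reference is dropped via amdgpu_reset_put_reset_domain();
 * destroys the domain's reset workqueue and frees the structure.
 */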
void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);
	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	/* A single-threaded workqueue serializes all reset work within the domain */
	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}

/* Mark the domain as in GPU reset and exclude all readers of its semaphore */
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}