/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"

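/*
 * Register a reset handler with the reset controller. Handlers are kept in a
 * simple list; the controller's get_reset_handler() callback later picks the
 * one that matches a given reset context.
 */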
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
			     struct amdgpu_reset_handler *handler)
{
	/* TODO: Check if handler exists? */
	list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
	return 0;
}

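/*
 * Set up the ASIC-specific reset controller based on the MP1 (SMU) IP
 * version. IP versions without a dedicated implementation fall through and
 * return 0 without registering a controller.
 */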
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

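/*
 * Tear down the ASIC-specific reset controller created by
 * amdgpu_reset_init() for the matching MP1 IP version.
 */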
int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

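/*
 * Ask the reset controller for a handler matching the given reset context
 * and run its prepare_hwcontext step. Returns -ENOSYS when no handler
 * claims the request.
 */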
int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

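/*
 * Look up the handler for this reset context, perform the reset and, if it
 * succeeds, restore the hardware context. A failure in perform_reset skips
 * the restore step and is returned to the caller.
 */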
int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}

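/*
 * kref release callback for a reset domain: destroy its workqueue (if one
 * was created) and free the domain itself.
 */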
void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);
	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

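/*
 * Allocate and initialize a reset domain: take the initial reference, create
 * the single-threaded workqueue that serializes reset work, and clear the
 * in_gpu_reset/reset_res state and the recovery rwsem.
 */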
struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}

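/*
 * Mark the domain as being in GPU reset and take the recovery semaphore for
 * writing, excluding any code paths that hold it for reading while the reset
 * is in progress.
 */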
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

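/*
 * Clear the in-reset state and release the write side of the recovery
 * semaphore, letting normal operation resume.
 */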
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}