/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"

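/* Register a reset handler on a reset controller's handler list. */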
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
			     struct amdgpu_reset_handler *handler)
{
	/* TODO: Check if handler exists? */
	list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
	return 0;
}

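/*
 * Set up the ASIC-specific reset controller, selected by MP1 IP version.
 * ASICs without their own reset handlers fall through with no controller
 * installed.
 */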
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

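/* Tear down the ASIC-specific reset controller set up in amdgpu_reset_init(). */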
int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 2):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

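/*
 * Ask the reset controller for a handler matching the reset context and
 * let it prepare the hardware context ahead of the reset.  Returns
 * -ENOSYS when no suitable handler is registered.
 */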
int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

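/*
 * Perform the reset through the matching handler and then restore the
 * hardware context.  Returns -ENOSYS when no suitable handler is
 * registered, or the handler's error code on failure.
 */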
int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -ENOSYS;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}

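/* kref release callback: free a reset domain and its workqueue. */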
void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);
	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

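/*
 * Allocate and initialize a reset domain of the given type, including its
 * single-threaded reset workqueue, recovery semaphore and state counters.
 * The caller owns the initial reference.
 */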
struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}

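/*
 * Mark the domain as being in GPU reset and take the recovery semaphore
 * for writing, blocking out readers for the duration of the reset.
 */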
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

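/* Clear the in-reset flag and release the recovery semaphore. */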
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}