// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}

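/**
 * adf_service_register() - Register a subservice with the common framework
 * @service: Pointer to the service handle describing the subservice.
 *
 * Clears the per-device init/start state and adds the service to the global
 * service table so it receives device lifecycle events.
 *
 * Return: 0 on success.
 */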
int adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}

static void adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}

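/**
 * adf_service_unregister() - Remove a subservice from the common framework
 * @service: Pointer to the service handle describing the subservice.
 *
 * Fails if the service is still marked as initialised or started on any
 * device.
 *
 * Return: 0 on success, -EFAULT if the service is still active.
 */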
int adf_service_unregister(struct service_hndl *service)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
		if (service->init_status[i] || service->start_status[i]) {
			pr_err("QAT: Could not remove active service\n");
			return -EFAULT;
		}
	}
	adf_service_remove(service);
	return 0;
}
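
/*
 * Example: how a subservice might hook into this framework. This is an
 * illustrative sketch only; my_service and my_event_handler are hypothetical
 * names, not part of the driver. The handler receives every lifecycle event
 * from enum adf_event and returns 0 on success:
 *
 *	static int my_event_handler(struct adf_accel_dev *accel_dev,
 *				    enum adf_event event)
 *	{
 *		switch (event) {
 *		case ADF_EVENT_INIT:		// allocate per-device state
 *		case ADF_EVENT_START:		// device ready for requests
 *		case ADF_EVENT_STOP:		// quiesce in-flight work
 *		case ADF_EVENT_SHUTDOWN:	// release per-device state
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct service_hndl my_service = {
 *		.name = "my_service",
 *		.event_hld = my_event_handler,
 *	};
 *
 * A module would then call adf_service_register(&my_service) on load and
 * adf_service_unregister(&my_service) on unload.
 */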

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to init device - hw_data not set\n");
		return -EFAULT;
	}

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
		return -EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
		return -EFAULT;
	}

	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
		return -EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
		return -EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
		return -EFAULT;
	}

	if (adf_ae_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to initialize Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load acceleration FW\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	hw_data->enable_ints(accel_dev);
	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    accel_dev->is_vf) {
		if (qat_crypto_vf_dev_config(accel_dev))
			return -EFAULT;
	}

	/*
	 * Subservice initialization is divided into two stages: init and
	 * start. This allows any ordering dependencies between services to
	 * be resolved before any of the accelerators are started.
	 */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to initialize service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	return 0;
}

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device-specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
		return -EFAULT;
	}

	/* Set the SSM watchdog timer */
	if (hw_data->set_ssm_wdtimer)
		hw_data->set_ssm_wdtimer(accel_dev);

	/* Enable Power Management */
	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
		return -EFAULT;
	}

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to start service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (!list_empty(&accel_dev->crypto_list) &&
	    (qat_algs_register() || qat_asym_algs_register())) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register crypto algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}

	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register compression algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	return 0;
}

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device-specific drivers.
 *
 * Return: void
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	bool wait = false;
	int ret;

	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
		return;

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (!list_empty(&accel_dev->crypto_list)) {
		qat_algs_unregister();
		qat_asym_algs_unregister();
	}

	if (!list_empty(&accel_dev->compression_list))
		qat_comp_algs_unregister();

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, service->start_status);
		} else if (ret == -EAGAIN) {
			wait = true;
			clear_bit(accel_dev->accel_id, service->start_status);
		}
	}

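	/*
	 * Services that returned -EAGAIN asked for more time to drain
	 * in-flight requests; give them a short grace period before the
	 * acceleration engines are stopped.
	 */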
	if (wait)
		msleep(100);

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}
}

/**
 * adf_dev_shutdown() - shut down acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Clean up the ring data structures and the admin comms and arbitration
 * services.
 */
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to shut down device - hw_data not set\n");
		return;
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shut down Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shut down service %s\n",
				service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	adf_dev_restore(accel_dev);
}

int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}

int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}

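/*
 * Stop and shut down the device while preserving the ADF_SERVICES_ENABLED
 * configuration entry, so the device can later be brought back up with the
 * same set of services.
 */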
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

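/**
 * adf_dev_down() - Bring down acceleration services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @reconfig: Preserve the enabled-services configuration across the shutdown.
 *
 * Stop and shut down the device. To be used by QAT device-specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */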
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (!adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
			 accel_dev->accel_id);
		ret = -EINVAL;
		goto out;
	}

	if (reconfig) {
		ret = adf_dev_shutdown_cache_cfg(accel_dev);
		goto out;
	}

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);

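/**
 * adf_dev_up() - Bring up acceleration services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @config: Run the device-specific configuration callback before starting.
 *
 * Configure (optionally), initialize and start the device. To be used by QAT
 * device-specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */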
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
			 accel_dev->accel_id);
		ret = -EALREADY;
		goto out;
	}

	if (config && GET_HW_DATA(accel_dev)->dev_config) {
		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
		if (unlikely(ret))
			goto out;
	}

	ret = adf_dev_init(accel_dev);
	if (unlikely(ret))
		goto out;

	ret = adf_dev_start(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);

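/**
 * adf_dev_restart() - Restart acceleration services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Bring the device down and back up again. To be used by QAT device-specific
 * drivers.
 *
 * Return: 0 on success, error code otherwise.
 */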
int adf_dev_restart(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	if (!accel_dev)
		return -EFAULT;

	adf_dev_down(accel_dev, false);

	ret = adf_dev_up(accel_dev, false);
	/* If the device is already up, return success */
	if (ret == -EALREADY)
		return 0;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_restart);
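
/*
 * Example: how a device-specific driver might use the exported helpers above
 * from its PCI probe/remove callbacks. This is an illustrative sketch only;
 * my_probe and my_remove are hypothetical and error handling is trimmed.
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct adf_accel_dev *accel_dev;
 *
 *		// ... allocate accel_dev, set accel_dev->hw_device, register
 *		// the device and create its configuration section ...
 *
 *		// Configure, init and start in one call
 *		return adf_dev_up(accel_dev, true);
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
 *
 *		// Stop and shut down without preserving the configuration
 *		adf_dev_down(accel_dev, false);
 *	}
 */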