// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}

int adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}
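
/*
 * Illustrative sketch (not part of this driver): a subservice plugs into
 * the lifecycle handled below by registering a struct service_hndl whose
 * layout is inferred from its use in this file (an event_hld callback, a
 * name, a list node and per-device init/start status bitmaps).  The my_*
 * names are hypothetical.
 *
 *	static struct service_hndl my_service = {
 *		.event_hld = my_event_handler,
 *		.name = "my_service",
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return adf_service_register(&my_service);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		adf_service_unregister(&my_service);
 *	}
 *
 * adf_service_unregister() refuses to drop a service that is still marked
 * as initialised or started for any device.
 */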

static void adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}

int adf_service_unregister(struct service_hndl *service)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
		if (service->init_status[i] || service->start_status[i]) {
			pr_err("QAT: Could not remove active service\n");
			return -EFAULT;
		}
	}
	adf_service_remove(service);
	return 0;
}

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to init device - hw_data not set\n");
		return -EFAULT;
	}

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
		return -EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
		return -EFAULT;
	}

	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
		return -EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
		return -EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
		return -EFAULT;
	}

	if (adf_ae_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to initialise Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load acceleration FW\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	hw_data->enable_ints(accel_dev);
	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    accel_dev->is_vf) {
		if (qat_crypto_vf_dev_config(accel_dev))
			return -EFAULT;
	}

	adf_heartbeat_init(accel_dev);

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to initialise service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	return 0;
}
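
/*
 * Illustrative sketch (not part of this driver): how a registered
 * service's event_hld callback could split its work across the two
 * stages described in adf_dev_init() above - allocate and prepare state
 * on ADF_EVENT_INIT, and only go live on ADF_EVENT_START once every
 * service has initialised.  The my_* helpers are hypothetical.
 *
 *	static int my_event_handler(struct adf_accel_dev *accel_dev,
 *				    enum adf_event event)
 *	{
 *		switch (event) {
 *		case ADF_EVENT_INIT:
 *			return my_alloc_instances(accel_dev);
 *		case ADF_EVENT_START:
 *			return my_start_instances(accel_dev);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 * A non-zero return from either stage aborts adf_dev_init() or
 * adf_dev_start() for that device.
 */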

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	int ret;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
		return -EFAULT;
	}

	if (hw_data->measure_clock) {
		ret = hw_data->measure_clock(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to measure device clock\n");
			return ret;
		}
	}

	/* Set SSM watchdog timer */
	if (hw_data->set_ssm_wdtimer)
		hw_data->set_ssm_wdtimer(accel_dev);

	/* Enable Power Management */
	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
		return -EFAULT;
	}

	if (hw_data->start_timer) {
		ret = hw_data->start_timer(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
			return ret;
		}
	}

	adf_heartbeat_start(accel_dev);

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to start service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (!list_empty(&accel_dev->crypto_list) &&
	    (qat_algs_register() || qat_asym_algs_register())) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register crypto algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}

	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register compression algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}

	adf_dbgfs_add(accel_dev);

	return 0;
}

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	bool wait = false;
	int ret;

	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
		return;

	adf_dbgfs_rm(accel_dev);

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (!list_empty(&accel_dev->crypto_list)) {
		qat_algs_unregister();
		qat_asym_algs_unregister();
	}

	if (!list_empty(&accel_dev->compression_list))
		qat_comp_algs_unregister();

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, service->start_status);
		} else if (ret == -EAGAIN) {
			wait = true;
			clear_bit(accel_dev->accel_id, service->start_status);
		}
	}

	if (hw_data->stop_timer)
		hw_data->stop_timer(accel_dev);

	if (wait)
		msleep(100);

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}
}
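
/*
 * Illustrative sketch (not part of this driver): a service that still has
 * requests in flight can ask adf_dev_stop() above for a grace period by
 * returning -EAGAIN from its ADF_EVENT_STOP handling; the stop path then
 * sleeps for 100 ms before stopping the acceleration engines.  The my_*
 * helpers are hypothetical.
 *
 *	static int my_stop_handler(struct adf_accel_dev *accel_dev)
 *	{
 *		my_quiesce_queues(accel_dev);
 *		if (my_requests_pending(accel_dev))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */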

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown service %s\n",
				service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	adf_heartbeat_shutdown(accel_dev);

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	adf_dev_restore(accel_dev);
}

int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}

int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}
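
/*
 * Illustrative sketch (not part of this driver): a service that wants to
 * survive a device reset can react to the two notifications above by
 * quiescing its queues on ADF_EVENT_RESTARTING and resuming them on
 * ADF_EVENT_RESTARTED.  The my_* helpers are hypothetical.
 *
 *	static int my_restart_handler(struct adf_accel_dev *accel_dev,
 *				      enum adf_event event)
 *	{
 *		switch (event) {
 *		case ADF_EVENT_RESTARTING:
 *			my_pause_queues(accel_dev);
 *			return 0;
 *		case ADF_EVENT_RESTARTED:
 *			my_resume_queues(accel_dev);
 *			return 0;
 *		default:
 *			return 0;
 *		}
 *	}
 */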

static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (!adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
			 accel_dev->accel_id);
		ret = -EINVAL;
		goto out;
	}

	if (reconfig) {
		ret = adf_dev_shutdown_cache_cfg(accel_dev);
		goto out;
	}

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);

int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
			 accel_dev->accel_id);
		ret = -EALREADY;
		goto out;
	}

	if (config && GET_HW_DATA(accel_dev)->dev_config) {
		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
		if (unlikely(ret))
			goto out;
	}

	ret = adf_dev_init(accel_dev);
	if (unlikely(ret))
		goto out;

	ret = adf_dev_start(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);

int adf_dev_restart(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	if (!accel_dev)
		return -EFAULT;

	adf_dev_down(accel_dev, false);

	ret = adf_dev_up(accel_dev, false);
	/* If the device is already up, return success */
	if (ret == -EALREADY)
		return 0;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_restart);
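
/*
 * Illustrative sketch (not part of this driver): how a caller such as a
 * device-specific driver or a control interface might use the exported
 * state API.  Passing reconfig/config as true makes adf_dev_down()
 * preserve the enabled-services configuration and adf_dev_up() re-run the
 * device configuration before init.  The qat_dev_reconfigure() name is
 * hypothetical.
 *
 *	static int qat_dev_reconfigure(struct adf_accel_dev *accel_dev)
 *	{
 *		int ret;
 *
 *		ret = adf_dev_down(accel_dev, true);
 *		if (ret && ret != -EINVAL)
 *			return ret;
 *
 *		return adf_dev_up(accel_dev, true);
 *	}
 *
 * adf_dev_restart() above is the non-reconfiguring equivalent: it brings
 * the device straight back up and treats an already-running device
 * (-EALREADY from adf_dev_up()) as success.
 */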