1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2020 Intel Corporation */
3 #include <linux/device.h>
4 #include <linux/module.h>
5 #include <linux/pci.h>
6 
7 #include <adf_accel_devices.h>
8 #include <adf_cfg.h>
9 #include <adf_common_drv.h>
10 #include <adf_dbgfs.h>
11 
12 #include "adf_4xxx_hw_data.h"
13 #include "qat_compression.h"
14 #include "qat_crypto.h"
15 #include "adf_transport_access_macros.h"
16 
/* PCI IDs of the GEN4 QAT devices serviced by this driver */
static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
24 
/*
 * Indexes of the supported service configurations.
 * Must stay in sync, entry for entry, with services_operations[] below:
 * sysfs_match_string() returns one of these indexes for a matched string.
 */
enum configs {
	DEV_CFG_CY = 0,
	DEV_CFG_DC,
	DEV_CFG_SYM,
	DEV_CFG_ASYM,
	DEV_CFG_ASYM_SYM,
	DEV_CFG_ASYM_DC,
	DEV_CFG_DC_ASYM,
	DEV_CFG_SYM_DC,
	DEV_CFG_DC_SYM,
};
36 
/*
 * Accepted values for the ServicesEnabled configuration key.
 * Array order must match enum configs above; adf_gen4_dev_config()
 * matches the configured string against this table.
 */
static const char * const services_operations[] = {
	ADF_CFG_CY,
	ADF_CFG_DC,
	ADF_CFG_SYM,
	ADF_CFG_ASYM,
	ADF_CFG_ASYM_SYM,
	ADF_CFG_ASYM_DC,
	ADF_CFG_DC_ASYM,
	ADF_CFG_SYM_DC,
	ADF_CFG_DC_SYM,
};
48 
/*
 * adf_cleanup_accel() - Release resources associated with an accel device.
 * @accel_dev: Accelerator device being torn down.
 *
 * Safe to call on a partially initialized device (probe error paths):
 * hw_device is only cleaned if it was allocated.  Teardown order mirrors
 * the reverse of setup: hw data, debugfs, config table, device manager.
 */
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->hw_device) {
		adf_clean_hw_data_4xxx(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_dbgfs_exit(accel_dev);
	adf_cfg_dev_remove(accel_dev);
	adf_devmgr_rm_dev(accel_dev, NULL);
}
59 
60 static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
61 {
62 	const char *config;
63 	int ret;
64 
65 	config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
66 
67 	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
68 	if (ret)
69 		return ret;
70 
71 	/* Default configuration is crypto only for even devices
72 	 * and compression for odd devices
73 	 */
74 	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
75 					  ADF_SERVICES_ENABLED, config,
76 					  ADF_STR);
77 	if (ret)
78 		return ret;
79 
80 	return 0;
81 }
82 
/*
 * adf_crypto_dev_config() - Generate a crypto-only device configuration.
 * @accel_dev: Accelerator device to configure.
 *
 * Creates one kernel crypto instance per online CPU, capped by the number
 * of available ring-bank pairs (each instance uses two banks: one for
 * asymmetric, one for symmetric traffic).  For each instance the bank
 * numbers, ring sizes, ring numbers, core affinity and interrupt
 * coalescing timer are written to the device configuration table.
 * The compression instance count is set to zero.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long bank, val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks / 2);	/* two banks per instance */
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;	/* core affinity: pin instance i to CPU i */
		bank = i * 2;	/* even bank for asym, odd bank for sym */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		bank += 1;	/* sym bank immediately follows the asym bank */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/* Ring depths: 128 entries for asym, 512 for sym */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/* Ring numbers within each bank: TX on ring 0, RX on ring 1 */
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/*
		 * Interrupt coalescing timer lives in the "Accelerator0"
		 * section, keyed by bank via the format string.
		 * NOTE(review): the format is given the instance index i,
		 * not the bank number - presumably intended; confirm against
		 * ADF_ETRMGR_COALESCE_TIMER_FORMAT users.
		 */
		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	/* i == number of instances actually configured */
	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	/* Crypto-only configuration: no compression instances */
	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
	return ret;
}
188 
/*
 * adf_comp_dev_config() - Generate a compression-only device configuration.
 * @accel_dev: Accelerator device to configure.
 *
 * Creates one kernel compression instance per online CPU, capped by the
 * number of available ring banks (one bank per instance).  For each
 * instance the bank number, ring size, ring numbers and interrupt
 * coalescing timer are written to the device configuration table.
 * The crypto instance count is set to zero.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_compression(accel_dev))
		instances = min(cpus, banks);	/* one bank per instance */
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;	/* instance i uses bank i */
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/* Ring depth: 512 entries */
		val = 512;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/* Ring numbers within the bank: TX on ring 0, RX on ring 1 */
		val = 0;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		/* Per-bank interrupt coalescing timer ("Accelerator0" section) */
		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	/* i == number of instances actually configured */
	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	/* Compression-only configuration: no crypto instances */
	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
	return ret;
}
258 
259 static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
260 {
261 	unsigned long val;
262 	int ret;
263 
264 	val = 0;
265 	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
266 					  &val, ADF_DEC);
267 	if (ret)
268 		return ret;
269 
270 	return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
271 					  &val, ADF_DEC);
272 }
273 
274 int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
275 {
276 	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
277 	int ret;
278 
279 	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
280 	if (ret)
281 		goto err;
282 
283 	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
284 	if (ret)
285 		goto err;
286 
287 	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
288 				      ADF_SERVICES_ENABLED, services);
289 	if (ret)
290 		goto err;
291 
292 	ret = sysfs_match_string(services_operations, services);
293 	if (ret < 0)
294 		goto err;
295 
296 	switch (ret) {
297 	case DEV_CFG_CY:
298 	case DEV_CFG_ASYM_SYM:
299 		ret = adf_crypto_dev_config(accel_dev);
300 		break;
301 	case DEV_CFG_DC:
302 		ret = adf_comp_dev_config(accel_dev);
303 		break;
304 	default:
305 		ret = adf_no_dev_config(accel_dev);
306 		break;
307 	}
308 
309 	if (ret)
310 		goto err;
311 
312 	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
313 
314 	return ret;
315 
316 err:
317 	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
318 	return ret;
319 }
320 
/*
 * adf_probe() - PCI probe callback for GEN4 QAT devices.
 * @pdev: PCI device being probed.
 * @ent:  Matched entry from adf_pci_tbl (its device ID selects hw data).
 *
 * Allocates and registers the accelerator device, initializes the
 * hardware meta-data, maps BARs, seeds the default configuration and
 * brings the device up.  Memory and PCI resources are devres-managed
 * (devm_*/pcim_*), so only driver-level state needs explicit cleanup.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	struct adf_bar *bar;
	int ret;

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/*
	 * Add accel device to accel table
	 * This should be called before adf_cleanup_accel is called
	 */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and initialise device hardware meta-data structure */
	hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found.\n");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Enable PCI device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable PCI device.\n");
		goto out_err;
	}

	/* Set DMA identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto out_err;
	}

	/* Store the default ServicesEnabled value for this device */
	ret = adf_cfg_dev_init(accel_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize configuration.\n");
		goto out_err;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask) {
		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* Find and map all the device's BARS */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;

	ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Failed to map pci regions.\n");
		goto out_err;
	}

	/* Record the virtual address of each mapped BAR, in mask order */
	i = 0;
	for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
	}

	pci_set_master(pdev);

	/* Saved state is restored on error recovery / reset paths */
	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	adf_dbgfs_init(accel_dev);

	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto out_err_dev_stop;

	ret = adf_sysfs_init(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	/* adf_dev_down() also tears down a partially started device */
	adf_dev_down(accel_dev, false);
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}
458 
459 static void adf_remove(struct pci_dev *pdev)
460 {
461 	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
462 
463 	if (!accel_dev) {
464 		pr_err("QAT: Driver removal failed\n");
465 		return;
466 	}
467 	adf_dev_down(accel_dev, false);
468 	adf_cleanup_accel(accel_dev);
469 }
470 
/* PCI driver operations for GEN4 QAT devices */
static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_4XXX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};
479 
module_pci_driver(adf_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
/* Firmware images loaded by the common QAT code for this device family */
MODULE_FIRMWARE(ADF_4XXX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
/* Ensure the common intel_qat module is loaded first */
MODULE_SOFTDEP("pre: crypto-intel_qat");
489