// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
#include <adf_heartbeat.h>

#include "adf_4xxx_hw_data.h"
#include "adf_cfg_services.h"
#include "qat_compression.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
	{ PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
	{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

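/*
 * Tear down the per-device state created during probe: release the
 * hardware data, remove the debugfs entries and the configuration table,
 * and unregister the device from the accelerator framework.
 */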
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->hw_device) {
		adf_clean_hw_data_4xxx(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_dbgfs_exit(accel_dev);
	adf_cfg_dev_remove(accel_dev);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

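/*
 * Populate the GENERAL section with the default enabled services
 * (crypto for even device ids, compression for odd ones) and record
 * the minimum heartbeat timer value.
 */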
static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
{
	const char *config;
	int ret;

	config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;

	ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
	if (ret)
		return ret;

	/* Default configuration is crypto only for even devices
	 * and compression for odd devices
	 */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_SERVICES_ENABLED, config,
					  ADF_STR);
	if (ret)
		return ret;

	adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);

	return 0;
}

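/*
 * Create kernel crypto instances: one per online CPU, capped by the
 * number of available banks divided by two, since each instance uses
 * one bank for asymmetric rings and one for symmetric rings.
 */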
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long bank, val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks / 2);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		bank = i * 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		bank += 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &bank, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
	return ret;
}

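/*
 * Create kernel compression instances: one per online CPU, capped by the
 * number of available banks, with a single ring pair per instance.
 */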
static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_compression(accel_dev))
		instances = min(cpus, banks);
	else
		instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 1;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
	return ret;
}

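/*
 * Used when the enabled services expose no kernel instances: report zero
 * crypto and zero compression instances in the configuration table.
 */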
static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
	unsigned long val;
	int ret;

	val = 0;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					   &val, ADF_DEC);
}

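/*
 * Build the kernel section of the configuration table according to the
 * services enabled in the GENERAL section, then mark the device as
 * configured.
 */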
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		goto err;

	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
	if (ret)
		goto err;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		goto err;

	ret = sysfs_match_string(adf_cfg_services, services);
	if (ret < 0)
		goto err;

	switch (ret) {
	case SVC_CY:
	case SVC_CY2:
		ret = adf_crypto_dev_config(accel_dev);
		break;
	case SVC_DC:
	case SVC_DCC:
		ret = adf_comp_dev_config(accel_dev);
		break;
	default:
		ret = adf_no_dev_config(accel_dev);
		break;
	}

	if (ret)
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	return ret;

err:
	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
	return ret;
}

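/*
 * Probe routine: allocate and register the accelerator device, map its
 * BARs, build the default configuration and bring the device up.
 */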
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	struct adf_bar *bar;
	int ret;

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/*
	 * Add accel device to accel table
	 * This should be called before adf_cleanup_accel is called
	 */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and initialise device hardware meta-data structure */
	hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found.\n");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Enable PCI device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Can't enable PCI device.\n");
		goto out_err;
	}

	/* Set DMA identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto out_err;
	}

	ret = adf_cfg_dev_init(accel_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize configuration.\n");
		goto out_err;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask) {
		dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* Find and map all the device's BARS */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;

	ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Failed to map pci regions.\n");
		goto out_err;
	}

	i = 0;
	for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
	}

	pci_set_master(pdev);

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	adf_dbgfs_init(accel_dev);

	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto out_err_dev_stop;

	ret = adf_sysfs_init(accel_dev);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_down(accel_dev, false);
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}

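/* Bring the device down and release all resources acquired at probe time. */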
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_down(accel_dev, false);
	adf_cleanup_accel(accel_dev);
}

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_4XXX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};

module_pci_driver(adf_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
MODULE_FIRMWARE(ADF_402XX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");