// SPDX-License-Identifier: GPL-2.0-only
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_debugfs.h"

#define CNN55XX_DEV_ID	0x12
#define UCODE_HLEN 48
#define DEFAULT_SE_GROUP 0
#define DEFAULT_AE_GROUP 0

#define DRIVER_VERSION "1.2"
#define CNN55XX_UCD_BLOCK_SIZE 32768
#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW	FW_DIR "cnn55xx_se.fw"
/* AE microcode */
#define AE_FW	FW_DIR "cnn55xx_ae.fw"
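
/*
 * Both images are fetched through the kernel firmware loader, so on a
 * typical system they are installed as /lib/firmware/cavium/cnn55xx_se.fw
 * and /lib/firmware/cavium/cnn55xx_ae.fw (the exact location depends on
 * the distribution's firmware search path).
 */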

static const char nitrox_driver_name[] = "CNN55XX";

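/*
 * Global list of probed NITROX devices; devlist_lock protects both the
 * list and the num_devices counter.
 */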
static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;

/**
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
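
/*
 * Note: qlen is only sampled at probe time (ndev->qlen below), so writing
 * the 0644 module parameter later only affects devices probed afterwards.
 */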

#ifdef CONFIG_PCI_IOV
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif

/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version
 * @code_size: code section size
 * @raz: alignment
 * @code: code section
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};
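
/*
 * With VERSION_LEN of 32, the fixed header above adds up to UCODE_HLEN (48)
 * bytes (1 + 31 + 4 + 12); the code section follows immediately after the
 * header.
 */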

/**
 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
 * @ndev: NITROX device
 * @ucode_size: microcode size in bytes
 * @ucode_data: pointer to the microcode
 * @block_num: UCD block to load the microcode into
 */
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
			      u64 *ucode_data, int block_num)
{
	u32 code_size;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 *  -------------
	 *  |    BLK 7  |
	 *  -------------
	 *  |    BLK 6  |
	 *  -------------
	 *  |    ...    |
	 *  -------------
	 *  |    BLK 0  |
	 *  -------------
	 *  Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, block_num);

	code_size = ucode_size;
	code_size = roundup(code_size, 8);
	while (code_size) {
		data = ucode_data[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

	usleep_range(300, 400);
}

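/**
 * nitrox_load_fw - load SE and AE microcode into the UCD unit
 * @ndev: NITROX device
 *
 * Loads the SE image into UCD block 0 and the AE image into UCD block 2,
 * places all SE/AE cores in their default groups and points each core at
 * its microcode block.
 *
 * Return: 0 on success, negative error code on failure.
 */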
static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* Load SE Firmware on UCD Block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* put all SE cores in DEFAULT_SE_GROUP */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set SE uses 32KB microcode
	 * bit:3 is clear SE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* Load AE Firmware on UCD Block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* put all AE cores in DEFAULT_AE_GROUP */
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set AE uses 32KB microcode
	 * bit:3 is clear AE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	/* AE firmware was loaded on UCD block 2 above */
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}

/**
 * nitrox_add_to_devlist - add NITROX device to global device list
 * @ndev: NITROX device
 *
 * Return: 0 on success, -EEXIST if the device is already on the list.
 */
static int nitrox_add_to_devlist(struct nitrox_device *ndev)
{
	struct nitrox_device *dev;
	int ret = 0;

	INIT_LIST_HEAD(&ndev->list);
	refcount_set(&ndev->refcnt, 1);

	mutex_lock(&devlist_lock);
	list_for_each_entry(dev, &ndevlist, list) {
		if (dev == ndev) {
			ret = -EEXIST;
			goto unlock;
		}
	}
	ndev->idx = num_devices++;
	list_add_tail(&ndev->list, &ndevlist);
unlock:
	mutex_unlock(&devlist_lock);
	return ret;
}

/**
 * nitrox_remove_from_devlist - remove NITROX device from
 *   global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}

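/**
 * nitrox_get_first_device - get the first ready NITROX device from the
 *   global device list and take a reference on it
 *
 * Return: pointer to the device, or NULL if no device is ready.
 */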
struct nitrox_device *nitrox_get_first_device(void)
{
	struct nitrox_device *ndev;

	mutex_lock(&devlist_lock);
	list_for_each_entry(ndev, &ndevlist, list) {
		if (nitrox_ready(ndev))
			break;
	}
	mutex_unlock(&devlist_lock);
	/* if no device is ready, the iterator points back at the list head */
	if (&ndev->list == &ndevlist)
		return NULL;

	refcount_inc(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return ndev;
}

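/**
 * nitrox_put_device - release a reference taken by nitrox_get_first_device()
 * @ndev: NITROX device (may be NULL)
 */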
void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}

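/*
 * Issue a Function Level Reset if the device supports it, saving and
 * restoring PCI config space around the reset.
 */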
static int nitrox_device_flr(struct pci_dev *pdev)
{
	int pos = 0;

	pos = pci_save_state(pdev);
	if (pos) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		return -ENOMEM;
	}

	/* check flr support */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	pci_restore_state(pdev);

	return 0;
}

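/*
 * PF software init: set up the common software state first, then hook up
 * interrupts; the common state is torn down again if interrupt
 * registration fails.
 */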
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

	return err;
}

static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_bist_check - Check NITROX BIST registers status
 * @ndev: NITROX device
 *
 * Return: 0 if all BIST status registers read zero, -EIO otherwise.
 */
static int nitrox_bist_check(struct nitrox_device *ndev)
{
	u64 value = 0;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
	}
	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
	value += nitrox_read_csr(ndev, POM_BIST_REG);
	value += nitrox_read_csr(ndev, BMI_BIST_REG);
	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
	value += nitrox_read_csr(ndev, BMO_BIST_REG);
	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
	if (value)
		return -EIO;
	return 0;
}

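/**
 * nitrox_pf_hw_init - bring up the PF hardware
 * @ndev: NITROX device
 *
 * Checks BIST status, reads the core configuration, programs the on-chip
 * units and loads the SE/AE microcode.
 *
 * Return: 0 on success, negative error code on failure.
 */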
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}

/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0 if the driver is bound to the device, or a negative error
 *         code on failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		pci_disable_device(pdev);
		return err;
	}

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			pci_disable_device(pdev);
			return err;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err) {
		pci_disable_device(pdev);
		return err;
	}
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on cpus, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto bar_unmap;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
bar_unmap:
	iounmap(ndev->bar_addr);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	return err;
}

/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

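/*
 * On shutdown only the PCI resources are released; queues and crypto
 * registrations are left in place since the system is going down.
 */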
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove	= nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};

module_pci_driver(nitrox_driver);

MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);
MODULE_FIRMWARE(AE_FW);