// SPDX-License-Identifier: GPL-2.0-only
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_debugfs.h"

#define CNN55XX_DEV_ID	0x12
#define UCODE_HLEN 48
#define DEFAULT_SE_GROUP 0
#define DEFAULT_AE_GROUP 0

#define DRIVER_VERSION "1.2"
#define CNN55XX_UCD_BLOCK_SIZE 32768
#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW	FW_DIR "cnn55xx_se.fw"
/* AE microcode */
#define AE_FW	FW_DIR "cnn55xx_ae.fw"

static const char nitrox_driver_name[] = "CNN55XX";

static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;

/*
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version
 * @code_size: code section size
 * @raz: alignment
 * @code: code section
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};

/*
 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
 */
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
			      u64 *ucode_data, int block_num)
{
	u32 code_size;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 *  -------------
	 *  |    BLK 7  |
	 *  -------------
	 *  |    BLK 6  |
	 *  -------------
	 *  |    ...    |
	 *  -------------
	 *  |    BLK 0  |
	 *  -------------
	 *  Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, block_num);

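	/*
	 * The microcode image is pushed into the selected block 8 bytes at
	 * a time through the load data registers; the size is rounded up
	 * to a 16-byte multiple below (assumed to match the load
	 * granularity the UCD unit expects) so the loop always writes
	 * whole 64-bit words.
	 */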
	code_size = roundup(ucode_size, 16);
	while (code_size) {
		data = ucode_data[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

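	/*
	 * No completion status is polled here; the short delay is assumed
	 * to give the UCD unit time to latch the microcode before the
	 * cores are pointed at this block.
	 */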
	usleep_range(300, 400);
}

static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

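	/*
	 * code_size in the header is big-endian; the byte size used below
	 * is twice this value (the header appears to count 2-byte units)
	 * and must fit within two 32KB UCD blocks.
	 */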
	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* Load SE Firmware on UCD Block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* put all SE cores in DEFAULT_SE_GROUP */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* write block number and firmware length
	 * bit<2:0>: block number
	 * bit<3> set: SE uses 32KB microcode
	 * bit<3> clear: SE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* Load AE Firmware on UCD Block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* put all AE cores in DEFAULT_AE_GROUP */
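	/* each execution mask covers 40 AE cores (0-39 and 40-79) */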
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* write block number and firmware length
	 * bit<2:0>: block number
	 * bit<3> set: AE uses 32KB microcode
	 * bit<3> clear: AE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}

/**
 * nitrox_add_to_devlist - add NITROX device to global device list
 * @ndev: NITROX device
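 *
 * Return: 0 on success, -EEXIST if the device is already on the list.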
 */
static int nitrox_add_to_devlist(struct nitrox_device *ndev)
{
	struct nitrox_device *dev;
	int ret = 0;

	INIT_LIST_HEAD(&ndev->list);
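	/* initial reference, dropped in nitrox_remove() */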
	refcount_set(&ndev->refcnt, 1);

	mutex_lock(&devlist_lock);
	list_for_each_entry(dev, &ndevlist, list) {
		if (dev == ndev) {
			ret = -EEXIST;
			goto unlock;
		}
	}
	ndev->idx = num_devices++;
	list_add_tail(&ndev->list, &ndevlist);
unlock:
	mutex_unlock(&devlist_lock);
	return ret;
}

/**
 * nitrox_remove_from_devlist - remove NITROX device from
 *   global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}

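/**
 * nitrox_get_first_device - get the first NITROX device in ready state
 *
 * Takes a reference on the returned device; the caller must drop it
 * with nitrox_put_device().
 *
 * Return: pointer to a ready NITROX device, or NULL if none is available.
 */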
struct nitrox_device *nitrox_get_first_device(void)
{
	struct nitrox_device *ndev = NULL, *iter;

	mutex_lock(&devlist_lock);
	list_for_each_entry(iter, &ndevlist, list) {
		if (nitrox_ready(iter)) {
			ndev = iter;
			break;
		}
	}
	mutex_unlock(&devlist_lock);
	if (!ndev)
		return NULL;

	refcount_inc(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return ndev;
}

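/**
 * nitrox_put_device - drop a reference taken by nitrox_get_first_device()
 * @ndev: NITROX device, may be NULL
 */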
void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}

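/*
 * nitrox_device_flr - issue a PCIe Function Level Reset
 *
 * FLR clears the device's configuration space, so the PCI state is
 * saved before the reset and restored afterwards.
 */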
static int nitrox_device_flr(struct pci_dev *pdev)
{
	int ret;

	ret = pci_save_state(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		return ret;
	}

	pcie_reset_flr(pdev, PCI_RESET_DO_RESET);

	pci_restore_state(pdev);

	return 0;
}

static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

	return err;
}

static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_bist_check - Check NITROX BIST registers status
 * @ndev: NITROX device
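 *
 * Return: 0 if all BIST status registers read zero, -EIO otherwise.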
 */
static int nitrox_bist_check(struct nitrox_device *ndev)
{
	u64 value = 0;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
	}
	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
	value += nitrox_read_csr(ndev, POM_BIST_REG);
	value += nitrox_read_csr(ndev, BMI_BIST_REG);
	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
	value += nitrox_read_csr(ndev, BMO_BIST_REG);
	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
	if (value)
		return -EIO;
	return 0;
}

static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}

/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0, if the driver is bound to the device, or
 *         a negative error if there is failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		goto flr_fail;
	}

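	/* prefer a 64-bit DMA mask; fall back to 32-bit if that fails */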
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			goto flr_fail;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err)
		goto flr_fail;
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

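	/* map BAR 0, which holds the device CSRs */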
	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on CPUs, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto pf_sw_fail;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
pf_sw_fail:
	iounmap(ndev->bar_addr);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
flr_fail:
	pci_disable_device(pdev);
	return err;
}

/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

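/**
 * nitrox_shutdown - release device resources on system shutdown
 * @pdev: PCI device information struct
 */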
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove	= nitrox_remove,
	.shutdown = nitrox_shutdown,
	.sriov_configure = nitrox_sriov_configure,
};

module_pci_driver(nitrox_driver);

MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);
MODULE_FIRMWARE(AE_FW);