// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */

/*
 * PCI driver glue for the Intel QuickAssist (QAT) DH895xCC accelerator.
 * Handles probe/remove and the raw PCI resources (BARs, DMA mask, config
 * space), then hands the device to the common ADF core via adf_dev_up() /
 * adf_dev_down() for the actual accelerator bring-up.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_dh895xcc_hw_data.h"

/* Only the DH895xCC physical function is claimed by this driver. */
static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
	{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

/*
 * SR-IOV enable/disable and AER recovery are delegated to the shared ADF
 * core (adf_sriov_configure / adf_err_handler, declared in adf_common_drv.h).
 */
static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_DH895XCC_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};

/*
 * Counterpart of the pci_request_regions()/pci_enable_device() calls in
 * adf_probe(); used only on the remove path (the probe error path releases
 * these resources through its own goto ladder instead).
 */
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}

/*
 * Undo the per-device software state built up in adf_probe(): BAR mappings,
 * hw_data, config table, debugfs directory and device-manager registration.
 * Tolerates a partially initialized device — only BARs with a non-NULL
 * virt_addr are unmapped, and hw_data is only cleaned if it was allocated.
 */
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int i;

	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		/* Per-generation teardown; only DH895xCC is possible here. */
		switch (accel_pci_dev->pci_dev->device) {
		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

/*
 * Probe one DH895xCC device: allocate the accel_dev on the device's NUMA
 * node, register it with the ADF device manager, read the fuse straps to
 * derive the accelerator/AE masks, map the memory BARs and finally start
 * the device through adf_dev_up().
 *
 * Errors unwind through the goto ladder in reverse acquisition order.
 * NOTE(review): several failures return -EFAULT where -EIO/-ENODEV would be
 * more conventional; kept as-is since callers only test for non-zero.
 */
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/* If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow. */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	/* Keep driver state local to the device's NUMA node. */
	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = pdev;

	/* Add accel device to accel table.
	 * This should be called before adf_cleanup_accel is called */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_dh895xcc(accel_dev->hw_device);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	/* Fuse straps; presumably consumed by the get_accel_mask()/
	 * get_ae_mask() helpers below — TODO confirm against hw_data impl. */
	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
			      &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it.
	 * Also reject it if AE 0 (bit 0 of ae_mask) is fused off. */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    ((~hw_data->ae_mask) & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
		 hw_data->dev_class->name, pci_name(pdev));

	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	/* Raise the PCIe max read request size to 1024 bytes. */
	pcie_set_readrq(pdev, 1024);

	/* enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* set dma identifier */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		goto out_err_disable;
	}

	if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
		ret = -EFAULT;
		goto out_err_disable;
	}

	/* Get accelerator capabilities mask */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);

	/* Find and map all the device's BARS.
	 * NOTE(review): the scan window is ADF_PCI_MAX_BARS * 2, presumably
	 * because each BAR is 64-bit and occupies two BAR slots — confirm. */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
			ret = -EFAULT;
			goto out_err_free_reg;
		}
	}
	pci_set_master(pdev);

	/* Snapshot config space so it can be restored later (the driver
	 * installs adf_err_handler for AER recovery). */
	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		ret = -ENOMEM;
		goto out_err_free_reg;
	}

	/* Hand off to the ADF core to start the device. */
	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto out_err_dev_stop;

	return ret;

out_err_dev_stop:
	adf_dev_down(accel_dev, false);
out_err_free_reg:
	pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
	pci_disable_device(accel_pci_dev->pci_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	kfree(accel_dev);
	return ret;
}

/*
 * Remove path: stop the device via the ADF core, then tear down software
 * state and release the PCI resources acquired in adf_probe().
 */
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_down(accel_dev, false);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}

static int __init adfdrv_init(void)
{
	/* Make sure the shared QAT core module is loaded before we register
	 * and probing can call into it. */
	request_module("intel_qat");

	if (pci_register_driver(&adf_driver)) {
		pr_err("QAT: Driver initialization failed\n");
		return -EFAULT;
	}
	return 0;
}

static void __exit adfdrv_release(void)
{
	pci_unregister_driver(&adf_driver);
}

module_init(adfdrv_init);
module_exit(adfdrv_release);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_DH895XCC_FW);
MODULE_FIRMWARE(ADF_DH895XCC_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);