// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"

static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
				 u32 fw_size)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct icp_qat_fw_loader_handle *loader;
	const char *obj_name;
	u32 num_objs;
	u32 ae_mask;
	int i;

	loader = loader_data->fw_loader;
	num_objs = hw_device->uof_get_num_objs();

	for (i = 0; i < num_objs; i++) {
		obj_name = hw_device->uof_get_name(accel_dev, i);
		ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
		if (!obj_name || !ae_mask) {
			dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
			goto out_err;
		}

		if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
			dev_err(&GET_DEV(accel_dev),
				"Invalid mask for UOF image\n");
			goto out_err;
		}
		if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to map UOF firmware\n");
			goto out_err;
		}
		if (qat_uclo_wr_all_uimage(loader)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to load UOF firmware\n");
			goto out_err;
		}
		qat_uclo_del_obj(loader);
	}

	return 0;

out_err:
	adf_ae_fw_release(accel_dev);
	return -EFAULT;
}

int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	void *fw_addr, *mmp_addr;
	u32 fw_size, mmp_size;

	if (!hw_device->fw_name)
		return 0;

	if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
			hw_device->fw_mmp_name);
		return -EFAULT;
	}
	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
			hw_device->fw_name);
		goto out_err;
	}

	fw_size = loader_data->uof_fw->size;
	fw_addr = (void *)loader_data->uof_fw->data;
	mmp_size = loader_data->mmp_fw->size;
	mmp_addr = (void *)loader_data->mmp_fw->data;

	if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
		goto out_err;
	}

	if (hw_device->uof_get_num_objs)
		return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);

	if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
		dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
		goto out_err;
	}
	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
		goto out_err;
	}
	return 0;

out_err:
	adf_ae_fw_release(accel_dev);
	return -EFAULT;
}

void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	if (!hw_device->fw_name)
		return;

	qat_uclo_del_obj(loader_data->fw_loader);
	qat_hal_deinit(loader_data->fw_loader);
	release_firmware(loader_data->uof_fw);
	release_firmware(loader_data->mmp_fw);
	loader_data->uof_fw = NULL;
	loader_data->mmp_fw = NULL;
	loader_data->fw_loader = NULL;
}

int adf_ae_start(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 ae_ctr;

	if (!hw_data->fw_name)
		return 0;

	ae_ctr = qat_hal_start(loader_data->fw_loader);
	dev_info(&GET_DEV(accel_dev),
		 "qat_dev%d started %d acceleration engines\n",
		 accel_dev->accel_id, ae_ctr);
	return 0;
}

int adf_ae_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);

	if (!hw_data->fw_name)
		return 0;

	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
		if (hw_data->ae_mask & (1 << ae)) {
			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
			ae_ctr++;
		}
	}
	dev_info(&GET_DEV(accel_dev),
		 "qat_dev%d stopped %d acceleration engines\n",
		 accel_dev->accel_id, ae_ctr);
	return 0;
}

static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;

	qat_hal_reset(loader_data->fw_loader);
	if (qat_hal_clr_reset(loader_data->fw_loader))
		return -EFAULT;

	return 0;
}

int adf_ae_init(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	if (!hw_device->fw_name)
		return 0;

	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
	if (!loader_data)
		return -ENOMEM;

	accel_dev->fw_loader = loader_data;
	if (qat_hal_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
		kfree(loader_data);
		return -EFAULT;
	}
	if (adf_ae_reset(accel_dev, 0)) {
		dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
		qat_hal_deinit(loader_data->fw_loader);
		kfree(loader_data);
		return -EFAULT;
	}
	return 0;
}

int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	if (!hw_device->fw_name)
		return 0;

	qat_hal_deinit(loader_data->fw_loader);
	kfree(accel_dev->fw_loader);
	accel_dev->fw_loader = NULL;
	return 0;
}