// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"

/*
 * Logical firmware object types. Used as indices into the per-device
 * firmware object name tables below.
 */
enum adf_fw_objs {
	ADF_FW_SYM_OBJ,		/* symmetric crypto firmware image */
	ADF_FW_ASYM_OBJ,	/* asymmetric crypto firmware image */
	ADF_FW_DC_OBJ,		/* compression firmware image */
	ADF_FW_ADMIN_OBJ,	/* admin firmware image */
};

/* Firmware object file names for 4xxx devices, indexed by enum adf_fw_objs */
static const char * const adf_4xxx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
};

/* Firmware object file names for 402xx devices, indexed by enum adf_fw_objs */
static const char * const adf_402xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};

/*
 * Maps a set of acceleration engines (bitmask) to the firmware object
 * that is to be loaded onto them.
 */
struct adf_fw_config {
	u32 ae_mask;		/* bitmask of AEs running this object */
	enum adf_fw_objs obj;	/* firmware object to load on those AEs */
};

/*
 * In all configurations below: AEs 4-7 (0xF0) and AEs 0-3 (0xF) carry the
 * service firmware, and AE 8 (0x100) always runs the admin firmware.
 */

/* Crypto service: sym on AEs 4-7, asym on AEs 0-3 */
static const struct adf_fw_config adf_fw_cy_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_ASYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/* Compression-only service: dc on all service AEs */
static const struct adf_fw_config adf_fw_dc_config[] = {
	{0xF0, ADF_FW_DC_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/* Symmetric-crypto-only service */
static const struct adf_fw_config adf_fw_sym_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_SYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/* Asymmetric-crypto-only service */
static const struct adf_fw_config adf_fw_asym_config[] = {
	{0xF0, ADF_FW_ASYM_OBJ},
	{0xF, ADF_FW_ASYM_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/* Mixed asym + compression service */
static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{0xF0, ADF_FW_ASYM_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/* Mixed sym + compression service */
static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{0xF0, ADF_FW_SYM_OBJ},
	{0xF, ADF_FW_DC_OBJ},
	{0x100, ADF_FW_ADMIN_OBJ},
};

/*
 * All service configurations must have the same number of entries, since
 * uof_get_num_objs() reports ARRAY_SIZE(adf_fw_cy_config) regardless of the
 * service actually enabled.
 */
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));

/* Worker thread to service arbiter mappings */
static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
	0x5555555, 0x5555555, 0x5555555, 0x5555555,
	0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
	0x0
};

/* Arbiter mapping used when only the compression service is enabled */
static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
	0x0
};

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};

/*
 * Supported service configurations. Values double as indices into
 * dev_cfg_services[] and as the return value of get_service_enabled().
 */
enum dev_services {
	SVC_CY = 0,
	SVC_CY2,
	SVC_DC,
	SVC_SYM,
	SVC_ASYM,
	SVC_DC_ASYM,
	SVC_ASYM_DC,
	SVC_DC_SYM,
	SVC_SYM_DC,
};

/* Configuration strings accepted for ADF_SERVICES_ENABLED, by enum dev_services */
static const char *const dev_cfg_services[] = {
	[SVC_CY] = ADF_CFG_CY,
	[SVC_CY2] = ADF_CFG_ASYM_SYM,
	[SVC_DC] = ADF_CFG_DC,
	[SVC_SYM] = ADF_CFG_SYM,
	[SVC_ASYM] = ADF_CFG_ASYM,
	[SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
	[SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
	[SVC_DC_SYM] = ADF_CFG_DC_SYM,
	[SVC_SYM_DC] = ADF_CFG_SYM_DC,
};

/*
 * get_service_enabled() - look up the configured service for a device
 * @accel_dev: accelerator device
 *
 * Reads the ADF_SERVICES_ENABLED key from the device configuration and
 * matches it against dev_cfg_services[].
 *
 * Return: an enum dev_services value (>= 0) on success, a negative errno if
 * the key is missing, or the negative result of match_string() if the value
 * is not one of the supported strings.
 */
static int get_service_enabled(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			ADF_SERVICES_ENABLED " param not found\n");
		return ret;
	}

	ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
			   services);
	if (ret < 0)
		dev_err(&GET_DEV(accel_dev),
			"Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
			services);

	return ret;
}

/* Accelerator mask is fixed for this device generation */
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}

/* Fuses hold a disable mask; invert it to get the enabled-AE bitmask */
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 me_disable = self->fuses;

	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}

static u32 get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}

/* Number of enabled acceleration engines; 0 if hw data is absent/unset */
static u32 get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}

static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}

static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}

static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
198 */ 199 static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) 200 { 201 void __iomem *csr; 202 int i; 203 204 csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; 205 for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) 206 ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); 207 } 208 209 static u32 get_accel_cap(struct adf_accel_dev *accel_dev) 210 { 211 struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; 212 u32 capabilities_sym, capabilities_asym, capabilities_dc; 213 u32 fusectl1; 214 215 /* Read accelerator capabilities mask */ 216 pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); 217 218 capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | 219 ICP_ACCEL_CAPABILITIES_CIPHER | 220 ICP_ACCEL_CAPABILITIES_AUTHENTICATION | 221 ICP_ACCEL_CAPABILITIES_SHA3 | 222 ICP_ACCEL_CAPABILITIES_SHA3_EXT | 223 ICP_ACCEL_CAPABILITIES_HKDF | 224 ICP_ACCEL_CAPABILITIES_CHACHA_POLY | 225 ICP_ACCEL_CAPABILITIES_AESGCM_SPC | 226 ICP_ACCEL_CAPABILITIES_AES_V2; 227 228 /* A set bit in fusectl1 means the feature is OFF in this SKU */ 229 if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { 230 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; 231 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; 232 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 233 } 234 235 if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { 236 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; 237 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; 238 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; 239 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 240 } 241 242 if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { 243 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; 244 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; 245 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; 246 capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 247 } 248 249 capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | 250 
ICP_ACCEL_CAPABILITIES_CIPHER | 251 ICP_ACCEL_CAPABILITIES_ECEDMONT; 252 253 if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { 254 capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; 255 capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; 256 } 257 258 capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | 259 ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | 260 ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | 261 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; 262 263 if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { 264 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; 265 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; 266 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; 267 capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; 268 } 269 270 switch (get_service_enabled(accel_dev)) { 271 case SVC_CY: 272 case SVC_CY2: 273 return capabilities_sym | capabilities_asym; 274 case SVC_DC: 275 return capabilities_dc; 276 case SVC_SYM: 277 return capabilities_sym; 278 case SVC_ASYM: 279 return capabilities_asym; 280 case SVC_ASYM_DC: 281 case SVC_DC_ASYM: 282 return capabilities_asym | capabilities_dc; 283 case SVC_SYM_DC: 284 case SVC_DC_SYM: 285 return capabilities_sym | capabilities_dc; 286 default: 287 return 0; 288 } 289 } 290 291 static enum dev_sku_info get_sku(struct adf_hw_device_data *self) 292 { 293 return DEV_SKU_1; 294 } 295 296 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) 297 { 298 switch (get_service_enabled(accel_dev)) { 299 case SVC_DC: 300 return thrd_to_arb_map_dc; 301 default: 302 return default_thrd_to_arb_map; 303 } 304 } 305 306 static void get_arb_info(struct arb_info *arb_info) 307 { 308 arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG; 309 arb_info->arb_offset = ADF_4XXX_ARB_OFFSET; 310 arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; 311 } 312 313 static void get_admin_info(struct admin_info *admin_csrs_info) 314 { 315 admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; 
316 admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; 317 admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; 318 } 319 320 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) 321 { 322 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; 323 void __iomem *csr = misc_bar->virt_addr; 324 325 /* Enable all in errsou3 except VFLR notification on host */ 326 ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); 327 } 328 329 static void adf_enable_ints(struct adf_accel_dev *accel_dev) 330 { 331 void __iomem *addr; 332 333 addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; 334 335 /* Enable bundle interrupts */ 336 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); 337 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); 338 339 /* Enable misc interrupts */ 340 ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); 341 } 342 343 static int adf_init_device(struct adf_accel_dev *accel_dev) 344 { 345 void __iomem *addr; 346 u32 status; 347 u32 csr; 348 int ret; 349 350 addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; 351 352 /* Temporarily mask PM interrupt */ 353 csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); 354 csr |= ADF_GEN4_PM_SOU; 355 ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); 356 357 /* Set DRV_ACTIVE bit to power up the device */ 358 ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); 359 360 /* Poll status register to make sure the device is powered up */ 361 ret = read_poll_timeout(ADF_CSR_RD, status, 362 status & ADF_GEN4_PM_INIT_STATE, 363 ADF_GEN4_PM_POLL_DELAY_US, 364 ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, 365 ADF_GEN4_PM_STATUS); 366 if (ret) 367 dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); 368 369 return ret; 370 } 371 372 static u32 uof_get_num_objs(void) 373 { 374 return ARRAY_SIZE(adf_fw_cy_config); 375 } 376 377 static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, 378 const char * const fw_objs[], int num_objs) 379 { 
380 int id; 381 382 switch (get_service_enabled(accel_dev)) { 383 case SVC_CY: 384 case SVC_CY2: 385 id = adf_fw_cy_config[obj_num].obj; 386 break; 387 case SVC_DC: 388 id = adf_fw_dc_config[obj_num].obj; 389 break; 390 case SVC_SYM: 391 id = adf_fw_sym_config[obj_num].obj; 392 break; 393 case SVC_ASYM: 394 id = adf_fw_asym_config[obj_num].obj; 395 break; 396 case SVC_ASYM_DC: 397 case SVC_DC_ASYM: 398 id = adf_fw_asym_dc_config[obj_num].obj; 399 break; 400 case SVC_SYM_DC: 401 case SVC_DC_SYM: 402 id = adf_fw_sym_dc_config[obj_num].obj; 403 break; 404 default: 405 id = -EINVAL; 406 break; 407 } 408 409 if (id < 0 || id > num_objs) 410 return NULL; 411 412 return fw_objs[id]; 413 } 414 415 static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num) 416 { 417 int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs); 418 419 return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs); 420 } 421 422 static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num) 423 { 424 int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs); 425 426 return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); 427 } 428 429 static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) 430 { 431 switch (get_service_enabled(accel_dev)) { 432 case SVC_CY: 433 return adf_fw_cy_config[obj_num].ae_mask; 434 case SVC_DC: 435 return adf_fw_dc_config[obj_num].ae_mask; 436 case SVC_CY2: 437 return adf_fw_cy_config[obj_num].ae_mask; 438 case SVC_SYM: 439 return adf_fw_sym_config[obj_num].ae_mask; 440 case SVC_ASYM: 441 return adf_fw_asym_config[obj_num].ae_mask; 442 case SVC_ASYM_DC: 443 case SVC_DC_ASYM: 444 return adf_fw_asym_dc_config[obj_num].ae_mask; 445 case SVC_SYM_DC: 446 case SVC_DC_SYM: 447 return adf_fw_sym_dc_config[obj_num].ae_mask; 448 default: 449 return 0; 450 } 451 } 452 453 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) 454 { 455 hw_data->dev_class = &adf_4xxx_class; 456 
hw_data->instance_id = adf_4xxx_class.instances++; 457 hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; 458 hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF; 459 hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; 460 hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; 461 hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; 462 hw_data->num_logical_accel = 1; 463 hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; 464 hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; 465 hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; 466 hw_data->alloc_irq = adf_isr_resource_alloc; 467 hw_data->free_irq = adf_isr_resource_free; 468 hw_data->enable_error_correction = adf_enable_error_correction; 469 hw_data->get_accel_mask = get_accel_mask; 470 hw_data->get_ae_mask = get_ae_mask; 471 hw_data->get_num_accels = get_num_accels; 472 hw_data->get_num_aes = get_num_aes; 473 hw_data->get_sram_bar_id = get_sram_bar_id; 474 hw_data->get_etr_bar_id = get_etr_bar_id; 475 hw_data->get_misc_bar_id = get_misc_bar_id; 476 hw_data->get_arb_info = get_arb_info; 477 hw_data->get_admin_info = get_admin_info; 478 hw_data->get_accel_cap = get_accel_cap; 479 hw_data->get_sku = get_sku; 480 hw_data->init_admin_comms = adf_init_admin_comms; 481 hw_data->exit_admin_comms = adf_exit_admin_comms; 482 hw_data->send_admin_init = adf_send_admin_init; 483 hw_data->init_arb = adf_init_arb; 484 hw_data->exit_arb = adf_exit_arb; 485 hw_data->get_arb_mapping = adf_get_arbiter_mapping; 486 hw_data->enable_ints = adf_enable_ints; 487 hw_data->init_device = adf_init_device; 488 hw_data->reset_device = adf_reset_flr; 489 hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; 490 switch (dev_id) { 491 case ADF_402XX_PCI_DEVICE_ID: 492 hw_data->fw_name = ADF_402XX_FW; 493 hw_data->fw_mmp_name = ADF_402XX_MMP; 494 hw_data->uof_get_name = uof_get_name_402xx; 495 break; 496 497 default: 498 hw_data->fw_name = ADF_4XXX_FW; 499 hw_data->fw_mmp_name = ADF_4XXX_MMP; 500 hw_data->uof_get_name = uof_get_name_4xxx; 501 } 502 
hw_data->uof_get_num_objs = uof_get_num_objs; 503 hw_data->uof_get_ae_mask = uof_get_ae_mask; 504 hw_data->set_msix_rttable = set_msix_default_rttable; 505 hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 506 hw_data->disable_iov = adf_disable_sriov; 507 hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; 508 hw_data->enable_pm = adf_gen4_enable_pm; 509 hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; 510 hw_data->dev_config = adf_gen4_dev_config; 511 512 adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); 513 adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); 514 adf_gen4_init_dc_ops(&hw_data->dc_ops); 515 } 516 517 void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) 518 { 519 hw_data->dev_class->instances--; 520 } 521