// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}

	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}
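
/*
 * Write one firmware image into the program RAM of a processing engine's
 * instruction classification engine (ICE): hold the engine in reset, open
 * access to its program memory, copy the image in through the
 * classification RAM window, then close the access and release the engine
 * from reset.
 */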
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);

	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;

	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}

	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}
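
/*
 * Program every command descriptor ring (CDR): ring base address, descriptor
 * size and offset, descriptor fetch configuration and DMA cache attributes,
 * then clear any interrupt left pending on the ring.
 */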
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
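
/*
 * One-time engine bring-up: configure byte swapping and master control, set
 * up the data fetch (DFE) and data store (DSE) engines and the buffer
 * thresholds of each processing engine, enable the supported cipher and hash
 * functions, prepare the command and result descriptor rings and, on EIP197
 * engines, initialize the transform record cache and load the ICE firmware.
 */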
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of reset */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for the EIP97, but
		 * disabling it impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of reset */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* H/W capabilities selection */
		val = EIP197_FUNCTION_RSVD;
		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						      int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
		container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
		break;
	}
}

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;

	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	safexcel_configure(priv);

	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}

	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}

		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");