/*
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/ktime.h>

#include <crypto/algapi.h>
#include <crypto/des.h>

//#define HIFN_DEBUG

#ifdef HIFN_DEBUG
#define dprintk(f, a...)	printk(f, ##a)
#else
#define dprintk(f, a...)	do {} while (0)
#endif

static char hifn_pll_ref[sizeof("extNNN")] = "ext";
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
MODULE_PARM_DESC(hifn_pll_ref,
		 "PLL reference clock (pci[freq] or ext[freq], default ext)");

static atomic_t hifn_dev_number;

#define ACRYPTO_OP_DECRYPT	0
#define ACRYPTO_OP_ENCRYPT	1
#define ACRYPTO_OP_HMAC		2
#define ACRYPTO_OP_RNG		3

#define ACRYPTO_MODE_ECB	0
#define ACRYPTO_MODE_CBC	1
#define ACRYPTO_MODE_CFB	2
#define ACRYPTO_MODE_OFB	3

#define ACRYPTO_TYPE_AES_128	0
#define ACRYPTO_TYPE_AES_192	1
#define ACRYPTO_TYPE_AES_256	2
#define ACRYPTO_TYPE_3DES	3
#define ACRYPTO_TYPE_DES	4

#define PCI_VENDOR_ID_HIFN		0x13A3
#define PCI_DEVICE_ID_HIFN_7955		0x0020
#define PCI_DEVICE_ID_HIFN_7956		0x001d

/* I/O region sizes */

#define HIFN_BAR0_SIZE			0x1000
#define HIFN_BAR1_SIZE			0x2000
#define HIFN_BAR2_SIZE			0x8000

/* DMA registers */

#define HIFN_DMA_CRA			0x0C	/* DMA Command Ring Address */
#define HIFN_DMA_SDRA			0x1C	/* DMA Source Data Ring Address */
#define HIFN_DMA_RRA			0x2C	/* DMA Result Ring Address */
#define HIFN_DMA_DDRA			0x3C	/* DMA Destination Data Ring Address */
#define HIFN_DMA_STCTL			0x40	/* DMA Status and Control */
#define HIFN_DMA_INTREN			0x44	/* DMA Interrupt Enable */
#define HIFN_DMA_CFG1			0x48	/* DMA Configuration #1 */
#define HIFN_DMA_CFG2			0x6C	/* DMA Configuration #2 */
#define HIFN_CHIP_ID			0x98	/* Chip ID */

/*
 * Processing Unit Registers (offset from BASEREG0)
 */
#define HIFN_0_PUDATA		0x00	/* Processing Unit Data */
#define HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
#define HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
#define HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
#define HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
#define HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
#define HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
#define HIFN_0_FIFOCNFG		0x1c	/*
FIFO Configuration */ 103 #define HIFN_0_SPACESIZE 0x20 /* Register space size */ 104 105 /* Processing Unit Control Register (HIFN_0_PUCTRL) */ 106 #define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */ 107 #define HIFN_PUCTRL_STOP 0x0008 /* stop pu */ 108 #define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */ 109 #define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */ 110 #define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */ 111 112 /* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */ 113 #define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */ 114 #define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */ 115 #define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ 116 #define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ 117 #define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */ 118 #define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */ 119 #define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */ 120 #define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */ 121 #define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */ 122 #define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */ 123 124 /* Processing Unit Configuration Register (HIFN_0_PUCNFG) */ 125 #define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */ 126 #define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */ 127 #define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */ 128 #define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */ 129 #define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */ 130 #define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */ 131 #define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */ 132 #define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */ 133 #define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */ 134 #define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */ 135 #define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */ 136 #define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */ 137 #define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */ 138 #define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */ 139 #define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... 
*/ 140 #define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */ 141 #define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */ 142 #define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */ 143 #define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */ 144 #define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */ 145 #define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */ 146 #define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */ 147 #define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */ 148 149 /* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */ 150 #define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */ 151 #define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */ 152 #define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ 153 #define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ 154 #define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */ 155 #define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */ 156 #define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */ 157 #define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */ 158 #define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */ 159 #define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */ 160 161 /* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */ 162 #define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */ 163 #define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */ 164 #define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */ 165 #define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */ 166 #define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */ 167 #define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */ 168 #define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */ 169 #define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */ 170 #define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */ 171 #define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */ 172 #define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */ 173 #define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */ 174 #define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */ 175 #define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */ 176 #define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */ 177 #define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */ 178 #define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */ 179 180 /* FIFO Status Register (HIFN_0_FIFOSTAT) */ 181 #define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */ 182 #define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */ 183 184 /* FIFO Configuration Register (HIFN_0_FIFOCNFG) */ 185 #define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */ 186 187 /* 188 * DMA Interface Registers (offset from BASEREG1) 189 */ 190 #define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */ 191 #define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */ 192 #define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */ 193 #define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */ 194 #define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */ 195 #define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */ 196 #define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */ 197 #define HIFN_1_PLL 0x4c /* 795x: PLL config */ 198 #define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */ 199 #define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */ 200 #define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng 
data */
#define HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
#define HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
#define HIFN_1_REVID		0x98	/* Revision ID */
#define HIFN_1_UNLOCK_SECRET1	0xf4
#define HIFN_1_UNLOCK_SECRET2	0xfc
#define HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
#define HIFN_1_PUB_BASE		0x300	/* Public Base Address */
#define HIFN_1_PUB_OPLEN	0x304	/* Public Operand Length */
#define HIFN_1_PUB_OP		0x308	/* Public Operand */
#define HIFN_1_PUB_STATUS	0x30c	/* Public Status */
#define HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
#define HIFN_1_RNG_CONFIG	0x314	/* RNG config */
#define HIFN_1_RNG_DATA		0x318	/* RNG data */
#define HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
#define HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */

/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
#define HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destination Ring Control */
#define HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
#define HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
#define HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
#define HIFN_DMACSR_D_ABORT	0x20000000	/* Destination Ring PCI Abort */
#define HIFN_DMACSR_D_DONE	0x10000000	/* Destination Ring Done */
#define HIFN_DMACSR_D_LAST	0x08000000	/* Destination Ring Last */
#define HIFN_DMACSR_D_WAIT	0x04000000	/* Destination Ring Waiting */
#define HIFN_DMACSR_D_OVER	0x02000000	/* Destination Ring Overflow */
#define HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
#define HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
#define HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
#define HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
#define HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
#define HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
#define HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
#define HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
#define HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
#define HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
#define HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
#define HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
#define HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
#define HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
#define HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
#define HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
#define HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
#define HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
#define HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
#define HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
#define HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
#define HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
#define HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
#define HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
#define HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
#define HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
#define HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
#define HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */ 256 257 /* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */ 258 #define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */ 259 #define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */ 260 #define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */ 261 #define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */ 262 #define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */ 263 #define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */ 264 #define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */ 265 #define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */ 266 #define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */ 267 #define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */ 268 #define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */ 269 #define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */ 270 #define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */ 271 #define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */ 272 #define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */ 273 #define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */ 274 #define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */ 275 #define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */ 276 #define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */ 277 #define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */ 278 #define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */ 279 #define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */ 280 281 /* DMA Configuration Register (HIFN_1_DMA_CNFG) */ 282 #define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */ 283 #define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */ 284 #define HIFN_DMACNFG_UNLOCK 0x00000800 285 #define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */ 286 #define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */ 287 #define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */ 288 #define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */ 289 #define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */ 290 291 /* PLL configuration register */ 292 #define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */ 293 #define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */ 294 #define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */ 295 #define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */ 296 #define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */ 297 #define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */ 298 #define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */ 299 #define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */ 300 #define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */ 301 #define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */ 302 #define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */ 303 #define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */ 304 #define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */ 305 #define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */ 306 #define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */ 307 #define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */ 308 #define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 
9-12) */ 309 310 #define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */ 311 312 /* Public key reset register (HIFN_1_PUB_RESET) */ 313 #define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */ 314 315 /* Public base address register (HIFN_1_PUB_BASE) */ 316 #define HIFN_PUBBASE_ADDR 0x00003fff /* base address */ 317 318 /* Public operand length register (HIFN_1_PUB_OPLEN) */ 319 #define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */ 320 #define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */ 321 #define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */ 322 #define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */ 323 #define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */ 324 #define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */ 325 326 /* Public operation register (HIFN_1_PUB_OP) */ 327 #define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */ 328 #define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */ 329 #define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */ 330 #define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */ 331 #define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */ 332 #define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */ 333 #define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */ 334 #define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */ 335 #define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */ 336 #define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */ 337 #define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */ 338 #define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */ 339 #define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */ 340 #define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */ 341 #define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */ 342 #define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */ 343 #define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */ 344 #define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */ 345 #define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */ 346 #define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */ 347 348 /* Public status register (HIFN_1_PUB_STATUS) */ 349 #define HIFN_PUBSTS_DONE 0x00000001 /* operation done */ 350 #define HIFN_PUBSTS_CARRY 0x00000002 /* carry */ 351 352 /* Public interrupt enable register (HIFN_1_PUB_IEN) */ 353 #define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */ 354 355 /* Random number generator config register (HIFN_1_RNG_CONFIG) */ 356 #define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */ 357 358 #define HIFN_NAMESIZE 32 359 #define HIFN_MAX_RESULT_ORDER 5 360 361 #define HIFN_D_CMD_RSIZE 24*1 362 #define HIFN_D_SRC_RSIZE 80*1 363 #define HIFN_D_DST_RSIZE 80*1 364 #define HIFN_D_RES_RSIZE 24*1 365 366 #define HIFN_D_DST_DALIGN 4 367 368 #define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1) 369 370 #define AES_MIN_KEY_SIZE 16 371 #define AES_MAX_KEY_SIZE 32 372 373 #define HIFN_DES_KEY_LENGTH 8 374 #define HIFN_3DES_KEY_LENGTH 24 375 #define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE 376 #define HIFN_IV_LENGTH 8 377 #define HIFN_AES_IV_LENGTH 16 378 #define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH 379 380 #define HIFN_MAC_KEY_LENGTH 64 381 #define HIFN_MD5_LENGTH 16 382 #define HIFN_SHA1_LENGTH 20 383 #define HIFN_MAC_TRUNC_LENGTH 12 384 385 #define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260) 386 #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) 387 #define HIFN_USED_RESULT 12 388 389 struct hifn_desc 390 { 391 volatile __le32 l; 392 volatile __le32 p; 393 }; 394 395 struct hifn_dma { 396 struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; 397 struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; 398 struct hifn_desc 
dstr[HIFN_D_DST_RSIZE+1]; 399 struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; 400 401 u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; 402 u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; 403 404 /* 405 * Our current positions for insertion and removal from the descriptor 406 * rings. 407 */ 408 volatile int cmdi, srci, dsti, resi; 409 volatile int cmdu, srcu, dstu, resu; 410 int cmdk, srck, dstk, resk; 411 }; 412 413 #define HIFN_FLAG_CMD_BUSY (1<<0) 414 #define HIFN_FLAG_SRC_BUSY (1<<1) 415 #define HIFN_FLAG_DST_BUSY (1<<2) 416 #define HIFN_FLAG_RES_BUSY (1<<3) 417 #define HIFN_FLAG_OLD_KEY (1<<4) 418 419 #define HIFN_DEFAULT_ACTIVE_NUM 5 420 421 struct hifn_device 422 { 423 char name[HIFN_NAMESIZE]; 424 425 int irq; 426 427 struct pci_dev *pdev; 428 void __iomem *bar[3]; 429 430 void *desc_virt; 431 dma_addr_t desc_dma; 432 433 u32 dmareg; 434 435 void *sa[HIFN_D_RES_RSIZE]; 436 437 spinlock_t lock; 438 439 u32 flags; 440 int active, started; 441 struct delayed_work work; 442 unsigned long reset; 443 unsigned long success; 444 unsigned long prev_success; 445 446 u8 snum; 447 448 struct tasklet_struct tasklet; 449 450 struct crypto_queue queue; 451 struct list_head alg_list; 452 453 unsigned int pk_clk_freq; 454 455 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG 456 unsigned int rng_wait_time; 457 ktime_t rngtime; 458 struct hwrng rng; 459 #endif 460 }; 461 462 #define HIFN_D_LENGTH 0x0000ffff 463 #define HIFN_D_NOINVALID 0x01000000 464 #define HIFN_D_MASKDONEIRQ 0x02000000 465 #define HIFN_D_DESTOVER 0x04000000 466 #define HIFN_D_OVER 0x08000000 467 #define HIFN_D_LAST 0x20000000 468 #define HIFN_D_JUMP 0x40000000 469 #define HIFN_D_VALID 0x80000000 470 471 struct hifn_base_command 472 { 473 volatile __le16 masks; 474 volatile __le16 session_num; 475 volatile __le16 total_source_count; 476 volatile __le16 total_dest_count; 477 }; 478 479 #define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */ 480 #define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */ 481 #define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */ 482 #define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */ 483 #define HIFN_BASE_CMD_DECODE 0x2000 484 #define HIFN_BASE_CMD_SRCLEN_M 0xc000 485 #define HIFN_BASE_CMD_SRCLEN_S 14 486 #define HIFN_BASE_CMD_DSTLEN_M 0x3000 487 #define HIFN_BASE_CMD_DSTLEN_S 12 488 #define HIFN_BASE_CMD_LENMASK_HI 0x30000 489 #define HIFN_BASE_CMD_LENMASK_LO 0x0ffff 490 491 /* 492 * Structure to help build up the command data structure. 
493 */ 494 struct hifn_crypt_command 495 { 496 volatile __le16 masks; 497 volatile __le16 header_skip; 498 volatile __le16 source_count; 499 volatile __le16 reserved; 500 }; 501 502 #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ 503 #define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */ 504 #define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */ 505 #define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */ 506 #define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */ 507 #define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */ 508 #define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */ 509 #define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */ 510 #define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */ 511 #define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */ 512 #define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */ 513 #define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */ 514 #define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */ 515 #define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */ 516 #define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */ 517 #define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */ 518 #define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */ 519 #define HIFN_CRYPT_CMD_SRCLEN_M 0xc000 520 #define HIFN_CRYPT_CMD_SRCLEN_S 14 521 522 /* 523 * Structure to help build up the command data structure. 524 */ 525 struct hifn_mac_command 526 { 527 volatile __le16 masks; 528 volatile __le16 header_skip; 529 volatile __le16 source_count; 530 volatile __le16 reserved; 531 }; 532 533 #define HIFN_MAC_CMD_ALG_MASK 0x0001 534 #define HIFN_MAC_CMD_ALG_SHA1 0x0000 535 #define HIFN_MAC_CMD_ALG_MD5 0x0001 536 #define HIFN_MAC_CMD_MODE_MASK 0x000c 537 #define HIFN_MAC_CMD_MODE_HMAC 0x0000 538 #define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004 539 #define HIFN_MAC_CMD_MODE_HASH 0x0008 540 #define HIFN_MAC_CMD_MODE_FULL 0x0004 541 #define HIFN_MAC_CMD_TRUNC 0x0010 542 #define HIFN_MAC_CMD_RESULT 0x0020 543 #define HIFN_MAC_CMD_APPEND 0x0040 544 #define HIFN_MAC_CMD_SRCLEN_M 0xc000 545 #define HIFN_MAC_CMD_SRCLEN_S 14 546 547 /* 548 * MAC POS IPsec initiates authentication after encryption on encodes 549 * and before decryption on decodes. 
550 */ 551 #define HIFN_MAC_CMD_POS_IPSEC 0x0200 552 #define HIFN_MAC_CMD_NEW_KEY 0x0800 553 554 struct hifn_comp_command 555 { 556 volatile __le16 masks; 557 volatile __le16 header_skip; 558 volatile __le16 source_count; 559 volatile __le16 reserved; 560 }; 561 562 #define HIFN_COMP_CMD_SRCLEN_M 0xc000 563 #define HIFN_COMP_CMD_SRCLEN_S 14 564 #define HIFN_COMP_CMD_ONE 0x0100 /* must be one */ 565 #define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */ 566 #define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */ 567 #define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */ 568 #define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */ 569 #define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */ 570 #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ 571 #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ 572 573 struct hifn_base_result 574 { 575 volatile __le16 flags; 576 volatile __le16 session; 577 volatile __le16 src_cnt; /* 15:0 of source count */ 578 volatile __le16 dst_cnt; /* 15:0 of dest count */ 579 }; 580 581 #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ 582 #define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */ 583 #define HIFN_BASE_RES_SRCLEN_S 14 584 #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ 585 #define HIFN_BASE_RES_DSTLEN_S 12 586 587 struct hifn_comp_result 588 { 589 volatile __le16 flags; 590 volatile __le16 crc; 591 }; 592 593 #define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */ 594 #define HIFN_COMP_RES_LCB_S 8 595 #define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */ 596 #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ 597 #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ 598 599 struct hifn_mac_result 600 { 601 volatile __le16 flags; 602 volatile __le16 reserved; 603 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ 604 }; 605 606 #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ 607 #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ 608 609 struct hifn_crypt_result 610 { 611 volatile __le16 flags; 612 volatile __le16 reserved; 613 }; 614 615 #define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */ 616 617 #ifndef HIFN_POLL_FREQUENCY 618 #define HIFN_POLL_FREQUENCY 0x1 619 #endif 620 621 #ifndef HIFN_POLL_SCALAR 622 #define HIFN_POLL_SCALAR 0x0 623 #endif 624 625 #define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ 626 #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ 627 628 struct hifn_crypto_alg 629 { 630 struct list_head entry; 631 struct crypto_alg alg; 632 struct hifn_device *dev; 633 }; 634 635 #define ASYNC_SCATTERLIST_CACHE 16 636 637 #define ASYNC_FLAGS_MISALIGNED (1<<0) 638 639 struct hifn_cipher_walk 640 { 641 struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; 642 u32 flags; 643 int num; 644 }; 645 646 struct hifn_context 647 { 648 u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; 649 struct hifn_device *dev; 650 unsigned int keysize; 651 }; 652 653 struct hifn_request_context 654 { 655 u8 *iv; 656 unsigned int ivsize; 657 u8 op, type, mode, unused; 658 struct hifn_cipher_walk walk; 659 }; 660 661 #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg) 662 663 static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) 664 { 665 u32 ret; 666 667 ret = readl(dev->bar[0] + reg); 668 669 return ret; 670 } 671 672 static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) 673 { 674 u32 ret; 675 676 ret = readl(dev->bar[1] + reg); 677 678 return ret; 679 } 680 681 static inline void 
hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
{
	writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
}

static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
{
	writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
}

static void hifn_wait_puc(struct hifn_device *dev)
{
	int i;
	u32 ret;

	for (i = 10000; i > 0; --i) {
		ret = hifn_read_0(dev, HIFN_0_PUCTRL);
		if (!(ret & HIFN_PUCTRL_RESET))
			break;

		udelay(1);
	}

	if (!i)
		dprintk("%s: Failed to reset PUC unit.\n", dev->name);
}

static void hifn_reset_puc(struct hifn_device *dev)
{
	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_wait_puc(dev);
}

static void hifn_stop_device(struct hifn_device *dev)
{
	hifn_write_1(dev, HIFN_1_DMA_CSR,
		HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
		HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
	hifn_write_0(dev, HIFN_0_PUIER, 0);
	hifn_write_1(dev, HIFN_1_DMA_IER, 0);
}

static void hifn_reset_dma(struct hifn_device *dev, int full)
{
	hifn_stop_device(dev);

	/*
	 * Setting poll frequency and others to 0.
	 */
	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	mdelay(1);

	/*
	 * Reset DMA.
	 */
	if (full) {
		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		mdelay(1);
	} else {
		hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
				HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(dev);
	}

	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_reset_puc(dev);
}

static u32 hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u32 v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

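/*
 * Illustrative note (not part of the driver): hifn_next_signature() above
 * behaves like a linear feedback shift register - each iteration takes the
 * parity of the bits selected by the 0x80080125 tap mask and shifts it into
 * bit 0.  A single step, written out with a hypothetical helper name, looks
 * like this:
 */
#if 0
static u32 hifn_signature_step_sketch(u32 a)
{
	u32 v = a & 0x80080125;		/* select the tap bits */

	v ^= v >> 16;			/* fold the word down... */
	v ^= v >> 8;
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;			/* ...until bit 0 holds the parity */

	return (v & 1) ^ (a << 1);	/* shift and feed the parity back in */
}
#endif
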
static struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_ID_HIFN,
		PCI_DEVICE_ID_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
	{
		PCI_VENDOR_ID_HIFN,
		PCI_DEVICE_ID_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}
};

#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
static int hifn_rng_data_present(struct hwrng *rng, int wait)
{
	struct hifn_device *dev = (struct hifn_device *)rng->priv;
	s64 nsec;

	nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
	nsec -= dev->rng_wait_time;
	if (nsec <= 0)
		return 1;
	if (!wait)
		return 0;
	ndelay(nsec);
	return 1;
}

static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct hifn_device *dev = (struct hifn_device *)rng->priv;

	*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
	dev->rngtime = ktime_get();
	return 4;
}

static int hifn_register_rng(struct hifn_device *dev)
{
	/*
	 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
	 */
	dev->rng_wait_time	= DIV_ROUND_UP_ULL(NSEC_PER_SEC,
						   dev->pk_clk_freq) * 256;

	dev->rng.name		= dev->name;
	dev->rng.data_present	= hifn_rng_data_present;
	dev->rng.data_read	= hifn_rng_data_read;
	dev->rng.priv		= (unsigned long)dev;

	return hwrng_register(&dev->rng);
}

static void hifn_unregister_rng(struct hifn_device *dev)
{
	hwrng_unregister(&dev->rng);
}
#else
#define hifn_register_rng(dev)		0
#define hifn_unregister_rng(dev)
#endif

static int hifn_init_pubrng(struct hifn_device *dev)
{
	int i;

	hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
			HIFN_PUBRST_RESET);

	for (i = 100; i > 0; --i) {
		mdelay(1);

		if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
			break;
	}

	if (!i)
		dprintk("Chip %s: Failed to initialise public key engine.\n",
				dev->name);
	else {
		hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		dev->dmareg |= HIFN_DMAIER_PUBDONE;
		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);

		dprintk("Chip %s: Public key engine has been successfully "
				"initialised.\n", dev->name);
	}

	/*
	 * Enable RNG engine.
	 */

	hifn_write_1(dev, HIFN_1_RNG_CONFIG,
			hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
	dprintk("Chip %s: RNG engine has been successfully initialised.\n",
			dev->name);

#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
	/* First value must be discarded */
	hifn_read_1(dev, HIFN_1_RNG_DATA);
	dev->rngtime = ktime_get();
#endif
	return 0;
}

static int hifn_enable_crypto(struct hifn_device *dev)
{
	u32 dmacfg, addr;
	char *offtbl = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
		if (pci2id[i].pci_vendor == dev->pdev->vendor &&
		    pci2id[i].pci_prod == dev->pdev->device) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
		dprintk("Chip %s: Unknown card!\n", dev->name);
		return -ENODEV;
	}

	dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);

	hifn_write_1(dev, HIFN_1_DMA_CNFG,
			HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	mdelay(1);
	addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
	mdelay(1);
	hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
	mdelay(1);

	for (i = 0; i < 12; ++i) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);

		mdelay(1);
	}
	hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);

	dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));

	return 0;
}

static void hifn_init_dma(struct hifn_device *dev)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	u32 dptr = dev->desc_dma;
	int i;

	for (i = 0; i < HIFN_D_CMD_RSIZE; ++i)
		dma->cmdr[i].p = __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
		dma->resr[i].p = __cpu_to_le32(dptr +
				offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * Setup LAST descriptors.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
			offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
			offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
			offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
			offsetof(struct hifn_dma, resr[0]));

	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}

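/*
 * Illustrative sketch (not part of the driver): hifn_init_dma() above gives
 * every ring HIFN_D_*_RSIZE usable descriptors plus one extra descriptor
 * whose pointer leads back to entry 0, so the hardware follows the ring via
 * a JUMP descriptor instead of wrapping addresses itself.  On the software
 * side the index advance then reduces to the hypothetical helper below.
 */
#if 0
static int hifn_ring_advance_sketch(int idx, int ring_size)
{
	/* step to the next slot, wrapping past the last usable entry */
	return (idx + 1 == ring_size) ? 0 : idx + 1;
}
#endif
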
/*
 * Initialize the PLL. We need to know the frequency of the reference clock
 * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
 * allows us to operate without the risk of overclocking the chip. If it
 * actually uses 33MHz, the chip will operate at half the speed; this can be
 * overridden by specifying the frequency as a module parameter (pci33).
 *
 * Unfortunately the PCI clock is not very suitable since the HIFN needs a
 * stable clock and the PCI clock frequency may vary, so the default is the
 * external clock. There is no way to find out its frequency, so we default
 * to 66MHz since, according to Mike Ham of HiFn, almost every board in
 * existence has an external crystal populated at 66MHz.
 */
static void hifn_init_pll(struct hifn_device *dev)
{
	unsigned int freq, m;
	u32 pllcfg;

	pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;

	if (strncmp(hifn_pll_ref, "ext", 3) == 0)
		pllcfg |= HIFN_PLL_REF_CLK_PLL;
	else
		pllcfg |= HIFN_PLL_REF_CLK_HBI;

	if (hifn_pll_ref[3] != '\0')
		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
	else {
		freq = 66;
		printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
				 "override with hifn_pll_ref=%.3s<frequency>\n",
		       freq, hifn_pll_ref);
	}

	m = HIFN_PLL_FCK_MAX / freq;

	pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (m <= 8)
		pllcfg |= HIFN_PLL_IS_1_8;
	else
		pllcfg |= HIFN_PLL_IS_9_12;

	/* Select clock source and enable clock bypass */
	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);

	/* Let the chip lock to the input clock */
	mdelay(10);

	/* Disable clock bypass */
	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
		     HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);

	/* Switch the engines to the PLL */
	hifn_write_1(dev, HIFN_1_PLL, pllcfg |
		     HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);

	/*
	 * The Fpk_clk runs at half the total speed. Its frequency is needed to
	 * calculate the minimum time between two reads of the rng. Since 33MHz
	 * is actually 33.333... we overestimate the frequency here, resulting
	 * in slightly larger intervals.
	 */
	dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
}

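/*
 * Worked example of the computation above (illustrative only), for the
 * default "ext" reference clock with no explicit frequency: freq = 66, so
 * m = 266 / 66 = 4, the multiplier field becomes (4 / 2 - 1) = 1 (i.e. a
 * x4 multiplier, HIFN_PLL_ND_MULT_4), the charge pump setting stays at
 * HIFN_PLL_IS_1_8, and pk_clk_freq = 1000000 * (66 + 1) * 4 / 2 = 134 MHz.
 */
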
static void hifn_init_registers(struct hifn_device *dev)
{
	u32 dptr = dev->desc_dma;

	/* Initialization magic... */
	hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
			offsetof(struct hifn_dma, cmdr[0]));
	hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
			offsetof(struct hifn_dma, srcr[0]));
	hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
			offsetof(struct hifn_dma, dstr[0]));
	hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
			offsetof(struct hifn_dma, resr[0]));

	mdelay(2);
#if 0
	hifn_write_1(dev, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    HIFN_DMACSR_PUBDONE);
#else
	hifn_write_1(dev, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    HIFN_DMACSR_PUBDONE);
#endif
	hifn_read_1(dev, HIFN_1_DMA_CSR);

	dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE;
	dev->dmareg &= ~HIFN_DMAIER_C_WAIT;

	hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	hifn_read_1(dev, HIFN_1_DMA_IER);
#if 0
	hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    HIFN_PUCNFG_DRAM);
#else
	hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
#endif
	hifn_init_pll(dev);

	hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
		unsigned dlen, unsigned slen, u16 mask, u8 snum)
{
	struct hifn_base_command *base_cmd;
	u8 *buf_pos = buf;

	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = __cpu_to_le16(mask);
	base_cmd->total_source_count =
		__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count =
		__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);

	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = __cpu_to_le16(snum |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));

	return sizeof(struct hifn_base_command);
}

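/*
 * Illustrative example (not part of the driver) of the 18-bit length split
 * performed by hifn_setup_base_command() above: for a hypothetical
 * slen = 0x2abcd the low 16 bits (0xabcd) go into total_source_count and
 * the top two bits (0x2) are folded into session_num as
 * (0x2 << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M == 0x8000.
 */
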
static int hifn_setup_crypto_command(struct hifn_device *dev,
		u8 *buf, unsigned dlen, unsigned slen,
		u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	struct hifn_crypt_command *cry_cmd;
	u8 *buf_pos = buf;
	u16 cmd_len;

	cry_cmd = (struct hifn_crypt_command *)buf_pos;

	cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
	dlen >>= 16;
	cry_cmd->masks = __cpu_to_le16(mode |
			((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
			 HIFN_CRYPT_CMD_SRCLEN_M));
	cry_cmd->header_skip = 0;
	cry_cmd->reserved = 0;

	buf_pos += sizeof(struct hifn_crypt_command);

	dma->cmdu++;
	if (dma->cmdu > 1) {
		dev->dmareg |= HIFN_DMAIER_C_WAIT;
		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	}

	if (keylen) {
		memcpy(buf_pos, key, keylen);
		buf_pos += keylen;
	}
	if (ivsize) {
		memcpy(buf_pos, iv, ivsize);
		buf_pos += ivsize;
	}

	cmd_len = buf_pos - buf;

	return cmd_len;
}

static int hifn_setup_cmd_desc(struct hifn_device *dev,
		struct hifn_context *ctx, struct hifn_request_context *rctx,
		void *priv, unsigned int nbytes)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int cmd_len, sa_idx;
	u8 *buf, *buf_pos;
	u16 mask;

	sa_idx = dma->cmdi;
	buf_pos = buf = dma->command_bufs[dma->cmdi];

	mask = 0;
	switch (rctx->op) {
	case ACRYPTO_OP_DECRYPT:
		mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
		break;
	case ACRYPTO_OP_ENCRYPT:
		mask = HIFN_BASE_CMD_CRYPT;
		break;
	case ACRYPTO_OP_HMAC:
		mask = HIFN_BASE_CMD_MAC;
		break;
	default:
		goto err_out;
	}

	buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
			nbytes, mask, dev->snum);

	if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
		u16 md = 0;

		if (ctx->keysize)
			md |= HIFN_CRYPT_CMD_NEW_KEY;
		if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
			md |= HIFN_CRYPT_CMD_NEW_IV;

		switch (rctx->mode) {
		case ACRYPTO_MODE_ECB:
			md |= HIFN_CRYPT_CMD_MODE_ECB;
			break;
		case ACRYPTO_MODE_CBC:
			md |= HIFN_CRYPT_CMD_MODE_CBC;
			break;
		case ACRYPTO_MODE_CFB:
			md |= HIFN_CRYPT_CMD_MODE_CFB;
			break;
		case ACRYPTO_MODE_OFB:
			md |= HIFN_CRYPT_CMD_MODE_OFB;
			break;
		default:
			goto err_out;
		}

		switch (rctx->type) {
		case ACRYPTO_TYPE_AES_128:
			if (ctx->keysize != 16)
				goto err_out;
			md |= HIFN_CRYPT_CMD_KSZ_128 |
				HIFN_CRYPT_CMD_ALG_AES;
			break;
		case ACRYPTO_TYPE_AES_192:
			if (ctx->keysize != 24)
				goto err_out;
			md |= HIFN_CRYPT_CMD_KSZ_192 |
				HIFN_CRYPT_CMD_ALG_AES;
			break;
		case ACRYPTO_TYPE_AES_256:
			if (ctx->keysize != 32)
				goto err_out;
			md |= HIFN_CRYPT_CMD_KSZ_256 |
				HIFN_CRYPT_CMD_ALG_AES;
			break;
		case ACRYPTO_TYPE_3DES:
			if (ctx->keysize != 24)
				goto err_out;
			md |= HIFN_CRYPT_CMD_ALG_3DES;
			break;
		case ACRYPTO_TYPE_DES:
			if (ctx->keysize != 8)
				goto err_out;
			md |= HIFN_CRYPT_CMD_ALG_DES;
			break;
		default:
			goto err_out;
		}

		buf_pos += hifn_setup_crypto_command(dev, buf_pos,
				nbytes, nbytes, ctx->key, ctx->keysize,
				rctx->iv,
rctx->ivsize, md); 1254 } 1255 1256 dev->sa[sa_idx] = priv; 1257 dev->started++; 1258 1259 cmd_len = buf_pos - buf; 1260 dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | 1261 HIFN_D_LAST | HIFN_D_MASKDONEIRQ); 1262 1263 if (++dma->cmdi == HIFN_D_CMD_RSIZE) { 1264 dma->cmdr[dma->cmdi].l = __cpu_to_le32( 1265 HIFN_D_VALID | HIFN_D_LAST | 1266 HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); 1267 dma->cmdi = 0; 1268 } else 1269 dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); 1270 1271 if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { 1272 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 1273 dev->flags |= HIFN_FLAG_CMD_BUSY; 1274 } 1275 return 0; 1276 1277 err_out: 1278 return -EINVAL; 1279 } 1280 1281 static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page, 1282 unsigned int offset, unsigned int size, int last) 1283 { 1284 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; 1285 int idx; 1286 dma_addr_t addr; 1287 1288 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE); 1289 1290 idx = dma->srci; 1291 1292 dma->srcr[idx].p = __cpu_to_le32(addr); 1293 dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | 1294 HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); 1295 1296 if (++idx == HIFN_D_SRC_RSIZE) { 1297 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | 1298 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | 1299 (last ? HIFN_D_LAST : 0)); 1300 idx = 0; 1301 } 1302 1303 dma->srci = idx; 1304 dma->srcu++; 1305 1306 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) { 1307 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 1308 dev->flags |= HIFN_FLAG_SRC_BUSY; 1309 } 1310 1311 return size; 1312 } 1313 1314 static void hifn_setup_res_desc(struct hifn_device *dev) 1315 { 1316 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; 1317 1318 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT | 1319 HIFN_D_VALID | HIFN_D_LAST); 1320 /* 1321 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID | 1322 * HIFN_D_LAST); 1323 */ 1324 1325 if (++dma->resi == HIFN_D_RES_RSIZE) { 1326 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID | 1327 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST); 1328 dma->resi = 0; 1329 } 1330 1331 dma->resu++; 1332 1333 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) { 1334 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 1335 dev->flags |= HIFN_FLAG_RES_BUSY; 1336 } 1337 } 1338 1339 static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page, 1340 unsigned offset, unsigned size, int last) 1341 { 1342 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; 1343 int idx; 1344 dma_addr_t addr; 1345 1346 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE); 1347 1348 idx = dma->dsti; 1349 dma->dstr[idx].p = __cpu_to_le32(addr); 1350 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | 1351 HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0)); 1352 1353 if (++idx == HIFN_D_DST_RSIZE) { 1354 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | 1355 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | 1356 (last ? 
HIFN_D_LAST : 0)); 1357 idx = 0; 1358 } 1359 dma->dsti = idx; 1360 dma->dstu++; 1361 1362 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) { 1363 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 1364 dev->flags |= HIFN_FLAG_DST_BUSY; 1365 } 1366 } 1367 1368 static int hifn_setup_dma(struct hifn_device *dev, 1369 struct hifn_context *ctx, struct hifn_request_context *rctx, 1370 struct scatterlist *src, struct scatterlist *dst, 1371 unsigned int nbytes, void *priv) 1372 { 1373 struct scatterlist *t; 1374 struct page *spage, *dpage; 1375 unsigned int soff, doff; 1376 unsigned int n, len; 1377 1378 n = nbytes; 1379 while (n) { 1380 spage = sg_page(src); 1381 soff = src->offset; 1382 len = min(src->length, n); 1383 1384 hifn_setup_src_desc(dev, spage, soff, len, n - len == 0); 1385 1386 src++; 1387 n -= len; 1388 } 1389 1390 t = &rctx->walk.cache[0]; 1391 n = nbytes; 1392 while (n) { 1393 if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) { 1394 BUG_ON(!sg_page(t)); 1395 dpage = sg_page(t); 1396 doff = 0; 1397 len = t->length; 1398 } else { 1399 BUG_ON(!sg_page(dst)); 1400 dpage = sg_page(dst); 1401 doff = dst->offset; 1402 len = dst->length; 1403 } 1404 len = min(len, n); 1405 1406 hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0); 1407 1408 dst++; 1409 t++; 1410 n -= len; 1411 } 1412 1413 hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes); 1414 hifn_setup_res_desc(dev); 1415 return 0; 1416 } 1417 1418 static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, 1419 int num, gfp_t gfp_flags) 1420 { 1421 int i; 1422 1423 num = min(ASYNC_SCATTERLIST_CACHE, num); 1424 sg_init_table(w->cache, num); 1425 1426 w->num = 0; 1427 for (i=0; i<num; ++i) { 1428 struct page *page = alloc_page(gfp_flags); 1429 struct scatterlist *s; 1430 1431 if (!page) 1432 break; 1433 1434 s = &w->cache[i]; 1435 1436 sg_set_page(s, page, PAGE_SIZE, 0); 1437 w->num++; 1438 } 1439 1440 return i; 1441 } 1442 1443 static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w) 1444 { 1445 int i; 1446 1447 for (i=0; i<w->num; ++i) { 1448 struct scatterlist *s = &w->cache[i]; 1449 1450 __free_page(sg_page(s)); 1451 1452 s->length = 0; 1453 } 1454 1455 w->num = 0; 1456 } 1457 1458 static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, 1459 unsigned int size, unsigned int *nbytesp) 1460 { 1461 unsigned int copy, drest = *drestp, nbytes = *nbytesp; 1462 int idx = 0; 1463 1464 if (drest < size || size > nbytes) 1465 return -EINVAL; 1466 1467 while (size) { 1468 copy = min3(drest, size, dst->length); 1469 1470 size -= copy; 1471 drest -= copy; 1472 nbytes -= copy; 1473 1474 dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", 1475 __func__, copy, size, drest, nbytes); 1476 1477 dst++; 1478 idx++; 1479 } 1480 1481 *nbytesp = nbytes; 1482 *drestp = drest; 1483 1484 return idx; 1485 } 1486 1487 static int hifn_cipher_walk(struct ablkcipher_request *req, 1488 struct hifn_cipher_walk *w) 1489 { 1490 struct scatterlist *dst, *t; 1491 unsigned int nbytes = req->nbytes, offset, copy, diff; 1492 int idx, tidx, err; 1493 1494 tidx = idx = 0; 1495 offset = 0; 1496 while (nbytes) { 1497 if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED)) 1498 return -EINVAL; 1499 1500 dst = &req->dst[idx]; 1501 1502 dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", 1503 __func__, dst->length, dst->offset, offset, nbytes); 1504 1505 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || 1506 !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || 1507 offset) { 1508 unsigned slen = min(dst->length - offset, nbytes); 
			unsigned dlen = PAGE_SIZE;

			t = &w->cache[idx];

			err = ablkcipher_add(&dlen, dst, slen, &nbytes);
			if (err < 0)
				return err;

			idx += err;

			copy = slen & ~(HIFN_D_DST_DALIGN - 1);
			diff = slen & (HIFN_D_DST_DALIGN - 1);

			if (dlen < nbytes) {
				/*
				 * Destination page does not have enough space
				 * to put there additional blocksized chunk,
				 * so we mark that page as containing only
				 * blocksize aligned chunks:
				 *	t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
				 * and increase number of bytes to be processed
				 * in next chunk:
				 *	nbytes += diff;
				 */
				nbytes += diff;

				/*
				 * Temporary of course...
				 * Kick author if you will catch this one.
				 */
				printk(KERN_ERR "%s: dlen: %u, nbytes: %u, "
					"slen: %u, offset: %u.\n",
					__func__, dlen, nbytes, slen, offset);
				printk(KERN_ERR "%s: please contact the author to fix "
					"this issue; generally you should not hit "
					"this path under any condition, but who "
					"knows how you used the crypto code.\n"
					"Thank you.\n", __func__);
				BUG();
			} else {
				copy += diff + nbytes;

				dst = &req->dst[idx];

				err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
				if (err < 0)
					return err;

				idx += err;
			}

			t->length = copy;
			t->offset = offset;
		} else {
			nbytes -= min(dst->length, nbytes);
			idx++;
		}

		tidx++;
	}

	return tidx;
}

static int hifn_setup_session(struct ablkcipher_request *req)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
	struct hifn_device *dev = ctx->dev;
	unsigned long dlen, flags;
	unsigned int nbytes = req->nbytes, idx = 0;
	int err = -EINVAL, sg_num;
	struct scatterlist *dst;

	if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
		goto err_out_exit;

	rctx->walk.flags = 0;

	while (nbytes) {
		dst = &req->dst[idx];
		dlen = min(dst->length, nbytes);

		if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
		    !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
			rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;

		nbytes -= dlen;
		idx++;
	}

	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
		err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	sg_num = hifn_cipher_walk(req, &rctx->walk);
	if (sg_num < 0) {
		err = sg_num;
		goto err_out_exit;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
		err = -EAGAIN;
		goto err_out;
	}

	err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
	if (err)
		goto err_out;

	dev->snum++;

	dev->active = HIFN_DEFAULT_ACTIVE_NUM;
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;

err_out:
	spin_unlock_irqrestore(&dev->lock, flags);
err_out_exit:
	if (err) {
		printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
			"type: %u, err: %d.\n",
			dev->name, rctx->iv, rctx->ivsize,
			ctx->key, ctx->keysize,
			rctx->mode, rctx->op, rctx->type, err);
	}

	return err;
}

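/*
 * Illustrative sketch (not part of the driver): the scan in
 * hifn_setup_session() above only has to decide, per destination segment,
 * whether the engine can DMA into it directly or whether the request must
 * go through the bounce pages of the cipher walk.  With a hypothetical
 * helper name, that test is simply:
 */
#if 0
static bool hifn_dst_is_aligned_sketch(struct scatterlist *dst,
				       unsigned int dlen)
{
	/* both the offset and the usable length must be 4-byte aligned */
	return IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) &&
	       IS_ALIGNED(dlen, HIFN_D_DST_DALIGN);
}
#endif
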
static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
{
	int n, err;
	u8 src[16];
	struct hifn_context ctx;
	struct hifn_request_context rctx;
	u8 fips_aes_ecb_from_zero[16] = {
		0x66, 0xE9, 0x4B, 0xD4,
		0xEF, 0x8A, 0x2C, 0x3B,
		0x88, 0x4C, 0xFA, 0x59,
		0xCA, 0x34, 0x2B, 0x2E};
	struct scatterlist sg;

	memset(src, 0, sizeof(src));
	memset(ctx.key, 0, sizeof(ctx.key));

	ctx.dev = dev;
	ctx.keysize = 16;
	rctx.ivsize = 0;
	rctx.iv = NULL;
	rctx.op = (encdec) ? ACRYPTO_OP_ENCRYPT : ACRYPTO_OP_DECRYPT;
	rctx.mode = ACRYPTO_MODE_ECB;
	rctx.type = ACRYPTO_TYPE_AES_128;
	rctx.walk.cache[0].length = 0;

	sg_init_one(&sg, &src, sizeof(src));

	err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
	if (err)
		goto err_out;

	dev->started = 0;
	msleep(200);

	dprintk("%s: decoded: ", dev->name);
	for (n = 0; n < sizeof(src); ++n)
		dprintk("%02x ", src[n]);
	dprintk("\n");
	dprintk("%s: FIPS : ", dev->name);
	for (n = 0; n < sizeof(fips_aes_ecb_from_zero); ++n)
		dprintk("%02x ", fips_aes_ecb_from_zero[n]);
	dprintk("\n");

	if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
		printk(KERN_INFO "%s: AES 128 ECB test has passed successfully.\n",
				dev->name);
		return 0;
	}

err_out:
	printk(KERN_INFO "%s: AES 128 ECB test has failed.\n", dev->name);
	return -1;
}

static int hifn_start_device(struct hifn_device *dev)
{
	int err;

	dev->started = dev->active = 0;
	hifn_reset_dma(dev, 1);

	err = hifn_enable_crypto(dev);
	if (err)
		return err;

	hifn_reset_puc(dev);

	hifn_init_dma(dev);

	hifn_init_registers(dev);

	hifn_init_pubrng(dev);

	return 0;
}

static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
		struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
	unsigned int srest = *srestp, nbytes = *nbytesp, copy;
	void *daddr;
	int idx = 0;

	if (srest < size || size > nbytes)
		return -EINVAL;

	while (size) {
		copy = min3(srest, dst->length, size);

		daddr = kmap_atomic(sg_page(dst));
		memcpy(daddr + dst->offset + offset, saddr, copy);
		kunmap_atomic(daddr);

		nbytes -= copy;
		size -= copy;
		srest -= copy;
		saddr += copy;
		offset = 0;

		dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
				__func__, copy, size, srest, nbytes);

		dst++;
		idx++;
	}

	*nbytesp = nbytes;
	*srestp = srest;

	return idx;
}

static inline void hifn_complete_sa(struct hifn_device *dev, int i)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->sa[i] = NULL;
	dev->started--;
	if (dev->started < 0)
		printk("%s: started: %d.\n", __func__, dev->started);
	spin_unlock_irqrestore(&dev->lock, flags);
	BUG_ON(dev->started < 0);
}

static void hifn_process_ready(struct ablkcipher_request *req, int error)
{
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);

	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
		unsigned int nbytes = req->nbytes;
		int idx = 0, err;
		struct scatterlist *dst, *t;
		void *saddr;

		while (nbytes) {
			t = &rctx->walk.cache[idx];
			dst = &req->dst[idx];

			dprintk("\n%s: sg_page(t): %p, t->length: %u, "
				"sg_page(dst): %p, dst->length: %u, "
				"nbytes: %u.\n",
static void hifn_process_ready(struct ablkcipher_request *req, int error)
{
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);

	if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
		unsigned int nbytes = req->nbytes;
		int idx = 0, err;
		struct scatterlist *dst, *t;
		void *saddr;

		while (nbytes) {
			t = &rctx->walk.cache[idx];
			dst = &req->dst[idx];

			dprintk("\n%s: sg_page(t): %p, t->length: %u, "
				"sg_page(dst): %p, dst->length: %u, "
				"nbytes: %u.\n",
				__func__, sg_page(t), t->length,
				sg_page(dst), dst->length, nbytes);

			if (!t->length) {
				nbytes -= min(dst->length, nbytes);
				idx++;
				continue;
			}

			saddr = kmap_atomic(sg_page(t));

			err = ablkcipher_get(saddr, &t->length, t->offset,
					dst, nbytes, &nbytes);
			if (err < 0) {
				kunmap_atomic(saddr);
				break;
			}

			idx += err;
			kunmap_atomic(saddr);
		}

		hifn_cipher_walk_exit(&rctx->walk);
	}

	req->base.complete(&req->base, error);
}

static void hifn_clear_rings(struct hifn_device *dev, int error)
{
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i, u;

	dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;

		if (dev->sa[i]) {
			dev->success++;
			dev->reset = 0;
			hifn_process_ready(dev->sa[i], error);
			hifn_complete_sa(dev, i);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_SRC_RSIZE)
			i = 0;
		u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_CMD_RSIZE)
			i = 0;
		u--;
	}
	dma->cmdk = i; dma->cmdu = u;

	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
			break;
		if (++i == HIFN_D_DST_RSIZE)
			i = 0;
		u--;
	}
	dma->dstk = i; dma->dstu = u;

	dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
			"k: %d.%d.%d.%d.\n",
			dev->name,
			dma->cmdi, dma->srci, dma->dsti, dma->resi,
			dma->cmdu, dma->srcu, dma->dstu, dma->resu,
			dma->cmdk, dma->srck, dma->dstk, dma->resk);
}

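/*
 * Periodic watchdog. If no request has completed since the previous tick
 * while requests are still outstanding, the device is assumed to be stuck;
 * after several such ticks in a row the result ring is failed out, the DMA
 * engine is reset and the device is restarted.
 */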
static void hifn_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct hifn_device *dev = container_of(dw, struct hifn_device, work);
	unsigned long flags;
	int reset = 0;
	u32 r = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->active == 0) {
		struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

		if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
			dev->flags &= ~HIFN_FLAG_CMD_BUSY;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
			dev->flags &= ~HIFN_FLAG_SRC_BUSY;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
			dev->flags &= ~HIFN_FLAG_DST_BUSY;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
			dev->flags &= ~HIFN_FLAG_RES_BUSY;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			hifn_write_1(dev, HIFN_1_DMA_CSR, r);
	} else
		dev->active--;

	if ((dev->prev_success == dev->success) && dev->started)
		reset = 1;
	dev->prev_success = dev->success;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (reset) {
		if (++dev->reset >= 5) {
			int i;
			struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;

			printk(KERN_INFO "%s: r: %08x, active: %d, started: %d, "
				"success: %lu: qlen: %u/%u, reset: %d.\n",
				dev->name, r, dev->active, dev->started,
				dev->success, dev->queue.qlen, dev->queue.max_qlen,
				reset);

			printk(KERN_INFO "%s: res: ", __func__);
			for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
				printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
				if (dev->sa[i]) {
					hifn_process_ready(dev->sa[i], -ENODEV);
					hifn_complete_sa(dev, i);
				}
			}
			printk("\n");

			hifn_reset_dma(dev, 1);
			hifn_stop_device(dev);
			hifn_start_device(dev);
			dev->reset = 0;
		}

		tasklet_schedule(&dev->tasklet);
	}

	schedule_delayed_work(&dev->work, HZ);
}

static irqreturn_t hifn_interrupt(int irq, void *data)
{
	struct hifn_device *dev = (struct hifn_device *)data;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	u32 dmacsr, restart;

	dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);

	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
		dma->cmdi, dma->srci, dma->dsti, dma->resi,
		dma->cmdu, dma->srcu, dma->dstu, dma->resu);

	if ((dmacsr & dev->dmareg) == 0)
		return IRQ_NONE;

	hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
	if (dmacsr & HIFN_DMACSR_PUBDONE)
		hifn_write_1(dev, HIFN_1_PUB_STATUS,
			hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart) {
		u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);

		printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
			dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
			!!(dmacsr & HIFN_DMACSR_D_OVER),
			puisr, !!(puisr & HIFN_PUISR_DSTOVER));
		if (puisr & HIFN_PUISR_DSTOVER)
			hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
		hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
					HIFN_DMACSR_D_OVER));
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
			HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
			dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
			!!(dmacsr & HIFN_DMACSR_S_ABORT),
			!!(dmacsr & HIFN_DMACSR_D_ABORT),
			!!(dmacsr & HIFN_DMACSR_R_ABORT));
		hifn_reset_dma(dev, 1);
		hifn_init_dma(dev);
		hifn_init_registers(dev);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		dprintk("%s: wait on command.\n", dev->name);
		dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
		hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
	}

	tasklet_schedule(&dev->tasklet);

	return IRQ_HANDLED;
}

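/*
 * Fail every request the hardware still owns and everything waiting in the
 * software queue with -ENODEV; used on device teardown so that no caller is
 * left waiting for a completion that will never arrive.
 */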
static void hifn_flush(struct hifn_device *dev)
{
	unsigned long flags;
	struct crypto_async_request *async_req;
	struct ablkcipher_request *req;
	struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
	int i;

	for (i = 0; i < HIFN_D_RES_RSIZE; ++i) {
		struct hifn_desc *d = &dma->resr[i];

		if (dev->sa[i]) {
			hifn_process_ready(dev->sa[i],
				(d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
			hifn_complete_sa(dev, i);
		}
	}

	spin_lock_irqsave(&dev->lock, flags);
	while ((async_req = crypto_dequeue_request(&dev->queue))) {
		req = container_of(async_req, struct ablkcipher_request, base);
		spin_unlock_irqrestore(&dev->lock, flags);

		hifn_process_ready(req, -ENODEV);

		spin_lock_irqsave(&dev->lock, flags);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct hifn_context *ctx = crypto_tfm_ctx(tfm);
	struct hifn_device *dev = ctx->dev;

	if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (len == HIFN_DES_KEY_LENGTH) {
		u32 tmp[DES_EXPKEY_WORDS];
		int ret = des_ekey(tmp, key);

		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			return -EINVAL;
		}
	}

	dev->flags &= ~HIFN_FLAG_OLD_KEY;

	memcpy(ctx->key, key, len);
	ctx->keysize = len;

	return 0;
}

static int hifn_handle_req(struct ablkcipher_request *req)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev = ctx->dev;
	int err = -EAGAIN;

	if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
		err = hifn_setup_session(req);

	if (err == -EAGAIN) {
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		err = ablkcipher_enqueue_request(&dev->queue, req);
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	return err;
}

static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
		u8 type, u8 mode)
{
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
	unsigned ivsize;

	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));

	if (req->info && mode != ACRYPTO_MODE_ECB) {
		if (type == ACRYPTO_TYPE_AES_128)
			ivsize = HIFN_AES_IV_LENGTH;
		else if (type == ACRYPTO_TYPE_DES)
			ivsize = HIFN_DES_KEY_LENGTH;
		else if (type == ACRYPTO_TYPE_3DES)
			ivsize = HIFN_3DES_KEY_LENGTH;
	}

	if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
		if (ctx->keysize == 24)
			type = ACRYPTO_TYPE_AES_192;
		else if (ctx->keysize == 32)
			type = ACRYPTO_TYPE_AES_256;
	}

	rctx->op = op;
	rctx->mode = mode;
	rctx->type = type;
	rctx->iv = req->info;
	rctx->ivsize = ivsize;

	/*
	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
	 */

	return hifn_handle_req(req);
}

static int hifn_process_queue(struct hifn_device *dev)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	int err = 0;

	while (dev->started < HIFN_QUEUE_LENGTH) {
		spin_lock_irqsave(&dev->lock, flags);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		spin_unlock_irqrestore(&dev->lock, flags);

		if (!async_req)
			break;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		req = container_of(async_req, struct ablkcipher_request, base);

		err = hifn_handle_req(req);
		if (err)
			break;
	}

	return err;
}

static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
		u8 type, u8 mode)
{
	int err;
	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
	struct hifn_device *dev = ctx->dev;

	err = hifn_setup_crypto_req(req, op, type, mode);
	if (err)
		return err;

	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
		hifn_process_queue(dev);

	return -EINPROGRESS;
}

/*
 * AES encryption functions.
 */
static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
 * AES decryption functions.
 */
static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}

/*
 * DES encryption functions.
 */
static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
 * DES decryption functions.
 */
static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}

/*
 * 3DES encryption functions.
 */
static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

/*
 * 3DES decryption functions.
 */
static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
{
	return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
			ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}

struct hifn_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char drv_name[CRYPTO_MAX_ALG_NAME];
	unsigned int bsize;
	struct ablkcipher_alg ablkcipher;
};

static struct hifn_alg_template hifn_alg_templates[] = {
	/*
	 * 3DES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_cfb,
			.decrypt = hifn_decrypt_3des_cfb,
		},
	},
	{
		.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_ofb,
			.decrypt = hifn_decrypt_3des_ofb,
		},
	},
	{
		.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
		.ablkcipher = {
			.ivsize = HIFN_IV_LENGTH,
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_cbc,
			.decrypt = hifn_decrypt_3des_cbc,
		},
	},
	{
		.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_3DES_KEY_LENGTH,
			.max_keysize = HIFN_3DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_3des_ecb,
			.decrypt = hifn_decrypt_3des_ecb,
		},
	},

	/*
	 * DES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_cfb,
			.decrypt = hifn_decrypt_des_cfb,
		},
	},
	{
		.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_ofb,
			.decrypt = hifn_decrypt_des_ofb,
		},
	},
	{
		.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
		.ablkcipher = {
			.ivsize = HIFN_IV_LENGTH,
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_cbc,
			.decrypt = hifn_decrypt_des_cbc,
		},
	},
	{
		.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
		.ablkcipher = {
			.min_keysize = HIFN_DES_KEY_LENGTH,
			.max_keysize = HIFN_DES_KEY_LENGTH,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_des_ecb,
			.decrypt = hifn_decrypt_des_ecb,
		},
	},

	/*
	 * AES ECB, CBC, CFB and OFB modes.
	 */
	{
		.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_ecb,
			.decrypt = hifn_decrypt_aes_ecb,
		},
	},
	{
		.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
		.ablkcipher = {
			.ivsize = HIFN_AES_IV_LENGTH,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_cbc,
			.decrypt = hifn_decrypt_aes_cbc,
		},
	},
	{
		.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_cfb,
			.decrypt = hifn_decrypt_aes_cfb,
		},
	},
	{
		.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = hifn_setkey,
			.encrypt = hifn_encrypt_aes_ofb,
			.decrypt = hifn_decrypt_aes_ofb,
		},
	},
};

static int hifn_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
	struct hifn_context *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = ha->dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
	return 0;
}

static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
{
	struct hifn_crypto_alg *alg;
	int err;

	alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
	if (!alg)
		return -ENOMEM;

	snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
	snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
			t->drv_name, dev->name);

	alg->alg.cra_priority = 300;
	alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
	alg->alg.cra_blocksize = t->bsize;
	alg->alg.cra_ctxsize = sizeof(struct hifn_context);
	alg->alg.cra_alignmask = 0;
	alg->alg.cra_type = &crypto_ablkcipher_type;
	alg->alg.cra_module = THIS_MODULE;
	alg->alg.cra_u.ablkcipher = t->ablkcipher;
	alg->alg.cra_init = hifn_cra_init;

	alg->dev = dev;

	list_add_tail(&alg->entry, &dev->alg_list);

	err = crypto_register_alg(&alg->alg);
	if (err) {
		list_del(&alg->entry);
		kfree(alg);
	}

	return err;
}

static void hifn_unregister_alg(struct hifn_device *dev)
{
	struct hifn_crypto_alg *a, *n;

	list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
		list_del(&a->entry);
		crypto_unregister_alg(&a->alg);
		kfree(a);
	}
}

static int hifn_register_alg(struct hifn_device *dev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(hifn_alg_templates); ++i) {
		err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
		if (err)
			goto err_out_exit;
	}

	return 0;

err_out_exit:
	hifn_unregister_alg(dev);
	return err;
}

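/*
 * Illustrative only: a minimal sketch (not part of this driver) of how a
 * kernel user could reach one of the ciphers registered above through the
 * generic ablkcipher API of this kernel generation. Names such as "key",
 * "sg", "nbytes", "iv" and "done" are hypothetical placeholders; error
 * handling and completion plumbing are omitted.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					done, NULL);
 *	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);		// -EINPROGRESS when async
 *	...
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 *
 * The crypto core selects this driver when its cra_priority wins; the
 * request then flows through hifn_setup_crypto() and the queue above.
 */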
static void hifn_tasklet_callback(unsigned long data)
{
	struct hifn_device *dev = (struct hifn_device *)data;

	/*
	 * It is ok to call this without the lock being held,
	 * although it modifies some parameters used in parallel
	 * (like dev->success): they are either used in process
	 * context or their update is atomic (like setting dev->sa[i] to NULL).
	 */
	hifn_clear_rings(dev, 0);

	if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
		hifn_process_queue(dev);
}

static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err, i;
	struct hifn_device *dev;
	char name[8];

	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_out_disable_pci_device;

	snprintf(name, sizeof(name), "hifn%d",
			atomic_inc_return(&hifn_dev_number) - 1);

	err = pci_request_regions(pdev, name);
	if (err)
		goto err_out_disable_pci_device;

	if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
	    pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
	    pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
		dprintk("%s: Broken hardware - I/O regions are too small.\n",
				pci_name(pdev));
		err = -ENODEV;
		goto err_out_free_regions;
	}

	dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
			GFP_KERNEL);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	INIT_LIST_HEAD(&dev->alg_list);

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	spin_lock_init(&dev->lock);

	for (i = 0; i < 3; ++i) {
		unsigned long addr, size;

		addr = pci_resource_start(pdev, i);
		size = pci_resource_len(pdev, i);

		dev->bar[i] = ioremap_nocache(addr, size);
		if (!dev->bar[i]) {
			err = -ENOMEM;
			goto err_out_unmap_bars;
		}
	}

	dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
					       &dev->desc_dma);
	if (!dev->desc_virt) {
		dprintk("Failed to allocate descriptor rings.\n");
		err = -ENOMEM;
		goto err_out_unmap_bars;
	}

	dev->pdev = pdev;
	dev->irq = pdev->irq;

	for (i = 0; i < HIFN_D_RES_RSIZE; ++i)
		dev->sa[i] = NULL;

	pci_set_drvdata(pdev, dev);

	tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);

	crypto_init_queue(&dev->queue, 1);

	err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
	if (err) {
		dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
		dev->irq = 0;
		goto err_out_free_desc;
	}

	err = hifn_start_device(dev);
	if (err)
		goto err_out_free_irq;

	err = hifn_test(dev, 1, 0);
	if (err)
		goto err_out_stop_device;

	err = hifn_register_rng(dev);
	if (err)
		goto err_out_stop_device;

	err = hifn_register_alg(dev);
	if (err)
		goto err_out_unregister_rng;

	INIT_DELAYED_WORK(&dev->work, hifn_work);
	schedule_delayed_work(&dev->work, HZ);

	dprintk("HIFN crypto accelerator card at %s has been "
			"successfully registered as %s.\n",
			pci_name(pdev), dev->name);

	return 0;

err_out_unregister_rng:
	hifn_unregister_rng(dev);
err_out_stop_device:
	hifn_reset_dma(dev, 1);
	hifn_stop_device(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
	tasklet_kill(&dev->tasklet);
err_out_free_desc:
	pci_free_consistent(pdev, sizeof(struct hifn_dma),
			dev->desc_virt, dev->desc_dma);

err_out_unmap_bars:
	for (i = 0; i < 3; ++i)
		if (dev->bar[i])
			iounmap(dev->bar[i]);

err_out_free_regions:
	pci_release_regions(pdev);

err_out_disable_pci_device:
	pci_disable_device(pdev);

	return err;
}

static void hifn_remove(struct pci_dev *pdev)
{
	int i;
	struct hifn_device *dev;

	dev = pci_get_drvdata(pdev);

	if (dev) {
		cancel_delayed_work_sync(&dev->work);

		hifn_unregister_rng(dev);
		hifn_unregister_alg(dev);
		hifn_reset_dma(dev, 1);
		hifn_stop_device(dev);

		free_irq(dev->irq, dev);
		tasklet_kill(&dev->tasklet);

		hifn_flush(dev);

		pci_free_consistent(pdev, sizeof(struct hifn_dma),
				dev->desc_virt, dev->desc_dma);
		for (i = 0; i < 3; ++i)
			if (dev->bar[i])
				iounmap(dev->bar[i]);

		kfree(dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_device_id hifn_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);

static struct pci_driver hifn_pci_driver = {
	.name     = "hifn795x",
	.id_table = hifn_pci_tbl,
	.probe    = hifn_probe,
	.remove   = hifn_remove,
};

static int __init hifn_init(void)
{
	unsigned int freq;
	int err;

	/* HIFN supports only 32-bit addresses */
	BUILD_BUG_ON(sizeof(dma_addr_t) != 4);

	if (strncmp(hifn_pll_ref, "ext", 3) &&
	    strncmp(hifn_pll_ref, "pci", 3)) {
		printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
				"must be pci or ext.\n");
		return -EINVAL;
	}

	/*
	 * For the 7955/7956 the reference clock frequency must be in the
	 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
	 * but this chip is currently not supported.
	 */
	if (hifn_pll_ref[3] != '\0') {
		freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
		if (freq < 20 || freq > 100) {
			printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
					"frequency, must be in the range "
					"of 20-100.\n");
			return -EINVAL;
		}
	}

	err = pci_register_driver(&hifn_pci_driver);
	if (err < 0) {
		dprintk("Failed to register PCI driver for %s device.\n",
				hifn_pci_driver.name);
		return -ENODEV;
	}

	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
			"has been successfully registered.\n");

	return 0;
}

static void __exit hifn_fini(void)
{
	pci_unregister_driver(&hifn_pci_driver);

	printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
			"has been successfully unregistered.\n");
}

module_init(hifn_init);
module_exit(hifn_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");