/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"

#define R_CRYPT_CMD     (0x10 / 4)

#define R_STATUS        (0x1c / 4)
#define HASH_IRQ        BIT(9)
#define CRYPT_IRQ       BIT(12)
#define TAG_IRQ         BIT(15)

#define R_HASH_SRC      (0x20 / 4)
#define R_HASH_DEST     (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN  (0x2c / 4)

#define R_HASH_CMD      (0x30 / 4)
/* Hash algorithm selection */
#define  HASH_ALGO_MASK                 (BIT(4) | BIT(5) | BIT(6))
#define  HASH_ALGO_MD5                  0
#define  HASH_ALGO_SHA1                 BIT(5)
#define  HASH_ALGO_SHA224               BIT(6)
#define  HASH_ALGO_SHA256               (BIT(4) | BIT(6))
#define  HASH_ALGO_SHA512_SERIES        (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define  SHA512_HASH_ALGO_MASK          (BIT(10) | BIT(11) | BIT(12))
#define  HASH_ALGO_SHA512_SHA512        0
#define  HASH_ALGO_SHA512_SHA384        BIT(10)
#define  HASH_ALGO_SHA512_SHA256        BIT(11)
#define  HASH_ALGO_SHA512_SHA224        (BIT(10) | BIT(11))
/* HMAC modes */
#define  HASH_HMAC_MASK                 (BIT(7) | BIT(8))
#define  HASH_DIGEST                    0
#define  HASH_DIGEST_HMAC               BIT(7)
#define  HASH_DIGEST_ACCUM              BIT(8)
#define  HASH_HMAC_KEY                  (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define  HASH_ONLY                      0
#define  HASH_ONLY2                     BIT(0)
#define  HASH_CRYPT_THEN_HASH           BIT(1)
#define  HASH_HASH_THEN_CRYPT           (BIT(0) | BIT(1))
/* Other cmd bits */
#define  HASH_IRQ_EN                    BIT(9)
#define  HASH_SG_EN                     BIT(18)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE                4
#define SG_LIST_LEN_MASK                0x0FFFFFFF
#define SG_LIST_LEN_LAST                BIT(31)
#define SG_LIST_ADDR_SIZE               4
#define SG_LIST_ADDR_MASK               0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE              (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)

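/* Map from the R_HASH_CMD algorithm selection bits to QEMU hash algorithms */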
static const struct {
    uint32_t mask;
    QCryptoHashAlgorithm algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALG_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALG_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALG_SHA256 },
};

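/*
 * Return the QCryptoHashAlgorithm selected by @reg, or -1 if the selection
 * has no table entry (e.g. the SHA512-series SHA224 mode, which is defined
 * above but not mapped).
 */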
static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}

/**
 * Check whether the request contains the padding message.
 *
 * @param s             aspeed hace state object
 * @param iov           iov of the current request
 * @param req_len       length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding the padding
 *                      message)
 * @param pad_offset    start offset of the padding message
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean this is
     * the last request; only the last request carries the padding message.
     * Detect the padding as follows:
     *   1. Read the total message length. If the current message contains
     *      padding, its last 8 bytes hold the total message length in bits
     *      (big-endian).
     *   2. Check that the total message length is valid, i.e. less than or
     *      equal to total_req_len.
     *   3. Subtract the padding size from the current request length to get
     *      the padding offset. The first byte of the padding message must be
     *      0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;
        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}

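/*
 * Combine the iovs cached from earlier accumulative-mode requests with the
 * unpadded head of the current request (the first *pad_offset bytes), copy
 * them into the caller's iov array, reset the accumulation state and return
 * the resulting iov count.
 */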
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;

    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }
    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }
    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;
    return iov_count;
}

/**
 * Generate iov for accumulative mode.
 *
 * @param s             aspeed hace state object
 * @param iov           iov of the current request
 * @param id            index of the current iov
 * @param req_len       length of the current request
 *
 * @return count of iov
 */
static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
                            hwaddr *req_len)
{
    uint32_t pad_offset;
    uint32_t total_msg_len;

    s->total_req_len += *req_len;

    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
        if (s->iov_count) {
            return reconstruct_iov(s, iov, id, &pad_offset);
        }

        *req_len -= s->total_req_len - total_msg_len;
        s->total_req_len = 0;
        iov[id].iov_len = *req_len;
    } else {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *req_len;
        ++s->iov_count;
    }

    return id + 1;
}

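/*
 * Hash the guest data and write the digest to the address in R_HASH_DEST,
 * in direct, scatter-gather or accumulative mode.
 */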
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    int niov = 0;
    int i;
    void *haddr;

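    /*
     * Scatter-gather mode: walk the SG list in guest DRAM. Each 8-byte
     * entry holds a 32-bit length word (bit 31 marks the last entry)
     * followed by a 32-bit source address.
     */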
    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "aspeed_hace: guest failed to set end of sg "
                              "list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: failed to map source buffer\n", __func__);
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                niov = gen_acc_mode_iov(s, iov, i, &plen);
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to map source buffer\n", __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * The Aspeed SDK kernel driver disables sg_mode in hash_final(),
             * so a request that arrives with sg_mode disabled may belong to
             * an accumulation in progress. If the iov cache is not empty and
             * this request carries the padding message, combine the cached
             * iovs with the current one.
             */
            uint32_t total_msg_len;
            uint32_t pad_offset;
            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                niov = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

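    /*
     * Accumulative mode may have rebuilt the iov array from the cache; if
     * so, hash over that combined count instead of the per-entry count.
     */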
    if (niov) {
        i = niov;
    }

    if (qcrypto_hash_bytesv(algo, iov, i,
                            &digest_buf, &digest_len, NULL) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

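    /*
     * Unmap the guest buffers; they were mapped read-only, so no data is
     * written back to guest memory.
     */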
    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}

static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}

static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
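        /*
         * HASH_IRQ is write-one-to-clear: drop the bit from the incoming
         * value and lower the IRQ line if it was previously set.
         */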
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
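        /* The source length field is 28 bits wide */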
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if (data & HASH_DIGEST_HMAC) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & BIT(1)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          (data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM);

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}

static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

static void aspeed_hace_reset(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}

static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}

static Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};

static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->reset = aspeed_hace_reset;
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}

static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass),
};

static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003FF; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};

static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3FFFFFFF;
    ahc->dest_mask = 0x3FFFFFF8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003FF; /* No SG or SHA512 modes */
}

static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};

static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};

static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}

static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};

static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);