/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual TPM
 *
 * Copyright (c) 2015, 2017, 2019 IBM Corporation.
 *
 * Authors:
 *  Stefan Berger <stefanb@linux.vnet.ibm.com>
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h"
#include "tpm_prop.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "trace.h"
#include "qom/object.h"

#define DEBUG_SPAPR 0

typedef struct SpaprTpmState SpaprTpmState;
DECLARE_INSTANCE_CHECKER(SpaprTpmState, VIO_SPAPR_VTPM,
                         TYPE_TPM_SPAPR)

typedef struct TpmCrq {
    uint8_t valid;  /* 0x80: cmd; 0xc0: init crq */
                    /* 0x81-0x83: CRQ message response */
    uint8_t msg;    /* see below */
    uint16_t len;   /* len of TPM request; len of TPM response */
    uint32_t data;  /* rtce_dma_handle when sending TPM request */
    uint64_t reserved;
} TpmCrq;

#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND    0xC0
#define SPAPR_VTPM_VALID_COMMAND             0x80
#define SPAPR_VTPM_MSG_RESULT                0x80

/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */
#define SPAPR_VTPM_INIT_CRQ_RESULT           0x1
#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT  0x2

/* msg types for valid = SPAPR_VTPM_VALID_CMD */
#define SPAPR_VTPM_GET_VERSION               0x1
#define SPAPR_VTPM_TPM_COMMAND               0x2
#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE      0x3
#define SPAPR_VTPM_PREPARE_TO_SUSPEND        0x4

/* response error messages */
#define SPAPR_VTPM_VTPM_ERROR                0xff

/* error codes */
#define SPAPR_VTPM_ERR_COPY_IN_FAILED        0x3
#define SPAPR_VTPM_ERR_COPY_OUT_FAILED       0x4

#define TPM_SPAPR_BUFFER_MAX                 4096

struct SpaprTpmState {
    SpaprVioDevice vdev;

    TpmCrq crq; /* track single TPM command */

    uint8_t state;
#define SPAPR_VTPM_STATE_NONE         0
#define SPAPR_VTPM_STATE_EXECUTION    1
#define SPAPR_VTPM_STATE_COMPLETION   2

    unsigned char *buffer;

    uint32_t numbytes; /* number of bytes to deliver on resume */

    TPMBackendCmd cmd;

    TPMBackend *be_driver;
    TPMVersion be_tpm_version;

    size_t be_buffer_size;
};

/*
 * Send a request to the TPM.
 */
static void tpm_spapr_tpm_send(SpaprTpmState *s)
{
    tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");

    s->state = SPAPR_VTPM_STATE_EXECUTION;
    s->cmd = (TPMBackendCmd) {
        .locty = 0,
        .in = s->buffer,
        .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size),
        .out = s->buffer,
        .out_len = s->be_buffer_size,
    };

    tpm_backend_deliver_request(s->be_driver, &s->cmd);
}

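/*
 * Fetch a TPM request from guest memory and forward it to the backend.
 *
 * The CRQ only carries the guest DMA handle of the request; the request
 * bytes themselves are copied in via spapr_vio_dma_read() and then handed
 * to the TPM backend by tpm_spapr_tpm_send().
 */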
static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr)
{
    long rc;

    /* a max. of be_buffer_size bytes can be transported */
    rc = spapr_vio_dma_read(&s->vdev, dataptr,
                            s->buffer, s->be_buffer_size);
    if (rc) {
        error_report("tpm_spapr_got_payload: DMA read failure");
    }
    /* let vTPM handle any malformed request */
    tpm_spapr_tpm_send(s);

    return rc;
}

static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq)
{
    return spapr_vio_send_crq(dev, (uint8_t *)crq);
}

static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
    TpmCrq local_crq;
    TpmCrq *crq = &s->crq; /* requests only */
    int rc;
    uint8_t valid = crq_data[0];
    uint8_t msg = crq_data[1];

    trace_tpm_spapr_do_crq(valid, msg);

    switch (valid) {
    case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */

        /* Respond to initialization request */
        switch (msg) {
        case SPAPR_VTPM_INIT_CRQ_RESULT:
            trace_tpm_spapr_do_crq_crq_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT:
            trace_tpm_spapr_do_crq_crq_complete_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;
        }

        break;
    case SPAPR_VTPM_VALID_COMMAND: /* Payloads */
        switch (msg) {
        case SPAPR_VTPM_TPM_COMMAND:
            trace_tpm_spapr_do_crq_tpm_command();
            if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
                return H_BUSY;
            }
            memcpy(crq, crq_data, sizeof(*crq));

            rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data));

            if (rc == H_SUCCESS) {
                crq->valid = be16_to_cpu(0);
            } else {
                local_crq.valid = SPAPR_VTPM_MSG_RESULT;
                local_crq.msg = SPAPR_VTPM_VTPM_ERROR;
                local_crq.len = cpu_to_be16(0);
                local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED);
                spapr_tpm_send_crq(dev, &local_crq);
            }
            break;

        case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE:
            trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size);
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE |
                            SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(s->be_buffer_size);
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_GET_VERSION:
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(0);
            switch (s->be_tpm_version) {
            case TPM_VERSION_1_2:
                local_crq.data = cpu_to_be32(1);
                break;
            case TPM_VERSION_2_0:
                local_crq.data = cpu_to_be32(2);
                break;
            default:
                g_assert_not_reached();
            }
            trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_PREPARE_TO_SUSPEND:
            trace_tpm_spapr_do_crq_prepare_to_suspend();
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND |
                            SPAPR_VTPM_MSG_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        default:
            trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg);
        }
        break;
    default:
        trace_tpm_spapr_do_crq_unknown_crq(valid, msg);
    }

    return H_SUCCESS;
}

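/*
 * Deliver the TPM response back to the guest.
 *
 * Called by the TPM backend once a command has completed. The response is
 * DMA-written to the guest buffer recorded in the saved CRQ and a result
 * CRQ is queued. While a migration is being finalized, delivery is deferred
 * to .post_load, since the DMA write would touch guest memory.
 */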
static void tpm_spapr_request_completed(TPMIf *ti, int ret)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
    TpmCrq *crq = &s->crq;
    uint32_t len;
    int rc;

    s->state = SPAPR_VTPM_STATE_COMPLETION;

    /* a max. of be_buffer_size bytes can be transported */
    len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size);

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        trace_tpm_spapr_caught_response(len);
        /* defer delivery of response until .post_load */
        s->numbytes = len;
        return;
    }

    rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data),
                             s->buffer, len);

    tpm_util_show_buffer(s->buffer, len, "From TPM");

    crq->valid = SPAPR_VTPM_MSG_RESULT;
    if (rc == H_SUCCESS) {
        crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT;
        crq->len = cpu_to_be16(len);
    } else {
        error_report("%s: DMA write failure", __func__);
        crq->msg = SPAPR_VTPM_VTPM_ERROR;
        crq->len = cpu_to_be16(0);
        crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED);
    }

    rc = spapr_tpm_send_crq(&s->vdev, crq);
    if (rc) {
        error_report("%s: Error sending response", __func__);
    }
}

static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize)
{
    return tpm_backend_startup_tpm(s->be_driver, buffersize);
}

static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    switch (s->be_tpm_version) {
    case TPM_VERSION_1_2:
        return "IBM,vtpm";
    case TPM_VERSION_2_0:
        return "IBM,vtpm20";
    default:
        g_assert_not_reached();
    }
}

static void tpm_spapr_reset(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    s->state = SPAPR_VTPM_STATE_NONE;
    s->numbytes = 0;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);

    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_SPAPR_BUFFER_MAX);

    tpm_backend_reset(s->be_driver);

    if (tpm_spapr_do_startup_tpm(s, s->be_buffer_size) < 0) {
        exit(1);
    }
}

static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return TPM_VERSION_UNSPEC;
    }

    return tpm_backend_get_tpm_version(s->be_driver);
}

/* persistent state handling */

static int tpm_spapr_pre_save(void *opaque)
{
    SpaprTpmState *s = opaque;

    tpm_backend_finish_sync(s->be_driver);
    /*
     * we cannot deliver the results to the VM since DMA would touch VM memory
     */

    return 0;
}

static int tpm_spapr_post_load(void *opaque, int version_id)
{
    SpaprTpmState *s = opaque;

    if (s->numbytes) {
        trace_tpm_spapr_post_load();
        /* deliver the results to the VM via DMA */
        tpm_spapr_request_completed(TPM_IF(s), 0);
        s->numbytes = 0;
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_vtpm = {
    .name = "tpm-spapr",
    .pre_save = tpm_spapr_pre_save,
    .post_load = tpm_spapr_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, SpaprTpmState),

        VMSTATE_UINT8(state, SpaprTpmState),
        VMSTATE_UINT32(numbytes, SpaprTpmState),
        VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes),
        /* remember DMA address */
        VMSTATE_UINT32(crq.data, SpaprTpmState),
        VMSTATE_END_OF_LIST(),
    }
};

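/*
 * qdev properties: "tpmdev" links this front-end to a TPM backend;
 * the realize function below rejects the device if it is left unset.
 */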
static Property tpm_spapr_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
    DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};

static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    if (!tpm_find()) {
        error_setg(errp, "at most one TPM device is permitted");
        return;
    }

    dev->crq.SendFunc = tpm_spapr_do_crq;

    if (!s->be_driver) {
        error_setg(errp, "'tpmdev' property is required");
        return;
    }
    s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}

static void tpm_spapr_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
    TPMIfClass *tc = TPM_IF_CLASS(klass);

    k->realize = tpm_spapr_realizefn;
    k->reset = tpm_spapr_reset;
    k->dt_name = "vtpm";
    k->dt_type = "IBM,vtpm";
    k->get_dt_compatible = tpm_spapr_get_dt_compatible;
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, tpm_spapr_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vtpm;

    tc->model = TPM_MODEL_TPM_SPAPR;
    tc->get_version = tpm_spapr_get_version;
    tc->request_completed = tpm_spapr_request_completed;
}

static const TypeInfo tpm_spapr_info = {
    .name          = TYPE_TPM_SPAPR,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprTpmState),
    .class_init    = tpm_spapr_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_TPM_IF },
        { }
    }
};

static void tpm_spapr_register_types(void)
{
    type_register_static(&tpm_spapr_info);
}

type_init(tpm_spapr_register_types)