// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2013 Google, Inc
 */

#include <common.h>
#include <dm.h>
#include <tpm-v1.h>
#include <asm/state.h>
#include <asm/unaligned.h>
#include <linux/crc8.h>

/* TPM NVRAM location indices. */
#define FIRMWARE_NV_INDEX		0x1007
#define KERNEL_NV_INDEX			0x1008
#define BACKUP_NV_INDEX			0x1009
#define FWMP_NV_INDEX			0x100a
#define REC_HASH_NV_INDEX		0x100b
#define REC_HASH_NV_SIZE		VB2_SHA256_DIGEST_SIZE

#define NV_DATA_PUBLIC_PERMISSIONS_OFFSET	60

/* Kernel TPM space - KERNEL_NV_INDEX, locked with physical presence */
#define ROLLBACK_SPACE_KERNEL_VERSION	2
#define ROLLBACK_SPACE_KERNEL_UID	0x4752574C /* 'GRWL' */

struct rollback_space_kernel {
	/* Struct version, for backwards compatibility */
	uint8_t struct_version;
	/* Unique ID to detect space redefinition */
	uint32_t uid;
	/* Kernel versions */
	uint32_t kernel_versions;
	/* Reserved for future expansion */
	uint8_t reserved[3];
	/* Checksum (v2 and later only) */
	uint8_t crc8;
} __packed rollback_space_kernel;

/*
 * These numbers derive from adding the sizes of command fields as shown in
 * the TPM commands manual.
 */
#define TPM_REQUEST_HEADER_LENGTH	10
#define TPM_RESPONSE_HEADER_LENGTH	10

/* These are the different non-volatile spaces that we emulate */
enum {
	NV_GLOBAL_LOCK,
	NV_SEQ_FIRMWARE,
	NV_SEQ_KERNEL,
	NV_SEQ_BACKUP,
	NV_SEQ_FWMP,
	NV_SEQ_REC_HASH,

	NV_SEQ_COUNT,
};

/* Size of each non-volatile space */
#define NV_DATA_SIZE		0x20

struct nvdata_state {
	bool present;
	u8 data[NV_DATA_SIZE];
};

/*
 * Information about our TPM emulation. This is preserved in the sandbox
 * state file if enabled.
 */
static struct tpm_state {
	bool valid;
	struct nvdata_state nvdata[NV_SEQ_COUNT];
} g_state;

/**
 * sandbox_tpm_read_state() - read the sandbox TPM state from the state file
 *
 * If data is available, then blob and node will provide access to it. If
 * not, this function sets up an empty TPM.
 *
 * @blob: Pointer to device tree blob, or NULL if no data to read
 * @node: Node offset to read from
 */
static int sandbox_tpm_read_state(const void *blob, int node)
{
	const char *prop;
	int len;
	int i;

	if (!blob)
		return 0;

	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		sprintf(prop_name, "nvdata%d", i);
		prop = fdt_getprop(blob, node, prop_name, &len);
		if (prop && len == NV_DATA_SIZE) {
			memcpy(g_state.nvdata[i].data, prop, NV_DATA_SIZE);
			g_state.nvdata[i].present = true;
		}
	}
	g_state.valid = true;

	return 0;
}

/**
 * sandbox_tpm_write_state() - Write out our state to the state file
 *
 * The caller will ensure that there is a node ready for the state. The node
 * may already contain the old state, in which case it is overwritten.
 *
 * @blob: Device tree blob holding state
 * @node: Node to write our state into
 */
static int sandbox_tpm_write_state(void *blob, int node)
{
	int i;

	/*
	 * We are guaranteed enough space to write basic properties.
	 * We could use fdt_add_subnode() to put each set of data in its
	 * own node - perhaps useful if we add access information to each.
	 */
	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		if (g_state.nvdata[i].present) {
			sprintf(prop_name, "nvdata%d", i);
			fdt_setprop(blob, node, prop_name,
				    g_state.nvdata[i].data, NV_DATA_SIZE);
		}
	}

	return 0;
}

SANDBOX_STATE_IO(sandbox_tpm, "google,sandbox-tpm", sandbox_tpm_read_state,
		 sandbox_tpm_write_state);

/* Map a TPM NV index onto our internal NV_SEQ_... slot, or -1 if unknown */
static int index_to_seq(uint32_t index)
{
	switch (index) {
	case FIRMWARE_NV_INDEX:
		return NV_SEQ_FIRMWARE;
	case KERNEL_NV_INDEX:
		return NV_SEQ_KERNEL;
	case BACKUP_NV_INDEX:
		return NV_SEQ_BACKUP;
	case FWMP_NV_INDEX:
		return NV_SEQ_FWMP;
	case REC_HASH_NV_INDEX:
		return NV_SEQ_REC_HASH;
	case 0:
		return NV_GLOBAL_LOCK;
	}

	printf("Invalid nv index %#x\n", index);
	return -1;
}

/*
 * Fill in a TPM_NV_DATA_PUBLIC structure for @index, with only the
 * TPM_NV_PER_PPWRITE permission set, and advance *datap past it.
 */
static void handle_cap_flag_space(u8 **datap, uint index)
{
	struct tpm_nv_data_public pub;

	/* TPM_NV_PER_PPWRITE */
	memset(&pub, '\0', sizeof(pub));
	pub.nv_index = __cpu_to_be32(index);
	pub.pcr_info_read.pcr_selection.size_of_select = __cpu_to_be16(
		sizeof(pub.pcr_info_read.pcr_selection.pcr_select));
	pub.permission.attributes = __cpu_to_be32(1);
	pub.pcr_info_write = pub.pcr_info_read;
	memcpy(*datap, &pub, sizeof(pub));
	*datap += sizeof(pub);
}

static int sandbox_tpm_xfer(struct udevice *dev, const uint8_t *sendbuf,
			    size_t send_size, uint8_t *recvbuf,
			    size_t *recv_len)
{
	struct tpm_state *tpm = dev_get_priv(dev);
	uint32_t code, index, length, type;
	uint8_t *data;
	int seq;

	code = get_unaligned_be32(sendbuf + sizeof(uint16_t) +
				  sizeof(uint32_t));
#ifdef DEBUG
	printf("tpm: %zd bytes, recv_len %zd, cmd = %x\n", send_size,
	       *recv_len, code);
	print_buffer(0, sendbuf, 1, send_size, 0);
#endif
	switch (code) {
	case TPM_CMD_GET_CAPABILITY:
		type = get_unaligned_be32(sendbuf + 14);
		switch (type) {
		case TPM_CAP_FLAG:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get flags index %#02x\n", index);
			*recv_len = 22;
			memset(recvbuf, '\0', *recv_len);
			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			switch (index) {
			case FIRMWARE_NV_INDEX:
				break;
			case KERNEL_NV_INDEX:
				handle_cap_flag_space(&data, index);
				*recv_len = data - recvbuf -
					TPM_RESPONSE_HEADER_LENGTH -
					sizeof(uint32_t);
				break;
			case TPM_CAP_FLAG_PERMANENT: {
				struct tpm_permanent_flags *pflags;

				pflags = (struct tpm_permanent_flags *)data;
				memset(pflags, '\0', sizeof(*pflags));
				put_unaligned_be32(TPM_TAG_PERMANENT_FLAGS,
						   &pflags->tag);
				*recv_len = TPM_HEADER_SIZE + 4 +
						sizeof(*pflags);
				break;
			}
			default:
				printf(" ** Unknown flags index %x\n", index);
				return -ENOSYS;
			}
			put_unaligned_be32(*recv_len,
					   recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		case TPM_CAP_NV_INDEX:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get cap nv index %#02x\n", index);
			put_unaligned_be32(22, recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		default:
			printf(" ** Unknown 0x65 command type %#02x\n",
			       type);
			return -ENOSYS;
		}
		break;
	case TPM_CMD_NV_WRITE_VALUE:
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvwrite index=%#02x, len=%#02x\n", index, length);
		memcpy(&tpm->nvdata[seq].data, sendbuf + 22, length);
		tpm->nvdata[seq].present = true;
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_READ_VALUE: /* nvread */
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvread index=%#02x, len=%#02x, seq=%#02x\n", index,
		       length, seq);
		*recv_len = TPM_RESPONSE_HEADER_LENGTH + sizeof(uint32_t) +
			length;
		memset(recvbuf, '\0', *recv_len);
		put_unaligned_be32(length, recvbuf +
				   TPM_RESPONSE_HEADER_LENGTH);
		if (seq == NV_SEQ_KERNEL) {
			struct rollback_space_kernel rsk;

			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			memset(&rsk, 0, sizeof(struct rollback_space_kernel));
			rsk.struct_version = 2;
			rsk.uid = ROLLBACK_SPACE_KERNEL_UID;
			rsk.crc8 = crc8(0, (unsigned char *)&rsk,
					offsetof(struct rollback_space_kernel,
						 crc8));
			memcpy(data, &rsk, sizeof(rsk));
		} else if (!tpm->nvdata[seq].present) {
			put_unaligned_be32(TPM_BADINDEX, recvbuf +
					   sizeof(uint16_t) +
					   sizeof(uint32_t));
		} else {
			memcpy(recvbuf + TPM_RESPONSE_HEADER_LENGTH +
			       sizeof(uint32_t), &tpm->nvdata[seq].data,
			       length);
		}
		break;
	case TPM_CMD_EXTEND:
		*recv_len = 30;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_DEFINE_SPACE:
	case 0x15: /* pcr read */
	case 0x5d: /* force clear */
	case 0x6f: /* physical enable */
	case 0x72: /* physical set deactivated */
	case 0x99: /* startup */
	case 0x50: /* self test full */
	case 0x4000000a: /* assert physical presence */
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	default:
		printf("Unknown tpm command %02x\n", code);
		return -ENOSYS;
	}
#ifdef DEBUG
	printf("tpm: rx recv_len %zd\n", *recv_len);
	print_buffer(0, recvbuf, 1, *recv_len, 0);
#endif

	return 0;
}

static int sandbox_tpm_get_desc(struct udevice *dev, char *buf, int size)
{
	if (size < 15)
		return -ENOSPC;

	return snprintf(buf, size, "sandbox TPM");
}

static int sandbox_tpm_probe(struct udevice *dev)
{
	struct tpm_state *tpm = dev_get_priv(dev);

	memcpy(tpm, &g_state, sizeof(*tpm));

	return 0;
}

static int sandbox_tpm_open(struct udevice *dev)
{
	return 0;
}

static int sandbox_tpm_close(struct udevice *dev)
{
	return 0;
}

static const struct tpm_ops sandbox_tpm_ops = {
	.open = sandbox_tpm_open,
	.close = sandbox_tpm_close,
	.get_desc = sandbox_tpm_get_desc,
	.xfer = sandbox_tpm_xfer,
};

static const struct udevice_id sandbox_tpm_ids[] = {
	{ .compatible = "google,sandbox-tpm" },
	{ }
};

U_BOOT_DRIVER(sandbox_tpm) = {
	.name = "sandbox_tpm",
	.id = UCLASS_TPM,
	.of_match = sandbox_tpm_ids,
	.ops = &sandbox_tpm_ops,
	.probe = sandbox_tpm_probe,
	.priv_auto_alloc_size = sizeof(struct tpm_state),
};
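
/*
 * Usage sketch (illustrative only, kept in a comment so it is not built):
 * the emulator above is normally reached through the TPM uclass, but the
 * request layout it expects follows directly from sandbox_tpm_xfer():
 * a 10-byte TPMv1 request header, the NV index at offset 10 and the data
 * length at offset 18. A minimal nvread of the 13-byte kernel rollback
 * space could therefore be built roughly like this; the helper names and
 * the tpm_xfer() wrapper are assumptions based on the op implemented above
 * and may differ between U-Boot versions:
 *
 *	u8 cmd[22], resp[64];
 *	size_t resp_len = sizeof(resp);
 *
 *	memset(cmd, 0, sizeof(cmd));
 *	put_unaligned_be16(0x00c1, cmd);		// TPM_TAG_RQU_COMMAND
 *	put_unaligned_be32(sizeof(cmd), cmd + 2);	// total request length
 *	put_unaligned_be32(TPM_CMD_NV_READ_VALUE, cmd + 6);
 *	put_unaligned_be32(KERNEL_NV_INDEX, cmd + 10);	// NV index
 *	put_unaligned_be32(0, cmd + 14);		// offset within the space
 *	put_unaligned_be32(13, cmd + 18);		// bytes to read
 *	tpm_xfer(dev, cmd, sizeof(cmd), resp, &resp_len);
 *
 * For the kernel space this path always returns a freshly built
 * struct rollback_space_kernel with a valid crc8, regardless of what was
 * previously written.
 */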