// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2013 Google, Inc
 */

#include <common.h>
#include <dm.h>
#include <tpm-v1.h>
#include <asm/state.h>
#include <asm/unaligned.h>
#include <linux/crc8.h>

/* TPM NVRAM location indices. */
#define FIRMWARE_NV_INDEX		0x1007
#define KERNEL_NV_INDEX			0x1008
#define BACKUP_NV_INDEX			0x1009
#define FWMP_NV_INDEX			0x100a
#define REC_HASH_NV_INDEX		0x100b
#define REC_HASH_NV_SIZE		VB2_SHA256_DIGEST_SIZE

#define NV_DATA_PUBLIC_PERMISSIONS_OFFSET	60

/* Kernel TPM space - KERNEL_NV_INDEX, locked with physical presence */
#define ROLLBACK_SPACE_KERNEL_VERSION	2
#define ROLLBACK_SPACE_KERNEL_UID	0x4752574C	/* 'GRWL' */

struct rollback_space_kernel {
	/* Struct version, for backwards compatibility */
	uint8_t struct_version;
	/* Unique ID to detect space redefinition */
	uint32_t uid;
	/* Kernel versions */
	uint32_t kernel_versions;
	/* Reserved for future expansion */
	uint8_t reserved[3];
	/* Checksum (v2 and later only) */
	uint8_t crc8;
} __packed;

/*
 * These numbers derive from adding the sizes of command fields as shown in
 * the TPM commands manual.
 */
#define TPM_REQUEST_HEADER_LENGTH	10
#define TPM_RESPONSE_HEADER_LENGTH	10

/* These are the different non-volatile spaces that we emulate */
enum {
	NV_GLOBAL_LOCK,
	NV_SEQ_FIRMWARE,
	NV_SEQ_KERNEL,
	NV_SEQ_BACKUP,
	NV_SEQ_FWMP,
	NV_SEQ_REC_HASH,

	NV_SEQ_COUNT,
};

/* Size of each non-volatile space */
#define NV_DATA_SIZE		0x20

struct nvdata_state {
	bool present;
	u8 data[NV_DATA_SIZE];
};

/*
 * Information about our TPM emulation. This is preserved in the sandbox
 * state file if enabled.
 */
static struct tpm_state {
	bool valid;
	struct nvdata_state nvdata[NV_SEQ_COUNT];
} g_state;

/**
 * sandbox_tpm_read_state() - read the sandbox TPM state from the state file
 *
 * If data is available, then blob and node will provide access to it. If
 * not, this function sets up an empty TPM.
 *
 * @blob: Pointer to device tree blob, or NULL if no data to read
 * @node: Node offset to read from
 */
static int sandbox_tpm_read_state(const void *blob, int node)
{
	const char *prop;
	int len;
	int i;

	if (!blob)
		return 0;

	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		sprintf(prop_name, "nvdata%d", i);
		prop = fdt_getprop(blob, node, prop_name, &len);
		if (prop && len == NV_DATA_SIZE) {
			memcpy(g_state.nvdata[i].data, prop, NV_DATA_SIZE);
			g_state.nvdata[i].present = true;
		}
	}
	g_state.valid = true;

	return 0;
}
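/*
 * State-file layout used by sandbox_tpm_read_state() above and
 * sandbox_tpm_write_state() below: the node matched by "google,sandbox-tpm"
 * carries one property per emulated space, named "nvdata0" through
 * "nvdata5" (one for each NV_SEQ_... entry), each exactly NV_DATA_SIZE
 * bytes long. A space with no property is treated as never having been
 * written.
 */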
/**
 * sandbox_tpm_write_state() - Write out our state to the state file
 *
 * The caller will ensure that there is a node ready for the state. The node
 * may already contain the old state, in which case it is overwritten.
 *
 * @blob: Device tree blob holding state
 * @node: Node to write our state into
 */
static int sandbox_tpm_write_state(void *blob, int node)
{
	int i;

	/*
	 * We are guaranteed enough space to write basic properties.
	 * We could use fdt_add_subnode() to put each set of data in its
	 * own node - perhaps useful if we add access information to each.
	 */
	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		if (g_state.nvdata[i].present) {
			sprintf(prop_name, "nvdata%d", i);
			fdt_setprop(blob, node, prop_name,
				    g_state.nvdata[i].data, NV_DATA_SIZE);
		}
	}

	return 0;
}

SANDBOX_STATE_IO(sandbox_tpm, "google,sandbox-tpm", sandbox_tpm_read_state,
		 sandbox_tpm_write_state);

static int index_to_seq(uint32_t index)
{
	switch (index) {
	case FIRMWARE_NV_INDEX:
		return NV_SEQ_FIRMWARE;
	case KERNEL_NV_INDEX:
		return NV_SEQ_KERNEL;
	case BACKUP_NV_INDEX:
		return NV_SEQ_BACKUP;
	case FWMP_NV_INDEX:
		return NV_SEQ_FWMP;
	case REC_HASH_NV_INDEX:
		return NV_SEQ_REC_HASH;
	case 0:
		return NV_GLOBAL_LOCK;
	}

	printf("Invalid nv index %#x\n", index);
	return -1;
}

static void handle_cap_flag_space(u8 **datap, uint index)
{
	struct tpm_nv_data_public pub;

	/* TPM_NV_PER_PPWRITE */
	memset(&pub, '\0', sizeof(pub));
	pub.nv_index = __cpu_to_be32(index);
	pub.pcr_info_read.pcr_selection.size_of_select = __cpu_to_be16(
		sizeof(pub.pcr_info_read.pcr_selection.pcr_select));
	pub.permission.attributes = __cpu_to_be32(1);
	pub.pcr_info_write = pub.pcr_info_read;
	memcpy(*datap, &pub, sizeof(pub));
	*datap += sizeof(pub);
}

static int sandbox_tpm_xfer(struct udevice *dev, const uint8_t *sendbuf,
			    size_t send_size, uint8_t *recvbuf,
			    size_t *recv_len)
{
	struct tpm_state *tpm = dev_get_priv(dev);
	uint32_t code, index, length, type;
	uint8_t *data;
	int seq;

	code = get_unaligned_be32(sendbuf + sizeof(uint16_t) +
				  sizeof(uint32_t));
	printf("tpm: %zd bytes, recv_len %zd, cmd = %x\n", send_size,
	       *recv_len, code);
	print_buffer(0, sendbuf, 1, send_size, 0);
	switch (code) {
	case TPM_CMD_GET_CAPABILITY:
		type = get_unaligned_be32(sendbuf + 14);
		switch (type) {
		case TPM_CAP_FLAG:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get flags index %#02x\n", index);
			*recv_len = 22;
			memset(recvbuf, '\0', *recv_len);
			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			switch (index) {
			case FIRMWARE_NV_INDEX:
				break;
			case KERNEL_NV_INDEX:
				handle_cap_flag_space(&data, index);
				*recv_len = data - recvbuf -
					TPM_RESPONSE_HEADER_LENGTH -
					sizeof(uint32_t);
				break;
			case TPM_CAP_FLAG_PERMANENT: {
				struct tpm_permanent_flags *pflags;

				pflags = (struct tpm_permanent_flags *)data;
				memset(pflags, '\0', sizeof(*pflags));
				put_unaligned_be32(TPM_TAG_PERMANENT_FLAGS,
						   &pflags->tag);
				*recv_len = TPM_HEADER_SIZE + 4 +
					sizeof(*pflags);
				break;
			}
			default:
				printf(" ** Unknown flags index %x\n", index);
				return -ENOSYS;
			}
			put_unaligned_be32(*recv_len,
					   recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		case TPM_CAP_NV_INDEX:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get cap nv index %#02x\n", index);
			put_unaligned_be32(22, recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		default:
			printf(" ** Unknown 0x65 command type %#02x\n",
			       type);
			return -ENOSYS;
		}
		break;
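	/*
	 * Note on the fixed offsets used by the NV cases below: a TPM v1.2
	 * request starts with a 10-byte header (2-byte tag, 4-byte
	 * paramSize, 4-byte ordinal - see TPM_REQUEST_HEADER_LENGTH), so
	 * for the NV read/write commands the nvIndex lives at offset 10,
	 * the (ignored) offset field at 14, the dataSize at 18 and, for
	 * nvwrite, the payload at 22.
	 */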
	case TPM_CMD_NV_WRITE_VALUE:
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvwrite index=%#02x, len=%#02x\n", index, length);
		memcpy(&tpm->nvdata[seq].data, sendbuf + 22, length);
		tpm->nvdata[seq].present = true;
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_READ_VALUE: /* nvread */
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvread index=%#02x, len=%#02x, seq=%#02x\n", index,
		       length, seq);
		*recv_len = TPM_RESPONSE_HEADER_LENGTH + sizeof(uint32_t) +
			length;
		memset(recvbuf, '\0', *recv_len);
		put_unaligned_be32(length, recvbuf +
				   TPM_RESPONSE_HEADER_LENGTH);
		if (seq == NV_SEQ_KERNEL) {
			struct rollback_space_kernel rsk;

			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			memset(&rsk, 0, sizeof(struct rollback_space_kernel));
			rsk.struct_version = ROLLBACK_SPACE_KERNEL_VERSION;
			rsk.uid = ROLLBACK_SPACE_KERNEL_UID;
			rsk.crc8 = crc8(0, (unsigned char *)&rsk,
					offsetof(struct rollback_space_kernel,
						 crc8));
			memcpy(data, &rsk, sizeof(rsk));
		} else if (!tpm->nvdata[seq].present) {
			put_unaligned_be32(TPM_BADINDEX, recvbuf +
					   sizeof(uint16_t) +
					   sizeof(uint32_t));
		} else {
			memcpy(recvbuf + TPM_RESPONSE_HEADER_LENGTH +
			       sizeof(uint32_t), &tpm->nvdata[seq].data,
			       length);
		}
		break;
	case TPM_CMD_EXTEND:
		*recv_len = 30;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_DEFINE_SPACE:
	case 0x15: /* pcr read */
	case 0x5d: /* force clear */
	case 0x6f: /* physical enable */
	case 0x72: /* physical set deactivated */
	case 0x99: /* startup */
	case 0x50: /* self test full */
	case 0x4000000a: /* assert physical presence */
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	default:
		printf("Unknown tpm command %02x\n", code);
		return -ENOSYS;
	}

	return 0;
}

static int sandbox_tpm_get_desc(struct udevice *dev, char *buf, int size)
{
	if (size < 15)
		return -ENOSPC;

	return snprintf(buf, size, "sandbox TPM");
}

static int sandbox_tpm_probe(struct udevice *dev)
{
	struct tpm_state *tpm = dev_get_priv(dev);

	memcpy(tpm, &g_state, sizeof(*tpm));

	return 0;
}

static int sandbox_tpm_open(struct udevice *dev)
{
	return 0;
}

static int sandbox_tpm_close(struct udevice *dev)
{
	return 0;
}

static const struct tpm_ops sandbox_tpm_ops = {
	.open = sandbox_tpm_open,
	.close = sandbox_tpm_close,
	.get_desc = sandbox_tpm_get_desc,
	.xfer = sandbox_tpm_xfer,
};

static const struct udevice_id sandbox_tpm_ids[] = {
	{ .compatible = "google,sandbox-tpm" },
	{ }
};

U_BOOT_DRIVER(sandbox_tpm) = {
	.name = "sandbox_tpm",
	.id = UCLASS_TPM,
	.of_match = sandbox_tpm_ids,
	.ops = &sandbox_tpm_ops,
	.probe = sandbox_tpm_probe,
	.priv_auto_alloc_size = sizeof(struct tpm_state),
};
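/*
 * Illustrative sketch (comment only, not compiled): one way a test could
 * poke this emulator directly through its xfer op, assuming a udevice 'dev'
 * already bound to this driver. The 0xc1 tag is TPM_TAG_RQU_COMMAND from
 * the TPM v1.2 spec; the emulator itself only inspects the ordinal and the
 * NV fields. Real callers normally go through the TPM uclass API rather
 * than building raw buffers like this.
 *
 *	u8 cmd[32], resp[64];
 *	size_t resp_len = sizeof(resp);
 *	const u8 value[4] = { 1, 2, 3, 4 };
 *
 *	memset(cmd, 0, sizeof(cmd));
 *	put_unaligned_be16(0xc1, cmd);                        tag       @ 0
 *	put_unaligned_be32(22 + sizeof(value), cmd + 2);      paramSize @ 2
 *	put_unaligned_be32(TPM_CMD_NV_WRITE_VALUE, cmd + 6);  ordinal   @ 6
 *	put_unaligned_be32(FIRMWARE_NV_INDEX, cmd + 10);      nvIndex   @ 10
 *	put_unaligned_be32(0, cmd + 14);                      offset    @ 14
 *	put_unaligned_be32(sizeof(value), cmd + 18);          dataSize  @ 18
 *	memcpy(cmd + 22, value, sizeof(value));               payload   @ 22
 *	sandbox_tpm_xfer(dev, cmd, 22 + sizeof(value), resp, &resp_len);
 *
 * A matching TPM_CMD_NV_READ_VALUE request (same offsets, no payload) then
 * returns the stored bytes after the 10-byte response header and 4-byte
 * length word, as handled in sandbox_tpm_xfer() above.
 */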