// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Infineon Technologies AG
 * Copyright (C) 2016 STMicroelectronics SAS
 *
 * Authors:
 * Peter Huewe <peter.huewe@infineon.com>
 * Christophe Ricard <christophe-h.ricard@st.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
 * SPI access_.
 *
 * It is based on the original tpm_tis device driver from Leendert van
 * Dorn and Kyleen Hall and Jarko Sakkinnen.
 */

#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/tpm.h>

#include "tpm.h"
#include "tpm_tis_core.h"
#include "tpm_tis_spi.h"

#define MAX_SPI_FRAMESIZE 64
#define SPI_HDRSIZE 4

/*
 * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
 * keep trying to read from the device until MISO goes high, indicating that
 * the wait state has ended.
 *
 * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
 */
static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
				    struct spi_transfer *spi_xfer)
{
	struct spi_message m;
	int ret, i;

	if ((phy->iobuf[3] & 0x01) == 0) {
		// handle SPI wait states
		for (i = 0; i < TPM_RETRY; i++) {
			spi_xfer->len = 1;
			spi_message_init(&m);
			spi_message_add_tail(spi_xfer, &m);
			ret = spi_sync_locked(phy->spi_device, &m);
			if (ret < 0)
				return ret;
			if (phy->iobuf[0] & 0x01)
				break;
		}

		if (i == TPM_RETRY)
			return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Half-duplex controllers with support for TPM wait state detection, such as
 * Tegra QSPI, need CMD, ADDR and DATA sent in a single message to manage HW
 * flow control. Each phase is sent as a separate transfer so the controller
 * can identify the phase.
 */
static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_transfer spi_xfer[3];
	struct spi_message m;
	u8 transfer_len;
	int ret = 0;	/* initialized so a zero-length request cannot return garbage */

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		spi_message_init(&m);
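		/*
		 * Build the 4-byte TPM SPI transaction header: bit 7 of the
		 * first byte selects read (1) or write (0) and its low bits
		 * carry the transfer size minus one; the remaining three
		 * bytes are the 24-bit register address (0xD4xxxx), MSB
		 * first.
		 */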
		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));

		spi_xfer[0].tx_buf = phy->iobuf;
		spi_xfer[0].len = 1;
		spi_message_add_tail(&spi_xfer[0], &m);

		spi_xfer[1].tx_buf = phy->iobuf + 1;
		spi_xfer[1].len = 3;
		spi_message_add_tail(&spi_xfer[1], &m);

		if (out) {
			spi_xfer[2].tx_buf = &phy->iobuf[4];
			spi_xfer[2].rx_buf = NULL;
			memcpy(&phy->iobuf[4], out, transfer_len);
			out += transfer_len;
		}

		if (in) {
			spi_xfer[2].tx_buf = NULL;
			spi_xfer[2].rx_buf = &phy->iobuf[4];
		}

		spi_xfer[2].len = transfer_len;
		spi_message_add_tail(&spi_xfer[2], &m);

		reinit_completion(&phy->ready);

		ret = spi_sync(phy->spi_device, &m);
		if (ret < 0)
			return ret;

		if (in) {
			memcpy(in, &phy->iobuf[4], transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

	return ret;
}

static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
				     u16 len, u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	int ret = 0;
	struct spi_message m;
	struct spi_transfer spi_xfer;
	u8 transfer_len;

	spi_bus_lock(phy->spi_device->master);

	while (len) {
		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
		phy->iobuf[1] = 0xd4;
		phy->iobuf[2] = addr >> 8;
		phy->iobuf[3] = addr;

		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_xfer.tx_buf = phy->iobuf;
		spi_xfer.rx_buf = phy->iobuf;
		spi_xfer.len = 4;
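		/*
		 * Keep chip select asserted after the header transfer so the
		 * flow-control polling and the data phase below stay within
		 * the same TPM SPI transaction.
		 */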
		spi_xfer.cs_change = 1;

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		/* Flow control transfers are receive only */
		spi_xfer.tx_buf = NULL;
		ret = phy->flow_control(phy, &spi_xfer);
		if (ret < 0)
			goto exit;

		spi_xfer.cs_change = 0;
		spi_xfer.len = transfer_len;
		spi_xfer.delay.value = 5;
		spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;

		if (out) {
			spi_xfer.tx_buf = phy->iobuf;
			spi_xfer.rx_buf = NULL;
			memcpy(phy->iobuf, out, transfer_len);
			out += transfer_len;
		}

		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		reinit_completion(&phy->ready);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			goto exit;

		if (in) {
			memcpy(in, phy->iobuf, transfer_len);
			in += transfer_len;
		}

		len -= transfer_len;
	}

exit:
	if (ret < 0) {
		/* Deactivate chip select */
		memset(&spi_xfer, 0, sizeof(spi_xfer));
		spi_message_init(&m);
		spi_message_add_tail(&spi_xfer, &m);
		spi_sync_locked(phy->spi_device, &m);
	}

	spi_bus_unlock(phy->spi_device->master);
	return ret;
}

int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
			 u8 *in, const u8 *out)
{
	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
	struct spi_controller *ctlr = phy->spi_device->controller;

	/*
	 * TPM flow control over SPI requires full-duplex support. For a
	 * half-duplex controller, hand over the entire message so the
	 * controller can do the wait-state polling itself; the TPM HW flow
	 * control flag is set for this case at probe time.
	 */
	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
		return tpm_tis_spi_transfer_half(data, addr, len, in, out);
	else
		return tpm_tis_spi_transfer_full(data, addr, len, in, out);
}

static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
				  u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}

static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
				   u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}

int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
		     int irq, const struct tpm_tis_phy_ops *phy_ops)
{
	phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
	if (!phy->iobuf)
		return -ENOMEM;

	phy->spi_device = spi;

	return tpm_tis_core_init(&spi->dev, &phy->priv, irq, phy_ops, NULL);
}

static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
	.read_bytes = tpm_tis_spi_read_bytes,
	.write_bytes = tpm_tis_spi_write_bytes,
};

static int tpm_tis_spi_probe(struct spi_device *dev)
{
	struct tpm_tis_spi_phy *phy;
	int irq;

	phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
			   GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->flow_control = tpm_tis_spi_flow_control;

	if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
		dev->mode |= SPI_TPM_HW_FLOW;

	/* If the SPI device has an IRQ then use that */
	if (dev->irq > 0)
		irq = dev->irq;
	else
		irq = -1;

	init_completion(&phy->ready);
	return tpm_tis_spi_init(dev, phy, irq, &tpm_spi_phy_ops);
}

typedef int (*tpm_tis_spi_probe_func)(struct spi_device *);

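/*
 * Select the chip-specific probe routine: prefer the OF match data, then the
 * driver_data of the matching SPI device ID, and fall back to the generic
 * TIS probe only when there is no ID table match at all.
 */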
static int tpm_tis_spi_driver_probe(struct spi_device *spi)
{
	const struct spi_device_id *spi_dev_id = spi_get_device_id(spi);
	tpm_tis_spi_probe_func probe_func;

	probe_func = of_device_get_match_data(&spi->dev);
	if (!probe_func) {
		if (spi_dev_id) {
			probe_func = (tpm_tis_spi_probe_func)spi_dev_id->driver_data;
			if (!probe_func)
				return -ENODEV;
		} else
			probe_func = tpm_tis_spi_probe;
	}

	return probe_func(spi);
}

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);

static void tpm_tis_spi_remove(struct spi_device *dev)
{
	struct tpm_chip *chip = spi_get_drvdata(dev);

	tpm_chip_unregister(chip);
	tpm_tis_remove(chip);
}

static const struct spi_device_id tpm_tis_spi_id[] = {
	{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
	{ "slb9670", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
	{ "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
	{ "cr50", (unsigned long)cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);

static const struct of_device_id of_tis_spi_match[] __maybe_unused = {
	{ .compatible = "st,st33htpm-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "infineon,slb9670", .data = tpm_tis_spi_probe },
	{ .compatible = "tcg,tpm_tis-spi", .data = tpm_tis_spi_probe },
	{ .compatible = "google,cr50", .data = cr50_spi_probe },
	{}
};
MODULE_DEVICE_TABLE(of, of_tis_spi_match);

static const struct acpi_device_id acpi_tis_spi_match[] __maybe_unused = {
	{"SMO0768", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);

static struct spi_driver tpm_tis_spi_driver = {
	.driver = {
		.name = "tpm_tis_spi",
		.pm = &tpm_tis_pm,
		.of_match_table = of_match_ptr(of_tis_spi_match),
		.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = tpm_tis_spi_driver_probe,
	.remove = tpm_tis_spi_remove,
	.id_table = tpm_tis_spi_id,
};
module_spi_driver(tpm_tis_spi_driver);

MODULE_DESCRIPTION("TPM Driver for native SPI access");
MODULE_LICENSE("GPL");