// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) IBM Corporation 2020

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/fsi.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

#define FSI_ENGID_SPI			0x23
#define FSI_MBOX_ROOT_CTRL_8		0x2860
#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX	0xf0000000

#define FSI2SPI_DATA0			0x00
#define FSI2SPI_DATA1			0x04
#define FSI2SPI_CMD			0x08
#define FSI2SPI_CMD_WRITE		BIT(31)
#define FSI2SPI_RESET			0x18
#define FSI2SPI_STATUS			0x1c
#define FSI2SPI_STATUS_ANY_ERROR	BIT(31)
#define FSI2SPI_IRQ			0x20

#define SPI_FSI_BASE			0x70000
#define SPI_FSI_INIT_TIMEOUT_MS		1000
#define SPI_FSI_MAX_XFR_SIZE		2048
#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED	32

#define SPI_FSI_ERROR			0x0
#define SPI_FSI_COUNTER_CFG		0x1
#define SPI_FSI_COUNTER_CFG_LOOPS(x)	(((u64)(x) & 0xffULL) << 32)
#define SPI_FSI_COUNTER_CFG_N2_RX	BIT_ULL(8)
#define SPI_FSI_COUNTER_CFG_N2_TX	BIT_ULL(9)
#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT	BIT_ULL(10)
#define SPI_FSI_COUNTER_CFG_N2_RELOAD	BIT_ULL(11)
#define SPI_FSI_CFG1			0x2
#define SPI_FSI_CLOCK_CFG		0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE	BIT_ULL(32)
#define SPI_FSI_CLOCK_CFG_ECC_DISABLE	(BIT_ULL(35) | BIT_ULL(33))
#define SPI_FSI_CLOCK_CFG_RESET1	(BIT_ULL(36) | BIT_ULL(38))
#define SPI_FSI_CLOCK_CFG_RESET2	(BIT_ULL(37) | BIT_ULL(39))
#define SPI_FSI_CLOCK_CFG_MODE		(BIT_ULL(41) | BIT_ULL(42))
#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL	GENMASK_ULL(51, 44)
#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL	BIT_ULL(51)
#define SPI_FSI_CLOCK_CFG_SCK_DIV	GENMASK_ULL(63, 52)
#define SPI_FSI_MMAP			0x4
#define SPI_FSI_DATA_TX			0x5
#define SPI_FSI_DATA_RX			0x6
#define SPI_FSI_SEQUENCE		0x7
#define SPI_FSI_SEQUENCE_STOP		0x00
#define SPI_FSI_SEQUENCE_SEL_SLAVE(x)	(0x10 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_OUT(x)	(0x30 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_SHIFT_IN(x)	(0x40 | ((x) & 0xf))
#define SPI_FSI_SEQUENCE_COPY_DATA_TX	0xc0
#define SPI_FSI_SEQUENCE_BRANCH(x)	(0xe0 | ((x) & 0xf))
#define SPI_FSI_STATUS			0x8
#define SPI_FSI_STATUS_ERROR		\
	(GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
#define SPI_FSI_STATUS_SEQ_STATE	GENMASK_ULL(55, 48)
#define SPI_FSI_STATUS_SEQ_STATE_IDLE	BIT_ULL(48)
#define SPI_FSI_STATUS_TDR_UNDERRUN	BIT_ULL(57)
#define SPI_FSI_STATUS_TDR_OVERRUN	BIT_ULL(58)
#define SPI_FSI_STATUS_TDR_FULL		BIT_ULL(59)
#define SPI_FSI_STATUS_RDR_UNDERRUN	BIT_ULL(61)
#define SPI_FSI_STATUS_RDR_OVERRUN	BIT_ULL(62)
#define SPI_FSI_STATUS_RDR_FULL		BIT_ULL(63)
#define SPI_FSI_STATUS_ANY_ERROR	\
	(SPI_FSI_STATUS_ERROR |		\
	 SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN |	\
	 SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL		0x9

struct fsi_spi {
	struct device *dev;	/* SPI controller device */
	struct fsi_device *fsi;	/* FSI2SPI CFAM engine device */
	u32 base;
	size_t max_xfr_size;
	bool restricted;
};

struct fsi_spi_sequence {
	int bit;
	u64 data;
};

static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
{
	int rc;
	u32 root_ctrl_8;
	__be32 root_ctrl_8_be;

	rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
			    sizeof(root_ctrl_8_be));
	if (rc)
		return rc;

	root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
	dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
	if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
	    FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
		return 0;

	return -ENOLINK;
}

static int fsi_spi_check_status(struct fsi_spi *ctx)
{
	int rc;
	u32 sts;
	__be32 sts_be;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
			     sizeof(sts_be));
	if (rc)
		return rc;

	sts = be32_to_cpu(sts_be);
	if (sts & FSI2SPI_STATUS_ANY_ERROR) {
		dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
		return -EIO;
	}

	return 0;
}

static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
{
	int rc;
	__be32 cmd_be;
	__be32 data_be;
	u32 cmd = offset + ctx->base;

	*value = 0ULL;

	if (cmd & FSI2SPI_CMD_WRITE)
		return -EINVAL;

	cmd_be = cpu_to_be32(cmd);
	rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
	if (rc)
		return rc;

	rc = fsi_spi_check_status(ctx);
	if (rc)
		return rc;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
			     sizeof(data_be));
	if (rc)
		return rc;

	*value |= (u64)be32_to_cpu(data_be) << 32;

	rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
			     sizeof(data_be));
	if (rc)
		return rc;

	*value |= (u64)be32_to_cpu(data_be);
	dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);

	return 0;
}

static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
{
	int rc;
	__be32 cmd_be;
	__be32 data_be;
	u32 cmd = offset + ctx->base;

	if (cmd & FSI2SPI_CMD_WRITE)
		return -EINVAL;

	dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);

	data_be = cpu_to_be32(upper_32_bits(value));
	rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
			      sizeof(data_be));
	if (rc)
		return rc;

	data_be = cpu_to_be32(lower_32_bits(value));
	rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
			      sizeof(data_be));
	if (rc)
		return rc;

	cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
	rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
	if (rc)
		return rc;

	return fsi_spi_check_status(ctx);
}
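
/*
 * For illustration: data moves through the 64-bit TX/RX words
 * most-significant-byte first. fsi_spi_data_in() below unpacks the MSB of
 * the receive word into rx[0], and fsi_spi_data_out() mirrors that layout
 * on a little-endian host, where tx[0] lands in the highest-addressed (most
 * significant) byte of the word; e.g. tx = { 0xaa, 0xbb } with len 2 packs
 * out as 0xaabb000000000000.
 */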

static int fsi_spi_data_in(u64 in, u8 *rx, int len)
{
	int i;
	int num_bytes = min(len, 8);

	for (i = 0; i < num_bytes; ++i)
		rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));

	return num_bytes;
}

static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
{
	int i;
	int num_bytes = min(len, 8);
	u8 *out_bytes = (u8 *)out;

	/* Unused bytes of the tx data should be 0. */
	*out = 0ULL;

	for (i = 0; i < num_bytes; ++i)
		out_bytes[8 - (i + 1)] = tx[i];

	return num_bytes;
}

static int fsi_spi_reset(struct fsi_spi *ctx)
{
	int rc;

	dev_dbg(ctx->dev, "Resetting SPI controller.\n");

	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
			       SPI_FSI_CLOCK_CFG_RESET1);
	if (rc)
		return rc;

	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
			       SPI_FSI_CLOCK_CFG_RESET2);
	if (rc)
		return rc;

	return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
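
/*
 * The sequencer program is assembled into the 64-bit sequence register one
 * opcode per byte, most significant byte first. For illustration, a single
 * un-looped 4-byte transmit on chip select 0 is built from the opcode macros
 * above as 0x11 (select slave 1), 0x34 (shift out 4 bytes) and 0x10 (select
 * slave 0, ending the transfer), i.e. a sequence register value of
 * 0x1134100000000000.
 */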

static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
	/*
	 * Add the next byte of instruction to the 8-byte sequence register.
	 * Then decrement the counter so that the next instruction will go in
	 * the right place. Return the index of the slot we just filled in the
	 * sequence register.
	 */
	seq->data |= (u64)val << seq->bit;
	seq->bit -= 8;

	return ((64 - seq->bit) / 8) - 2;
}

static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
{
	seq->bit = 56;
	seq->data = 0ULL;
}

static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
				     struct fsi_spi_sequence *seq,
				     struct spi_transfer *transfer)
{
	bool docfg = false;
	int loops;
	int idx;
	int rc;
	u8 val = 0;
	u8 len = min(transfer->len, 8U);
	u8 rem = transfer->len % len;
	u64 cfg = 0ULL;

	loops = transfer->len / len;

	if (transfer->tx_buf) {
		val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
		idx = fsi_spi_sequence_add(seq, val);

		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
	} else if (transfer->rx_buf) {
		val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
		idx = fsi_spi_sequence_add(seq, val);

		if (rem)
			rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
	} else {
		return -EINVAL;
	}

	if (ctx->restricted) {
		const int eidx = rem ? 5 : 6;

		while (loops > 1 && idx <= eidx) {
			idx = fsi_spi_sequence_add(seq, val);
			loops--;
			docfg = true;
		}

		if (loops > 1) {
			dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
			return -EINVAL;
		}
	}

	if (loops > 1) {
		fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
		docfg = true;
	}

	if (docfg) {
		cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
		if (transfer->rx_buf)
			cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
				SPI_FSI_COUNTER_CFG_N2_TX |
				SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
				SPI_FSI_COUNTER_CFG_N2_RELOAD;

		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
		if (rc)
			return rc;
	} else {
		fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
	}

	if (rem)
		fsi_spi_sequence_add(seq, rem);

	return 0;
}

static int fsi_spi_transfer_data(struct fsi_spi *ctx,
				 struct spi_transfer *transfer)
{
	int rc = 0;
	u64 status = 0ULL;
	u64 cfg = 0ULL;

	if (transfer->tx_buf) {
		int nb;
		int sent = 0;
		u64 out = 0ULL;
		const u8 *tx = transfer->tx_buf;

		while (transfer->len > sent) {
			nb = fsi_spi_data_out(&out, &tx[sent],
					      (int)transfer->len - sent);

			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
			if (rc)
				return rc;

			do {
				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
						      &status);
				if (rc)
					return rc;

				if (status & SPI_FSI_STATUS_ANY_ERROR) {
					rc = fsi_spi_reset(ctx);
					if (rc)
						return rc;

					return -EREMOTEIO;
				}
			} while (status & SPI_FSI_STATUS_TDR_FULL);

			sent += nb;
		}
	} else if (transfer->rx_buf) {
		int recv = 0;
		u64 in = 0ULL;
		u8 *rx = transfer->rx_buf;

		rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
		if (rc)
			return rc;

		if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
			if (rc)
				return rc;
		}

		while (transfer->len > recv) {
			do {
				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
						      &status);
				if (rc)
					return rc;

				if (status & SPI_FSI_STATUS_ANY_ERROR) {
					rc = fsi_spi_reset(ctx);
					if (rc)
						return rc;

					return -EREMOTEIO;
				}
			} while (!(status & SPI_FSI_STATUS_RDR_FULL));

			rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
			if (rc)
				return rc;

			recv += fsi_spi_data_in(in, &rx[recv],
						(int)transfer->len - recv);
		}
	}

	return 0;
}

static int fsi_spi_transfer_init(struct fsi_spi *ctx)
{
	int rc;
	bool reset = false;
	unsigned long end;
	u64 seq_state;
	u64 clock_cfg = 0ULL;
	u64 status = 0ULL;
	u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
		SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);

	end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
	do {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;

		rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
		if (rc)
			return rc;

		seq_state = status & SPI_FSI_STATUS_SEQ_STATE;

		if (status & (SPI_FSI_STATUS_ANY_ERROR |
			      SPI_FSI_STATUS_TDR_FULL |
			      SPI_FSI_STATUS_RDR_FULL)) {
			if (reset)
				return -EIO;

			rc = fsi_spi_reset(ctx);
			if (rc)
				return rc;

			reset = true;
			continue;
		}
	} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));

	rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
	if (rc)
		return rc;

	if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
			  SPI_FSI_CLOCK_CFG_ECC_DISABLE |
			  SPI_FSI_CLOCK_CFG_MODE |
			  SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
			  SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
		rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
				       wanted_clock_cfg);

	return rc;
}
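
/*
 * The controller is registered half-duplex and the sequencer must shift out
 * (tx) before it can shift in (rx), so each message is consumed as a tx
 * transfer optionally paired with the following rx transfer; both are
 * programmed into a single sequence before any data is moved.
 */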

static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
					struct spi_message *mesg)
{
	int rc;
	u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
	struct spi_transfer *transfer;
	struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);

	rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
	if (rc)
		return rc;

	list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
		struct fsi_spi_sequence seq;
		struct spi_transfer *next = NULL;

		/* Sequencer must do shift out (tx) first. */
		if (!transfer->tx_buf ||
		    transfer->len > (ctx->max_xfr_size + 8)) {
			rc = -EINVAL;
			goto error;
		}

		dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);

		rc = fsi_spi_transfer_init(ctx);
		if (rc < 0)
			goto error;

		fsi_spi_sequence_init(&seq);
		fsi_spi_sequence_add(&seq, seq_slave);

		rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
		if (rc)
			goto error;

		if (!list_is_last(&transfer->transfer_list,
				  &mesg->transfers)) {
			next = list_next_entry(transfer, transfer_list);

			/* Sequencer can only do shift in (rx) after tx. */
			if (next->rx_buf) {
				if (next->len > ctx->max_xfr_size) {
					rc = -EINVAL;
					goto error;
				}

				dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
					next->len);

				rc = fsi_spi_sequence_transfer(ctx, &seq,
							       next);
				if (rc)
					goto error;
			} else {
				next = NULL;
			}
		}

		fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));

		rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
		if (rc)
			goto error;

		rc = fsi_spi_transfer_data(ctx, transfer);
		if (rc)
			goto error;

		if (next) {
			rc = fsi_spi_transfer_data(ctx, next);
			if (rc)
				goto error;

			transfer = next;
		}
	}

error:
	mesg->status = rc;
	spi_finalize_current_message(ctlr);

	return rc;
}

static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
	struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);

	return ctx->max_xfr_size;
}

static int fsi_spi_probe(struct device *dev)
{
	int rc;
	struct device_node *np;
	int num_controllers_registered = 0;
	struct fsi_device *fsi = to_fsi_dev(dev);

	rc = fsi_spi_check_mux(fsi, dev);
	if (rc)
		return -ENODEV;

	for_each_available_child_of_node(dev->of_node, np) {
		u32 base;
		struct fsi_spi *ctx;
		struct spi_controller *ctlr;

		if (of_property_read_u32(np, "reg", &base))
			continue;

		ctlr = spi_alloc_master(dev, sizeof(*ctx));
		if (!ctlr)
			break;

		ctlr->dev.of_node = np;
		ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
		ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
		ctlr->max_transfer_size = fsi_spi_max_transfer_size;
		ctlr->transfer_one_message = fsi_spi_transfer_one_message;

		ctx = spi_controller_get_devdata(ctlr);
		ctx->dev = &ctlr->dev;
		ctx->fsi = fsi;
		ctx->base = base + SPI_FSI_BASE;

		if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
			ctx->restricted = true;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
		} else {
			ctx->restricted = false;
			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
		}

		rc = devm_spi_register_controller(dev, ctlr);
		if (rc)
			spi_controller_put(ctlr);
		else
			num_controllers_registered++;
	}

	if (!num_controllers_registered)
		return -ENODEV;

	return 0;
}

static const struct fsi_device_id fsi_spi_ids[] = {
	{ FSI_ENGID_SPI, FSI_VERSION_ANY },
	{ }
};
MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);

static struct fsi_driver fsi_spi_driver = {
	.id_table = fsi_spi_ids,
	.drv = {
		.name = "spi-fsi",
		.bus = &fsi_bus_type,
		.probe = fsi_spi_probe,
	},
};
module_fsi_driver(fsi_spi_driver);

MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
MODULE_DESCRIPTION("FSI attached SPI controller");
MODULE_LICENSE("GPL");