// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"


/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS			1	/* max: 8 */
#define XFER_RING_ENTRIES		16	/* max: 255 */

#define IBI_RINGS			1	/* max: 8 */
#define IBI_STATUS_RING_ENTRIES		32	/* max: 255 */
#define IBI_CHUNK_CACHELINES		1	/* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE		128	/* max: 1023 */

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)		readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)	writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL			0x00
#define PREAMBLE_SIZE			GENMASK(31, 24)	/* Preamble Section Size */
#define HEADER_SIZE			GENMASK(23, 16)	/* Ring Header Size */
#define MAX_HEADER_COUNT_CAP		GENMASK(7, 4)	/* HC Max Header Count */
#define MAX_HEADER_COUNT		GENMASK(3, 0)	/* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)		(0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)		readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)	writel(v, rh->regs + (RH_##r))

#define RH_CR_SETUP			0x00	/* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE		GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE		GENMASK(23, 16)
#define CR_RING_SIZE			GENMASK(8, 0)

#define RH_IBI_SETUP			0x04
#define IBI_STATUS_STRUCT_SIZE		GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE		GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE		GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT		GENMASK(9, 0)

#define RH_CHUNK_CONTROL		0x08

#define RH_INTR_STATUS			0x10
#define RH_INTR_STATUS_ENABLE		0x14
#define RH_INTR_SIGNAL_ENABLE		0x18
#define RH_INTR_FORCE			0x1c
#define INTR_IBI_READY			BIT(12)
#define INTR_TRANSFER_COMPLETION	BIT(11)
#define INTR_RING_OP			BIT(10)
#define INTR_TRANSFER_ERR		BIT(9)
#define INTR_WARN_INS_STOP_MODE		BIT(7)
#define INTR_IBI_RING_FULL		BIT(6)
#define INTR_TRANSFER_ABORT		BIT(5)

#define RH_RING_STATUS			0x20
#define RING_STATUS_LOCKED		BIT(3)
#define RING_STATUS_ABORTED		BIT(2)
#define RING_STATUS_RUNNING		BIT(1)
#define RING_STATUS_ENABLED		BIT(0)

#define RH_RING_CONTROL			0x24
#define RING_CTRL_ABORT			BIT(2)
#define RING_CTRL_RUN_STOP		BIT(1)
#define RING_CTRL_ENABLE		BIT(0)

#define RH_RING_OPERATION1		0x28
#define RING_OP1_IBI_DEQ_PTR		GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR		GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR		GENMASK(7, 0)

#define RH_RING_OPERATION2		0x2c
#define RING_OP2_IBI_ENQ_PTR		GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR		GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO		0x30
#define RH_CMD_RING_BASE_HI		0x34
#define RH_RESP_RING_BASE_LO		0x38
#define RH_RESP_RING_BASE_HI		0x3c
#define RH_IBI_STATUS_RING_BASE_LO	0x40
#define RH_IBI_STATUS_RING_BASE_HI	0x44
#define RH_IBI_DATA_RING_BASE_LO	0x48
#define RH_IBI_DATA_RING_BASE_HI	0x4c

#define RH_CMD_RING_SG			0x50	/* Ring Scatter Gather Support */
#define RH_RESP_RING_SG			0x54
#define RH_IBI_STATUS_RING_SG		0x58
#define RH_IBI_DATA_RING_SG		0x5c
#define RING_SG_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE		GENMASK(15, 0)
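
/*
 * Illustration only (not part of the driver logic): the rhs_reg_*() and
 * rh_reg_*() accessors above rely on a local "hci" or "rh" pointer being
 * in scope, and the GENMASK() fields are meant to be manipulated with
 * FIELD_GET()/FIELD_PREP(), e.g.:
 *
 *	u32 regval = rhs_reg_read(CONTROL);
 *	unsigned int n = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
 *	rhs_reg_write(CONTROL, FIELD_PREP(MAX_HEADER_COUNT, n));
 */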

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define DATA_BUF_IOC			BIT(30)	/* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE		GENMASK(15, 0)


struct hci_rh_data {
	void __iomem *regs;
	void *xfer, *resp, *ibi_status, *ibi_data;
	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
	unsigned int done_ptr, ibi_chunk_ptr;
	struct hci_xfer **src_xfers;
	spinlock_t lock;
	struct completion op_done;
};

struct hci_rings_data {
	unsigned int total;
	struct hci_rh_data headers[];
};

struct hci_dma_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

static inline u32 lo32(dma_addr_t physaddr)
{
	return physaddr;
}

static inline u32 hi32(dma_addr_t physaddr)
{
	/* trickery to avoid compiler warnings on 32-bit build targets */
	if (sizeof(dma_addr_t) > 4) {
		u64 hi = physaddr;
		return hi >> 32;
	}
	return 0;
}

static void hci_dma_cleanup(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i;

	if (!rings)
		return;

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(RING_CONTROL, 0);
		rh_reg_write(CR_SETUP, 0);
		rh_reg_write(IBI_SETUP, 0);
		rh_reg_write(INTR_SIGNAL_ENABLE, 0);

		if (rh->xfer)
			dma_free_coherent(&hci->master.dev,
					  rh->xfer_struct_sz * rh->xfer_entries,
					  rh->xfer, rh->xfer_dma);
		if (rh->resp)
			dma_free_coherent(&hci->master.dev,
					  rh->resp_struct_sz * rh->xfer_entries,
					  rh->resp, rh->resp_dma);
		kfree(rh->src_xfers);
		if (rh->ibi_status)
			dma_free_coherent(&hci->master.dev,
					  rh->ibi_status_sz * rh->ibi_status_entries,
					  rh->ibi_status, rh->ibi_status_dma);
		if (rh->ibi_data_dma)
			dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
					 rh->ibi_chunk_sz * rh->ibi_chunks_total,
					 DMA_FROM_DEVICE);
		kfree(rh->ibi_data);
	}

	rhs_reg_write(CONTROL, 0);

	kfree(rings);
	hci->io_data = NULL;
}
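
/*
 * Bring up the ring bundles: for each ring header advertised by the
 * controller (capped by XFER_RINGS), allocate coherent memory for the
 * command and response descriptor rings plus, on IBI-capable rings, the
 * IBI status ring and a streaming-DMA chunk buffer, then program the
 * corresponding base/size registers and enable the ring.
 */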
static int hci_dma_init(struct i3c_hci *hci)
{
	struct hci_rings_data *rings;
	struct hci_rh_data *rh;
	u32 regval;
	unsigned int i, nr_rings, xfers_sz, resps_sz;
	unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
	int ret;

	regval = rhs_reg_read(CONTROL);
	nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
	dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
	if (unlikely(nr_rings > 8)) {
		dev_err(&hci->master.dev, "number of rings should be <= 8\n");
		nr_rings = 8;
	}
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;
	rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;
	hci->io_data = rings;
	rings->total = nr_rings;

	for (i = 0; i < rings->total; i++) {
		u32 offset = rhs_reg_read(RHn_OFFSET(i));

		dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
		ret = -EINVAL;
		if (!offset)
			goto err_out;
		rh = &rings->headers[i];
		rh->regs = hci->base_regs + offset;
		spin_lock_init(&rh->lock);
		init_completion(&rh->op_done);

		rh->xfer_entries = XFER_RING_ENTRIES;

		regval = rh_reg_read(CR_SETUP);
		rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
		rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
		DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
		    rh->xfer_struct_sz, rh->resp_struct_sz);
		xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
		resps_sz = rh->resp_struct_sz * rh->xfer_entries;

		rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
					      &rh->xfer_dma, GFP_KERNEL);
		rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
					      &rh->resp_dma, GFP_KERNEL);
		rh->src_xfers =
			kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
				      GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->xfer || !rh->resp || !rh->src_xfers)
			goto err_out;

		rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
		rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
		rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
		rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));

		regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
		rh_reg_write(CR_SETUP, regval);

		rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
		rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
						 INTR_TRANSFER_COMPLETION |
						 INTR_RING_OP |
						 INTR_TRANSFER_ERR |
						 INTR_WARN_INS_STOP_MODE |
						 INTR_IBI_RING_FULL |
						 INTR_TRANSFER_ABORT);

		/* IBIs */

		if (i >= IBI_RINGS)
			goto ring_ready;

		regval = rh_reg_read(IBI_SETUP);
		rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
		rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
		rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

		rh->ibi_chunk_sz = dma_get_cache_alignment();
		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
		BUG_ON(rh->ibi_chunk_sz > 256);

		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

		rh->ibi_status =
			dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
					   &rh->ibi_status_dma, GFP_KERNEL);
		rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->ibi_status || !rh->ibi_data)
			goto err_out;
		rh->ibi_data_dma =
			dma_map_single(&hci->master.dev, rh->ibi_data,
				       ibi_data_ring_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
			rh->ibi_data_dma = 0;
			ret = -ENOMEM;
			goto err_out;
		}

		regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
				    rh->ibi_status_entries) |
			 FIELD_PREP(IBI_DATA_CHUNK_SIZE,
				    ilog2(rh->ibi_chunk_sz) - 2) |
			 FIELD_PREP(IBI_DATA_CHUNK_COUNT,
				    rh->ibi_chunks_total);
		rh_reg_write(IBI_SETUP, regval);

		regval = rh_reg_read(INTR_SIGNAL_ENABLE);
		regval |= INTR_IBI_READY;
		rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
		rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
	}

	regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
	rhs_reg_write(CONTROL, regval);
	return 0;

err_out:
	hci_dma_cleanup(hci);
	return ret;
}
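
/* Unmap the data buffers of the first @n transfers in @xfer_list. */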
static void hci_dma_unmap_xfer(struct i3c_hci *hci,
			       struct hci_xfer *xfer_list, unsigned int n)
{
	struct hci_xfer *xfer;
	unsigned int i;

	for (i = 0; i < n; i++) {
		xfer = xfer_list + i;
		if (!xfer->data)
			continue;
		dma_unmap_single(&hci->master.dev,
				 xfer->data_dma, xfer->data_len,
				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
			xfer->data_dma =
				dma_map_single(&hci->master.dev,
					       xfer->data,
					       xfer->data_len,
					       xfer->rnw ?
						  DMA_FROM_DEVICE :
						  DMA_TO_DEVICE);
			if (dma_mapping_error(&hci->master.dev,
					      xfer->data_dma)) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
			*ring_data++ = lo32(xfer->data_dma);
			*ring_data++ = hi32(xfer->data_dma);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember corresponding xfer struct */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember corresponding ring/entry for this xfer structure */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;

		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only if we didn't reach its dequeue pointer.
		 */
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}
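
/*
 * For reference, each ring entry enqueued above is laid out as follows
 * (v1 command format; the v2 format uses four command words):
 *
 *	word 0..1 : cmd_desc[0..1]
 *	word 2    : FIELD_PREP(DATA_BUF_BLOCK_SIZE, len) | DATA_BUF_IOC
 *	word 3..4 : lo32()/hi32() of the mapped data buffer address
 *
 * where DATA_BUF_IOC is set on the last entry of a batch only.
 */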
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
				 struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
	unsigned int i;
	bool did_unqueue = false;

	/* stop the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
		/*
		 * We're deep in it if this condition is ever met.
		 * Hardware might still be writing to memory, etc.
		 */
		dev_crit(&hci->master.dev, "unable to abort the ring\n");
		WARN_ON(1);
	}

	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		int idx = xfer->ring_entry;

		/*
		 * At the time the abort happened, the xfer might have
		 * completed already. If not then replace corresponding
		 * descriptor entries with a no-op.
		 */
		if (idx >= 0) {
			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

			/* store no-op cmd descriptor */
			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
			*ring_data++ = 0;
			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
				*ring_data++ = 0;
				*ring_data++ = 0;
			}

			/* disassociate this xfer struct */
			rh->src_xfers[idx] = NULL;

			/* and unmap it */
			hci_dma_unmap_xfer(hci, xfer, 1);

			did_unqueue = true;
		}
	}

	/* restart the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

	return did_unqueue;
}

static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	u32 op1_val, op2_val, resp, *ring_resp;
	unsigned int tid, done_ptr = rh->done_ptr;
	struct hci_xfer *xfer;

	for (;;) {
		op2_val = rh_reg_read(RING_OPERATION2);
		if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
			break;

		ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
		resp = *ring_resp;
		tid = RESP_TID(resp);
		DBG("resp = 0x%08x", resp);

		xfer = rh->src_xfers[done_ptr];
		if (!xfer) {
			DBG("orphaned ring entry");
		} else {
			hci_dma_unmap_xfer(hci, xfer, 1);
			xfer->ring_entry = -1;
			xfer->response = resp;
			if (tid != xfer->cmd_tid) {
				dev_err(&hci->master.dev,
					"response tid=%d when expecting %d\n",
					tid, xfer->cmd_tid);
				/* TODO: do something about it? */
			}
			if (xfer->completion)
				complete(xfer->completion);
		}

		done_ptr = (done_ptr + 1) % rh->xfer_entries;
		rh->done_ptr = done_ptr;
	}

	/* take care to update the software dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);
}

static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_dma_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}
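
/*
 * Collect one complete IBI from the status ring: accumulate status
 * entries (and their data chunk counts) until IBI_LAST_STATUS is seen,
 * copy the possibly wrapped payload out of the chunk ring into a generic
 * IBI slot, queue it to the core, then return the consumed chunks to the
 * hardware through CHUNK_CONTROL.
 */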
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	ibi_status_error = 0;
	ibi_addr = -1;
	ibi_chunks = 0;
	ibi_size = 0;
	last_ptr = -1;

	/* let's find all we can about this IBI */
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		DBG("status = %#x", ibi_status);

		if (ibi_status_error) {
			/* we no longer care */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr == -1) {
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */

	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
		return;
	}
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrap to the start of the ring. Furthermore
	 * there is no guarantee that those chunks will be released in order
	 * and in a timely manner by the upper driver. So let's just copy
	 * them to a discrete buffer. In practice they're supposed to be
	 * small anyway.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
			* rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start and copy remaining data */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

	/* submit it */
	slot->dev = dev;
	slot->len = ibi_size;
	i3c_master_queue_ibi(dev, slot);

done:
	/* take care to update the ibi dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_IBI_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);

	/* update the chunk pointer */
	rh->ibi_chunk_ptr += ibi_chunks;
	rh->ibi_chunk_ptr %= rh->ibi_chunks_total;

	/* and tell the hardware about freed chunks */
	rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
}
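
/*
 * Per-ring interrupt dispatch. Bit i of @mask flags a pending interrupt
 * for ring header i (the mask is assumed to be derived from the
 * controller's top-level interrupt status by the caller). Returns true
 * if any ring had work to do.
 */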
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
	struct hci_rings_data *rings = hci->io_data;
	unsigned int i;
	bool handled = false;

	for (i = 0; mask && i < rings->total; i++) {
		struct hci_rh_data *rh;
		u32 status;

		if (!(mask & BIT(i)))
			continue;
		mask &= ~BIT(i);

		rh = &rings->headers[i];
		status = rh_reg_read(INTR_STATUS);
		DBG("rh%d status: %#x", i, status);
		if (!status)
			continue;
		rh_reg_write(INTR_STATUS, status);

		if (status & INTR_IBI_READY)
			hci_dma_process_ibi(hci, rh);
		if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
			hci_dma_xfer_done(hci, rh);
		if (status & INTR_RING_OP)
			complete(&rh->op_done);

		if (status & INTR_TRANSFER_ABORT)
			dev_notice_ratelimited(&hci->master.dev,
					       "ring %d: Transfer Aborted\n", i);
		if (status & INTR_WARN_INS_STOP_MODE)
			dev_warn_ratelimited(&hci->master.dev,
					     "ring %d: Inserted Stop on Mode Change\n", i);
		if (status & INTR_IBI_RING_FULL)
			dev_err_ratelimited(&hci->master.dev,
					    "ring %d: IBI Ring Full Condition\n", i);

		handled = true;
	}

	return handled;
}

const struct hci_io_ops mipi_i3c_hci_dma = {
	.init			= hci_dma_init,
	.cleanup		= hci_dma_cleanup,
	.queue_xfer		= hci_dma_queue_xfer,
	.dequeue_xfer		= hci_dma_dequeue_xfer,
	.irq_handler		= hci_dma_irq_handler,
	.request_ibi		= hci_dma_request_ibi,
	.free_ibi		= hci_dma_free_ibi,
	.recycle_ibi_slot	= hci_dma_recycle_ibi_slot,
};