/* Broadcom FlexRM Mailbox Driver
 *
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings where each mailbox channel represents a separate FlexRM ring.
 */

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

/* ====== FlexRM register defines ===== */

/* FlexRM configuration */
#define RING_REGS_SIZE			0x10000
#define RING_DESC_SIZE			8
#define RING_DESC_INDEX(offset)		\
	((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index)		\
	((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT		1024
#define RING_BD_ALIGN_ORDER		12
#define RING_BD_ALIGN_CHECK(addr)	\
	(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset)	\
	(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset)	\
	(!RING_BD_TOGGLE_INVALID(offset))
#define RING_BD_DESC_PER_REQ		32
#define RING_BD_DESC_COUNT		\
	(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE			\
	(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER		13
#define RING_CMPL_DESC_COUNT		RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE			\
	(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC			0x76303031

/* Per-Ring register offsets */
#define RING_VER			0x000
#define RING_BD_START_ADDR		0x004
#define RING_BD_READ_PTR		0x008
#define RING_BD_WRITE_PTR		0x00c
#define RING_BD_READ_PTR_DDR_LS		0x010
#define RING_BD_READ_PTR_DDR_MS		0x014
#define RING_CMPL_START_ADDR		0x018
#define RING_CMPL_WRITE_PTR		0x01c
#define RING_NUM_REQ_RECV_LS		0x020
#define RING_NUM_REQ_RECV_MS		0x024
#define RING_NUM_REQ_TRANS_LS		0x028
#define RING_NUM_REQ_TRANS_MS		0x02c
#define RING_NUM_REQ_OUTSTAND		0x030
#define RING_CONTROL			0x034
#define RING_FLUSH_DONE			0x038
#define RING_MSI_ADDR_LS		0x03c
#define RING_MSI_ADDR_MS		0x040
#define RING_MSI_CONTROL		0x048
#define RING_BD_READ_PTR_DDR_CONTROL	0x04c
#define RING_MSI_DATA_VALUE		0x064

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT		28
#define BD_LAST_UPDATE_HW_MASK		0x1
#define BD_START_ADDR_VALUE(pa)		\
	((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)	\
	((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)	\
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
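
/*
 * Example (illustrative): the BD area is 4KB aligned, so only bits [39:12]
 * of its DMA address are programmed. For a BD base of 0x12_3456_7000:
 *
 *	BD_START_ADDR_VALUE(0x1234567000) = (0x1234567000 >> 12) & 0x0fffffff
 *					  = 0x01234567
 *	BD_START_ADDR_DECODE(0x01234567)  = 0x1234567000
 *
 * The completion area uses the same scheme with an 8KB (order 13) alignment.
 */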

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL	12
#define CONTROL_FLUSH_SHIFT		5
#define CONTROL_ACTIVE_SHIFT		4
#define CONTROL_RATE_ADAPT_MASK		0xf
#define CONTROL_RATE_DYNAMIC		0x0
#define CONTROL_RATE_FAST		0x8
#define CONTROL_RATE_MEDIUM		0x9
#define CONTROL_RATE_SLOW		0xa
#define CONTROL_RATE_IDLE		0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK			0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT		16
#define MSI_TIMER_VAL_MASK		0xffff
#define MSI_ENABLE_SHIFT		15
#define MSI_ENABLE_MASK			0x1
#define MSI_COUNT_SHIFT			0
#define MSI_COUNT_MASK			0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT	16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK	0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT	15
#define BD_READ_PTR_DDR_ENABLE_MASK	0x1

/* ====== FlexRM ring descriptor defines ===== */

/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT		0
#define CMPL_OPAQUE_MASK		0xffff
#define CMPL_ENGINE_STATUS_SHIFT	16
#define CMPL_ENGINE_STATUS_MASK		0xffff
#define CMPL_DME_STATUS_SHIFT		32
#define CMPL_DME_STATUS_MASK		0xffff
#define CMPL_RM_STATUS_SHIFT		48
#define CMPL_RM_STATUS_MASK		0xffff

/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR		BIT(0)
#define DME_STATUS_MEM_UCOR_ERR		BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW	BIT(2)
#define DME_STATUS_FIFO_OVERFLOW	BIT(3)
#define DME_STATUS_RRESP_ERR		BIT(4)
#define DME_STATUS_BRESP_ERR		BIT(5)
#define DME_STATUS_ERROR_MASK		(DME_STATUS_MEM_COR_ERR | \
					 DME_STATUS_MEM_UCOR_ERR | \
					 DME_STATUS_FIFO_UNDERFLOW | \
					 DME_STATUS_FIFO_OVERFLOW | \
					 DME_STATUS_RRESP_ERR | \
					 DME_STATUS_BRESP_ERR)

/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT		0
#define RM_STATUS_CODE_MASK		0x3ff
#define RM_STATUS_CODE_GOOD		0x0
#define RM_STATUS_CODE_AE_TIMEOUT	0x3ff

/* General descriptor format */
#define DESC_TYPE_SHIFT			60
#define DESC_TYPE_MASK			0xf
#define DESC_PAYLOAD_SHIFT		0
#define DESC_PAYLOAD_MASK		0x0fffffffffffffff

/* Null descriptor format */
#define NULL_TYPE			0
#define NULL_TOGGLE_SHIFT		58
#define NULL_TOGGLE_MASK		0x1

/* Header descriptor format */
#define HEADER_TYPE			1
#define HEADER_TOGGLE_SHIFT		58
#define HEADER_TOGGLE_MASK		0x1
#define HEADER_ENDPKT_SHIFT		57
#define HEADER_ENDPKT_MASK		0x1
#define HEADER_STARTPKT_SHIFT		56
#define HEADER_STARTPKT_MASK		0x1
#define HEADER_BDCOUNT_SHIFT		36
#define HEADER_BDCOUNT_MASK		0x1f
#define HEADER_BDCOUNT_MAX		HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT		16
#define HEADER_FLAGS_MASK		0xffff
#define HEADER_OPAQUE_SHIFT		0
#define HEADER_OPAQUE_MASK		0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE			2
#define SRC_LENGTH_SHIFT		44
#define SRC_LENGTH_MASK			0xffff
#define SRC_ADDR_SHIFT			0
#define SRC_ADDR_MASK			0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE			3
#define DST_LENGTH_SHIFT		44
#define DST_LENGTH_MASK			0xffff
#define DST_ADDR_SHIFT			0
#define DST_ADDR_MASK			0x00000fffffffffff

/* Immediate (IMM) descriptor format */
#define IMM_TYPE			4
#define IMM_DATA_SHIFT			0
#define IMM_DATA_MASK			0x0fffffffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE			5
#define NPTR_TOGGLE_SHIFT		58
#define NPTR_TOGGLE_MASK		0x1
#define NPTR_ADDR_SHIFT			0
#define NPTR_ADDR_MASK			0x00000fffffffffff
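
/*
 * Example (illustrative): a next-table pointer descriptor that chains to a
 * BD table at DMA address 0x1000 with the toggle bit set encodes as
 *
 *	(NPTR_TYPE << 60) | (1 << 58) | 0x1000 = 0x5400000000001000
 *
 * see flexrm_next_table_desc() below.
 */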

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE			6
#define MSRC_LENGTH_SHIFT		44
#define MSRC_LENGTH_MASK		0xffff
#define MSRC_ADDR_SHIFT			0
#define MSRC_ADDR_MASK			0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE			7
#define MDST_LENGTH_SHIFT		44
#define MDST_LENGTH_MASK		0xffff
#define MDST_ADDR_SHIFT			0
#define MDST_ADDR_MASK			0x00000fffffffffff

/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE			8
#define SRCT_LENGTH_SHIFT		44
#define SRCT_LENGTH_MASK		0xffff
#define SRCT_ADDR_SHIFT			0
#define SRCT_ADDR_MASK			0x00000fffffffffff

/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE			9
#define DSTT_LENGTH_SHIFT		44
#define DSTT_LENGTH_MASK		0xffff
#define DSTT_ADDR_SHIFT			0
#define DSTT_ADDR_MASK			0x00000fffffffffff

/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE			10
#define IMMT_DATA_SHIFT			0
#define IMMT_DATA_MASK			0x0fffffffffffffff

/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m)		(((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m)			\
	do {						\
		(_d) &= ~((u64)(_m) << (_s));		\
		(_d) |= (((u64)(_v) & (_m)) << (_s));	\
	} while (0)

/* ====== FlexRM data structures ===== */

struct flexrm_ring {
	/* Unprotected members */
	int num;
	struct flexrm_mbox *mbox;
	void __iomem *regs;
	bool irq_requested;
	unsigned int irq;
	unsigned int msi_timer_val;
	unsigned int msi_count_threshold;
	struct ida requests_ida;
	struct brcm_message *requests[RING_MAX_REQ_COUNT];
	void *bd_base;
	dma_addr_t bd_dma_base;
	u32 bd_write_offset;
	void *cmpl_base;
	dma_addr_t cmpl_dma_base;
	/* Protected members */
	spinlock_t lock;
	struct brcm_message *last_pending_msg;
	u32 cmpl_read_offset;
};

struct flexrm_mbox {
	struct device *dev;
	void __iomem *regs;
	u32 num_rings;
	struct flexrm_ring *rings;
	struct dma_pool *bd_pool;
	struct dma_pool *cmpl_pool;
	struct mbox_controller controller;
};

/* ====== FlexRM ring descriptor helper routines ===== */

static u64 flexrm_read_desc(void *desc_ptr)
{
	return le64_to_cpu(*((u64 *)desc_ptr));
}

static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
	*((u64 *)desc_ptr) = cpu_to_le64(desc);
}

static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
	return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}

static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
	u32 status;

	status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
			  CMPL_DME_STATUS_MASK);
	if (status & DME_STATUS_ERROR_MASK)
		return -EIO;

	status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
			  CMPL_RM_STATUS_MASK);
	status &= RM_STATUS_CODE_MASK;
	if (status == RM_STATUS_CODE_AE_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}
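
/*
 * Example (illustrative): a completion descriptor of 0x0000000000000017
 * carries reqid 0x17 with both DME and RM status zero, so
 * flexrm_cmpl_desc_to_error() returns 0. A descriptor of
 * 0x03ff000000000017 has RM status code 0x3ff (AE timeout) and is
 * reported as -ETIMEDOUT for the same request.
 */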

static bool flexrm_is_next_table_desc(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);
	u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}

static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
	u64 desc = 0;

	DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
	DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);

	return desc;
}

static u64 flexrm_null_desc(u32 toggle)
{
	u64 desc = 0;

	DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);

	return desc;
}

static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (!(nhcnt % HEADER_BDCOUNT_MAX))
		hcnt += 1;

	return hcnt;
}

static void flexrm_flip_header_toogle(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);

	if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
		desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
	else
		desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);

	flexrm_write_desc(desc_ptr, desc);
}

static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
			      u32 bdcount, u32 flags, u32 opaque)
{
	u64 desc = 0;

	DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
	DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
	DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
	DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
	DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
	DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);

	return desc;
}

static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
				u64 desc, void **desc_ptr, u32 *toggle,
				void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;

	/* Sanity check */
	if (nhcnt <= nhpos)
		return;

	/*
	 * Each request or packet starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is given by the BDCOUNT field of
	 * the HEADER descriptor. The max value of the BDCOUNT field is 31,
	 * which means we can only have 31 non-HEADER descriptors following
	 * one HEADER descriptor.
	 *
	 * In general use, the number of non-HEADER descriptors can easily
	 * go beyond 31. To handle this situation, we have packet (or
	 * request) extension bits (STARTPKT and ENDPKT) in the HEADER
	 * descriptor.
	 *
	 * To use packet extension, the first HEADER descriptor of a request
	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
	 * TOGGLE bit of the first HEADER will be set to the invalid state
	 * to ensure that FlexRM does not start fetching descriptors until
	 * all descriptors are enqueued. The user of this function will flip
	 * the TOGGLE bit of the first HEADER after all descriptors are
	 * enqueued.
	 */
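
	/*
	 * Example (illustrative): for nhcnt = 70 non-HEADER descriptors,
	 * HEADER descriptors are emitted at nhpos 0, 31 and 62:
	 *	nhpos 0:  STARTPKT=1, ENDPKT=0, BDCOUNT=31, TOGGLE inverted
	 *	nhpos 31: STARTPKT=0, ENDPKT=0, BDCOUNT=31
	 *	nhpos 62: STARTPKT=0, ENDPKT=1, BDCOUNT=8
	 */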

	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
				nhavail : HEADER_BDCOUNT_MAX;
		d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
				       _bdcount, 0x0, reqid);

		/* Write header descriptor */
		flexrm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (flexrm_is_next_table_desc(*desc_ptr)) {
			*toggle = (*toggle) ? 0 : 1;
			*desc_ptr += sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	flexrm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr += sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (flexrm_is_next_table_desc(*desc_ptr)) {
		*toggle = (*toggle) ? 0 : 1;
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}
}

static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
	DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
	DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
	DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);

	return desc;
}

static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
	DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);

	return desc;
}

static u64 flexrm_imm_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);

	return desc;
}

static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
	DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);

	return desc;
}

static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
	DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);

	return desc;
}

static u64 flexrm_immt_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);

	return desc;
}
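
/*
 * Example (illustrative): flexrm_spu_write_descs() below picks MSRC/MDST
 * descriptors for buffers whose length is a multiple of 16 bytes and plain
 * SRC/DST descriptors otherwise. A 0x100 byte scatterlist entry becomes an
 * MSRC descriptor with length_div_16 = 0x10, while a 0x1f byte entry
 * becomes a SRC descriptor with length = 0x1f.
 */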

static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}

static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}

static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (rc < 0) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return rc;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}

static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						    sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						    sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
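
/*
 * Example (illustrative): an SPU message with a single 64-byte source
 * scatterlist entry and a single 64-byte destination entry (nhcnt = 2) is
 * written to the ring as
 *
 *	HEADER (STARTPKT=1, ENDPKT=1, BDCOUNT=2, OPAQUE=reqid, toggle invalid)
 *	MSRC   (length_div_16 = 4)
 *	MDST   (length_div_16 = 4)
 *	NULL   (invalid toggle)
 *
 * and only then is the HEADER toggle flipped to the valid state, so the
 * hardware never sees a partially written request.
 */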

static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}

static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt;

	cnt = 0;
	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;
	}

	return cnt;
}

static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 i, nhpos = 0;
	struct brcm_sba_command *c;
	void *orig_desc_ptr = desc_ptr;

	/* Convert SBA commands into descriptors */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			/* Destination response descriptor */
			d = flexrm_dst_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			/* Destination response with tlast descriptor */
			d = flexrm_dstt_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			/* Destination with tlast descriptor */
			d = flexrm_dstt_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			/* Command as immediate descriptor */
			d = flexrm_imm_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else {
			/* Command as immediate descriptor with tlast */
			d = flexrm_immt_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			/* Source with tlast descriptor */
			d = flexrm_srct_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
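
/*
 * Example (illustrative): an SBA command with neither BRCM_SBA_CMD_TYPE_B
 * nor BRCM_SBA_CMD_TYPE_C set but with both BRCM_SBA_CMD_HAS_RESP and
 * BRCM_SBA_CMD_HAS_OUTPUT set expands to three non-HEADER descriptors:
 * DST (response buffer), DSTT (output buffer) and IMMT (the command word),
 * which matches the count returned by
 * flexrm_sba_estimate_nonheader_desc_count().
 */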

static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;
	}
}

static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	if (!msg)
		return 0;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_estimate_nonheader_desc_count(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_estimate_nonheader_desc_count(msg);
	default:
		return 0;
	}
}

static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;
	}

	return 0;
}

static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}

static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
				u32 reqid, void *desc_ptr, u32 toggle,
				void *start_desc, void *end_desc)
{
	if (!msg || !desc_ptr || !start_desc || !end_desc)
		return ERR_PTR(-ENOTSUPP);

	if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
		return ERR_PTR(-ERANGE);

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	default:
		return ERR_PTR(-ENOTSUPP);
	}
}

/* ====== FlexRM driver helper routines ===== */
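
/*
 * Request life cycle (summary of the helpers below):
 *
 * Submission: flexrm_send_data() -> flexrm_new_request() allocates a
 * request id from the ring IDA, DMA-maps the message, checks for free
 * ring space and then calls flexrm_write_descs() to place the descriptors
 * in BD memory.
 *
 * Completion: the per-ring MSI fires, flexrm_irq_thread() calls
 * flexrm_process_completions(), which walks the completion area, recycles
 * the request id, unmaps DMA and hands the message back to the client via
 * mbox_chan_received_data().
 */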

static int flexrm_new_request(struct flexrm_ring *ring,
			      struct brcm_message *batch_msg,
			      struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* If no requests possible then save data pointer and goto done. */
	reqid = ida_simple_get(&ring->requests_ida, 0,
			       RING_MAX_REQ_COUNT, GFP_KERNEL);
	if (reqid < 0) {
		spin_lock_irqsave(&ring->lock, flags);
		if (batch_msg)
			ring->last_pending_msg = batch_msg;
		else
			ring->last_pending_msg = msg;
		spin_unlock_irqrestore(&ring->lock, flags);
		return 0;
	}
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);
		return ret;
	}

	/* If last_pending_msg is already set then goto done with error */
	spin_lock_irqsave(&ring->lock, flags);
	if (ring->last_pending_msg)
		ret = -ENOSPC;
	spin_unlock_irqrestore(&ring->lock, flags);
	if (ret < 0) {
		dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
		exit_cleanup = true;
		goto exit;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				    number of header descriptors +
	 *				    1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		spin_lock_irqsave(&ring->lock, flags);
		if (batch_msg)
			ring->last_pending_msg = batch_msg;
		else
			ring->last_pending_msg = msg;
		spin_unlock_irqrestore(&ring->lock, flags);
		ret = 0;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
				  ring->bd_base + ring->bd_write_offset,
				  RING_BD_TOGGLE_VALID(ring->bd_write_offset),
				  ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);
	}

	return ret;
}

static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/* Check last_pending_msg */
	if (ring->last_pending_msg) {
		msg = ring->last_pending_msg;
		ring->last_pending_msg = NULL;
	}

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read the completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow the next MSI interrupt only after
	 * the completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* If last_pending_msg was set then queue it back */
	if (msg)
		mbox_send_message(chan, msg);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "got completion desc=0x%lx with error %d",
				 (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
				 "null msg pointer for completion desc=0x%lx",
				 (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		count++;
	}

	return count;
}

/* ====== FlexRM interrupt handler ===== */

static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
{
	/* We only have MSI for completions so just wakeup IRQ thread */
	/* Ring related errors will be informed via completion descriptors */

	return IRQ_WAKE_THREAD;
}

static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
{
	flexrm_process_completions(dev_id);

	return IRQ_HANDLED;
}

/* ====== FlexRM mailbox callbacks ===== */

static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
	int i, rc;
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg,
						&msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}

static bool flexrm_peek_data(struct mbox_chan *chan)
{
	int cnt = flexrm_process_completions(chan->con_priv);

	return (cnt > 0) ? true : false;
}
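
/*
 * Typical client usage (illustrative sketch, not taken from a real client;
 * cmds/ncmds are placeholder variables):
 *
 *	struct mbox_client cl = { .dev = dev, .tx_block = false };
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	struct brcm_message msg = { .type = BRCM_MESSAGE_SBA };
 *
 *	msg.sba.cmds = cmds;
 *	msg.sba.cmds_count = ncmds;
 *	mbox_send_message(chan, &msg);
 *
 * The result is reported asynchronously through cl.rx_callback() with
 * msg.error holding the completion status.
 */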

static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev, "can't allocate BD memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						   next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}

	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
					 GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev, "can't allocate completion memory\n");
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}
	memset(ring->cmpl_base, 0, RING_CMPL_SIZE);

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev, "ring IRQ not available\n");
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq,
				   flexrm_irq_event,
				   flexrm_irq_thread,
				   0, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write pointer will be same as HW write pointer */
	ring->bd_write_offset =
			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Ensure last pending message is cleared */
	ring->last_pending_msg = NULL;

	/* Completion read pointer will be same as HW write pointer */
	ring->cmpl_read_offset =
			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	return 0;

fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}
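
/*
 * Example (illustrative): with the probe-time defaults of
 * msi_timer_val = 0xffff and msi_count_threshold = 1, flexrm_startup()
 * programs RING_MSI_CONTROL with
 *
 *	(0xffff << 16) | BIT(15) | 1 = 0xffff8001
 *
 * i.e. MSI enabled, a count threshold of one and the maximum timer value.
 * Both values can be overridden per channel through the mailbox specifier
 * (see flexrm_mbox_of_xlate() below).
 */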

static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Flush ring with timeout of 1s */
	timeout = 1000;
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
		       ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (timeout--);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Release IRQ */
	if (ring->irq_requested) {
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}

static bool flexrm_last_tx_done(struct mbox_chan *chan)
{
	bool ret;
	unsigned long flags;
	struct flexrm_ring *ring = chan->con_priv;

	spin_lock_irqsave(&ring->lock, flags);
	ret = (ring->last_pending_msg) ? false : true;
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
	.send_data	= flexrm_send_data,
	.startup	= flexrm_startup,
	.shutdown	= flexrm_shutdown,
	.last_tx_done	= flexrm_last_tx_done,
	.peek_data	= flexrm_peek_data,
};
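
/*
 * Channels are requested with a three-cell mailbox specifier: the ring
 * number, the MSI count threshold (at most MSI_COUNT_MASK) and the MSI
 * timer value (at most MSI_TIMER_VAL_MASK). An illustrative client node
 * (names and values are examples only):
 *
 *	mboxes = <&flexrm_mbox 0 0x10 0xffff>;
 */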

static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
					      const struct of_phandle_args *pa)
{
	struct mbox_chan *chan;
	struct flexrm_ring *ring;

	if (pa->args_count < 3)
		return ERR_PTR(-EINVAL);

	if (pa->args[0] >= cntlr->num_chans)
		return ERR_PTR(-ENOENT);

	if (pa->args[1] > MSI_COUNT_MASK)
		return ERR_PTR(-EINVAL);

	if (pa->args[2] > MSI_TIMER_VAL_MASK)
		return ERR_PTR(-EINVAL);

	chan = &cntlr->chans[pa->args[0]];
	ring = chan->con_priv;
	ring->msi_count_threshold = pa->args[1];
	ring->msi_timer_val = pa->args[2];

	return chan;
}

/* ====== FlexRM platform driver ===== */

static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];

	/* Configure per-Ring MSI registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}

static int flexrm_mbox_probe(struct platform_device *pdev)
{
	int index, ret = 0;
	void __iomem *regs;
	void __iomem *regs_end;
	struct msi_desc *desc;
	struct resource *iomem;
	struct flexrm_ring *ring;
	struct flexrm_mbox *mbox;
	struct device *dev = &pdev->dev;

	/* Allocate driver mailbox struct */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Get resource for registers */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
		ret = -ENODEV;
		goto fail;
	}

	/* Map registers of all rings */
	mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
	if (IS_ERR(mbox->regs)) {
		ret = PTR_ERR(mbox->regs);
		dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
		goto fail;
	}
	regs_end = mbox->regs + resource_size(iomem);

	/* Scan and count available rings */
	mbox->num_rings = 0;
	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
		if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
			mbox->num_rings++;
	}
	if (!mbox->num_rings) {
		ret = -ENODEV;
		goto fail;
	}

	/* Allocate driver ring structs */
	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->rings = ring;

	/* Initialize members of driver ring structs */
	regs = mbox->regs;
	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ring->num = index;
		ring->mbox = mbox;
		while ((regs < regs_end) &&
		       (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
			regs += RING_REGS_SIZE;
		if (regs_end <= regs) {
			ret = -ENODEV;
			goto fail;
		}
		ring->regs = regs;
		regs += RING_REGS_SIZE;
		ring->irq = UINT_MAX;
		ring->irq_requested = false;
		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
		ring->msi_count_threshold = 0x1;
		ida_init(&ring->requests_ida);
		memset(ring->requests, 0, sizeof(ring->requests));
		ring->bd_base = NULL;
		ring->bd_dma_base = 0;
		ring->cmpl_base = NULL;
		ring->cmpl_dma_base = 0;
		spin_lock_init(&ring->lock);
		ring->last_pending_msg = NULL;
		ring->cmpl_read_offset = 0;
	}
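
	/*
	 * Note: RING_BD_START_ADDR holds bits [39:12] of the BD area
	 * address and RING_CMPL_START_ADDR holds bits [38:13] of the
	 * completion area address (see BD_START_ADDR_VALUE() and
	 * CMPL_START_ADDR_VALUE() above), which is why a 40-bit DMA mask
	 * is tried first below.
	 */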

	/* FlexRM is capable of 40-bit physical addresses only */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	/* Create DMA pool for ring BD memory */
	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
					1 << RING_BD_ALIGN_ORDER, 0);
	if (!mbox->bd_pool) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create DMA pool for ring completion memory */
	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
					  1 << RING_CMPL_ALIGN_ORDER, 0);
	if (!mbox->cmpl_pool) {
		ret = -ENOMEM;
		goto fail_destroy_bd_pool;
	}

	/* Allocate platform MSIs for each ring */
	ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
					     flexrm_mbox_msi_write);
	if (ret)
		goto fail_destroy_cmpl_pool;

	/* Save allocated IRQ numbers for each ring */
	for_each_msi_entry(desc, dev) {
		ring = &mbox->rings[desc->platform.msi_index];
		ring->irq = desc->irq;
	}

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = true;
	mbox->controller.txpoll_period = 1;
	mbox->controller.ops = &flexrm_mbox_chan_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->num_rings;
	mbox->controller.of_xlate = flexrm_mbox_of_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
				sizeof(*mbox->controller.chans), GFP_KERNEL);
	if (!mbox->controller.chans) {
		ret = -ENOMEM;
		goto fail_free_msis;
	}
	for (index = 0; index < mbox->num_rings; index++)
		mbox->controller.chans[index].con_priv = &mbox->rings[index];

	/* Register mailbox controller */
	ret = mbox_controller_register(&mbox->controller);
	if (ret)
		goto fail_free_msis;

	dev_info(dev, "registered flexrm mailbox with %d channels\n",
		 mbox->controller.num_chans);

	return 0;

fail_free_msis:
	platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
	dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
	dma_pool_destroy(mbox->bd_pool);
fail:
	return ret;
}

static int flexrm_mbox_remove(struct platform_device *pdev)
{
	int index;
	struct device *dev = &pdev->dev;
	struct flexrm_ring *ring;
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	mbox_controller_unregister(&mbox->controller);

	platform_msi_domain_free_irqs(dev);

	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);

	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ida_destroy(&ring->requests_ida);
	}

	return 0;
}

static const struct of_device_id flexrm_mbox_of_match[] = {
	{ .compatible = "brcm,iproc-flexrm-mbox", },
	{},
};
MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);

static struct platform_driver flexrm_mbox_driver = {
	.driver = {
		.name = "brcm-flexrm-mbox",
		.of_match_table = flexrm_mbox_of_match,
	},
	.probe		= flexrm_mbox_probe,
	.remove		= flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);

MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
MODULE_LICENSE("GPL v2");