/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
 * Liu Gang <Gang.Liu@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"

#define GET_RMM_HANDLE(mport) \
		(((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO interrupt numbers, read from the OF tree */
#define IRQ_RIO_PW(m)		(((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m)		(((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

#define RIO_EPWISR		0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1	0x80000000
#define RIO_EPWISR_PINT2	0x40000000
#define RIO_EPWISR_MU		0x00000002
#define RIO_EPWISR_PW		0x00000001

#define IPWSR_CLEAR		0x98
#define OMSR_CLEAR		0x1cb3
#define IMSR_CLEAR		0x491
#define IDSR_CLEAR		0x91
#define ODSR_CLEAR		0x1c00
#define LTLEECSR_ENABLE_ALL	0xFFC000FC
#define RIO_LTLEECSR		0x060c

#define RIO_IM0SR		0x64
#define RIO_IM1SR		0x164
#define RIO_OM0SR		0x4
#define RIO_OM1SR		0x104

#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001

#define DOORBELL_MESSAGE_SIZE	0x08

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
};

struct rio_dbell_regs {
	u32 odmr;
	u32 odsr;
	u32 pad1[4];
	u32 oddpr;
	u32 oddatr;
	u32 pad2[3];
	u32 odretcr;
	u32 pad3[12];
	u32 dmr;
	u32 dsr;
	u32 pad4;
	u32 dqdpar;
	u32 pad5;
	u32 dqepar;
};

struct rio_pw_regs {
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};

struct rio_tx_desc {
	u32 pad1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 pad2;
	u32 pad3;
	u32 dwcnt;
	u32 pad4;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

struct rio_dbell_msg {
	u16 pad1;
	u16 tid;
	u16 sid;
	u16 info;
};

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;

		if (port->outb_msg[0].mcback != NULL) {
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1, slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
					-1, -1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			 dmsg->sid, dmsg->tid, dmsg->info);

		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start <= dmsg->info)
						&& (dbell->res->end >= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid, dmsg->info);
					break;
				}
			}
		}

		if (!found) {
			pr_debug("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
				 dmsg->sid, dmsg->tid, dmsg->info);
		}
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

void msg_unit_error_handler(void)
{
	/* XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif
	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}

	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}

	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}
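
/*
 * Note added for clarity (summarizing the code above and below): the hard
 * IRQ handler only copies the RIO_PW_MSG_SIZE message that the hardware
 * wrote to port_write_msg.virt into pw->pw_fifo and schedules pw->pw_work;
 * the actual dispatch to the RapidIO core happens later, in process
 * context, when fsl_pw_dpc() below drains the FIFO and calls
 * rio_inb_pwrite_handler() for each message.
 */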

static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer,
			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
		/* Process one message */
#ifdef DEBUG_PW
		{
			u32 i;

			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
				if ((i%4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i*4,
						 msg_buffer[i]);
				else
					pr_debug(" 0x%08x", msg_buffer[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
	}
}

/**
 * fsl_rio_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	u32 rval;

	rval = in_be32(&pw->pw_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&pw->pw_regs->pwmr, rval);

	return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: RapidIO port-write state for the port write unit
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&pw->pw_regs->pwmr,
		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
			RIO_PW_MSG_SIZE,
			&pw->port_write_msg.phys, GFP_KERNEL);
	if (!pw->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
		return -ENOMEM;
	}

	pw->port_write_msg.err_count = 0;
	pw->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&pw->pw_regs->epwqbar, 0);
	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&pw->pw_regs->epwqbar),
		 in_be32(&pw->pw_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&pw->pw_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure port write controller: enable snooping and all
	 * reporting, clear queue full
	 */
	out_be32(&pw->pw_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);

	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
			 IRQF_SHARED, "port-write", (void *)pw);
	if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
		goto err_out;
	}
	/* Enable Error Interrupt */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
	spin_lock_init(&pw->pw_fifo_lock);
	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&pw->pw_regs->pwmr),
		 in_be32(&pw->pw_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
			  pw->port_write_msg.virt,
			  pw->port_write_msg.phys);
	return rc;
}
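
/*
 * Illustrative only (not code from this driver): a minimal bring-up sketch,
 * assuming the platform setup code owns both the port-write state and the
 * mport -- initialize the unit once, then turn message reception on through
 * fsl_rio_pw_enable():
 *
 *	if (fsl_rio_port_write_init(pw))
 *		pr_warn("RIO: port-write init failed\n");
 *	else
 *		fsl_rio_pw_enable(mport, 1);
 */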

/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	/* On serial RapidIO devices such as the MPC8548 and MPC8641,
	 * the following register sequence is required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	return 0;
}

/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer %p len %8.8zx\n",
		 rdev->destid, mbox, buffer, len);
	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}
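
/*
 * Worked example for the dwcnt rounding above (illustrative only): for a
 * non-power-of-two length such as len = 100, get_bitmask_order(100) = 7,
 * so desc->dwcnt becomes 1 << 7 = 128; a power-of-two length such as
 * len = 64 is used unchanged.
 */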

/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	rmu->msg_tx_ring.dev_id = dev_id;
	rmu->msg_tx_ring.size = entries;

	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
		rmu->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!rmu->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < rmu->msg_tx_ring.size; j++)
				if (rmu->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						rmu->msg_tx_ring.virt_buffer[j],
						rmu->msg_tx_ring.phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&rmu->msg_tx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(rmu->msg_tx_ring.virt, 0,
	       rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	rmu->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&rmu->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&rmu->msg_regs->omr,
		 in_be32(&rmu->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < rmu->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
			rmu->msg_tx_ring.virt_buffer[i],
			rmu->msg_tx_ring.phys_buffer[i]);

	return rc;
}
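
/*
 * Illustrative only: clients do not call fsl_open_outb_mbox() or
 * fsl_add_outb_message() directly. Assuming the mport ops are wired up as
 * in fsl_rio.c, they go through the generic RapidIO mailbox API from
 * <linux/rio_drv.h>; the callback name and ring size below are
 * hypothetical:
 *
 *	static void tx_done(struct rio_mport *mport, void *dev_id,
 *			    int mbox, int slot)
 *	{
 *		pr_debug("mbox %d: descriptor %d sent\n", mbox, slot);
 *	}
 *
 *	rio_request_outb_mbox(mport, NULL, 0, 256, tx_done);
 *	rio_add_outb_message(mport, rdev, 0, buf, len);
 *	...
 *	rio_release_outb_mbox(mport, 0);
 */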

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable outbound message unit */
	out_be32(&rmu->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		/* Free the inbound message ring allocated above */
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}
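
/*
 * Illustrative only, with the same caveat as the outbound sketch above:
 * inbound mailboxes are normally driven through the generic API, and the
 * client is expected to pre-post RIO_MAX_MSG_SIZE buffers. The names
 * rx_ready, bufs and consume() are hypothetical:
 *
 *	rio_request_inb_mbox(mport, NULL, 0, 32, rx_ready);
 *	for (i = 0; i < 32; i++)
 *		rio_add_inb_buffer(mport, 0, bufs[i]);
 *
 *	// later, from the rx_ready callback or a work item:
 *	while ((msg = rio_get_inb_message(mport, 0)) != NULL)
 *		consume(msg);
 */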

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch an inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
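
/*
 * Illustrative only: doorbells follow the same split. A receiver registers
 * a 16-bit info range with the generic API, which the dispatch loop in
 * fsl_rio_dbell_handler() matches against; a sender normally goes through
 * rio_send_doorbell(), which ends up in fsl_rio_doorbell_send(). The
 * handler name and range below are hypothetical:
 *
 *	rio_request_inb_dbell(mport, NULL, 0x0000, 0x00ff, my_dbell_cb);
 *	...
 *	rio_send_doorbell(rdev, 0x0042);
 */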

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: RapidIO doorbell state for the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
			 priv->dev->of_node->full_name);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%s: unable to find 'reg' property of message-unit\n",
		       node->full_name);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
	       node->full_name, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}
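
/*
 * Illustrative only: based on the properties consumed by fsl_rio_setup_rmu()
 * above, the message-unit node it is handed needs a 'reg' entry (the offset
 * of this unit's registers within the RMU register block) and two
 * interrupts, index 0 for TX and index 1 for RX. A rough, hypothetical
 * device-tree fragment (interrupt specifiers depend on the platform):
 *
 *	message-unit@0 {
 *		reg = <0x0 0x100>;
 *		interrupts = <53 2 0 0  54 2 0 0>;
 *	};
 */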