/*
 * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Chul Kim <chul.kim@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

#define DEBUG_PW	/* Inbound Port-Write debugging */

static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);

/**
 * tsi721_lcread - read from local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates a local SREP space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
			 int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	*data = ioread32(priv->regs + offset);

	return 0;
}

/**
 * tsi721_lcwrite - write into local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a local write into SREP configuration space. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
			  int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	iowrite32(data, priv->regs + offset);

	return 0;
}
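/*
 * For reference: the two handlers above back the mport's .lcread and
 * .lcwrite operations, so a minimal caller sketch (assuming the generic
 * RapidIO core API from <linux/rio_drv.h>) would be:
 *
 *	u32 did;
 *
 *	if (!rio_local_read_config_32(mport, RIO_DID_CSR, &did))
 *		pr_info("local base device ID CSR: 0x%08x\n", did);
 *
 * RIO_DID_CSR is the standard base device ID CSR offset from
 * <linux/rio_regs.h>; any other offset into the SREP configuration
 * space works the same way, as long as accesses stay 32-bit wide.
 */
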
/**
 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 *                    transactions using designated Tsi721 DMA channel.
 * @priv: pointer to tsi721 private data
 * @sys_size: RapidIO transport system size
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read from or write into
 * @do_wr: Operation flag (1 == MAINT_WR)
 *
 * Generates a RapidIO maintenance transaction (Read or Write).
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
			u16 destid, u8 hopcount, u32 offset, int len,
			u32 *data, int do_wr)
{
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
	struct tsi721_dma_desc *bd_ptr;
	u32 rd_count, swr_ptr, ch_stat;
	int i, err = 0;
	u32 op = do_wr ? MAINT_WR : MAINT_RD;

	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
		return -EINVAL;

	bd_ptr = priv->mdma.bd_base;

	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);

	/* Initialize DMA descriptor */
	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
	bd_ptr[0].raddr_hi = 0;
	if (do_wr)
		bd_ptr[0].data[0] = cpu_to_be32p(data);
	else
		bd_ptr[0].data[0] = 0xffffffff;

	mb();

	/* Start DMA operation */
	iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
	ioread32(regs + TSI721_DMAC_DWRCNT);
	i = 0;

	/* Wait until DMA transfer is finished */
	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
							& TSI721_DMAC_STS_RUN) {
		udelay(1);
		if (++i >= 5000000) {
			dev_dbg(&priv->pdev->dev,
				"%s : DMA[%d] read timeout ch_status=%x\n",
				__func__, priv->mdma.ch_id, ch_stat);
			if (!do_wr)
				*data = 0xffffffff;
			err = -EIO;
			goto err_out;
		}
	}

	if (ch_stat & TSI721_DMAC_STS_ABORT) {
		/* If DMA operation aborted due to error,
		 * reinitialize DMA channel
		 */
		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
			__func__, ch_stat);
		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
		udelay(10);
		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
		udelay(1);
		if (!do_wr)
			*data = 0xffffffff;
		err = -EIO;
		goto err_out;
	}

	if (!do_wr)
		*data = be32_to_cpu(bd_ptr[0].data[0]);

	/*
	 * Update descriptor status FIFO RD pointer.
	 * NOTE: Skipping check and clear FIFO entries because we are waiting
	 * for transfer to be completed.
	 */
	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
err_out:

	return err;
}
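/*
 * Descriptor note (derived from the field packing above): a single
 * DTYPE2 "immediate data" descriptor carries the whole maintenance
 * transaction:
 *
 *	type_id  = (DTYPE2 << 29) | (MAINT_RD/MAINT_WR << 19) | destID
 *	bcount   = (sys_size << 26) | byte count (always 4 here)
 *	raddr_lo = (hopcount << 24) | config space offset
 *
 * The payload travels in big-endian form in data[0], which is why the
 * write path uses cpu_to_be32p() and the read path be32_to_cpu().
 */
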
/**
 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
 *                    using Tsi721 BDMA engine.
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read into
 *
 * Generates a RapidIO maintenance read transaction.
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
			    u8 hopcount, u32 offset, int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, data, 0);
}

/**
 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
 *                     using Tsi721 BDMA engine
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a RapidIO maintenance write transaction.
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
			     u8 hopcount, u32 offset, int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 temp = data;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, &temp, 1);
}
/**
 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound port-write interrupts. Copies PW message from an internal
 * buffer into PW message FIFO and schedules deferred routine to process
 * queued messages. Always returns %0.
 */
static int
tsi721_pw_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 pw_stat;
	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];


	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);

	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));

		/* Queue PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		spin_lock(&priv->pw_fifo_lock);
		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
			kfifo_in(&priv->pw_fifo, pw_buf,
						TSI721_RIO_PW_MSG_SIZE);
		else
			priv->pw_discard_count++;
		spin_unlock(&priv->pw_fifo_lock);
	}

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);

	schedule_work(&priv->pw_work);

	return 0;
}

static void tsi721_pw_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    pw_work);
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
							buffer for RIO layer */

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
		/* Process one message */
#ifdef DEBUG_PW
		{
		u32 i;
		pr_debug("%s : Port-Write Message:", __func__);
		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
			pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
				msg_buffer[i], msg_buffer[i + 1],
				msg_buffer[i + 2], msg_buffer[i + 3]);
			i += 4;
		}
		pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
	}
}

/**
 * tsi721_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
	struct tsi721_device *priv = mport->priv;
	u32 rval;

	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);

	if (enable)
		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
	else
		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);
	/* Update enable bits */
	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);

	return 0;
}

/**
 * tsi721_dsend - Send a RapidIO doorbell
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell
 *
 * Sends a RapidIO doorbell message. Always returns %0.
 */
static int tsi721_dsend(struct rio_mport *mport, int index,
			u16 destid, u16 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 offset;

	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
		 (destid << 2);

	dev_dbg(&priv->pdev->dev,
		"Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
	iowrite16be(data, priv->odb_base + offset);

	return 0;
}
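/*
 * Doorbell send note (derived from the offset math above): the BAR
 * behind priv->odb_base is an outbound doorbell window, and the byte
 * offset written to encodes the packet: bits [19:18] carry the
 * transport type (RIO_TT_CODE_8 or RIO_TT_CODE_16, matching the system
 * size) and bits [17:2] the destination ID, while the 16-bit info field
 * is the value written. A single iowrite16be() therefore emits one
 * complete doorbell packet.
 */
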
/**
 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
 * buffer into DB message FIFO and schedules deferred routine to process
 * queued DBs.
 */
static int
tsi721_dbell_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 regval;

	/* Disable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval &= ~TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	schedule_work(&priv->idb_work);

	return 0;
}

static void tsi721_db_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    idb_work);
	struct rio_mport *mport;
	struct rio_dbell *dbell;
	int found = 0;
	u32 wr_ptr, rd_ptr;
	u64 *idb_entry;
	u32 regval;
	union {
		u64 msg;
		u8  bytes[8];
	} idb;

	/*
	 * Process queued inbound doorbells
	 */
	mport = priv->mport;

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;

	while (wr_ptr != rd_ptr) {
		idb_entry = (u64 *)(priv->idb_base +
				(TSI721_IDB_ENTRY_SIZE * rd_ptr));
		rd_ptr++;
		rd_ptr %= IDB_QSIZE;
		idb.msg = *idb_entry;
		*idb_entry = 0;

		/* Process one doorbell */
		list_for_each_entry(dbell, &mport->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
				found = 1;
				break;
			}
		}

		if (found) {
			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		} else {
			dev_dbg(&priv->pdev->dev,
				"spurious inb doorbell, sid %2.2x tid %2.2x"
				" info %4.4x\n", DBELL_SID(idb.bytes),
				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		}

		/* Reset match flag before processing next queue entry */
		found = 0;

		wr_ptr = ioread32(priv->regs +
				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	}

	iowrite32(rd_ptr & (IDB_QSIZE - 1),
		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	/* Re-enable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval |= TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	if (wr_ptr != rd_ptr)
		schedule_work(&priv->idb_work);
}
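/*
 * Note on the deferred handler above: doorbells that arrive while
 * TSI721_SR_CHINT_IDBQRCV is masked are not lost, they keep landing in
 * the IDB queue. Re-reading the write pointer after interrupts are
 * re-enabled, and rescheduling the work item if it moved, closes the
 * race between the final queue scan and the interrupt re-enable.
 */
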
/**
 * tsi721_irqhandler - Tsi721 interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
 * interrupt events and calls an event-specific handler(s).
 */
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
	struct rio_mport *mport = (struct rio_mport *)ptr;
	struct tsi721_device *priv = mport->priv;
	u32 dev_int;
	u32 dev_ch_int;
	u32 intval;
	u32 ch_inte;

	/* For MSI mode disable all device-level interrupts */
	if (priv->flags & TSI721_USING_MSI)
		iowrite32(0, priv->regs + TSI721_DEV_INTE);

	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
	if (!dev_int)
		return IRQ_NONE;

	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);

	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
		/* Service SR2PC Channel interrupts */
		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
			/* Service Inbound Doorbell interrupt */
			intval = ioread32(priv->regs +
						TSI721_SR_CHINT(IDB_QUEUE));
			if (intval & TSI721_SR_CHINT_IDBQRCV)
				tsi721_dbell_handler(mport);
			else
				dev_info(&priv->pdev->dev,
					"Unsupported SR_CH_INT %x\n", intval);

			/* Clear interrupts */
			iowrite32(intval,
				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
		}
	}

	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
		int ch;

		/*
		 * Service channel interrupts from Messaging Engine
		 */

		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
			/* Disable signaled IB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Inbound Message interrupt for each MBOX
			 */
			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
					continue;
				tsi721_imsg_handler(priv, ch);
			}
		}

		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
			/* Disable signaled OB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Outbound Message interrupts for each MBOX
			 */

			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
					continue;
				tsi721_omsg_handler(priv, ch);
			}
		}
	}

	if (dev_int & TSI721_DEV_INT_SRIO) {
		/* Service SRIO MAC interrupts */
		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
			tsi721_pw_handler(mport);
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
		int ch;

		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
			dev_dbg(&priv->pdev->dev,
				"IRQ from DMA channel 0x%08x\n", dev_ch_int);

			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
					continue;
				tsi721_bdma_handler(&priv->bdma[ch]);
			}
		}
	}
#endif

	/* For MSI mode re-enable device-level interrupts */
	if (priv->flags & TSI721_USING_MSI) {
		dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
		iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
	}

	return IRQ_HANDLED;
}
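/*
 * Interrupt dispatch note: tsi721_irqhandler() walks a two-level status
 * hierarchy. TSI721_DEV_INT reports which functional block (SR2PC,
 * messaging, SRIO MAC, BDMA) raised the interrupt, TSI721_DEV_CHAN_INT
 * narrows that down to individual channels, and each channel then has
 * its own status register (e.g. TSI721_SR_CHINT) identifying the
 * specific event. Status is cleared at the channel level only; the
 * device-level registers are treated as read-only summaries.
 */
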
static void tsi721_interrupts_init(struct tsi721_device *priv)
{
	u32 intr;

	/* Enable IDB interrupts */
	iowrite32(TSI721_SR_CHINT_ALL,
		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	iowrite32(TSI721_SR_CHINT_IDBQRCV,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	/* Enable SRIO MAC interrupts */
	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
		priv->regs + TSI721_RIO_EM_DEV_INT_EN);

	/* Enable interrupts from channels in use */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
		(TSI721_INT_BDMA_CHAN_M &
		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
#else
	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
#endif
	iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);

	if (priv->flags & TSI721_USING_MSIX)
		intr = TSI721_DEV_INT_SRIO;
	else
		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;

	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
	ioread32(priv->regs + TSI721_DEV_INTE);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles outbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
	tsi721_omsg_handler(priv, mbox);
	return IRQ_HANDLED;
}

/**
 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles inbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
	tsi721_imsg_handler(priv, mbox + 4);
	return IRQ_HANDLED;
}
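/*
 * Vector math note: the two handlers above recover the mailbox number
 * from the distance between the firing IRQ and the group's base vector
 * (TSI721_VECT_OMB0_DONE / TSI721_VECT_IMB0_RCV). This relies on the
 * MSI-X vectors of each mailbox group being allocated contiguously,
 * which tsi721_enable_msix() arranges below.
 */
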
/**
 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SRIO MAC.
 */
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 srio_int;

	/* Service SRIO MAC interrupts */
	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
		tsi721_pw_handler((struct rio_mport *)ptr);

	return IRQ_HANDLED;
}

/**
 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SR2PC Channel.
 * NOTE: At this moment services only one SR2PC channel associated with inbound
 * doorbells.
 */
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 sr_ch_int;

	/* Service Inbound DB interrupt from SR2PC channel */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
		tsi721_dbell_handler((struct rio_mport *)ptr);

	/* Clear interrupts */
	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	/* Read back to ensure that interrupt was cleared */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));

	return IRQ_HANDLED;
}

/**
 * tsi721_request_msix - register interrupt service for MSI-X mode.
 * @mport: RapidIO master port structure
 *
 * Registers MSI-X interrupt service routines for interrupts that are active
 * immediately after mport initialization. Messaging interrupt service routines
 * should be registered during corresponding open requests.
 */
static int tsi721_request_msix(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err = 0;

	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
			tsi721_sr2pc_ch_msix, 0,
			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
	if (err)
		goto out;

	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
			tsi721_srio_msix, 0,
			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
	if (err)
		free_irq(
			priv->msix[TSI721_VECT_IDB].vector,
			(void *)mport);
out:
	return err;
}
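/*
 * MSI-X allocation note for the function below: the legacy
 * pci_enable_msix() interface either grants the exact vector set that
 * was requested or fails; a positive return value reports how many
 * vectors were actually available. This driver treats both outcomes as
 * "no MSI-X", so the probe path can fall back to MSI or INTA.
 */
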
/**
 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 * @priv: pointer to tsi721 private data
 *
 * Configures MSI-X support for Tsi721. Supports only an exact number
 * of requested vectors.
 */
static int tsi721_enable_msix(struct tsi721_device *priv)
{
	struct msix_entry entries[TSI721_VECT_MAX];
	int err;
	int i;

	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;

	/*
	 * Initialize MSI-X entries for Messaging Engine:
	 * this driver supports four RIO mailboxes (inbound and outbound)
	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
	 * offset +4 is added to IB MBOX number.
	 */
	for (i = 0; i < RIO_MAX_MBOX; i++) {
		entries[TSI721_VECT_IMB0_RCV + i].entry =
					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
		entries[TSI721_VECT_IMB0_INT + i].entry =
					TSI721_MSIX_IMSG_INT(i + 4);
		entries[TSI721_VECT_OMB0_DONE + i].entry =
					TSI721_MSIX_OMSG_DONE(i);
		entries[TSI721_VECT_OMB0_INT + i].entry =
					TSI721_MSIX_OMSG_INT(i);
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	/*
	 * Initialize MSI-X entries for Block DMA Engine:
	 * this driver supports TSI721_DMA_CHNUM DMA channels
	 * (one is reserved for SRIO maintenance transactions)
	 */
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		entries[TSI721_VECT_DMA0_DONE + i].entry =
					TSI721_MSIX_DMACH_DONE(i);
		entries[TSI721_VECT_DMA0_INT + i].entry =
					TSI721_MSIX_DMACH_INT(i);
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			dev_info(&priv->pdev->dev,
				 "Only %d MSI-X vectors available, "
				 "not using MSI-X\n", err);
		else
			dev_err(&priv->pdev->dev,
				"Failed to enable MSI-X (err=%d)\n", err);
		return err;
	}

	/*
	 * Copy MSI-X vector information into tsi721 private structure
	 */
	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));

	for (i = 0; i < RIO_MAX_MBOX; i++) {
		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
				entries[TSI721_VECT_IMB0_RCV + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
				entries[TSI721_VECT_IMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
				entries[TSI721_VECT_OMB0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
				entries[TSI721_VECT_OMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
			 i, pci_name(priv->pdev));
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
				entries[TSI721_VECT_DMA0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
				entries[TSI721_VECT_DMA0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
			 i, pci_name(priv->pdev));
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	return 0;
}
#endif /* CONFIG_PCI_MSI */
static int tsi721_request_irq(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		err = tsi721_request_msix(mport);
	else
#endif
		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
			  DRV_NAME, (void *)mport);

	if (err)
		dev_err(&priv->pdev->dev,
			"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

/**
 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
 * translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables SREP translation regions.
 */
static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all PC2SR translation windows */
	for (i = 0; i < TSI721_OBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
}

/**
 * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
 * @mport: RapidIO master port
 * @lstart: Local memory space start address.
 * @rstart: RapidIO space start address.
 * @size: The mapping region size.
 * @flags: Flags for mapping. 0 for using default flags.
 *
 * Return: %0 on success, %-EINVAL on bad size/alignment, or %-EBUSY
 * if no free inbound window is available.
 *
 * This function will create the inbound mapping
 * from rstart to lstart.
 */
static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
		u64 rstart, u32 size, u32 flags)
{
	struct tsi721_device *priv = mport->priv;
	int i;
	u32 regval;

	if (!is_power_of_2(size) || size < 0x1000 ||
	    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
		return -EINVAL;

	/* Search for free inbound translation window */
	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
		if (!(regval & TSI721_IBWIN_LB_WEN))
			break;
	}

	if (i >= TSI721_IBWIN_NUM) {
		dev_err(&priv->pdev->dev,
			"Unable to find free inbound window\n");
		return -EBUSY;
	}

	iowrite32(TSI721_IBWIN_SIZE(size) << 8,
		priv->regs + TSI721_IBWIN_SZ(i));

	iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i));
	iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD),
		  priv->regs + TSI721_IBWIN_TLA(i));

	iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i));
	iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
		priv->regs + TSI721_IBWIN_LB(i));
	dev_dbg(&priv->pdev->dev,
		"Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n",
		i, rstart, (unsigned long long)lstart);

	return 0;
}

/**
 * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
 * @mport: RapidIO master port
 * @lstart: Local memory space start address.
 */
static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
				dma_addr_t lstart)
{
	struct tsi721_device *priv = mport->priv;
	int i;
	u64 addr;
	u32 regval;

	/* Search for matching active inbound translation window */
	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
		if (regval & TSI721_IBWIN_LB_WEN) {
			regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i));
			addr = (u64)regval << 32;
			regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i));
			addr |= regval & TSI721_IBWIN_TLA_ADD;

			if (addr == (u64)lstart) {
				iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
				break;
			}
		}
	}
}
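/*
 * Usage sketch (assuming the generic RapidIO inbound-mapping API, which
 * routes rio_map_inb_region() to the handlers above): a client that
 * wants remote devices to reach a local DMA buffer might do
 *
 *	void *buf = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
 *
 *	if (buf && rio_map_inb_region(mport, handle, RIO_BASE, SZ_64K, 0))
 *		pr_err("inbound window setup failed\n");
 *
 * where RIO_BASE is a caller-chosen RapidIO address, shown here only
 * for illustration. Both addresses must be aligned to the window size,
 * which must be a power of two of at least 4 KB, per the checks in
 * tsi721_rio_map_inb_mem().
 */
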
/**
 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
 * translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables inbound windows.
 */
static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all SR2PC inbound windows */
	for (i = 0; i < TSI721_IBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
}

/**
 * tsi721_port_write_init - Inbound port write interface init
 * @priv: pointer to tsi721 private data
 *
 * Initializes inbound port write handler.
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_port_write_init(struct tsi721_device *priv)
{
	priv->pw_discard_count = 0;
	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
	spin_lock_init(&priv->pw_fifo_lock);
	if (kfifo_alloc(&priv->pw_fifo,
			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
		return -ENOMEM;
	}

	/* Use reliable port-write capture mode */
	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
	return 0;
}

static int tsi721_doorbell_init(struct tsi721_device *priv)
{
	/* Outbound Doorbells do not require any setup.
	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
	 * That BAR1 was mapped during the probe routine.
	 */

	/* Initialize Inbound Doorbell processing DPC and queue */
	priv->db_discard_count = 0;
	INIT_WORK(&priv->idb_work, tsi721_db_dpc);

	/* Allocate buffer for inbound doorbells queue */
	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
				&priv->idb_dma, GFP_KERNEL);
	if (!priv->idb_base)
		return -ENOMEM;

	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
		priv->idb_base, (unsigned long long)priv->idb_dma);

	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma >> 32),
		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
	/* Enable accepting all inbound doorbells */
	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));

	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));

	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	return 0;
}

static void tsi721_doorbell_free(struct tsi721_device *priv)
{
	if (priv->idb_base == NULL)
		return;

	/* Free buffer allocated for inbound doorbell queue */
	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
			  priv->idb_base, priv->idb_dma);
	priv->idb_base = NULL;
}
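/*
 * Queue model note: the inbound doorbell queue set up above is a
 * classic producer/consumer ring in coherent memory. Hardware appends
 * entries and advances TSI721_IDQ_WP; tsi721_db_dpc() consumes entries
 * and writes the new read index back through TSI721_IDQ_RP. The 64-bit
 * queue base address is split across the IDQ_BASEU/IDQ_BASEL register
 * pair.
 */
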
/**
 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
 * @priv: pointer to tsi721 private data
 *
 * Initialize BDMA channel allocated for RapidIO maintenance read/write
 * request generation
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
	struct tsi721_dma_desc *bd_ptr;
	u64		*sts_ptr;
	dma_addr_t	bd_phys, sts_phys;
	int		sts_size;
	int		bd_num = 2;
	void __iomem	*regs;

	dev_dbg(&priv->pdev->dev,
		"Init Block DMA Engine for Maintenance requests, CH%d\n",
		TSI721_DMACH_MAINT);

	/*
	 * Initialize DMA channel for maintenance requests
	 */

	priv->mdma.ch_id = TSI721_DMACH_MAINT;
	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
					bd_num * sizeof(struct tsi721_dma_desc),
					&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	priv->mdma.bd_num = bd_num;
	priv->mdma.bd_phys = bd_phys;
	priv->mdma.bd_base = bd_ptr;

	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(&priv->pdev->dev,
				  bd_num * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		priv->mdma.bd_base = NULL;
		return -ENOMEM;
	}

	priv->mdma.sts_phys = sts_phys;
	priv->mdma.sts_base = sts_ptr;
	priv->mdma.sts_size = sts_size;

	dev_dbg(&priv->pdev->dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		  regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		  regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		  regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);

	ioread32(regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
	ioread32(regs + TSI721_DMAC_CTL);
	udelay(10);

	return 0;
}
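/*
 * Ring layout note: the maintenance channel gets the smallest legal
 * ring, bd_num = 2. Slot 0 holds the single DTYPE2 request descriptor
 * that tsi721_maint_dma() rewrites for every transaction, and the last
 * slot is a DTYPE3 link descriptor whose next_lo/next_hi pointer wraps
 * back to the ring base, so the engine can loop indefinitely.
 */
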
static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
	u32 ch_stat;
	struct tsi721_bdma_maint *mdma = &priv->mdma;
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);

	if (mdma->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(&priv->pdev->dev,
			  mdma->bd_num * sizeof(struct tsi721_dma_desc),
			  mdma->bd_base, mdma->bd_phys);
	mdma->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(&priv->pdev->dev,
			  mdma->sts_size * sizeof(struct tsi721_dma_sts),
			  mdma->sts_base, mdma->sts_phys);
	mdma->sts_base = NULL;
	return 0;
}

/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
				  u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Enable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Disable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_IMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}
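/*
 * Pattern note for this group of helpers (inbound above, outbound
 * below): each one first clears pending events, then edits the
 * per-channel enable mask, and only touches the device-level
 * TSI721_DEV_CHAN_INTE mask when running in MSI or INTA mode. Under
 * MSI-X every channel already has a dedicated vector, so the extra
 * device-level routing step is unnecessary.
 */
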
/* Enable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
				  u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Enable Outbound Messaging channel interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Disable Outbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_OMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}

/**
 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 */
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *desc;
	u32 tx_slot;

	if (!priv->omsg_init[mbox] ||
	    len > TSI721_MSG_MAX_SIZE || len < 8)
		return -EINVAL;

	tx_slot = priv->omsg_ring[mbox].tx_slot;

	/* Copy message into transfer buffer */
	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);

	if (len & 0x7)
		len += 8;

	/* Build descriptor associated with buffer */
	desc = priv->omsg_ring[mbox].omd_base;
	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
	if (tx_slot % 4 == 0)
		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);

	desc[tx_slot].msg_info =
		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
			    (0xe << 12) | (len & 0xff8));
	desc[tx_slot].bufptr_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
			    0xffffffff);
	desc[tx_slot].bufptr_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);

	priv->omsg_ring[mbox].wr_count++;

	/* Go to next descriptor */
	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
		priv->omsg_ring[mbox].tx_slot = 0;
		/* Move through the ring link descriptor at the end */
		priv->omsg_ring[mbox].wr_count++;
	}

	mb();

	/* Set new write count value */
	iowrite32(priv->omsg_ring[mbox].wr_count,
		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));

	return 0;
}
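/*
 * For context: the function above implements the mport's
 * .add_outb_message operation, so (assuming the generic RapidIO
 * messaging API) a client that has opened the mailbox would typically
 * enqueue through the core wrapper rather than calling it directly:
 *
 *	if (rio_add_outb_message(mport, rdev, 0, tx_buf, tx_len))
 *		pr_err("outbound message enqueue failed\n");
 *
 * tx_buf/tx_len here are purely illustrative; any buffer between 8 and
 * TSI721_MSG_MAX_SIZE bytes is accepted, and lengths are padded up to
 * a multiple of 8 by the handler.
 */
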
/**
 * tsi721_omsg_handler - Outbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: number of OB MSG channel to service
 *
 * Services channel interrupts from outbound messaging engine.
 */
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
{
	u32 omsg_int;

	spin_lock(&priv->omsg_ring[ch].lock);

	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));

	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
		dev_info(&priv->pdev->dev,
			"OB MBOX%d: Status FIFO is full\n", ch);

	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
		u32 srd_ptr;
		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
		int i, j;
		u32 tx_slot;

		/*
		 * Find last successfully processed descriptor
		 */

		/* Check and clear descriptor status FIFO entries */
		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
		sts_ptr = priv->omsg_ring[ch].sts_base;
		j = srd_ptr * 8;
		while (sts_ptr[j]) {
			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
				prev_ptr = last_ptr;
				last_ptr = le64_to_cpu(sts_ptr[j]);
				sts_ptr[j] = 0;
			}

			++srd_ptr;
			srd_ptr %= priv->omsg_ring[ch].sts_size;
			j = srd_ptr * 8;
		}

		if (last_ptr == 0)
			goto no_sts_update;

		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));

		if (!priv->mport->outb_msg[ch].mcback)
			goto no_sts_update;

		/* Inform upper layer about transfer completion */

		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);

		/*
		 * Check if this is a Link Descriptor (LD).
		 * If yes, ignore LD and use descriptor processed
		 * before LD.
		 */
		if (tx_slot == priv->omsg_ring[ch].size) {
			if (prev_ptr)
				tx_slot = (prev_ptr -
					(u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);
			else
				goto no_sts_update;
		}

		/* Move slot index to the next message to be sent */
		++tx_slot;
		if (tx_slot == priv->omsg_ring[ch].size)
			tx_slot = 0;
		BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
		priv->mport->outb_msg[ch].mcback(priv->mport,
				priv->omsg_ring[ch].dev_id, ch,
				tx_slot);
	}

no_sts_update:

	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
		/*
		 * Outbound message operation aborted due to error,
		 * reinitialize OB MSG channel
		 */

		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
			ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));

		iowrite32(TSI721_OBDMAC_INT_ERROR,
				priv->regs + TSI721_OBDMAC_INT(ch));
		iowrite32(TSI721_OBDMAC_CTL_INIT,
				priv->regs + TSI721_OBDMAC_CTL(ch));
		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));

		/* Inform upper level to clear all pending tx slots */
		if (priv->mport->outb_msg[ch].mcback)
			priv->mport->outb_msg[ch].mcback(priv->mport,
					priv->omsg_ring[ch].dev_id, ch,
					priv->omsg_ring[ch].tx_slot);
		/* Synch tx_slot tracking */
		iowrite32(priv->omsg_ring[ch].tx_slot,
			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
		priv->omsg_ring[ch].sts_rdptr = 0;
	}

	/* Clear channel interrupts */
	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->omsg_ring[ch].lock);
}
/**
 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
 * @mport: Master port implementing Outbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 */
static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
				 int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *bd_ptr;
	int i, rc = 0;

	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	priv->omsg_ring[mbox].dev_id = dev_id;
	priv->omsg_ring[mbox].size = entries;
	priv->omsg_ring[mbox].sts_rdptr = 0;
	spin_lock_init(&priv->omsg_ring[mbox].lock);

	/* Outbound Msg Buffer allocation based on
	   the number of maximum descriptor entries */
	for (i = 0; i < entries; i++) {
		priv->omsg_ring[mbox].omq_base[i] =
			dma_alloc_coherent(
				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
				&priv->omsg_ring[mbox].omq_phys[i],
				GFP_KERNEL);
		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate OB MSG data buffer for"
				" MBOX%d\n", mbox);
			rc = -ENOMEM;
			goto out_buf;
		}
	}

	/* Outbound message descriptor allocation */
	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
				&priv->pdev->dev,
				(entries + 1) * sizeof(struct tsi721_omsg_desc),
				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].omd_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor memory "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	priv->omsg_ring[mbox].tx_slot = 0;

	/* Outbound message descriptor status FIFO allocation */
	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
			priv->omsg_ring[mbox].sts_size *
						sizeof(struct tsi721_dma_sts),
			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].sts_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor status FIFO "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_desc;
	}

	/*
	 * Configure Outbound Messaging Engine
	 */

	/* Setup Outbound Message descriptor pointer */
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
					TSI721_OBDMAC_DPTRL_MASK),
			priv->regs + TSI721_OBDMAC_DPTRL(mbox));

	/* Setup Outbound Message descriptor status FIFO */
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
			priv->regs + TSI721_OBDMAC_DSBH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
					TSI721_OBDMAC_DSBL_MASK),
			priv->regs + TSI721_OBDMAC_DSBL(mbox));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"MBOX%d-DONE\n", mbox);
			goto out_stat;
		}

		rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"MBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
				(void *)mport);
			goto out_stat;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);

	/* Initialize Outbound Message descriptors ring */
	bd_ptr = priv->omsg_ring[mbox].omd_base;
	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
	bd_ptr[entries].msg_info = 0;
	bd_ptr[entries].next_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
		TSI721_OBDMAC_DPTRL_MASK);
	bd_ptr[entries].next_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
	priv->omsg_ring[mbox].wr_count = 0;
	mb();

	/* Initialize Outbound Message engine */
	iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	udelay(10);

	priv->omsg_init[mbox] = 1;

	return 0;

#ifdef CONFIG_PCI_MSI
out_stat:
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_desc:
	dma_free_coherent(&priv->pdev->dev,
		(entries + 1) * sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

out_buf:
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
				TSI721_MSG_BUFFER_SIZE,
				priv->omsg_ring[mbox].omq_base[i],
				priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}

out:
	return rc;
}

/**
 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 */
static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 i;

	if (!priv->omsg_init[mbox])
		return;
	priv->omsg_init[mbox] = 0;

	/* Disable Interrupts */

	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			 (void *)mport);
		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			 (void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free OMSG Descriptor Status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;

	/* Free OMSG descriptors */
	dma_free_coherent(&priv->pdev->dev,
		(priv->omsg_ring[mbox].size + 1) *
			sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

	/* Free message buffers */
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
					  TSI721_MSG_BUFFER_SIZE,
					  priv->omsg_ring[mbox].omq_base[i],
					  priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}
}

/**
 * tsi721_imsg_handler - Inbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: inbound message channel number to service
 *
 * Services channel interrupts from inbound messaging engine.
 */
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
{
	u32 mbox = ch - 4;
	u32 imsg_int;

	spin_lock(&priv->imsg_ring[mbox].lock);

	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));

	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
		dev_info(&priv->pdev->dev,
			"IB MBOX%d IB free queue low\n", mbox);

	/* Clear IB channel interrupts */
	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));

	/* If an IB Msg is received notify the upper layer */
	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
		priv->mport->inb_msg[mbox].mcback)
		priv->mport->inb_msg[mbox].mcback(priv->mport,
				priv->imsg_ring[mbox].dev_id, mbox, -1);

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->imsg_ring[mbox].lock);
}
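/*
 * Channel numbering note: the inbound messaging helpers take a hardware
 * channel number, not a mailbox number. Mailboxes 0..3 map onto IB
 * channels 4..7 (hence mbox = ch - 4 above and ch = mbox + 4 below),
 * while IB channels 0..3 are left untouched by this driver.
 */
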

/**
 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 */
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
				int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	int ch = mbox + 4;
	int i;
	u64 *free_ptr;
	int rc = 0;

	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
	    (entries > TSI721_IMSGD_RING_SIZE) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].buf_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out;
	}

	/* Allocate circular free list (one 64-bit buffer pointer per entry) */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d\n",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/* Fill free buffer pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * TSI721_MSG_BUFFER_SIZE);

	mb();

	/*
	 * For mapping of inbound SRIO Messages into appropriate queues we need
	 * to set Inbound Device ID register in the messaging engine. We do it
	 * once when the first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport->host_deviceid,
			  priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

	/* Setup Inbound Message free queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		  priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
		   TSI721_IBDMAC_FQBL_MASK),
		  priv->regs + TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		  priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		  priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		  priv->regs + TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		  priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-DONE\n", mbox);
			goto out_desc;
		}

		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}
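
/*
 * For reference, the per-mailbox inbound ring set up above consists of
 * three coherent DMA allocations, each sized by the requested number of
 * ring entries:
 *
 *	buf_base:  entries * TSI721_MSG_BUFFER_SIZE   receive data buffers
 *	imfq_base: entries * sizeof(u64)              circular free-buffer list
 *	imd_base:  entries * sizeof(struct tsi721_imsg_desc)   RX descriptors
 */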

/**
 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Mailbox to close
 */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine interrupts */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
			 (void *)mport);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
			 (void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Clear Inbound Buffer Queue */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}

/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 */
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int rc = 0;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
		dev_err(&priv->pdev->dev,
			"Error adding inbound buffer %d, buffer exists\n",
			rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;

	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].rx_slot = 0;

out:
	return rc;
}
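
/*
 * Usage sketch (hypothetical client code, not part of this driver): these
 * inbound entry points are normally reached through the generic RapidIO
 * messaging API. A consumer might pre-post buffers and then drain the
 * mailbox from its receive callback:
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, 64, rx_cb);
 *	while (have_spare_buffers())
 *		rio_add_inb_buffer(mport, 0, next_spare_buffer());
 *
 * and, inside rx_cb():
 *
 *	while ((msg = rio_get_inb_message(mport, 0)) != NULL)
 *		consume(msg);
 *
 * have_spare_buffers(), next_spare_buffer() and consume() are placeholders.
 */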

/**
 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 *
 * Returns pointer to the message on success or NULL on failure.
 */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
		  le32_to_cpu(desc->bufptr_lo);

	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		  priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		  priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}
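
/*
 * Note on the handoff implemented above: a descriptor whose TSI721_IMD_HO
 * bit is set has been filled by the messaging engine. After copying the
 * payload out, the driver clears HO, advances the descriptor read pointer
 * (DQRP), then recycles the hardware buffer through the free queue and
 * advances its write pointer (FQWP).
 */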

/**
 * tsi721_messages_init - Initialization of Messaging Engine
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 messaging engine.
 */
static int tsi721_messages_init(struct tsi721_device *priv)
{
	int ch;

	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);

	/* Set SRIO Message Request/Response Timeout */
	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);

	/* Initialize Inbound Messaging Engine Registers */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
		/* Clear interrupt bits */
		iowrite32(TSI721_IBDMAC_INT_MASK,
			  priv->regs + TSI721_IBDMAC_INT(ch));
		/* Clear Status */
		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));

		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
			  priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
			  priv->regs + TSI721_SMSG_ECC_NCOR(ch));
	}

	return 0;
}

/**
 * tsi721_disable_ints - disables all device interrupts
 * @priv: pointer to tsi721 private data
 */
static void tsi721_disable_ints(struct tsi721_device *priv)
{
	int ch;

	/* Disable all device level interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_INTE);

	/* Disable all Device Channel interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);

	/* Disable all Inbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));

	/* Disable all Outbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));

	/* Disable all general messaging interrupts */
	iowrite32(0, priv->regs + TSI721_SMSG_INTE);

	/* Disable all BDMA Channel interrupts */
	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
		iowrite32(0,
			  priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);

	/* Disable all general BDMA interrupts */
	iowrite32(0, priv->regs + TSI721_BDMA_INTE);

	/* Disable all SRIO Channel interrupts */
	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));

	/* Disable all general SR2PC interrupts */
	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);

	/* Disable all PC2SR interrupts */
	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);

	/* Disable all I2C interrupts */
	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);

	/* Disable SRIO MAC interrupts */
	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}
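
/*
 * tsi721_disable_ints() is called early in tsi721_probe(), before any
 * interrupt handler is hooked up, so the device stays quiescent while
 * rings and address mappings are being initialized.
 */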

/**
 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 as RapidIO master port.
 */
static int tsi721_setup_mport(struct tsi721_device *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err = 0;
	struct rio_ops *ops;
	struct rio_mport *mport;

	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
	if (!ops) {
		dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
		return -ENOMEM;
	}

	ops->lcread = tsi721_lcread;
	ops->lcwrite = tsi721_lcwrite;
	ops->cread = tsi721_cread_dma;
	ops->cwrite = tsi721_cwrite_dma;
	ops->dsend = tsi721_dsend;
	ops->open_inb_mbox = tsi721_open_inb_mbox;
	ops->close_inb_mbox = tsi721_close_inb_mbox;
	ops->open_outb_mbox = tsi721_open_outb_mbox;
	ops->close_outb_mbox = tsi721_close_outb_mbox;
	ops->add_outb_message = tsi721_add_outb_message;
	ops->add_inb_buffer = tsi721_add_inb_buffer;
	ops->get_inb_message = tsi721_get_inb_message;
	ops->map_inb = tsi721_rio_map_inb_mem;
	ops->unmap_inb = tsi721_rio_unmap_inb_mem;

	mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
	if (!mport) {
		kfree(ops);
		dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
		return -ENOMEM;
	}

	mport->ops = ops;
	mport->index = 0;
	mport->sys_size = 0; /* small system */
	mport->phy_type = RIO_PHY_SERIAL;
	mport->priv = (void *)priv;
	mport->phys_efptr = 0x100;
	priv->mport = mport;

	INIT_LIST_HEAD(&mport->dbells);

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
	snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
		 dev_driver_string(&pdev->dev), dev_name(&pdev->dev));

	/* Hook up interrupt handler */

#ifdef CONFIG_PCI_MSI
	if (!tsi721_enable_msix(priv))
		priv->flags |= TSI721_USING_MSIX;
	else if (!pci_enable_msi(pdev))
		priv->flags |= TSI721_USING_MSI;
	else
		dev_info(&pdev->dev,
			 "MSI/MSI-X is not available. Using legacy INTx.\n");
#endif /* CONFIG_PCI_MSI */

	err = tsi721_request_irq(mport);

	if (!err) {
		tsi721_interrupts_init(priv);
		ops->pwenable = tsi721_pw_enable;
	} else {
		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
			"vector %02X err=0x%x\n", pdev->irq, err);
		goto err_exit;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	tsi721_register_dma(priv);
#endif
	/* Enable SRIO link */
	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
		  TSI721_DEVCTL_SRBOOT_CMPL,
		  priv->regs + TSI721_DEVCTL);

	rio_register_mport(mport);

	if (mport->host_deviceid >= 0)
		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
			  RIO_PORT_GEN_DISCOVERED,
			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
	else
		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));

	return 0;

err_exit:
	kfree(mport);
	kfree(ops);
	return err;
}
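
/*
 * Note: after rio_register_mport() the RapidIO core may begin enumeration
 * (when host_deviceid >= 0) or discovery on this port; the PORT_GEN_CTL
 * write above advertises the host/master/discovered role through the
 * port's standard CSR block at phys_efptr (0x100).
 */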

static int tsi721_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct tsi721_device *priv;
	int err;

	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
		err = -ENOMEM;
		goto err_exit;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		goto err_clean;
	}

	priv->pdev = pdev;

#ifdef DEBUG
	{
		int i;

		for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
			dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
				i, (unsigned long long)pci_resource_start(pdev, i),
				(unsigned long)pci_resource_len(pdev, i),
				pci_resource_flags(pdev, i));
		}
	}
#endif
	/*
	 * Verify BAR configuration
	 */

	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured CSR BAR0, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured Doorbell BAR1, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/*
	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe
	 * address space.
	 * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
	 * It may be a good idea to keep them disabled using HW configuration
	 * to save PCI memory space.
	 */
	if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64))
		dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");

	if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64))
		dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	priv->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!priv->regs) {
		dev_err(&pdev->dev,
			"Unable to map device registers space, aborting\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
	if (!priv->odb_base) {
		dev_err(&pdev->dev,
			"Unable to map outbound doorbells space, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_info(&pdev->dev, "Unable to set DMA mask\n");
			goto err_unmap_bars;
		}

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	}

	BUG_ON(!pci_is_pcie(pdev));

	/* Clear "no snoop" and "relaxed ordering" bits, use the 512B default MRRS. */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN,
		0x2 << MAX_READ_REQUEST_SZ_SHIFT);
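
	/*
	 * For reference: PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of the
	 * PCIe Device Control register, so the value 0x2 written above
	 * selects a 512-byte maximum read request size, which is also the
	 * PCIe specification default.
	 */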

	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);

	/*
	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
	 */
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
			       TSI721_MSIXTBL_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
			       TSI721_MSIXPBA_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
	/* End of FIXUP */

	tsi721_disable_ints(priv);

	tsi721_init_pc2sr_mapping(priv);
	tsi721_init_sr2pc_mapping(priv);

	if (tsi721_bdma_maint_init(priv)) {
		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	err = tsi721_doorbell_init(priv);
	if (err)
		goto err_free_bdma;

	tsi721_port_write_init(priv);

	err = tsi721_messages_init(priv);
	if (err)
		goto err_free_consistent;

	err = tsi721_setup_mport(priv);
	if (err)
		goto err_free_consistent;

	return 0;

err_free_consistent:
	tsi721_doorbell_free(priv);
err_free_bdma:
	tsi721_bdma_maint_free(priv);
err_unmap_bars:
	if (priv->regs)
		iounmap(priv->regs);
	if (priv->odb_base)
		iounmap(priv->odb_base);
err_free_res:
	pci_release_regions(pdev);
	pci_clear_master(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_clean:
	kfree(priv);
err_exit:
	return err;
}

static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);

static struct pci_driver tsi721_driver = {
	.name		= "tsi721",
	.id_table	= tsi721_pci_tbl,
	.probe		= tsi721_probe,
};

static int __init tsi721_init(void)
{
	return pci_register_driver(&tsi721_driver);
}

device_initcall(tsi721_init);

MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");
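
/*
 * Note: this driver registers no .remove() callback and no module_exit(),
 * and hooks in via device_initcall(), so once it binds to a Tsi721 it is
 * not expected to be unloaded or unbound at runtime.
 */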