/*
 * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Chul Kim <chul.kim@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

#define DEBUG_PW	/* Inbound Port-Write debugging */

static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);

/**
 * tsi721_lcread - read from local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location for the value to be read into
 *
 * Generates a local SREP space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
			 int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	*data = ioread32(priv->regs + offset);

	return 0;
}

/**
 * tsi721_lcwrite - write into local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a local write into SREP configuration space. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
			  int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	iowrite32(data, priv->regs + offset);

	return 0;
}
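/*
 * Note: RapidIO maintenance transactions are not generated through a
 * dedicated register interface.  Instead, each request is encoded as a
 * single Type-2 (DTYPE2) descriptor and executed by the BDMA channel
 * reserved for maintenance operations (TSI721_DMACH_MAINT).  The helper
 * below polls channel status until the transfer completes.
 */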
/**
 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 *                    transactions using designated Tsi721 DMA channel.
 * @priv: pointer to tsi721 private data
 * @sys_size: RapidIO transport system size
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location of the data to write, or buffer for the read result
 * @do_wr: Operation flag (1 == MAINT_WR)
 *
 * Generates a RapidIO maintenance transaction (Read or Write).
 * Returns %0 on success, or %-EINVAL or %-EIO on failure.
 */
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
			    u16 destid, u8 hopcount, u32 offset, int len,
			    u32 *data, int do_wr)
{
	struct tsi721_dma_desc *bd_ptr;
	u32 rd_count, swr_ptr, ch_stat;
	int i, err = 0;
	u32 op = do_wr ? MAINT_WR : MAINT_RD;

	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
		return -EINVAL;

	bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;

	rd_count = ioread32(
			priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));

	/* Initialize DMA descriptor */
	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
	bd_ptr[0].raddr_hi = 0;
	if (do_wr)
		bd_ptr[0].data[0] = cpu_to_be32p(data);
	else
		bd_ptr[0].data[0] = 0xffffffff;

	mb();

	/* Start DMA operation: make both the request descriptor and the
	 * ring link descriptor that follows it available to the channel.
	 */
	iowrite32(rd_count + 2,
		priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
	ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
	i = 0;

	/* Wait until DMA transfer is finished */
	while ((ch_stat = ioread32(priv->regs +
		TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
		udelay(1);
		if (++i >= 5000000) {
			dev_dbg(&priv->pdev->dev,
				"%s : DMA[%d] transfer timeout ch_status=%x\n",
				__func__, TSI721_DMACH_MAINT, ch_stat);
			if (!do_wr)
				*data = 0xffffffff;
			err = -EIO;
			goto err_out;
		}
	}

	if (ch_stat & TSI721_DMAC_STS_ABORT) {
		/* If DMA operation aborted due to error,
		 * reinitialize DMA channel
		 */
		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
			__func__, ch_stat);
		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
		iowrite32(TSI721_DMAC_INT_ALL,
			priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
		iowrite32(TSI721_DMAC_CTL_INIT,
			priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
		udelay(10);
		iowrite32(0, priv->regs +
				TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
		udelay(1);
		if (!do_wr)
			*data = 0xffffffff;
		err = -EIO;
		goto err_out;
	}

	if (!do_wr)
		*data = be32_to_cpu(bd_ptr[0].data[0]);

	/*
	 * Update descriptor status FIFO RD pointer.
	 * NOTE: Skipping check and clear FIFO entries because we are waiting
	 * for transfer to be completed.
	 */
	swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
	iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
err_out:

	return err;
}
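/*
 * Descriptor encoding example (for illustration only, assuming a small
 * 8-bit destID system, sys_size = 0): a 4-byte maintenance read of offset
 * 0x68 on a device one hop away is built by tsi721_maint_dma() as:
 *
 *   type_id  = (DTYPE2 << 29) | (MAINT_RD << 19) | destid
 *   bcount   = (0 << 26) | 0x04     - 4-byte transfer
 *   raddr_lo = (1 << 24) | 0x68     - hopcount in bits 31:24, offset below
 *   raddr_hi = 0
 */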
/**
 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
 *                    using Tsi721 BDMA engine.
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read into
 *
 * Generates a RapidIO maintenance read transaction.
 * Returns %0 on success, or %-EINVAL or %-EIO on failure.
 */
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
			    u8 hopcount, u32 offset, int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, data, 0);
}

/**
 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
 *                     using Tsi721 BDMA engine
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a RapidIO maintenance write transaction.
 * Returns %0 on success, or %-EINVAL or %-EIO on failure.
 */
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
			     u8 hopcount, u32 offset, int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 temp = data;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, &temp, 1);
}
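/*
 * Port-write handling is split in two stages: the interrupt handler below
 * copies the captured message (four 32-bit capture registers) into a kfifo,
 * and the pw_work worker drains that FIFO, passing each message to
 * rio_inb_pwrite_handler().  Messages arriving while the FIFO is full are
 * dropped and counted in pw_discard_count.
 */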
/**
 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound port-write interrupts. Copies the PW message from an
 * internal buffer into the PW message FIFO and schedules the deferred
 * routine that processes queued messages.
 */
static int
tsi721_pw_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 pw_stat;
	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];


	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);

	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));

		/* Queue PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		spin_lock(&priv->pw_fifo_lock);
		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
			kfifo_in(&priv->pw_fifo, pw_buf,
						TSI721_RIO_PW_MSG_SIZE);
		else
			priv->pw_discard_count++;
		spin_unlock(&priv->pw_fifo_lock);
	}

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);

	schedule_work(&priv->pw_work);

	return 0;
}

static void tsi721_pw_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						  pw_work);
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
							buffer for RIO layer */

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
		/* Process one message */
#ifdef DEBUG_PW
		{
		u32 i;
		pr_debug("%s : Port-Write Message:", __func__);
		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
			pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
				msg_buffer[i], msg_buffer[i + 1],
				msg_buffer[i + 2], msg_buffer[i + 3]);
			i += 4;
		}
		pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
	}
}

/**
 * tsi721_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
	struct tsi721_device *priv = mport->priv;
	u32 rval;

	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);

	if (enable)
		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
	else
		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);
	/* Update enable bits */
	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);

	return 0;
}
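/*
 * Outbound doorbells are generated by a single 16-bit store into the
 * doorbell BAR; the offset encodes the transport type and destination ID.
 * For example (illustration only), destid 0x5 in a small (8-bit destID)
 * system yields offset (RIO_TT_CODE_8 << 18) | (0x5 << 2) = 0x14.
 */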
/**
 * tsi721_dsend - Send a RapidIO doorbell
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell
 *
 * Sends a RapidIO doorbell message. Always returns %0.
 */
static int tsi721_dsend(struct rio_mport *mport, int index,
			u16 destid, u16 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 offset;

	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
		 (destid << 2);

	dev_dbg(&priv->pdev->dev,
		"Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
	iowrite16be(data, priv->odb_base + offset);

	return 0;
}

/**
 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
 * buffer into DB message FIFO and schedules deferred routine to process
 * queued DBs.
 */
static int
tsi721_dbell_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 regval;

	/* Disable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval &= ~TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	schedule_work(&priv->idb_work);

	return 0;
}
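/*
 * Inbound doorbells are written by hardware into a circular queue of
 * 64-bit entries (IDB_QSIZE entries, allocated in tsi721_doorbell_init()).
 * The worker below drains the entries between the hardware write pointer
 * and the driver read pointer and dispatches each one to the registered
 * doorbell handler whose info-field range matches.
 */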
static void tsi721_db_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						  idb_work);
	struct rio_mport *mport;
	struct rio_dbell *dbell;
	int found = 0;
	u32 wr_ptr, rd_ptr;
	u64 *idb_entry;
	u32 regval;
	union {
		u64 msg;
		u8  bytes[8];
	} idb;

	/*
	 * Process queued inbound doorbells
	 */
	mport = priv->mport;

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;

	while (wr_ptr != rd_ptr) {
		idb_entry = (u64 *)(priv->idb_base +
				(TSI721_IDB_ENTRY_SIZE * rd_ptr));
		rd_ptr++;
		rd_ptr %= IDB_QSIZE;
		idb.msg = *idb_entry;
		*idb_entry = 0;

		/* Process one doorbell */
		found = 0;
		list_for_each_entry(dbell, &mport->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
				found = 1;
				break;
			}
		}

		if (found) {
			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		} else {
			dev_dbg(&priv->pdev->dev,
				"spurious inb doorbell, sid %2.2x tid %2.2x"
				" info %4.4x\n", DBELL_SID(idb.bytes),
				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		}
	}

	iowrite32(rd_ptr & (IDB_QSIZE - 1),
		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	/* Re-enable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval |= TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
}

/**
 * tsi721_irqhandler - Tsi721 interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
 * interrupt events and calls an event-specific handler(s).
 */
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
	struct rio_mport *mport = (struct rio_mport *)ptr;
	struct tsi721_device *priv = mport->priv;
	u32 dev_int;
	u32 dev_ch_int;
	u32 intval;
	u32 ch_inte;

	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
	if (!dev_int)
		return IRQ_NONE;

	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);

	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
		/* Service SR2PC Channel interrupts */
		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
			/* Service Inbound Doorbell interrupt */
			intval = ioread32(priv->regs +
						TSI721_SR_CHINT(IDB_QUEUE));
			if (intval & TSI721_SR_CHINT_IDBQRCV)
				tsi721_dbell_handler(mport);
			else
				dev_info(&priv->pdev->dev,
					"Unsupported SR_CH_INT %x\n", intval);

			/* Clear interrupts */
			iowrite32(intval,
				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
		}
	}

	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
		int ch;

		/*
		 * Service channel interrupts from Messaging Engine
		 */

		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
			/* Disable signaled IB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Inbound Message interrupt for each MBOX
			 */
			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
					continue;
				tsi721_imsg_handler(priv, ch);
			}
		}

		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
			/* Disable signaled OB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Outbound Message interrupts for each MBOX
			 */

			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
					continue;
				tsi721_omsg_handler(priv, ch);
			}
		}
	}

	if (dev_int & TSI721_DEV_INT_SRIO) {
		/* Service SRIO MAC interrupts */
		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
			tsi721_pw_handler(mport);
	}

	return IRQ_HANDLED;
}

static void tsi721_interrupts_init(struct tsi721_device *priv)
{
	u32 intr;

	/* Enable IDB interrupts */
	iowrite32(TSI721_SR_CHINT_ALL,
		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	iowrite32(TSI721_SR_CHINT_IDBQRCV,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
		priv->regs + TSI721_DEV_CHAN_INTE);

	/* Enable SRIO MAC interrupts */
	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
		priv->regs + TSI721_RIO_EM_DEV_INT_EN);

	if (priv->flags & TSI721_USING_MSIX)
		intr = TSI721_DEV_INT_SRIO;
	else
		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH;

	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
	ioread32(priv->regs + TSI721_DEV_INTE);
}
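/*
 * In MSI-X mode the SR2PC (doorbell) and messaging events arrive on their
 * own dedicated vectors (handlers below), so tsi721_interrupts_init()
 * enables only the SRIO MAC event at the device level; with MSI or INTA
 * everything is demultiplexed by tsi721_irqhandler() above.
 */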
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles outbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
	tsi721_omsg_handler(priv, mbox);
	return IRQ_HANDLED;
}

/**
 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles inbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
	tsi721_imsg_handler(priv, mbox + 4);
	return IRQ_HANDLED;
}

/**
 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SRIO MAC.
 */
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 srio_int;

	/* Service SRIO MAC interrupts */
	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
		tsi721_pw_handler((struct rio_mport *)ptr);

	return IRQ_HANDLED;
}

/**
 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SR2PC Channel.
 * NOTE: At this moment services only one SR2PC channel associated with inbound
 * doorbells.
 */
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 sr_ch_int;

	/* Service Inbound DB interrupt from SR2PC channel */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
		tsi721_dbell_handler((struct rio_mport *)ptr);

	/* Clear interrupts */
	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	/* Read back to ensure that interrupt was cleared */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));

	return IRQ_HANDLED;
}

/**
 * tsi721_request_msix - register interrupt service for MSI-X mode.
 * @mport: RapidIO master port structure
 *
 * Registers MSI-X interrupt service routines for interrupts that are active
 * immediately after mport initialization. Messaging interrupt service routines
 * should be registered during corresponding open requests.
 */
static int tsi721_request_msix(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err = 0;

	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
			tsi721_sr2pc_ch_msix, 0,
			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
	if (err)
		goto out;

	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
			tsi721_srio_msix, 0,
			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
	if (err)
		free_irq(
			priv->msix[TSI721_VECT_IDB].vector,
			(void *)mport);
out:
	return err;
}
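/*
 * The driver uses an all-or-nothing MSI-X allocation: one vector for
 * inbound doorbells, one for SRIO MAC events (port-writes), plus RCV/INT
 * vectors for each of the four inbound and DONE/INT vectors for each of
 * the four outbound messaging mailboxes.
 */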
/**
 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 * @priv: pointer to tsi721 private data
 *
 * Configures MSI-X support for Tsi721. Supports only an exact number
 * of requested vectors.
 */
static int tsi721_enable_msix(struct tsi721_device *priv)
{
	struct msix_entry entries[TSI721_VECT_MAX];
	int err;
	int i;

	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;

	/*
	 * Initialize MSI-X entries for Messaging Engine:
	 * this driver supports four RIO mailboxes (inbound and outbound)
	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
	 * offset +4 is added to IB MBOX number.
	 */
	for (i = 0; i < RIO_MAX_MBOX; i++) {
		entries[TSI721_VECT_IMB0_RCV + i].entry =
					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
		entries[TSI721_VECT_IMB0_INT + i].entry =
					TSI721_MSIX_IMSG_INT(i + 4);
		entries[TSI721_VECT_OMB0_DONE + i].entry =
					TSI721_MSIX_OMSG_DONE(i);
		entries[TSI721_VECT_OMB0_INT + i].entry =
					TSI721_MSIX_OMSG_INT(i);
	}

	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			dev_info(&priv->pdev->dev,
				 "Only %d MSI-X vectors available, "
				 "not using MSI-X\n", err);
		return err;
	}

	/*
	 * Copy MSI-X vector information into tsi721 private structure
	 */
	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));

	for (i = 0; i < RIO_MAX_MBOX; i++) {
		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
				entries[TSI721_VECT_IMB0_RCV + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
				entries[TSI721_VECT_IMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
				entries[TSI721_VECT_OMB0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
				entries[TSI721_VECT_OMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
			 i, pci_name(priv->pdev));
	}

	return 0;
}
#endif /* CONFIG_PCI_MSI */

static int tsi721_request_irq(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		err = tsi721_request_msix(mport);
	else
#endif
		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
			  DRV_NAME, (void *)mport);

	if (err)
		dev_err(&priv->pdev->dev,
			"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
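/*
 * Clearing the low base register is enough to disable a translation window
 * here, since the window-enable bit is part of that register; full window
 * programming is left to the users of the mapping interface.
 */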
/**
 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
 *                             translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables SREP translation regions.
 */
static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all PC2SR translation windows */
	for (i = 0; i < TSI721_OBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
}

/**
 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
 *                             translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables inbound windows.
 */
static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all SR2PC inbound windows */
	for (i = 0; i < TSI721_IBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_IBWINLB(i));
}

/**
 * tsi721_port_write_init - Inbound port write interface init
 * @priv: pointer to tsi721 private data
 *
 * Initializes inbound port write handler.
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_port_write_init(struct tsi721_device *priv)
{
	priv->pw_discard_count = 0;
	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
	spin_lock_init(&priv->pw_fifo_lock);
	if (kfifo_alloc(&priv->pw_fifo,
			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
		return -ENOMEM;
	}

	/* Use reliable port-write capture mode */
	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
	return 0;
}

static int tsi721_doorbell_init(struct tsi721_device *priv)
{
	/* Outbound Doorbells do not require any setup.
	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
	 * That BAR1 was mapped during the probe routine.
	 */

	/* Initialize Inbound Doorbell processing DPC and queue */
	priv->db_discard_count = 0;
	INIT_WORK(&priv->idb_work, tsi721_db_dpc);

	/* Allocate buffer for inbound doorbells queue */
	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
				&priv->idb_dma, GFP_KERNEL);
	if (!priv->idb_base)
		return -ENOMEM;

	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
		priv->idb_base, (unsigned long long)priv->idb_dma);

	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma >> 32),
		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
	/* Enable accepting all inbound doorbells */
	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));

	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));

	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	return 0;
}

static void tsi721_doorbell_free(struct tsi721_device *priv)
{
	if (priv->idb_base == NULL)
		return;

	/* Free buffer allocated for inbound doorbell queue */
	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
			  priv->idb_base, priv->idb_dma);
	priv->idb_base = NULL;
}
static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
{
	struct tsi721_dma_desc *bd_ptr;
	u64		*sts_ptr;
	dma_addr_t	bd_phys, sts_phys;
	int		sts_size;
	int		bd_num = priv->bdma[chnum].bd_num;

	dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);

	/*
	 * Initialize DMA channel for maintenance requests
	 */

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	priv->bdma[chnum].bd_phys = bd_phys;
	priv->bdma[chnum].bd_base = bd_ptr;

	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				&sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(&priv->pdev->dev,
				  bd_num * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		priv->bdma[chnum].bd_base = NULL;
		return -ENOMEM;
	}

	priv->bdma[chnum].sts_phys = sts_phys;
	priv->bdma[chnum].sts_base = sts_ptr;
	priv->bdma[chnum].sts_size = sts_size;

	dev_dbg(&priv->pdev->dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring: the last descriptor is a
	 * Type-3 (DTYPE3) link that points back to the ring base.
	 */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		priv->regs + TSI721_DMAC_DPTRH(chnum));
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		priv->regs + TSI721_DMAC_DPTRL(chnum));

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		priv->regs + TSI721_DMAC_DSBH(chnum));
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		priv->regs + TSI721_DMAC_DSBL(chnum));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		priv->regs + TSI721_DMAC_DSSZ(chnum));

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		priv->regs + TSI721_DMAC_INT(chnum));

	ioread32(priv->regs + TSI721_DMAC_INT(chnum));

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
	ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
{
	u32 ch_stat;

	if (priv->bdma[chnum].bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,
		priv->regs + TSI721_DMAC_CTL(chnum));

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
		priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
	priv->bdma[chnum].bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
		priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
	priv->bdma[chnum].sts_base = NULL;
	return 0;
}
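/*
 * Only the maintenance channel is brought up here: a two-descriptor ring
 * (one request descriptor plus the Type-3 link that closes the ring) is
 * all that tsi721_maint_dma() requires.
 */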
static int tsi721_bdma_init(struct tsi721_device *priv)
{
	/* Initialize BDMA channel allocated for RapidIO maintenance read/write
	 * request generation
	 */
	priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
	if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
		dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
			" channel %d, aborting\n", TSI721_DMACH_MAINT);
		return -ENOMEM;
	}

	return 0;
}

static void tsi721_bdma_free(struct tsi721_device *priv)
{
	tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
}
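/*
 * Messaging interrupts are enabled/disabled on two levels: the per-channel
 * enable register and, when signalling through MSI or INTA, the shared
 * Device Channel enable register.  In MSI-X mode the second level is left
 * alone because each channel already has a dedicated vector.
 */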
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
			     u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Enable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
			      u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Disable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_IMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Enable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
			     u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Enable Outbound Messaging channel interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
			      u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Disable Outbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_OMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}
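/*
 * Outbound messages are described by Type-4 (DTYPE4) descriptors in a ring
 * terminated by a Type-5 link descriptor.  Software tracks its position
 * with wr_count/tx_slot; the extra wr_count increment on ring wrap accounts
 * for the link descriptor, which the hardware must also consume.
 */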
/**
 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 */
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *desc;
	u32 tx_slot;

	if (!priv->omsg_init[mbox] ||
	    len > TSI721_MSG_MAX_SIZE || len < 8)
		return -EINVAL;

	tx_slot = priv->omsg_ring[mbox].tx_slot;

	/* Copy message into transfer buffer */
	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);

	/* Round the byte count up; the low bits are masked off below */
	if (len & 0x7)
		len += 8;

	/* Build descriptor associated with buffer */
	desc = priv->omsg_ring[mbox].omd_base;
	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
	/* Request interrupt-on-finish status on every fourth descriptor */
	if (tx_slot % 4 == 0)
		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);

	desc[tx_slot].msg_info =
		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
			    (0xe << 12) | (len & 0xff8));
	desc[tx_slot].bufptr_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
			    0xffffffff);
	desc[tx_slot].bufptr_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);

	priv->omsg_ring[mbox].wr_count++;

	/* Go to next descriptor */
	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
		priv->omsg_ring[mbox].tx_slot = 0;
		/* Move through the ring link descriptor at the end */
		priv->omsg_ring[mbox].wr_count++;
	}

	mb();

	/* Set new write count value */
	iowrite32(priv->omsg_ring[mbox].wr_count,
		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));

	return 0;
}

/**
 * tsi721_omsg_handler - Outbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: number of OB MSG channel to service
 *
 * Services channel interrupts from outbound messaging engine.
 */
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
{
	u32 omsg_int;

	spin_lock(&priv->omsg_ring[ch].lock);

	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));

	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
		dev_info(&priv->pdev->dev,
			"OB MBOX%d: Status FIFO is full\n", ch);

	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
		u32 srd_ptr;
		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
		int i, j;
		u32 tx_slot;

		/*
		 * Find last successfully processed descriptor
		 */

		/* Check and clear descriptor status FIFO entries */
		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
		sts_ptr = priv->omsg_ring[ch].sts_base;
		j = srd_ptr * 8;
		while (sts_ptr[j]) {
			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
				prev_ptr = last_ptr;
				last_ptr = le64_to_cpu(sts_ptr[j]);
				sts_ptr[j] = 0;
			}

			++srd_ptr;
			srd_ptr %= priv->omsg_ring[ch].sts_size;
			j = srd_ptr * 8;
		}

		if (last_ptr == 0)
			goto no_sts_update;

		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));

		if (!priv->mport->outb_msg[ch].mcback)
			goto no_sts_update;

		/* Inform upper layer about transfer completion */

		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);

		/*
		 * Check if this is a Link Descriptor (LD).
		 * If yes, ignore LD and use descriptor processed
		 * before LD.
		 */
		if (tx_slot == priv->omsg_ring[ch].size) {
			if (prev_ptr)
				tx_slot = (prev_ptr -
					(u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);
			else
				goto no_sts_update;
		}

		/* Move slot index to the next message to be sent */
		++tx_slot;
		if (tx_slot == priv->omsg_ring[ch].size)
			tx_slot = 0;
		BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
		priv->mport->outb_msg[ch].mcback(priv->mport,
				priv->omsg_ring[ch].dev_id, ch,
				tx_slot);
	}

no_sts_update:

	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
		/*
		 * Outbound message operation aborted due to error,
		 * reinitialize OB MSG channel
		 */

		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
			ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));

		iowrite32(TSI721_OBDMAC_INT_ERROR,
			priv->regs + TSI721_OBDMAC_INT(ch));
		iowrite32(TSI721_OBDMAC_CTL_INIT,
			priv->regs + TSI721_OBDMAC_CTL(ch));
		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));

		/* Inform upper level to clear all pending tx slots */
		if (priv->mport->outb_msg[ch].mcback)
			priv->mport->outb_msg[ch].mcback(priv->mport,
					priv->omsg_ring[ch].dev_id, ch,
					priv->omsg_ring[ch].tx_slot);
		/* Synch tx_slot tracking */
		iowrite32(priv->omsg_ring[ch].tx_slot,
			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
		priv->omsg_ring[ch].sts_rdptr = 0;
	}

	/* Clear channel interrupts */
	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->omsg_ring[ch].lock);
}
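/*
 * Ring sizing rules used below: the caller-supplied entry count must be a
 * power of two within the supported range, one extra slot is allocated for
 * the link descriptor, and the status FIFO size is rounded up to the next
 * power of two.
 */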
/**
 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
 * @mport: Master port implementing Outbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 */
static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
				 int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *bd_ptr;
	int i, rc = 0;

	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	priv->omsg_ring[mbox].dev_id = dev_id;
	priv->omsg_ring[mbox].size = entries;
	priv->omsg_ring[mbox].sts_rdptr = 0;
	spin_lock_init(&priv->omsg_ring[mbox].lock);

	/* Outbound Msg Buffer allocation based on
	   the number of maximum descriptor entries */
	for (i = 0; i < entries; i++) {
		priv->omsg_ring[mbox].omq_base[i] =
			dma_alloc_coherent(
				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
				&priv->omsg_ring[mbox].omq_phys[i],
				GFP_KERNEL);
		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate OB MSG data buffer for"
				" MBOX%d\n", mbox);
			rc = -ENOMEM;
			goto out_buf;
		}
	}

	/* Outbound message descriptor allocation */
	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
				&priv->pdev->dev,
				(entries + 1) * sizeof(struct tsi721_omsg_desc),
				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].omd_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor memory "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	priv->omsg_ring[mbox].tx_slot = 0;

	/* Outbound message descriptor status FIFO allocation */
	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
			priv->omsg_ring[mbox].sts_size *
						sizeof(struct tsi721_dma_sts),
			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].sts_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor status FIFO "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_desc;
	}

	/*
	 * Configure Outbound Messaging Engine
	 */

	/* Setup Outbound Message descriptor pointer */
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
					TSI721_OBDMAC_DPTRL_MASK),
			priv->regs + TSI721_OBDMAC_DPTRL(mbox));

	/* Setup Outbound Message descriptor status FIFO */
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
			priv->regs + TSI721_OBDMAC_DSBH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
					TSI721_OBDMAC_DSBL_MASK),
			priv->regs + TSI721_OBDMAC_DSBL(mbox));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"OB MBOX%d-DONE\n", mbox);
			goto out_stat;
		}

		rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"OB MBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
				(void *)mport);
			goto out_stat;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);

	/* Initialize Outbound Message descriptors ring */
	bd_ptr = priv->omsg_ring[mbox].omd_base;
	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
	bd_ptr[entries].msg_info = 0;
	bd_ptr[entries].next_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
			    TSI721_OBDMAC_DPTRL_MASK);
	bd_ptr[entries].next_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
	priv->omsg_ring[mbox].wr_count = 0;
	mb();

	/* Initialize Outbound Message engine */
	iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	udelay(10);

	priv->omsg_init[mbox] = 1;

	return 0;

#ifdef CONFIG_PCI_MSI
out_stat:
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_desc:
	dma_free_coherent(&priv->pdev->dev,
		(entries + 1) * sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

out_buf:
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
				TSI721_MSG_BUFFER_SIZE,
				priv->omsg_ring[mbox].omq_base[i],
				priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}

out:
	return rc;
}
/**
 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 */
static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 i;

	if (!priv->omsg_init[mbox])
		return;
	priv->omsg_init[mbox] = 0;

	/* Disable Interrupts */

	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			 (void *)mport);
		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			 (void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free OMSG Descriptor Status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;

	/* Free OMSG descriptors */
	dma_free_coherent(&priv->pdev->dev,
		(priv->omsg_ring[mbox].size + 1) *
			sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

	/* Free message buffers */
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
					  TSI721_MSG_BUFFER_SIZE,
					  priv->omsg_ring[mbox].omq_base[i],
					  priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}
}
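/*
 * Inbound messaging: mailboxes 0..3 are serviced by messaging engine
 * channels 4..7, hence the "ch = mbox + 4" conversion used below.
 */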
/**
 * tsi721_imsg_handler - Inbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: inbound message channel number to service
 *
 * Services channel interrupts from inbound messaging engine.
 */
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
{
	u32 mbox = ch - 4;
	u32 imsg_int;

	spin_lock(&priv->imsg_ring[mbox].lock);

	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));

	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
		dev_info(&priv->pdev->dev,
			"IB MBOX%d IB free queue low\n", mbox);

	/* Clear IB channel interrupts */
	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));

	/* If an IB Msg is received notify the upper layer */
	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
		priv->mport->inb_msg[mbox].mcback)
		priv->mport->inb_msg[mbox].mcback(priv->mport,
				priv->imsg_ring[mbox].dev_id, mbox, -1);

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->imsg_ring[mbox].lock);
}

/**
 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 */
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
				int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	int ch = mbox + 4;
	int i;
	u64 *free_ptr;
	int rc = 0;

	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
	    (entries > TSI721_IMSGD_RING_SIZE) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].buf_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out;
	}
	/* Allocate memory for circular free list */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d\n",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/* Fill free buffer pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * 0x1000);

	mb();

	/*
	 * For mapping of inbound SRIO Messages into appropriate queues we need
	 * to set Inbound Device ID register in the messaging engine. We do it
	 * once when first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport->host_deviceid,
			priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

	/* Setup Inbound Message free queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
			TSI721_IBDMAC_FQBL_MASK),
		priv->regs + TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		priv->regs + TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IB MBOX%d-RCV\n", mbox);
			goto out_desc;
		}

		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IB MBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}

/**
 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Mailbox to close
 */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine */

	/* Disable Interrupts */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
				(void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Clear Inbound Buffer Queue */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}
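/*
 * Inbound message reception uses two sets of buffers: hardware fills
 * driver-owned buffers taken from the circular free queue, while
 * imq_base[] tracks client buffers registered with tsi721_add_inb_buffer().
 * On fetch, the payload is copied from the hardware buffer into the next
 * pending client buffer and the hardware buffer is recycled.
 */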
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 */
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int rc = 0;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
		dev_err(&priv->pdev->dev,
			"Error adding inbound buffer %d, buffer exists\n",
			rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;

	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].rx_slot = 0;

out:
	return rc;
}
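/*
 * A descriptor with the TSI721_IMD_HO bit set marks a received message.
 * After the payload is copied out, the descriptor is returned to hardware
 * by advancing the descriptor queue read pointer, and the data buffer is
 * recycled through the free queue write pointer.
 */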
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Returns %0 on success or %-EINVAL on failure.
 */
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int rc = 0;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
		dev_err(&priv->pdev->dev,
			"Error adding inbound buffer %d, buffer exists\n",
			rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;

	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].rx_slot = 0;

out:
	return rc;
}

/**
 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 *
 * Returns pointer to the message on success or NULL on failure.
 */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
			le32_to_cpu(desc->bufptr_lo);

	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}
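/*
 * A receive loop built on the two handlers above typically looks like this
 * (a minimal sketch through the generic mport API; pool_get() and consume()
 * are illustrative placeholders, not part of this driver):
 *
 *	void *buf;
 *
 *	rio_add_inb_buffer(mport, mbox, pool_get());
 *	...
 *	while ((buf = rio_get_inb_message(mport, mbox)) != NULL) {
 *		consume(buf);
 *		rio_add_inb_buffer(mport, mbox, buf);
 *	}
 */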
/**
 * tsi721_messages_init - Initialization of Messaging Engine
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 messaging engine. Always returns %0.
 */
static int tsi721_messages_init(struct tsi721_device *priv)
{
	int ch;

	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);

	/* Set SRIO Message Request/Response Timeout */
	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);

	/* Initialize Inbound Messaging Engine Registers */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
		/* Clear interrupt bits */
		iowrite32(TSI721_IBDMAC_INT_MASK,
			priv->regs + TSI721_IBDMAC_INT(ch));
		/* Clear Status */
		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));

		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
				priv->regs + TSI721_SMSG_ECC_NCOR(ch));
	}

	return 0;
}

/**
 * tsi721_disable_ints - disables all device interrupts
 * @priv: pointer to tsi721 private data
 */
static void tsi721_disable_ints(struct tsi721_device *priv)
{
	int ch;

	/* Disable all device level interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_INTE);

	/* Disable all Device Channel interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);

	/* Disable all Inbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));

	/* Disable all Outbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));

	/* Disable all general messaging interrupts */
	iowrite32(0, priv->regs + TSI721_SMSG_INTE);

	/* Disable all BDMA Channel interrupts */
	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));

	/* Disable all general BDMA interrupts */
	iowrite32(0, priv->regs + TSI721_BDMA_INTE);

	/* Disable all SRIO Channel interrupts */
	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));

	/* Disable all general SR2PC interrupts */
	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);

	/* Disable all PC2SR interrupts */
	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);

	/* Disable all I2C interrupts */
	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);

	/* Disable SRIO MAC interrupts */
	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}
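/*
 * Note that the Tsi721 gates interrupts at two levels: per-block enables
 * (BDMA and messaging channels, SR2PC/PC2SR, I2C, SRIO MAC) and the
 * device-level TSI721_DEV_INTE/TSI721_DEV_CHAN_INTE registers. Clearing
 * both levels here ensures that no stale event can fire before
 * tsi721_interrupts_init() re-enables the sources this driver services.
 */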
/**
 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 as RapidIO master port. Returns %0 on success or
 * a negative error code on failure.
 */
static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err = 0;
	struct rio_ops *ops;
	struct rio_mport *mport;

	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
	if (!ops) {
		dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
		return -ENOMEM;
	}

	ops->lcread = tsi721_lcread;
	ops->lcwrite = tsi721_lcwrite;
	ops->cread = tsi721_cread_dma;
	ops->cwrite = tsi721_cwrite_dma;
	ops->dsend = tsi721_dsend;
	ops->open_inb_mbox = tsi721_open_inb_mbox;
	ops->close_inb_mbox = tsi721_close_inb_mbox;
	ops->open_outb_mbox = tsi721_open_outb_mbox;
	ops->close_outb_mbox = tsi721_close_outb_mbox;
	ops->add_outb_message = tsi721_add_outb_message;
	ops->add_inb_buffer = tsi721_add_inb_buffer;
	ops->get_inb_message = tsi721_get_inb_message;

	mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
	if (!mport) {
		kfree(ops);
		dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
		return -ENOMEM;
	}

	mport->ops = ops;
	mport->index = 0;
	mport->sys_size = 0; /* small system */
	mport->phy_type = RIO_PHY_SERIAL;
	mport->priv = (void *)priv;
	mport->phys_efptr = 0x100;

	INIT_LIST_HEAD(&mport->dbells);

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
	strcpy(mport->name, "Tsi721 mport");
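	/*
	 * Interrupt mode is selected with a conventional fallback chain:
	 * MSI-X if all vectors can be allocated, then single-vector MSI,
	 * then the legacy INTx line. The TSI721_USING_MSIX/TSI721_USING_MSI
	 * flags recorded below are checked elsewhere in the driver to choose
	 * between per-event vectors and the shared interrupt handler.
	 */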
	/* Hook up interrupt handler */

#ifdef CONFIG_PCI_MSI
	if (!tsi721_enable_msix(priv))
		priv->flags |= TSI721_USING_MSIX;
	else if (!pci_enable_msi(pdev))
		priv->flags |= TSI721_USING_MSI;
	else
		dev_info(&pdev->dev,
			 "MSI/MSI-X is not available. Using legacy INTx.\n");
#endif /* CONFIG_PCI_MSI */

	err = tsi721_request_irq(mport);
	if (err) {
		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
			"vector %02X err=0x%x\n", pdev->irq, err);
		kfree(mport);
		kfree(ops);
		return err;
	}

	tsi721_interrupts_init(priv);
	ops->pwenable = tsi721_pw_enable;

	/* Enable SRIO link */
	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
		  TSI721_DEVCTL_SRBOOT_CMPL,
		  priv->regs + TSI721_DEVCTL);

	rio_register_mport(mport);
	priv->mport = mport;

	if (mport->host_deviceid >= 0)
		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
			  RIO_PORT_GEN_DISCOVERED,
			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
	else
		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));

	return 0;
}

static int __devinit tsi721_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct tsi721_device *priv;
	int i, cap;
	int err;
	u32 regval;

	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
		err = -ENOMEM;
		goto err_exit;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		goto err_clean;
	}

	priv->pdev = pdev;

#ifdef DEBUG
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
			i, (unsigned long long)pci_resource_start(pdev, i),
			(unsigned long)pci_resource_len(pdev, i),
			pci_resource_flags(pdev, i));
	}
#endif
	/*
	 * Verify BAR configuration
	 */

	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured CSR BAR0, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured Doorbell BAR1, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
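	/*
	 * To summarize the checks above and below: BAR0 maps the 512KB
	 * register (CSR) space, BAR1 the 16MB outbound doorbell window, and
	 * BAR2/BAR4 are 64-bit outbound address-translation windows that
	 * this version of the driver leaves unused.
	 */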
	/*
	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe
	 * address space.
	 * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
	 * It may be a good idea to keep them disabled using HW configuration
	 * to save PCI memory space.
	 */
	if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64))
		dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");

	if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64))
		dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	priv->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!priv->regs) {
		dev_err(&pdev->dev,
			"Unable to map device registers space, aborting\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
	if (!priv->odb_base) {
		dev_err(&pdev->dev,
			"Unable to map outbound doorbells space, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_info(&pdev->dev, "Unable to set DMA mask\n");
			goto err_unmap_bars;
		}

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	}

	cap = pci_pcie_cap(pdev);
	BUG_ON(cap == 0);

	/* Clear "no snoop" and "relaxed ordering" bits, set 512B MRRS */
	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
	regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		    PCI_EXP_DEVCTL_NOSNOOP_EN);
	regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
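	/*
	 * In PCI_EXP_DEVCTL the Max_Read_Request_Size field is encoded as
	 * 128 << value, so the 0x2 written above selects 512-byte read
	 * requests. Similarly, the DEVCTL2 write below programs the
	 * Completion Timeout Value field (bits 3:0); encoding 0x2 requests
	 * a timeout in the 1 ms - 10 ms range on devices that implement
	 * programmable completion timeouts.
	 */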
	/* Adjust PCIe completion timeout. */
	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
	regval &= ~(0x0f);
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);

	/*
	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
	 */
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
						TSI721_MSIXTBL_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
						TSI721_MSIXPBA_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
	/* End of FIXUP */

	tsi721_disable_ints(priv);

	tsi721_init_pc2sr_mapping(priv);
	tsi721_init_sr2pc_mapping(priv);

	if (tsi721_bdma_init(priv)) {
		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	err = tsi721_doorbell_init(priv);
	if (err)
		goto err_free_bdma;

	tsi721_port_write_init(priv);

	err = tsi721_messages_init(priv);
	if (err)
		goto err_free_consistent;

	err = tsi721_setup_mport(priv);
	if (err)
		goto err_free_consistent;

	return 0;

err_free_consistent:
	tsi721_doorbell_free(priv);
err_free_bdma:
	tsi721_bdma_free(priv);
err_unmap_bars:
	if (priv->regs)
		iounmap(priv->regs);
	if (priv->odb_base)
		iounmap(priv->odb_base);
err_free_res:
	pci_release_regions(pdev);
	pci_clear_master(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_clean:
	kfree(priv);
err_exit:
	return err;
}

static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);

static struct pci_driver tsi721_driver = {
	.name		= "tsi721",
	.id_table	= tsi721_pci_tbl,
	.probe		= tsi721_probe,
};

static int __init tsi721_init(void)
{
	return pci_register_driver(&tsi721_driver);
}

/*
 * NOTE: tsi721_exit() is currently unused. The driver does not implement a
 * PCI .remove handler, so it is registered with device_initcall() and cannot
 * be unloaded once probed.
 */
static void __exit tsi721_exit(void)
{
	pci_unregister_driver(&tsi721_driver);
}

device_initcall(tsi721_init);