/*
 * RapidIO interconnect services
 * (RapidIO Interconnect Specification, http://www.rapidio.org)
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write/Error Management initialization and handling
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/rio_regs.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include "rio.h"

static LIST_HEAD(rio_mports);
static unsigned char next_portid;

/**
 * rio_local_get_device_id - Get the base/extended device id for a port
 * @port: RIO master port from which to get the deviceid
 *
 * Reads the base/extended device id from the local device
 * implementing the master port. Returns the 8/16-bit device
 * id.
 */
u16 rio_local_get_device_id(struct rio_mport *port)
{
	u32 result;

	rio_local_read_config_32(port, RIO_DID_CSR, &result);

	return (RIO_GET_DID(port->sys_size, result));
}

/**
 * rio_request_inb_mbox - request inbound mailbox service
 * @mport: RIO master port from which to allocate the mailbox resource
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox number to claim
 * @entries: Number of entries in inbound mailbox queue
 * @minb: Callback to execute when inbound message is received
 *
 * Requests ownership of an inbound mailbox resource and binds
 * a callback function to the resource. Returns %0 on success.
 */
int rio_request_inb_mbox(struct rio_mport *mport,
			 void *dev_id,
			 int mbox,
			 int entries,
			 void (*minb) (struct rio_mport * mport, void *dev_id, int mbox,
				       int slot))
{
	int rc = -ENOSYS;
	struct resource *res;

	if (mport->ops->open_inb_mbox == NULL)
		goto out;

	res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (res) {
		rio_init_mbox_res(res, mbox, mbox);

		/* Make sure this mailbox isn't in use */
		if ((rc =
		     request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
				      res)) < 0) {
			kfree(res);
			goto out;
		}

		mport->inb_msg[mbox].res = res;

		/* Hook the inbound message callback */
		mport->inb_msg[mbox].mcback = minb;

		rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
	} else
		rc = -ENOMEM;

      out:
	return rc;
}

/**
 * rio_release_inb_mbox - release inbound mailbox message service
 * @mport: RIO master port from which to release the mailbox resource
 * @mbox: Mailbox number to release
 *
 * Releases ownership of an inbound mailbox resource. Returns 0
 * if the request has been satisfied.
 */
int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
{
	if (mport->ops->close_inb_mbox) {
		mport->ops->close_inb_mbox(mport, mbox);

		/* Release the mailbox resource */
		return release_resource(mport->inb_msg[mbox].res);
	} else
		return -ENOSYS;
}

/**
 * rio_request_outb_mbox - request outbound mailbox service
 * @mport: RIO master port from which to allocate the mailbox resource
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox number to claim
 * @entries: Number of entries in outbound mailbox queue
 * @moutb: Callback to execute when outbound message is sent
 *
 * Requests ownership of an outbound mailbox resource and binds
 * a callback function to the resource. Returns 0 on success.
 */
int rio_request_outb_mbox(struct rio_mport *mport,
			  void *dev_id,
			  int mbox,
			  int entries,
			  void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot))
{
	int rc = -ENOSYS;
	struct resource *res;

	if (mport->ops->open_outb_mbox == NULL)
		goto out;

	res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (res) {
		rio_init_mbox_res(res, mbox, mbox);

		/* Make sure this outbound mailbox isn't in use */
		if ((rc =
		     request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
				      res)) < 0) {
			kfree(res);
			goto out;
		}

		mport->outb_msg[mbox].res = res;

		/* Hook the outbound message callback */
		mport->outb_msg[mbox].mcback = moutb;

		rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
	} else
		rc = -ENOMEM;

      out:
	return rc;
}

/**
 * rio_release_outb_mbox - release outbound mailbox message service
 * @mport: RIO master port from which to release the mailbox resource
 * @mbox: Mailbox number to release
 *
 * Releases ownership of an outbound mailbox resource. Returns 0
 * if the request has been satisfied.
 */
int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
{
	if (mport->ops->close_outb_mbox) {
		mport->ops->close_outb_mbox(mport, mbox);

		/* Release the mailbox resource */
		return release_resource(mport->outb_msg[mbox].res);
	} else
		return -ENOSYS;
}
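
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * messaging driver could claim inbound mailbox 0 with a receive callback
 * and release it on teardown.  The handler name, dev_id pointer and queue
 * depth below are hypothetical.
 *
 *	static void demo_inb_msg(struct rio_mport *mport, void *dev_id,
 *				 int mbox, int slot)
 *	{
 *		pr_debug("message in mbox %d, slot %d\n", mbox, slot);
 *	}
 *
 *	int rc = rio_request_inb_mbox(mport, demo_dev, 0, 32, demo_inb_msg);
 *	...
 *	rio_release_inb_mbox(mport, 0);
 */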

/**
 * rio_setup_inb_dbell - bind inbound doorbell callback
 * @mport: RIO master port to bind the doorbell callback
 * @dev_id: Device specific pointer to pass on event
 * @res: Doorbell message resource
 * @dinb: Callback to execute when doorbell is received
 *
 * Adds a doorbell resource/callback pair into a port's
 * doorbell event list. Returns 0 if the request has been
 * satisfied.
 */
static int
rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
		    void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst,
				  u16 info))
{
	int rc = 0;
	struct rio_dbell *dbell;

	if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) {
		rc = -ENOMEM;
		goto out;
	}

	dbell->res = res;
	dbell->dinb = dinb;
	dbell->dev_id = dev_id;

	list_add_tail(&dbell->node, &mport->dbells);

      out:
	return rc;
}

/**
 * rio_request_inb_dbell - request inbound doorbell message service
 * @mport: RIO master port from which to allocate the doorbell resource
 * @dev_id: Device specific pointer to pass on event
 * @start: Doorbell info range start
 * @end: Doorbell info range end
 * @dinb: Callback to execute when doorbell is received
 *
 * Requests ownership of an inbound doorbell resource and binds
 * a callback function to the resource. Returns 0 if the request
 * has been satisfied.
 */
int rio_request_inb_dbell(struct rio_mport *mport,
			  void *dev_id,
			  u16 start,
			  u16 end,
			  void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src,
					u16 dst, u16 info))
{
	int rc = 0;

	struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (res) {
		rio_init_dbell_res(res, start, end);

		/* Make sure these doorbells aren't in use */
		if ((rc =
		     request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
				      res)) < 0) {
			kfree(res);
			goto out;
		}

		/* Hook the doorbell callback */
		rc = rio_setup_inb_dbell(mport, dev_id, res, dinb);
	} else
		rc = -ENOMEM;

      out:
	return rc;
}

/**
 * rio_release_inb_dbell - release inbound doorbell message service
 * @mport: RIO master port from which to release the doorbell resource
 * @start: Doorbell info range start
 * @end: Doorbell info range end
 *
 * Releases ownership of an inbound doorbell resource and removes
 * callback from the doorbell event list. Returns 0 if the request
 * has been satisfied.
 */
int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
{
	int rc = 0, found = 0;
	struct rio_dbell *dbell;

	list_for_each_entry(dbell, &mport->dbells, node) {
		if ((dbell->res->start == start) && (dbell->res->end == end)) {
			found = 1;
			break;
		}
	}

	/* If we can't find an exact match, fail */
	if (!found) {
		rc = -EINVAL;
		goto out;
	}

	/* Delete from list */
	list_del(&dbell->node);

	/* Release the doorbell resource */
	rc = release_resource(dbell->res);

	/* Free the doorbell event */
	kfree(dbell);

      out:
	return rc;
}
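
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * can claim a range of inbound doorbell info values and get a callback for
 * every doorbell received in that range.  The handler name, dev_id pointer
 * and info range below are hypothetical.
 *
 *	static void demo_dbell(struct rio_mport *mport, void *dev_id,
 *			       u16 src, u16 dst, u16 info)
 *	{
 *		pr_debug("doorbell 0x%04x from destid 0x%04x\n", info, src);
 *	}
 *
 *	int rc = rio_request_inb_dbell(mport, demo_dev,
 *				       0x0000, 0x00ff, demo_dbell);
 *	...
 *	rio_release_inb_dbell(mport, 0x0000, 0x00ff);
 */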

/**
 * rio_request_outb_dbell - request outbound doorbell message range
 * @rdev: RIO device from which to allocate the doorbell resource
 * @start: Doorbell message range start
 * @end: Doorbell message range end
 *
 * Requests ownership of a doorbell message range. Returns a resource
 * if the request has been satisfied or %NULL on failure.
 */
struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
					u16 end)
{
	struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);

	if (res) {
		rio_init_dbell_res(res, start, end);

		/* Make sure these doorbells aren't in use */
		if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res)
		    < 0) {
			kfree(res);
			res = NULL;
		}
	}

	return res;
}

/**
 * rio_release_outb_dbell - release outbound doorbell message range
 * @rdev: RIO device from which to release the doorbell resource
 * @res: Doorbell resource to be freed
 *
 * Releases ownership of a doorbell message range. Returns 0 if the
 * request has been satisfied.
 */
int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
{
	int rc = release_resource(res);

	kfree(res);

	return rc;
}

/**
 * rio_request_inb_pwrite - request inbound port-write message service
 * @rdev: RIO device to which to register the inbound port-write callback routine
 * @pwcback: Callback routine to execute when port-write is received
 *
 * Binds a port-write callback function to the RapidIO device.
 * Returns 0 if the request has been satisfied.
 */
int rio_request_inb_pwrite(struct rio_dev *rdev,
	int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
{
	int rc = 0;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback != NULL)
		rc = -ENOMEM;
	else
		rdev->pwcback = pwcback;

	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);

/**
 * rio_release_inb_pwrite - release inbound port-write message service
 * @rdev: RIO device which registered for inbound port-write callback
 *
 * Removes callback from the rio_dev structure. Returns 0 if the request
 * has been satisfied.
 */
int rio_release_inb_pwrite(struct rio_dev *rdev)
{
	int rc = -ENOMEM;

	spin_lock(&rio_global_list_lock);
	if (rdev->pwcback) {
		rdev->pwcback = NULL;
		rc = 0;
	}

	spin_unlock(&rio_global_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
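
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * endpoint driver may register a port-write handler for its device.  If
 * the callback returns 0, rio_inb_pwrite_handler() below performs no
 * further standard processing for that message.  The handler name is
 * hypothetical.
 *
 *	static int demo_pwcback(struct rio_dev *rdev, union rio_pw_msg *msg,
 *				int step)
 *	{
 *		pr_debug("port-write from %s\n", rio_name(rdev));
 *		return 0;
 *	}
 *
 *	int rc = rio_request_inb_pwrite(rdev, demo_pwcback);
 *	...
 *	rio_release_inb_pwrite(rdev);
 */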

/**
 * rio_mport_get_physefb - Helper function that returns register offset
 *                         for Physical Layer Extended Features Block.
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 */
u32
rio_mport_get_physefb(struct rio_mport *port, int local,
		      u16 destid, u8 hopcount)
{
	u32 ext_ftr_ptr;
	u32 ftr_header;

	ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);

	while (ext_ftr_ptr)  {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);

		ftr_header = RIO_GET_BLOCK_ID(ftr_header);
		switch (ftr_header) {

		case RIO_EFB_SER_EP_ID_V13P:
		case RIO_EFB_SER_EP_REC_ID_V13P:
		case RIO_EFB_SER_EP_FREE_ID_V13P:
		case RIO_EFB_SER_EP_ID:
		case RIO_EFB_SER_EP_REC_ID:
		case RIO_EFB_SER_EP_FREE_ID:
		case RIO_EFB_SER_EP_FREC_ID:

			return ext_ftr_ptr;

		default:
			break;
		}

		ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
						hopcount, ext_ftr_ptr);
	}

	return ext_ftr_ptr;
}

/**
 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
 * @comp_tag: RIO component tag to match
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @comp_tag, a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list.
 */
struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if (rdev->comp_tag == comp_tag)
			goto exit;
		n = n->next;
	}
	rdev = NULL;
exit:
	spin_unlock(&rio_global_list_lock);
	return rdev;
}

/**
 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to set LOCKOUT bit
 * @lock: Operation : set (=1) or clear (=0)
 */
int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
{
	u32 regval;

	rio_read_config_32(rdev,
			   rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			   &regval);
	if (lock)
		regval |= RIO_PORT_N_CTL_LOCKOUT;
	else
		regval &= ~RIO_PORT_N_CTL_LOCKOUT;

	rio_write_config_32(rdev,
			    rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
			    regval);
	return 0;
}

/**
 * rio_chk_dev_route - Validate route to the specified device.
 * @rdev: RIO device failed to respond
 * @nrdev: Last active device on the route to rdev
 * @npnum: nrdev's port number on the route to rdev
 *
 * Follows a route to the specified RIO device to determine the last available
 * device (and corresponding RIO port) on the route.
 */
static int
rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
{
	u32 result;
	int p_port, rc = -EIO;
	struct rio_dev *prev = NULL;

	/* Find switch with failed RIO link */
	while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) {
		if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) {
			prev = rdev->prev;
			break;
		}
		rdev = rdev->prev;
	}

	if (prev == NULL)
		goto err_out;

	p_port = prev->rswitch->route_table[rdev->destid];

	if (p_port != RIO_INVALID_ROUTE) {
		pr_debug("RIO: link failed on [%s]-P%d\n",
			 rio_name(prev), p_port);
		*nrdev = prev;
		*npnum = p_port;
		rc = 0;
	} else
		pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev));
err_out:
	return rc;
}

/**
 * rio_mport_chk_dev_access - Validate access to the specified device.
 * @mport: Master port to send transactions
 * @destid: Device destination ID in network
 * @hopcount: Number of hops into the network
 */
int
rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
{
	int i = 0;
	u32 tmp;

	while (rio_mport_read_config_32(mport, destid, hopcount,
					RIO_DEV_ID_CAR, &tmp)) {
		i++;
		if (i == RIO_MAX_CHK_RETRY)
			return -EIO;
		mdelay(1);
	}

	return 0;
}

/**
 * rio_chk_dev_access - Validate access to the specified device.
 * @rdev: Pointer to RIO device control structure
 */
static int rio_chk_dev_access(struct rio_dev *rdev)
{
	return rio_mport_chk_dev_access(rdev->net->hport,
					rdev->destid, rdev->hopcount);
}

/**
 * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and
 *                        returns link-response (if requested).
 * @rdev: RIO device to issue Input-status command
 * @pnum: Device port number to issue the command
 * @lnkresp: Response from a link partner
 */
static int
rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
{
	u32 regval;
	int checkcount;

	if (lnkresp) {
		/* Read from link maintenance response register
		 * to clear valid bit */
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		udelay(50);
	}

	/* Issue Input-status command */
	rio_write_config_32(rdev,
		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
		RIO_MNT_REQ_CMD_IS);

	/* Exit if the response is not expected */
	if (lnkresp == NULL)
		return 0;

	checkcount = 3;
	while (checkcount--) {
		udelay(50);
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
			&regval);
		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
			*lnkresp = regval;
			return 0;
		}
	}

	return -EIO;
}

/**
 * rio_clr_err_stopped - Clears port Error-stopped states.
 * @rdev: Pointer to RIO device control structure
 * @pnum: Switch port number to clear errors
 * @err_status: port error status (if 0 reads register from device)
 */
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
	u32 regval;
	u32 far_ackid, far_linkstat, near_ackid;

	if (err_status == 0)
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
		pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
		/*
		 * Send a Link-Request/Input-Status control symbol
		 */
		if (rio_get_input_status(rdev, pnum, &regval)) {
			pr_debug("RIO_EM: Input-status response timeout\n");
			goto rd_err;
		}

		pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n",
			 pnum, regval);
		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
			&regval);
		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
		near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
		pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \
			 " near_ackID=0x%02x\n",
			pnum, far_ackid, far_linkstat, near_ackid);

		/*
		 * If required, synchronize ackIDs of near and
		 * far sides.
		 */
		if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) ||
		    (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) {
			/* Align near outstanding/outbound ackIDs with
			 * far inbound.
			 */
			rio_write_config_32(rdev,
				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
				(near_ackid << 24) |
					(far_ackid << 8) | far_ackid);
			/* Align far outstanding/outbound ackIDs with
			 * near inbound.
			 */
			far_ackid++;
			if (nextdev)
				rio_write_config_32(nextdev,
					nextdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
					(far_ackid << 24) |
					(near_ackid << 8) | near_ackid);
			else
				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
		}
rd_err:
		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
		pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
		rio_get_input_status(nextdev,
			RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
		udelay(50);

		rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
			&err_status);
		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
	}

	return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
			      RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
}

/**
 * rio_inb_pwrite_handler - process inbound port-write message
 * @pw_msg: pointer to inbound port-write message
 *
 * Processes an inbound port-write message. Returns 0 if the request
 * has been satisfied.
 */
int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
{
	struct rio_dev *rdev;
	u32 err_status, em_perrdet, em_ltlerrdet;
	int rc, portnum;

	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
	if (rdev == NULL) {
		/* Device removed or enumeration error */
		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
			__func__, pw_msg->em.comptag);
		return -EIO;
	}

	pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));

#ifdef DEBUG_PW
	{
		u32 i;

		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
			pr_debug("0x%02x: %08x %08x %08x %08x\n",
				 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
				 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
			i += 4;
		}
	}
#endif

	/* Call an external service function (if such is registered
	 * for this device). This may be the service for endpoints that send
	 * device-specific port-write messages. End-point messages are expected
	 * to be handled completely by an EP-specific device driver.
	 * For switches, rc==0 signals that no standard processing is required.
	 */
	if (rdev->pwcback != NULL) {
		rc = rdev->pwcback(rdev, pw_msg, 0);
		if (rc == 0)
			return 0;
	}

	portnum = pw_msg->em.is_port & 0xFF;

	/* Check if device and route to it are functional:
	 * Sometimes devices may send PW message(s) just before being
	 * powered down (or link being lost).
	 */
	if (rio_chk_dev_access(rdev)) {
		pr_debug("RIO: device access failed - get link partner\n");
		/* Scan route to the device and identify failed link.
		 * This will replace device and port reported in PW message.
		 * PW message should not be used after this point.
		 */
		if (rio_chk_dev_route(rdev, &rdev, &portnum)) {
			pr_err("RIO: Route trace for %s failed\n",
				rio_name(rdev));
			return -EIO;
		}
		pw_msg = NULL;
	}

	/* For End-point devices processing stops here */
	if (!(rdev->pef & RIO_PEF_SWITCH))
		return 0;

	if (rdev->phys_efptr == 0) {
		pr_err("RIO_PW: Bad switch initialization for %s\n",
			rio_name(rdev));
		return 0;
	}

	/*
	 * Process the port-write notification from switch
	 */
	if (rdev->rswitch->em_handle)
		rdev->rswitch->em_handle(rdev, portnum);

	rio_read_config_32(rdev,
			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
			&err_status);
	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);

	if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {

		if (!(rdev->rswitch->port_ok & (1 << portnum))) {
			rdev->rswitch->port_ok |= (1 << portnum);
			rio_set_port_lockout(rdev, portnum, 0);
			/* Schedule Insertion Service */
			pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
			       rio_name(rdev), portnum);
		}

		/* Clear error-stopped states (if reported).
		 * Depending on the link partner state, two attempts
		 * may be needed for successful recovery.
		 */
		if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
					RIO_PORT_N_ERR_STS_PW_INP_ES)) {
			if (rio_clr_err_stopped(rdev, portnum, err_status))
				rio_clr_err_stopped(rdev, portnum, 0);
		}
	} else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */

		if (rdev->rswitch->port_ok & (1 << portnum)) {
			rdev->rswitch->port_ok &= ~(1 << portnum);
			rio_set_port_lockout(rdev, portnum, 1);

			rio_write_config_32(rdev,
				rdev->phys_efptr +
					RIO_PORT_N_ACK_STS_CSR(portnum),
				RIO_PORT_N_ACK_CLEAR);

			/* Schedule Extraction Service */
			pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
			       rio_name(rdev), portnum);
		}
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
	if (em_perrdet) {
		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
			 portnum, em_perrdet);
		/* Clear EM Port N Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
	}

	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
	if (em_ltlerrdet) {
		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
			 em_ltlerrdet);
		/* Clear EM L/T Layer Error Detect CSR */
		rio_write_config_32(rdev,
			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
	}

	/* Clear remaining error bits and Port-Write Pending bit */
	rio_write_config_32(rdev,
		rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
		err_status);

	return 0;
}
EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);

/**
 * rio_mport_get_efb - get pointer to next extended features block
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @from: Offset of current Extended Feature block header (if 0 starts
 *        from ExtFeaturePtr)
 */
u32
rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
		  u8 hopcount, u32 from)
{
	u32 reg_val;

	if (from == 0) {
		if (local)
			rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
						 &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 RIO_ASM_INFO_CAR, &reg_val);
		return reg_val & RIO_EXT_FTR_PTR_MASK;
	} else {
		if (local)
			rio_local_read_config_32(port, from, &reg_val);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 from, &reg_val);
		return RIO_GET_BLOCK_ID(reg_val);
	}
}

/**
 * rio_mport_get_feature - query for devices' extended features
 * @port: Master port to issue transaction
 * @local: Indicate a local master port or remote device access
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @ftr: Extended feature code
 *
 * Tell if a device supports a given RapidIO capability.
 * Returns the offset of the requested extended feature
 * block within the device's RIO configuration space or
 * 0 in case the device does not support it.  Possible
 * values for @ftr:
 *
 * %RIO_EFB_PAR_EP_ID		LP/LVDS EP Devices
 *
 * %RIO_EFB_PAR_EP_REC_ID	LP/LVDS EP Recovery Devices
 *
 * %RIO_EFB_PAR_EP_FREE_ID	LP/LVDS EP Free Devices
 *
 * %RIO_EFB_SER_EP_ID		LP/Serial EP Devices
 *
 * %RIO_EFB_SER_EP_REC_ID	LP/Serial EP Recovery Devices
 *
 * %RIO_EFB_SER_EP_FREE_ID	LP/Serial EP Free Devices
 */
u32
rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
		      u8 hopcount, int ftr)
{
	u32 asm_info, ext_ftr_ptr, ftr_header;

	if (local)
		rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
	else
		rio_mport_read_config_32(port, destid, hopcount,
					 RIO_ASM_INFO_CAR, &asm_info);

	ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;

	while (ext_ftr_ptr) {
		if (local)
			rio_local_read_config_32(port, ext_ftr_ptr,
						 &ftr_header);
		else
			rio_mport_read_config_32(port, destid, hopcount,
						 ext_ftr_ptr, &ftr_header);
		if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
			return ext_ftr_ptr;
		if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
			break;
	}

	return 0;
}

/**
 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
 * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
 * count to the device is incremented and a pointer to its device
 * structure is returned. Otherwise, %NULL is returned. A new search
 * is initiated by passing %NULL to the @from argument. Otherwise, if
 * @from is not %NULL, searches continue from next device on the global
 * list. The reference count for @from is always decremented if it is
 * not %NULL.
 */
struct rio_dev *rio_get_asm(u16 vid, u16 did,
			    u16 asm_vid, u16 asm_did, struct rio_dev *from)
{
	struct list_head *n;
	struct rio_dev *rdev;

	WARN_ON(in_interrupt());
	spin_lock(&rio_global_list_lock);
	n = from ? from->global_list.next : rio_devices.next;

	while (n && (n != &rio_devices)) {
		rdev = rio_dev_g(n);
		if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
		    (did == RIO_ANY_ID || rdev->did == did) &&
		    (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
		    (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
			goto exit;
		n = n->next;
	}
	rdev = NULL;
      exit:
	rio_dev_put(from);
	rdev = rio_dev_get(rdev);
	spin_unlock(&rio_global_list_lock);
	return rdev;
}
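
/*
 * Usage sketch (illustrative only, not part of the original file): walk
 * every known RIO device; rio_get_asm() handles the reference counting as
 * long as the loop continues from the previously returned device (the same
 * pattern rio_init() below uses with rio_get_device()).
 *
 *	struct rio_dev *rdev = NULL;
 *
 *	while ((rdev = rio_get_asm(RIO_ANY_ID, RIO_ANY_ID,
 *				   RIO_ANY_ID, RIO_ANY_ID, rdev)) != NULL)
 *		pr_debug("found %s\n", rio_name(rdev));
 */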

/**
 * rio_get_device - Begin or continue searching for a RIO device by vid/did
 * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
 * @did: RIO did to match or %RIO_ANY_ID to match all dids
 * @from: Previous RIO device found in search, or %NULL for new search
 *
 * Iterates through the list of known RIO devices. If a RIO device is
 * found with a matching @vid and @did, the reference count to the
 * device is incremented and a pointer to its device structure is returned.
 * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
 * to the @from argument. Otherwise, if @from is not %NULL, searches
 * continue from next device on the global list. The reference count for
 * @from is always decremented if it is not %NULL.
 */
struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
{
	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
}

/**
 * rio_std_route_add_entry - Add switch route table entry using standard
 *   registers defined in RIO specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: destination port for specified destID
 */
int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table, u16 route_destid, u8 route_port)
{
	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR,
				(u32)route_destid);
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR,
				(u32)route_port);
	}

	udelay(10);
	return 0;
}

/**
 * rio_std_route_get_entry - Read switch route table entry (port number)
 *   associated with specified destID using standard registers defined in RIO
 *   specification rev.1.3
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 * @route_destid: destID entry in the RT
 * @route_port: returned destination port for specified destID
 */
int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table, u16 route_destid, u8 *route_port)
{
	u32 result;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);

		*route_port = (u8)result;
	}

	return 0;
}
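
/*
 * Usage sketch (illustrative only, not part of the original file): program
 * a global routing table entry on a switch and read it back through the
 * standard registers.  The destination IDs, hop count and port number used
 * here are hypothetical.
 *
 *	u8 rport;
 *
 *	rio_std_route_add_entry(mport, destid, hopcount,
 *				RIO_GLOBAL_TABLE, 0x0005, 2);
 *	rio_std_route_get_entry(mport, destid, hopcount,
 *				RIO_GLOBAL_TABLE, 0x0005, &rport);
 */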

/**
 * rio_std_route_clr_table - Clear switch route table using standard registers
 *   defined in RIO specification rev.1.3.
 * @mport: Master port to issue transaction
 * @destid: Destination ID of the device
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 */
int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
			    u16 table)
{
	u32 max_destid = 0xff;
	u32 i, pef, id_inc = 1, ext_cfg = 0;
	u32 port_sel = RIO_INVALID_ROUTE;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_PEF_CAR, &pef);

		if (mport->sys_size) {
			rio_mport_read_config_32(mport, destid, hopcount,
						 RIO_SWITCH_RT_LIMIT,
						 &max_destid);
			max_destid &= RIO_RT_MAX_DESTID;
		}

		if (pef & RIO_PEF_EXT_RT) {
			ext_cfg = 0x80000000;
			id_inc = 4;
			port_sel = (RIO_INVALID_ROUTE << 24) |
				   (RIO_INVALID_ROUTE << 16) |
				   (RIO_INVALID_ROUTE << 8) |
				   RIO_INVALID_ROUTE;
		}

		for (i = 0; i <= max_destid;) {
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_DESTID_SEL_CSR,
					ext_cfg | i);
			rio_mport_write_config_32(mport, destid, hopcount,
					RIO_STD_RTE_CONF_PORT_SEL_CSR,
					port_sel);
			i += id_inc;
		}
	}

	udelay(10);
	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

static bool rio_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rio_dev *rdev = arg;

	/* Check that DMA device belongs to the right MPORT */
	return (rdev->net->hport ==
		container_of(chan->device, struct rio_mport, dma));
}

/**
 * rio_request_dma - request RapidIO capable DMA channel that supports
 *   specified target RapidIO device.
 * @rdev: RIO device control structure
 *
 * Returns pointer to allocated DMA channel or NULL if failed.
 */
struct dma_chan *rio_request_dma(struct rio_dev *rdev)
{
	dma_cap_mask_t mask;
	struct dma_chan *dchan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dchan = dma_request_channel(mask, rio_chan_filter, rdev);

	return dchan;
}
EXPORT_SYMBOL_GPL(rio_request_dma);

/**
 * rio_release_dma - release specified DMA channel
 * @dchan: DMA channel to release
 */
void rio_release_dma(struct dma_chan *dchan)
{
	dma_release_channel(dchan);
}
EXPORT_SYMBOL_GPL(rio_release_dma);
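
/*
 * Usage sketch (illustrative only, not part of the original file): request
 * a RapidIO-capable DMA channel for a target device, prepare a transfer
 * with rio_dma_prep_slave_sg() (defined below) and release the channel when
 * done.  The rio_dma_data descriptor is assumed to be filled by the caller.
 *
 *	struct dma_chan *dchan = rio_request_dma(rdev);
 *	struct dma_async_tx_descriptor *txd;
 *
 *	if (dchan) {
 *		txd = rio_dma_prep_slave_sg(rdev, dchan, &dma_data,
 *					    DMA_MEM_TO_DEV, 0);
 *		if (txd)
 *			dmaengine_submit(txd);
 *		rio_release_dma(dchan);
 *	}
 */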

/**
 * rio_dma_prep_slave_sg - RapidIO specific wrapper
 *   for device_prep_slave_sg callback defined by DMAENGINE.
 * @rdev: RIO device control structure
 * @dchan: DMA channel to configure
 * @data: RIO specific data descriptor
 * @direction: DMA data transfer direction (TO or FROM the device)
 * @flags: dmaengine defined flags
 *
 * Initializes RapidIO capable DMA channel for the specified data transfer.
 * Uses DMA channel private extension to pass information related to remote
 * target RIO device.
 * Returns pointer to DMA transaction descriptor or NULL if failed.
 */
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
	struct dma_chan *dchan, struct rio_dma_data *data,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct dma_async_tx_descriptor *txd = NULL;
	struct rio_dma_ext rio_ext;

	if (dchan->device->device_prep_slave_sg == NULL) {
		pr_err("%s: prep_rio_sg == NULL\n", __func__);
		return NULL;
	}

	rio_ext.destid = rdev->destid;
	rio_ext.rio_addr_u = data->rio_addr_u;
	rio_ext.rio_addr = data->rio_addr;
	rio_ext.wr_type = data->wr_type;

	txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
				    direction, flags, &rio_ext);

	return txd;
}
EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);

#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

static void rio_fixup_device(struct rio_dev *dev)
{
}

static int __devinit rio_init(void)
{
	struct rio_dev *dev = NULL;

	while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) {
		rio_fixup_device(dev);
	}
	return 0;
}

int __devinit rio_init_mports(void)
{
	struct rio_mport *port;

	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid >= 0)
			rio_enum_mport(port);
		else
			rio_disc_mport(port);
	}

	rio_init();

	return 0;
}

device_initcall_sync(rio_init_mports);

static int hdids[RIO_MAX_MPORTS + 1];

static int rio_get_hdid(int index)
{
	if (!hdids[0] || hdids[0] <= index || index >= RIO_MAX_MPORTS)
		return -1;

	return hdids[index + 1];
}

static int rio_hdid_setup(char *str)
{
	(void)get_options(str, ARRAY_SIZE(hdids), hdids);
	return 1;
}

__setup("riohdid=", rio_hdid_setup);

int rio_register_mport(struct rio_mport *port)
{
	if (next_portid >= RIO_MAX_MPORTS) {
		pr_err("RIO: reached specified max number of mports\n");
		return 1;
	}

	port->id = next_portid++;
	port->host_deviceid = rio_get_hdid(port->id);
	list_add_tail(&port->node, &rio_mports);
	return 0;
}

EXPORT_SYMBOL_GPL(rio_local_get_device_id);
EXPORT_SYMBOL_GPL(rio_get_device);
EXPORT_SYMBOL_GPL(rio_get_asm);
EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
EXPORT_SYMBOL_GPL(rio_release_outb_mbox);