// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/ecard.c
 *
 *  Copyright 1995-2001 Russell King
 *
 *  Find all installed expansion cards, and handle interrupts from them.
 *
 *  Created from information from Acorn's RiscOS3 PRMs
 *
 *  08-Dec-1996	RMK	Added code for the 9th expansion card - the ether
 *			podule slot.
 *  06-May-1997	RMK	Added blacklist for cards whose loader doesn't work.
 *  12-Sep-1997	RMK	Created new handling of interrupt enables/disables
 *			- cards can now register their own routine to control
 *			interrupts (recommended).
 *  29-Sep-1997	RMK	Expansion card interrupt hardware not being re-enabled
 *			on reset from Linux. (Caused cards not to respond
 *			under RiscOS without hard reset.)
 *  15-Feb-1998	RMK	Added DMA support
 *  12-Sep-1998	RMK	Added EASI support
 *  10-Jan-1999	RMK	Run loaders in a simulated RISC OS environment.
 *  17-Apr-1999	RMK	Support for EASI Type C cycles.
 */
#define ECARD_C

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/irq.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/ecard.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/mach/irq.h>
#include <asm/tlbflush.h>

#include "ecard.h"

struct ecard_request {
	void		(*fn)(struct ecard_request *);
	ecard_t		*ec;
	unsigned int	address;
	unsigned int	length;
	unsigned int	use_loader;
	void		*buffer;
	struct completion *complete;
};

struct expcard_blacklist {
	unsigned short	 manufacturer;
	unsigned short	 product;
	const char	*type;
};

static ecard_t *cards;
static ecard_t *slot_to_expcard[MAX_ECARDS];
static unsigned int ectcr;

/* List of descriptions of cards which don't have an extended
 * identification, or chunk directories containing a description.
 */
static struct expcard_blacklist __initdata blacklist[] = {
	{ MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
};

asmlinkage extern int
ecard_loader_reset(unsigned long base, loader_t loader);
asmlinkage extern int
ecard_loader_read(int off, unsigned long base, loader_t loader);

static inline unsigned short ecard_getu16(unsigned char *v)
{
	return v[0] | v[1] << 8;
}

static inline signed long ecard_gets24(unsigned char *v)
{
	return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0);
}

static inline ecard_t *slot_to_ecard(unsigned int slot)
{
	return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
}

/* ===================== Expansion card daemon ======================== */
/*
 * Since the loader programs on the expansion cards need to be run
 * in a specific environment, create a separate task with this
 * environment up, and pass requests to this task as and when we
 * need to.
 *
 * This should allow 99% of loaders to be called from Linux.
 *
 * From a security standpoint, we trust the card vendors.  This
 * may be a misplaced trust.
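 *
 * Requests are queued one at a time through ecard_call() below; the
 * caller sleeps on a completion until kecardd has run the request.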
 */
static void ecard_task_reset(struct ecard_request *req)
{
	struct expansion_card *ec = req->ec;
	struct resource *res;

	res = ec->slot_no == 8
		? &ec->resource[ECARD_RES_MEMC]
		: ec->easi
		  ? &ec->resource[ECARD_RES_EASI]
		  : &ec->resource[ECARD_RES_IOCSYNC];

	ecard_loader_reset(res->start, ec->loader);
}

static void ecard_task_readbytes(struct ecard_request *req)
{
	struct expansion_card *ec = req->ec;
	unsigned char *buf = req->buffer;
	unsigned int len = req->length;
	unsigned int off = req->address;

	if (ec->slot_no == 8) {
		void __iomem *base = (void __iomem *)
				ec->resource[ECARD_RES_MEMC].start;

		/*
		 * The card maintains an index which increments the address
		 * into a 4096-byte page on each access.  We need to keep
		 * track of the counter.
		 */
		static unsigned int index;
		unsigned int page;

		page = (off >> 12) * 4;
		if (page > 256 * 4)
			return;

		off &= 4095;

		/*
		 * If we are reading offset 0, or our current index is
		 * greater than the offset, reset the hardware index counter.
		 */
		if (off == 0 || index > off) {
			writeb(0, base);
			index = 0;
		}

		/*
		 * Increment the hardware index counter until we get to the
		 * required offset.  The read bytes are discarded.
		 */
		while (index < off) {
			readb(base + page);
			index += 1;
		}

		while (len--) {
			*buf++ = readb(base + page);
			index += 1;
		}
	} else {
		unsigned long base = (ec->easi
			? &ec->resource[ECARD_RES_EASI]
			: &ec->resource[ECARD_RES_IOCSYNC])->start;
		void __iomem *pbase = (void __iomem *)base;

		if (!req->use_loader || !ec->loader) {
			off *= 4;
			while (len--) {
				*buf++ = readb(pbase + off);
				off += 4;
			}
		} else {
			while (len--) {
				/*
				 * The following is required by some
				 * expansion card loader programs.
				 */
				*(unsigned long *)0x108 = 0;
				*buf++ = ecard_loader_read(off++, base,
							   ec->loader);
			}
		}
	}

}

static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
static struct ecard_request *ecard_req;
static DEFINE_MUTEX(ecard_mutex);

/*
 * Set up the expansion card daemon's page tables.
 */
static void ecard_init_pgtables(struct mm_struct *mm)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);

	/* We want to set up the page tables for the following mapping:
	 *  Virtual	Physical
	 *  0x03000000	0x03000000
	 *  0x03010000	unmapped
	 *  0x03210000	0x03210000
	 *  0x03400000	unmapped
	 *  0x08000000	0x08000000
	 *  0x10000000	unmapped
	 *
	 *  FIXME: we don't follow this 100% yet.
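	 *
	 * The copies below duplicate the kernel's page directory entries
	 * covering the I/O and EASI windows into the daemon's mm at
	 * IO_START and EASI_START, so that loaders see the hardware at
	 * the addresses they expect.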
	 */
	pgd_t *src_pgd, *dst_pgd;

	src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
	dst_pgd = pgd_offset(mm, IO_START);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));

	src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
	dst_pgd = pgd_offset(mm, EASI_START);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));

	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
}

static int ecard_init_mm(void)
{
	struct mm_struct *mm = mm_alloc();
	struct mm_struct *active_mm = current->active_mm;

	if (!mm)
		return -ENOMEM;

	current->mm = mm;
	current->active_mm = mm;
	activate_mm(active_mm, mm);
	mmdrop(active_mm);
	ecard_init_pgtables(mm);
	return 0;
}

static int
ecard_task(void *unused)
{
	/*
	 * Allocate a mm.  We're not a lazy-TLB kernel task since we need
	 * to set page table entries where the user space would be.  Note
	 * that this also creates the page tables.  Failure is not an
	 * option here.
	 */
	if (ecard_init_mm())
		panic("kecardd: unable to alloc mm\n");

	while (1) {
		struct ecard_request *req;

		wait_event_interruptible(ecard_wait, ecard_req != NULL);

		req = xchg(&ecard_req, NULL);
		if (req != NULL) {
			req->fn(req);
			complete(req->complete);
		}
	}
}

/*
 * Wake the expansion card daemon to action our request.
 *
 * FIXME: The test here is not sufficient to detect if the
 * kecardd is running.
 */
static void ecard_call(struct ecard_request *req)
{
	DECLARE_COMPLETION_ONSTACK(completion);

	req->complete = &completion;

	mutex_lock(&ecard_mutex);
	ecard_req = req;
	wake_up(&ecard_wait);

	/*
	 * Now wait for kecardd to run.
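	 * ecard_task() completes req->complete once req->fn() has
	 * returned, which releases us to drop ecard_mutex.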
	 */
	wait_for_completion(&completion);
	mutex_unlock(&ecard_mutex);
}

/* ======================= Mid-level card control ===================== */

static void
ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
{
	struct ecard_request req;

	req.fn		= ecard_task_readbytes;
	req.ec		= ec;
	req.address	= off;
	req.length	= len;
	req.use_loader	= useld;
	req.buffer	= addr;

	ecard_call(&req);
}

int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
{
	struct ex_chunk_dir excd;
	int index = 16;
	int useld = 0;

	if (!ec->cid.cd)
		return 0;

	while (1) {
		ecard_readbytes(&excd, ec, index, 8, useld);
		index += 8;
		if (c_id(&excd) == 0) {
			if (!useld && ec->loader) {
				useld = 1;
				index = 0;
				continue;
			}
			return 0;
		}
		if (c_id(&excd) == 0xf0) {	/* link */
			index = c_start(&excd);
			continue;
		}
		if (c_id(&excd) == 0x80) {	/* loader */
			if (!ec->loader) {
				ec->loader = kmalloc(c_len(&excd),
						     GFP_KERNEL);
				if (ec->loader)
					ecard_readbytes(ec->loader, ec,
							(int)c_start(&excd),
							c_len(&excd), useld);
				else
					return 0;
			}
			continue;
		}
		if (c_id(&excd) == id && num-- == 0)
			break;
	}

	if (c_id(&excd) & 0x80) {
		switch (c_id(&excd) & 0x70) {
		case 0x70:
			ecard_readbytes((unsigned char *)excd.d.string, ec,
					(int)c_start(&excd), c_len(&excd),
					useld);
			break;
		case 0x00:
			break;
		}
	}
	cd->start_offset = c_start(&excd);
	memcpy(cd->d.string, excd.d.string, 256);
	return 1;
}

/* ======================= Interrupt control ============================ */

static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
{
}

static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
{
}

static int ecard_def_irq_pending(ecard_t *ec)
{
	return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask;
}

static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_enable called - impossible");
}

static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_disable called - impossible");
}

static int ecard_def_fiq_pending(ecard_t *ec)
{
	return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask;
}

static expansioncard_ops_t ecard_default_ops = {
	ecard_def_irq_enable,
	ecard_def_irq_disable,
	ecard_def_irq_pending,
	ecard_def_fiq_enable,
	ecard_def_fiq_disable,
	ecard_def_fiq_pending
};

/*
 * Enable and disable interrupts from expansion cards.
 * (interrupts are disabled for these functions).
 *
 * They are not meant to be called directly, but via enable/disable_irq.
 */
static void ecard_irq_unmask(struct irq_data *d)
{
	ecard_t *ec = irq_data_get_irq_chip_data(d);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->claimed && ec->ops->irqenable)
			ec->ops->irqenable(ec, d->irq);
		else
			printk(KERN_ERR "ecard: rejecting request to "
				"enable IRQs for %d\n", d->irq);
	}
}

static void ecard_irq_mask(struct irq_data *d)
{
	ecard_t *ec = irq_data_get_irq_chip_data(d);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->ops && ec->ops->irqdisable)
			ec->ops->irqdisable(ec, d->irq);
	}
}

static struct irq_chip ecard_chip = {
	.name		= "ECARD",
	.irq_ack	= ecard_irq_mask,
	.irq_mask	= ecard_irq_mask,
	.irq_unmask	= ecard_irq_unmask,
};

void ecard_enablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->claimed && ec->ops->fiqenable)
			ec->ops->fiqenable(ec, fiqnr);
		else
			printk(KERN_ERR "ecard: rejecting request to "
				"enable FIQs for %d\n", fiqnr);
	}
}

void ecard_disablefiq(unsigned int fiqnr)
{
	ecard_t *ec = slot_to_ecard(fiqnr);

	if (ec) {
		if (!ec->ops)
			ec->ops = &ecard_default_ops;

		if (ec->ops->fiqdisable)
			ec->ops->fiqdisable(ec, fiqnr);
	}
}

static void ecard_dump_irq_state(void)
{
	ecard_t *ec;

	printk("Expansion card IRQ state:\n");

	for (ec = cards; ec; ec = ec->next) {
		if (ec->slot_no == 8)
			continue;

		printk("  %d: %sclaimed, ",
		       ec->slot_no, ec->claimed ? "" : "not ");

		if (ec->ops && ec->ops->irqpending &&
		    ec->ops != &ecard_default_ops)
			printk("irq %spending\n",
			       ec->ops->irqpending(ec) ? "" : "not ");
		else
			printk("irqaddr %p, mask = %02X, status = %02X\n",
			       ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
	}
}

static void ecard_check_lockup(struct irq_desc *desc)
{
	static unsigned long last;
	static int lockup;

	/*
	 * If the timer interrupt has not run since the last million
	 * unrecognised expansion card interrupts, then there is
	 * something seriously wrong.  Disable the expansion card
	 * interrupts so at least we can continue.
	 *
	 * Maybe we ought to start a timer to re-enable them some time
	 * later?
	 */
	if (last == jiffies) {
		lockup += 1;
		if (lockup > 1000000) {
			printk(KERN_ERR "\nInterrupt lockup detected - "
			       "disabling all expansion card interrupts\n");

			desc->irq_data.chip->irq_mask(&desc->irq_data);
			ecard_dump_irq_state();
		}
	} else
		lockup = 0;

	/*
	 * If we did not recognise the source of this interrupt,
	 * warn the user, but don't flood the user with these messages.
	 */
	if (!last || time_after(jiffies, last + 5*HZ)) {
		last = jiffies;
		printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
		ecard_dump_irq_state();
	}
}

static void ecard_irq_handler(struct irq_desc *desc)
{
	ecard_t *ec;
	int called = 0;

	desc->irq_data.chip->irq_mask(&desc->irq_data);
	for (ec = cards; ec; ec = ec->next) {
		int pending;

		if (!ec->claimed || !ec->irq || ec->slot_no == 8)
			continue;

		if (ec->ops && ec->ops->irqpending)
			pending = ec->ops->irqpending(ec);
		else
			pending = ecard_default_ops.irqpending(ec);

		if (pending) {
			generic_handle_irq(ec->irq);
			called++;
		}
	}
	desc->irq_data.chip->irq_unmask(&desc->irq_data);

	if (called == 0)
		ecard_check_lockup(desc);
}

static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
	void __iomem *address = NULL;
	int slot = ec->slot_no;

	if (ec->slot_no == 8)
		return ECARD_MEMC8_BASE;

	ectcr &= ~(1 << slot);

	switch (type) {
	case ECARD_MEMC:
		if (slot < 4)
			address = ECARD_MEMC_BASE + (slot << 14);
		break;

	case ECARD_IOC:
		if (slot < 4)
			address = ECARD_IOC_BASE + (slot << 14);
		else
			address = ECARD_IOC4_BASE + ((slot - 4) << 14);
		if (address)
			address += speed << 19;
		break;

	case ECARD_EASI:
		address = ECARD_EASI_BASE + (slot << 24);
		if (speed == ECARD_FAST)
			ectcr |= 1 << slot;
		break;

	default:
		break;
	}

#ifdef IOMD_ECTCR
	iomd_writeb(ectcr, IOMD_ECTCR);
#endif
	return address;
}

static int ecard_prints(struct seq_file *m, ecard_t *ec)
{
	seq_printf(m, "  %d: %s ", ec->slot_no, ec->easi ? "EASI" : "    ");

	if (ec->cid.id == 0) {
		struct in_chunk_dir incd;

		seq_printf(m, "[%04X:%04X] ",
			ec->cid.manufacturer, ec->cid.product);

		if (!ec->card_desc && ec->cid.cd &&
		    ecard_readchunk(&incd, ec, 0xf5, 0)) {
			ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);

			if (ec->card_desc)
				strcpy((char *)ec->card_desc, incd.d.string);
		}

		seq_printf(m, "%s\n", ec->card_desc ?
			ec->card_desc : "*unknown*");
	} else
		seq_printf(m, "Simple card %d\n", ec->cid.id);

	return 0;
}

static int ecard_devices_proc_show(struct seq_file *m, void *v)
{
	ecard_t *ec = cards;

	while (ec) {
		ecard_prints(m, ec);
		ec = ec->next;
	}
	return 0;
}

static struct proc_dir_entry *proc_bus_ecard_dir = NULL;

static void ecard_proc_init(void)
{
	proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL);
	proc_create_single("devices", 0, proc_bus_ecard_dir,
			ecard_devices_proc_show);
}

#define ec_set_resource(ec,nr,st,sz)				\
	do {							\
		(ec)->resource[nr].name = dev_name(&ec->dev);	\
		(ec)->resource[nr].start = st;			\
		(ec)->resource[nr].end = (st) + (sz) - 1;	\
		(ec)->resource[nr].flags = IORESOURCE_MEM;	\
	} while (0)

static void __init ecard_free_card(struct expansion_card *ec)
{
	int i;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		if (ec->resource[i].flags)
			release_resource(&ec->resource[i]);

	kfree(ec);
}

static struct expansion_card *__init ecard_alloc_card(int type, int slot)
{
	struct expansion_card *ec;
	unsigned long base;
	int i;

	ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
	if (!ec) {
		ec = ERR_PTR(-ENOMEM);
		goto nomem;
	}

	ec->slot_no = slot;
	ec->easi = type == ECARD_EASI;
	ec->irq = 0;
	ec->fiq = 0;
	ec->dma = NO_DMA;
	ec->ops = &ecard_default_ops;

	dev_set_name(&ec->dev, "ecard%d", slot);
	ec->dev.parent = NULL;
	ec->dev.bus = &ecard_bus_type;
	ec->dev.dma_mask = &ec->dma_mask;
	ec->dma_mask = (u64)0xffffffff;
	ec->dev.coherent_dma_mask = ec->dma_mask;

	if (slot < 4) {
		ec_set_resource(ec, ECARD_RES_MEMC,
				PODSLOT_MEMC_BASE + (slot << 14),
				PODSLOT_MEMC_SIZE);
		base = PODSLOT_IOC0_BASE + (slot << 14);
	} else
		base = PODSLOT_IOC4_BASE + ((slot - 4) << 14);

#ifdef CONFIG_ARCH_RPC
	if (slot < 8) {
		ec_set_resource(ec, ECARD_RES_EASI,
				PODSLOT_EASI_BASE + (slot << 24),
				PODSLOT_EASI_SIZE);
	}

	if (slot == 8) {
		ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE);
	} else
#endif

	for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++)
		ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
				base + (i << 19), PODSLOT_IOC_SIZE);

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ec->resource[i].flags &&
		    request_resource(&iomem_resource, &ec->resource[i])) {
			dev_err(&ec->dev, "resource(s) not available\n");
			ec->resource[i].end -= ec->resource[i].start;
			ec->resource[i].start = 0;
			ec->resource[i].flags = 0;
		}
	}

 nomem:
	return ec;
}

static ssize_t irq_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->irq);
}
static DEVICE_ATTR_RO(irq);

static ssize_t dma_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->dma);
}
static DEVICE_ATTR_RO(dma);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	char *str = buf;
	int i;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		str += sprintf(str, "%08x %08x %08lx\n",
				ec->resource[i].start,
				ec->resource[i].end,
				ec->resource[i].flags);

	return str - buf;
}
static DEVICE_ATTR_RO(resource);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.manufacturer);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%u\n", ec->cid.product);
}
static DEVICE_ATTR_RO(device);

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	return sprintf(buf, "%s\n", ec->easi ? "EASI" : "IOC");
}
static DEVICE_ATTR_RO(type);

static struct attribute *ecard_dev_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_dma.attr,
	&dev_attr_irq.attr,
	&dev_attr_resource.attr,
	&dev_attr_type.attr,
	&dev_attr_vendor.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ecard_dev);

int ecard_request_resources(struct expansion_card *ec)
{
	int i, err = 0;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ecard_resource_end(ec, i) &&
		    !request_mem_region(ecard_resource_start(ec, i),
					ecard_resource_len(ec, i),
					ec->dev.driver->name)) {
			err = -EBUSY;
			break;
		}
	}

	if (err) {
		while (i--)
			if (ecard_resource_end(ec, i))
				release_mem_region(ecard_resource_start(ec, i),
						   ecard_resource_len(ec, i));
	}
	return err;
}
EXPORT_SYMBOL(ecard_request_resources);

void ecard_release_resources(struct expansion_card *ec)
{
	int i;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++)
		if (ecard_resource_end(ec, i))
			release_mem_region(ecard_resource_start(ec, i),
					   ecard_resource_len(ec, i));
}
EXPORT_SYMBOL(ecard_release_resources);

void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data)
{
	ec->irq_data = irq_data;
	barrier();
	ec->ops = ops;
}
EXPORT_SYMBOL(ecard_setirq);

void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
			   unsigned long offset, unsigned long maxsize)
{
	unsigned long start = ecard_resource_start(ec, res);
	unsigned long end = ecard_resource_end(ec, res);

	if (offset > (end - start))
		return NULL;

	start += offset;
	if (maxsize && end - start > maxsize)
		end = start + maxsize;

	return devm_ioremap(&ec->dev, start, end - start);
}
EXPORT_SYMBOL(ecardm_iomap);

/*
 * Probe for an expansion card.
 *
 * If bit 1 of the first byte of the card is set, then the
 * card does not exist.
 */
static int __init ecard_probe(int slot, unsigned irq, card_type_t type)
{
	ecard_t **ecp;
	ecard_t *ec;
	struct ex_ecid cid;
	void __iomem *addr;
	int i, rc;

	ec = ecard_alloc_card(type, slot);
	if (IS_ERR(ec)) {
		rc = PTR_ERR(ec);
		goto nomem;
	}

	rc = -ENODEV;
	if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL)
		goto nodev;

	cid.r_zero = 1;
	ecard_readbytes(&cid, ec, 0, 16, 0);
	if (cid.r_zero)
		goto nodev;

	ec->cid.id	= cid.r_id;
	ec->cid.cd	= cid.r_cd;
	ec->cid.is	= cid.r_is;
	ec->cid.w	= cid.r_w;
	ec->cid.manufacturer = ecard_getu16(cid.r_manu);
	ec->cid.product = ecard_getu16(cid.r_prod);
	ec->cid.country = cid.r_country;
	ec->cid.irqmask = cid.r_irqmask;
	ec->cid.irqoff	= ecard_gets24(cid.r_irqoff);
	ec->cid.fiqmask = cid.r_fiqmask;
	ec->cid.fiqoff	= ecard_gets24(cid.r_fiqoff);
	ec->fiqaddr	=
	ec->irqaddr	= addr;

	if (ec->cid.is) {
		ec->irqmask = ec->cid.irqmask;
		ec->irqaddr += ec->cid.irqoff;
		ec->fiqmask = ec->cid.fiqmask;
		ec->fiqaddr += ec->cid.fiqoff;
	} else {
		ec->irqmask = 1;
		ec->fiqmask = 4;
	}

	for (i = 0; i < ARRAY_SIZE(blacklist); i++)
		if (blacklist[i].manufacturer == ec->cid.manufacturer &&
		    blacklist[i].product == ec->cid.product) {
			ec->card_desc = blacklist[i].type;
			break;
		}

	ec->irq = irq;

	/*
	 * hook the interrupt handlers
	 */
	if (slot < 8) {
		irq_set_chip_and_handler(ec->irq, &ecard_chip,
					 handle_level_irq);
		irq_set_chip_data(ec->irq, ec);
		irq_clear_status_flags(ec->irq, IRQ_NOREQUEST);
	}

#ifdef CONFIG_ARCH_RPC
	/* On RiscPC, only first two slots have DMA capability */
	if (slot < 2)
		ec->dma = 2 + slot;
#endif

	for (ecp = &cards; *ecp; ecp = &(*ecp)->next);

	*ecp = ec;
	slot_to_expcard[slot] = ec;

	rc = device_register(&ec->dev);
	if (rc)
		goto nodev;

	return 0;

 nodev:
	ecard_free_card(ec);
 nomem:
	return rc;
}

/*
 * Initialise the expansion card system.
 * Locate all hardware - interrupt management and
 * actual cards.
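 *
 * Slots 0-7 are probed for an EASI card first, then an IOC card; slot 8
 * is the ether podule slot, probed as an IOC card on IRQ 11.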
 */
static int __init ecard_init(void)
{
	struct task_struct *task;
	int slot, irqbase;

	irqbase = irq_alloc_descs(-1, 0, 8, -1);
	if (irqbase < 0)
		return irqbase;

	task = kthread_run(ecard_task, NULL, "kecardd");
	if (IS_ERR(task)) {
		printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n",
		       PTR_ERR(task));
		irq_free_descs(irqbase, 8);
		return PTR_ERR(task);
	}

	printk("Probing expansion cards\n");

	for (slot = 0; slot < 8; slot++) {
		if (ecard_probe(slot, irqbase + slot, ECARD_EASI) == -ENODEV)
			ecard_probe(slot, irqbase + slot, ECARD_IOC);
	}

	ecard_probe(8, 11, ECARD_IOC);

	irq_set_chained_handler(IRQ_EXPANSIONCARD, ecard_irq_handler);

	ecard_proc_init();

	return 0;
}

subsys_initcall(ecard_init);

/*
 *	ECARD "bus"
 */
static const struct ecard_id *
ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
{
	int i;

	for (i = 0; ids[i].manufacturer != 65535; i++)
		if (ec->cid.manufacturer == ids[i].manufacturer &&
		    ec->cid.product == ids[i].product)
			return ids + i;

	return NULL;
}

static int ecard_drv_probe(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	const struct ecard_id *id;
	int ret;

	id = ecard_match_device(drv->id_table, ec);

	ec->claimed = 1;
	ret = drv->probe(ec, id);
	if (ret)
		ec->claimed = 0;
	return ret;
}

static int ecard_drv_remove(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);

	drv->remove(ec);
	ec->claimed = 0;

	/*
	 * Restore the default operations.  We ensure that the
	 * ops are set before we change the data.
	 */
	ec->ops = &ecard_default_ops;
	barrier();
	ec->irq_data = NULL;

	return 0;
}

/*
 * Before rebooting, we must make sure that the expansion card is in a
 * sensible state, so it can be re-detected.  This means that the first
 * page of the ROM must be visible.  We call the expansion card's reset
 * handler, if any.
 */
static void ecard_drv_shutdown(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	struct ecard_request req;

	if (dev->driver) {
		if (drv->shutdown)
			drv->shutdown(ec);
		ec->claimed = 0;
	}

	/*
	 * If this card has a loader, call the reset handler.
	 */
	if (ec->loader) {
		req.fn = ecard_task_reset;
		req.ec = ec;
		ecard_call(&req);
	}
}

int ecard_register_driver(struct ecard_driver *drv)
{
	drv->drv.bus = &ecard_bus_type;

	return driver_register(&drv->drv);
}

void ecard_remove_driver(struct ecard_driver *drv)
{
	driver_unregister(&drv->drv);
}

static int ecard_match(struct device *_dev, struct device_driver *_drv)
{
	struct expansion_card *ec = ECARD_DEV(_dev);
	struct ecard_driver *drv = ECARD_DRV(_drv);
	int ret;

	if (drv->id_table) {
		ret = ecard_match_device(drv->id_table, ec) != NULL;
	} else {
		ret = ec->cid.id == drv->id;
	}

	return ret;
}

struct bus_type ecard_bus_type = {
	.name		= "ecard",
	.dev_groups	= ecard_dev_groups,
	.match		= ecard_match,
	.probe		= ecard_drv_probe,
	.remove		= ecard_drv_remove,
	.shutdown	= ecard_drv_shutdown,
};

static int ecard_bus_init(void)
{
	return bus_register(&ecard_bus_type);
}

postcore_initcall(ecard_bus_init);

EXPORT_SYMBOL(ecard_readchunk);
EXPORT_SYMBOL(ecard_register_driver);
EXPORT_SYMBOL(ecard_remove_driver);
EXPORT_SYMBOL(ecard_bus_type);
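
/*
 * Usage sketch, for illustration only.  A minimal driver for this bus
 * would look roughly like the following; the manufacturer/product codes
 * and the "mycard" names are hypothetical, only the ecard_* calls and
 * struct fields come from this file and <asm/ecard.h>.
 *
 *	static int mycard_probe(struct expansion_card *ec,
 *				const struct ecard_id *id)
 *	{
 *		void __iomem *base;
 *		int ret;
 *
 *		ret = ecard_request_resources(ec);
 *		if (ret)
 *			return ret;
 *
 *		base = ecardm_iomap(ec, ECARD_RES_IOCSYNC, 0, 0);
 *		if (!base) {
 *			ecard_release_resources(ec);
 *			return -ENOMEM;
 *		}
 *		// ... set up the hardware through "base" ...
 *		return 0;
 *	}
 *
 *	static void mycard_remove(struct expansion_card *ec)
 *	{
 *		ecard_release_resources(ec);
 *	}
 *
 *	static const struct ecard_id mycard_ids[] = {
 *		{ 0x1234, 0x5678 },	// hypothetical manufacturer/product
 *		{ 0xffff, 0xffff }	// terminator (manufacturer == 65535)
 *	};
 *
 *	static struct ecard_driver mycard_driver = {
 *		.probe		= mycard_probe,
 *		.remove		= mycard_remove,
 *		.id_table	= mycard_ids,
 *		.drv		= {
 *			.name	= "mycard",
 *		},
 *	};
 *
 * The driver's init/exit functions would then call
 * ecard_register_driver(&mycard_driver) and
 * ecard_remove_driver(&mycard_driver).
 */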