// SPDX-License-Identifier: GPL-2.0
/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space. The allocator for that is here. When the communication
 * processor is reset, we reclaim the memory available. There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent. On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose. Currently, the first 512 bytes and the last 256 bytes of
 * memory are used. Right now I am conservative and only use the
 * memory that can never be used for microcode. If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/prom.h>
#include <asm/cpm.h>

#include <asm/fs_pd.h>

#ifdef CONFIG_8xx_GPIO
#include <linux/of_gpio.h>
#endif

#define CPM_MAP_SIZE	(0x4000)

cpm8xx_t __iomem *cpmp;		/* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr;
static cpic8xx_t __iomem *cpic_reg;

static struct irq_domain *cpm_pic_host;

static void cpm_mask_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_unmask_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_end_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
}

static struct irq_chip cpm_pic = {
	.name = "CPM PIC",
	.irq_mask = cpm_mask_irq,
	.irq_unmask = cpm_unmask_irq,
	.irq_eoi = cpm_end_irq,
};

int cpm_get_irq(void)
{
	int cpm_vec;

	/*
	 * Get the vector by setting the ACK bit and then reading
	 * the register.
	 */
	out_be16(&cpic_reg->cpic_civr, 1);
	cpm_vec = in_be16(&cpic_reg->cpic_civr);
	cpm_vec >>= 11;

	return irq_linear_revmap(cpm_pic_host, cpm_vec);
}

static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
	return 0;
}

/*
 * The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts. All we have to do is ACK it
 * and return.
 * This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t cpm_error_interrupt(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
	.flags = IRQF_NO_THREAD,
	.name = "error",
};

static const struct irq_domain_ops cpm_pic_host_ops = {
	.map = cpm_pic_host_map,
};

unsigned int cpm_pic_init(void)
{
	struct device_node *np = NULL;
	struct resource res;
	unsigned int sirq = 0, hwirq, eirq;
	int ret;

	pr_debug("cpm_pic_init\n");

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
	if (np == NULL)
		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
		return sirq;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto end;

	cpic_reg = ioremap(res.start, resource_size(&res));
	if (cpic_reg == NULL)
		goto end;

	sirq = irq_of_parse_and_map(np, 0);
	if (!sirq)
		goto end;

	/* Initialize the CPM interrupt controller. */
	hwirq = (unsigned int)virq_to_hw(sirq);
	out_be32(&cpic_reg->cpic_cicr,
		 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		 ((hwirq/2) << 13) | CICR_HP_MASK);

	out_be32(&cpic_reg->cpic_cimr, 0);

	cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
	if (cpm_pic_host == NULL) {
		printk(KERN_ERR "CPM PIC: failed to allocate irq host!\n");
		sirq = 0;
		goto end;
	}

	/* Install our own error handler. */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cpm");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
		goto end;
	}

	eirq = irq_of_parse_and_map(np, 0);
	if (!eirq)
		goto end;

	if (setup_irq(eirq, &cpm_error_irqaction))
		printk(KERN_ERR "Could not allocate CPM error IRQ!");

	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);

end:
	of_node_put(np);
	return sirq;
}

void __init cpm_reset(void)
{
	sysconf8xx_t __iomem *siu_conf;

	mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
	if (!mpc8xx_immr) {
		printk(KERN_CRIT "Could not map IMMR\n");
		return;
	}

	cpmp = &mpc8xx_immr->im_cpm;

#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
	/* Perform a reset. */
	out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it. */
	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
#endif

#ifdef CONFIG_UCODE_PATCH
	cpm_load_patch(cpmp);
#endif

	/*
	 * Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6. I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	 */
	siu_conf = immr_map(im_siu_conf);
	if ((mfspr(SPRN_IMMR) & 0xffff) == 0x0900) /* MPC885 */
		out_be32(&siu_conf->sc_sdcr, 0x40);
	else
		out_be32(&siu_conf->sc_sdcr, 1);
	immr_unmap(siu_conf);
}

static DEFINE_SPINLOCK(cmd_lock);

#define MAX_CR_CMD_LOOPS	10000
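/*
 * cpm_command - issue a command to the CPM command register (CPCR)
 * @command: channel selection bits for the command; only bits 4-7 may be
 *           set, anything else is rejected with -EINVAL
 * @opcode: operation to perform, placed in the opcode field of the CPCR
 *          (for example an opcode such as CPM_CR_INIT_TRX from cpm1.h)
 *
 * The command is written with the FLG bit set and the function then
 * busy-waits, for at most MAX_CR_CMD_LOOPS reads, until the CPM clears
 * FLG to signal completion.
 *
 * Returns 0 on success, -EINVAL for an invalid command word, or -EIO if
 * the CPM did not complete the command in time.
 */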
int cpm_command(u32 command, u8 opcode)
{
	int i, ret;
	unsigned long flags;

	if (command & 0xffffff0f)
		return -EINVAL;

	spin_lock_irqsave(&cmd_lock, flags);

	ret = 0;
	out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8));
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
			goto out;

	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__);
	ret = -EIO;
out:
	spin_unlock_irqrestore(&cmd_lock, flags);
	return ret;
}
EXPORT_SYMBOL(cpm_command);

/*
 * Set a baud rate generator. This needs lots of work. There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baudrate is 16x oversampled by the uart.
 */
#define BRG_INT_CLK		(get_brgfreq())
#define BRG_UART_CLK		(BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)

void
cpm_setbrg(uint brg, uint rate)
{
	u32 __iomem *bp;

	/* This is good enough to get SMCs running..... */
	bp = &cpmp->cp_brgc1;
	bp += brg;
	/*
	 * The BRG has a 12-bit counter. For really slow baud rates (or
	 * really fast processors), we may have to further divide by 16.
	 */
	if (((BRG_UART_CLK / rate) - 1) < 4096)
		out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
	else
		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
			 CPM_BRG_EN | CPM_BRG_DIV16);
}
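/*
 * Worked example with an assumed clock (not taken from a particular board):
 * if get_brgfreq() returns 48 MHz, BRG_UART_CLK is 3 MHz, so
 * cpm_setbrg(0, 9600) programs BRG1 with a divider of 3000000/9600 - 1 = 311.
 * That fits in the 12-bit counter, so CPM_BRG_DIV16 is not needed.
 */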
struct cpm_ioport16 {
	__be16 dir, par, odr_sor, dat, intr;
	__be16 res[3];
};

struct cpm_ioport32b {
	__be32 dir, par, odr, dat;
};

struct cpm_ioport32e {
	__be32 dir, par, sor, odr, dat;
};

static void cpm1_set_pin32(int port, int pin, int flags)
{
	struct cpm_ioport32e __iomem *iop;
	pin = 1 << (31 - pin);

	if (port == CPM_PORTB)
		iop = (struct cpm_ioport32e __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pbdir;
	else
		iop = (struct cpm_ioport32e __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pedir;

	if (flags & CPM_PIN_OUTPUT)
		setbits32(&iop->dir, pin);
	else
		clrbits32(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits32(&iop->par, pin);
	else
		clrbits32(&iop->par, pin);

	if (port == CPM_PORTB) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
		else
			clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
	}

	if (port == CPM_PORTE) {
		if (flags & CPM_PIN_SECONDARY)
			setbits32(&iop->sor, pin);
		else
			clrbits32(&iop->sor, pin);

		if (flags & CPM_PIN_OPENDRAIN)
			setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
		else
			clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
	}
}

static void cpm1_set_pin16(int port, int pin, int flags)
{
	struct cpm_ioport16 __iomem *iop =
		(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;

	pin = 1 << (15 - pin);

	if (port != 0)
		iop += port - 1;

	if (flags & CPM_PIN_OUTPUT)
		setbits16(&iop->dir, pin);
	else
		clrbits16(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits16(&iop->par, pin);
	else
		clrbits16(&iop->par, pin);

	if (port == CPM_PORTA) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
	}
	if (port == CPM_PORTC) {
		if (flags & CPM_PIN_SECONDARY)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
		if (flags & CPM_PIN_FALLEDGE)
			setbits16(&iop->intr, pin);
		else
			clrbits16(&iop->intr, pin);
	}
}

void cpm1_set_pin(enum cpm_port port, int pin, int flags)
{
	if (port == CPM_PORTB || port == CPM_PORTE)
		cpm1_set_pin32(port, pin, flags);
	else
		cpm1_set_pin16(port, pin, flags);
}

int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
	int shift;
	int i, bits = 0;
	u32 __iomem *reg;
	u32 mask = 7;

	u8 clk_map[][3] = {
		{CPM_CLK_SCC1, CPM_BRG1, 0},
		{CPM_CLK_SCC1, CPM_BRG2, 1},
		{CPM_CLK_SCC1, CPM_BRG3, 2},
		{CPM_CLK_SCC1, CPM_BRG4, 3},
		{CPM_CLK_SCC1, CPM_CLK1, 4},
		{CPM_CLK_SCC1, CPM_CLK2, 5},
		{CPM_CLK_SCC1, CPM_CLK3, 6},
		{CPM_CLK_SCC1, CPM_CLK4, 7},

		{CPM_CLK_SCC2, CPM_BRG1, 0},
		{CPM_CLK_SCC2, CPM_BRG2, 1},
		{CPM_CLK_SCC2, CPM_BRG3, 2},
		{CPM_CLK_SCC2, CPM_BRG4, 3},
		{CPM_CLK_SCC2, CPM_CLK1, 4},
		{CPM_CLK_SCC2, CPM_CLK2, 5},
		{CPM_CLK_SCC2, CPM_CLK3, 6},
		{CPM_CLK_SCC2, CPM_CLK4, 7},

		{CPM_CLK_SCC3, CPM_BRG1, 0},
		{CPM_CLK_SCC3, CPM_BRG2, 1},
		{CPM_CLK_SCC3, CPM_BRG3, 2},
		{CPM_CLK_SCC3, CPM_BRG4, 3},
		{CPM_CLK_SCC3, CPM_CLK5, 4},
		{CPM_CLK_SCC3, CPM_CLK6, 5},
		{CPM_CLK_SCC3, CPM_CLK7, 6},
		{CPM_CLK_SCC3, CPM_CLK8, 7},

		{CPM_CLK_SCC4, CPM_BRG1, 0},
		{CPM_CLK_SCC4, CPM_BRG2, 1},
		{CPM_CLK_SCC4, CPM_BRG3, 2},
		{CPM_CLK_SCC4, CPM_BRG4, 3},
		{CPM_CLK_SCC4, CPM_CLK5, 4},
		{CPM_CLK_SCC4, CPM_CLK6, 5},
		{CPM_CLK_SCC4, CPM_CLK7, 6},
		{CPM_CLK_SCC4, CPM_CLK8, 7},

		{CPM_CLK_SMC1, CPM_BRG1, 0},
		{CPM_CLK_SMC1, CPM_BRG2, 1},
		{CPM_CLK_SMC1, CPM_BRG3, 2},
		{CPM_CLK_SMC1, CPM_BRG4, 3},
		{CPM_CLK_SMC1, CPM_CLK1, 4},
		{CPM_CLK_SMC1, CPM_CLK2, 5},
		{CPM_CLK_SMC1, CPM_CLK3, 6},
		{CPM_CLK_SMC1, CPM_CLK4, 7},

		{CPM_CLK_SMC2, CPM_BRG1, 0},
		{CPM_CLK_SMC2, CPM_BRG2, 1},
		{CPM_CLK_SMC2, CPM_BRG3, 2},
		{CPM_CLK_SMC2, CPM_BRG4, 3},
		{CPM_CLK_SMC2, CPM_CLK5, 4},
		{CPM_CLK_SMC2, CPM_CLK6, 5},
		{CPM_CLK_SMC2, CPM_CLK7, 6},
		{CPM_CLK_SMC2, CPM_CLK8, 7},
	};

	switch (target) {
	case CPM_CLK_SCC1:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 0;
		break;

	case CPM_CLK_SCC2:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 8;
		break;

	case CPM_CLK_SCC3:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 16;
		break;

	case CPM_CLK_SCC4:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 24;
		break;

	case CPM_CLK_SMC1:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 12;
		break;

	case CPM_CLK_SMC2:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 28;
		break;

	default:
		printk(KERN_ERR "cpm1_clk_setup: invalid clock target\n");
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
			bits = clk_map[i][2];
			break;
		}
	}

	if (i == ARRAY_SIZE(clk_map)) {
		printk(KERN_ERR "cpm1_clk_setup: invalid clock combination\n");
		return -EINVAL;
	}

	bits <<= shift;
	mask <<= shift;
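	/*
	 * For the SCCs the SICR holds two 3-bit clock-source sub-fields per
	 * channel: the transmit source in the low three bits selected by
	 * "shift" above, the receive source in the next three bits. A
	 * receive-only routing is therefore shifted up by 3, and a combined
	 * RTX routing writes the same value to both sub-fields. The SMCs
	 * have a single clock-source field in SIMODE, so no adjustment is
	 * made for them.
	 */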
	if (reg == &mpc8xx_immr->im_cpm.cp_sicr) {
		if (mode == CPM_CLK_RTX) {
			bits |= bits << 3;
			mask |= mask << 3;
		} else if (mode == CPM_CLK_RX) {
			bits <<= 3;
			mask <<= 3;
		}
	}

	out_be32(reg, (in_be32(reg) & ~mask) | bits);

	return 0;
}

/*
 * GPIO LIB API implementation
 */
#ifdef CONFIG_8xx_GPIO

struct cpm1_gpio16_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u16 cpdata;

	/* IRQ associated with Pins when relevant */
	int irq[16];
};

static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm1_gpio16_chip *cpm1_gc =
		container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;

	cpm1_gc->cpdata = in_be16(&iop->dat);
}

static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	u16 pin_mask;

	pin_mask = 1 << (15 - gpio);

	return !!(in_be16(&iop->dat) & pin_mask);
}

static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
			      int value)
{
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;

	if (value)
		cpm1_gc->cpdata |= pin_mask;
	else
		cpm1_gc->cpdata &= ~pin_mask;

	out_be16(&iop->dat, cpm1_gc->cpdata);
}

static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	__cpm1_gpio16_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}

static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);

	/* GNU "?:" returns the mapped IRQ when one was set up, else -ENXIO */
	return cpm1_gc->irq[gpio] ? : -ENXIO;
}

static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	setbits16(&iop->dir, pin_mask);
	__cpm1_gpio16_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	clrbits16(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

int cpm1_gpiochip_add16(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct cpm1_gpio16_chip *cpm1_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	u16 mask;

	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
	if (!cpm1_gc)
		return -ENOMEM;

	spin_lock_init(&cpm1_gc->lock);

	if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
		int i, j;

		for (i = 0, j = 0; i < 16; i++)
			if (mask & (1 << (15 - i)))
				cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
	}

	mm_gc = &cpm1_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = cpm1_gpio16_save_regs;
	gc->ngpio = 16;
	gc->direction_input = cpm1_gpio16_dir_in;
	gc->direction_output = cpm1_gpio16_dir_out;
	gc->get = cpm1_gpio16_get;
	gc->set = cpm1_gpio16_set;
	gc->to_irq = cpm1_gpio16_to_irq;
	gc->parent = dev;
	gc->owner = THIS_MODULE;

	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}

struct cpm1_gpio32_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u32 cpdata;
};

static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm1_gpio32_chip *cpm1_gc =
		container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;

	cpm1_gc->cpdata = in_be32(&iop->dat);
}

static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	u32 pin_mask;

	pin_mask = 1 << (31 - gpio);

	return !!(in_be32(&iop->dat) & pin_mask);
}

static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
			      int value)
{
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;

	if (value)
		cpm1_gc->cpdata |= pin_mask;
	else
		cpm1_gc->cpdata &= ~pin_mask;

	out_be32(&iop->dat, cpm1_gc->cpdata);
}

static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	__cpm1_gpio32_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}

static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	setbits32(&iop->dir, pin_mask);
	__cpm1_gpio32_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	clrbits32(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

int cpm1_gpiochip_add32(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct cpm1_gpio32_chip *cpm1_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;

	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
	if (!cpm1_gc)
		return -ENOMEM;

	spin_lock_init(&cpm1_gc->lock);

	mm_gc = &cpm1_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = cpm1_gpio32_save_regs;
	gc->ngpio = 32;
	gc->direction_input = cpm1_gpio32_dir_in;
	gc->direction_output = cpm1_gpio32_dir_out;
	gc->get = cpm1_gpio32_get;
	gc->set = cpm1_gpio32_set;
	gc->parent = dev;
	gc->owner = THIS_MODULE;

	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}

#endif /* CONFIG_8xx_GPIO */