// SPDX-License-Identifier: GPL-2.0
/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space. The allocator for that is here. When the communication
 * processor is reset, we reclaim the memory available. There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent. On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose. Currently, the first 512 bytes and the last 256 bytes of
 * memory are used. Right now I am conservative and only use the
 * memory that can never be used for microcode. If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/prom.h>
#include <asm/cpm.h>

#include <asm/fs_pd.h>

#ifdef CONFIG_8xx_GPIO
#include <linux/of_gpio.h>
#endif

#define CPM_MAP_SIZE	(0x4000)

cpm8xx_t __iomem *cpmp;		/* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
static cpic8xx_t __iomem *cpic_reg;

static struct irq_domain *cpm_pic_host;

static void cpm_mask_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_unmask_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}

static void cpm_end_irq(struct irq_data *d)
{
	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);

	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
}

static struct irq_chip cpm_pic = {
	.name = "CPM PIC",
	.irq_mask = cpm_mask_irq,
	.irq_unmask = cpm_unmask_irq,
	.irq_eoi = cpm_end_irq,
};
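
/*
 * Read the currently pending CPM interrupt. Writing the acknowledge bit
 * to CIVR latches the highest-priority pending source; the vector number
 * comes back in the upper five bits of the register, hence the shift by
 * 11 before translating the hardware number into a Linux virq.
 */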
int cpm_get_irq(void)
{
	int cpm_vec;

	/*
	 * Get the vector by setting the ACK bit and then reading
	 * the register.
	 */
	out_be16(&cpic_reg->cpic_civr, 1);
	cpm_vec = in_be16(&cpic_reg->cpic_civr);
	cpm_vec >>= 11;

	return irq_linear_revmap(cpm_pic_host, cpm_vec);
}

static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
	return 0;
}

/*
 * The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts. All we have to do is ACK it
 * and return. This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t cpm_error_interrupt(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static struct irqaction cpm_error_irqaction = {
	.handler = cpm_error_interrupt,
	.flags = IRQF_NO_THREAD,
	.name = "error",
};

static const struct irq_domain_ops cpm_pic_host_ops = {
	.map = cpm_pic_host_map,
};

unsigned int __init cpm_pic_init(void)
{
	struct device_node *np = NULL;
	struct resource res;
	unsigned int sirq = 0, hwirq, eirq;
	int ret;

	pr_debug("cpm_pic_init\n");

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
	if (np == NULL)
		np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
		return sirq;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto end;

	cpic_reg = ioremap(res.start, resource_size(&res));
	if (cpic_reg == NULL)
		goto end;

	sirq = irq_of_parse_and_map(np, 0);
	if (!sirq)
		goto end;

	/* Initialize the CPM interrupt controller. */
	hwirq = (unsigned int)virq_to_hw(sirq);
	out_be32(&cpic_reg->cpic_cicr,
		 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
		 ((hwirq/2) << 13) | CICR_HP_MASK);

	out_be32(&cpic_reg->cpic_cimr, 0);

	cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
	if (cpm_pic_host == NULL) {
		printk(KERN_ERR "CPM PIC: failed to allocate irq host!\n");
		sirq = 0;
		goto end;
	}

	/* Install our own error handler. */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cpm");
	if (np == NULL) {
		printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
		goto end;
	}

	eirq = irq_of_parse_and_map(np, 0);
	if (!eirq)
		goto end;

	if (setup_irq(eirq, &cpm_error_irqaction))
		printk(KERN_ERR "Could not allocate CPM error IRQ!\n");

	setbits32(&cpic_reg->cpic_cicr, CICR_IEN);

end:
	of_node_put(np);
	return sirq;
}

void __init cpm_reset(void)
{
	sysconf8xx_t __iomem *siu_conf;

	cpmp = &mpc8xx_immr->im_cpm;

#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
	/* Perform a reset. */
	out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);

	/* Wait for it. */
	while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
#endif

#ifdef CONFIG_UCODE_PATCH
	cpm_load_patch(cpmp);
#endif

	/*
	 * Set SDMA Bus Request priority 5.
	 * On 860T, this also enables FEC priority 6. I am not sure
	 * this is what we really want for some applications, but the
	 * manual recommends it.
	 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
	 */
	siu_conf = immr_map(im_siu_conf);
	if ((mfspr(SPRN_IMMR) & 0xffff) == 0x0900) /* MPC885 */
		out_be32(&siu_conf->sc_sdcr, 0x40);
	else
		out_be32(&siu_conf->sc_sdcr, 1);
	immr_unmap(siu_conf);
}

static DEFINE_SPINLOCK(cmd_lock);

#define MAX_CR_CMD_LOOPS	10000
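
/*
 * Issue a command to the CP command register. Only the channel field
 * (bits 4-7 of CPCR) may be set in 'command'; 'opcode' is one of the
 * CPM_CR_* command codes, e.g. CPM_CR_STOP_TX. A single spinlock
 * serialises all callers because the CPM executes one command at a time.
 */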
int cpm_command(u32 command, u8 opcode)
{
	int i, ret;
	unsigned long flags;

	if (command & 0xffffff0f)
		return -EINVAL;

	spin_lock_irqsave(&cmd_lock, flags);

	ret = 0;
	out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8));
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
			goto out;

	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__);
	ret = -EIO;
out:
	spin_unlock_irqrestore(&cmd_lock, flags);
	return ret;
}
EXPORT_SYMBOL(cpm_command);

/*
 * Set a baud rate generator. This needs lots of work. There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baudrate is 16x oversampled by the uart.
 */
#define BRG_INT_CLK		(get_brgfreq())
#define BRG_UART_CLK		(BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16	(BRG_UART_CLK/16)
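
/*
 * Worked example, assuming get_brgfreq() reports 48 MHz: BRG_UART_CLK is
 * then 3 MHz, so 9600 baud needs a divider of 3000000/9600 - 1 = 311,
 * which fits the 12-bit counter. At 50 baud the divider would be 59999,
 * so the /16 prescaler is enabled instead and the counter is loaded
 * with 3749.
 */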
void
cpm_setbrg(uint brg, uint rate)
{
	u32 __iomem *bp;

	/* This is good enough to get SMCs running..... */
	bp = &cpmp->cp_brgc1;
	bp += brg;
	/*
	 * The BRG has a 12-bit counter. For really slow baud rates (or
	 * really fast processors), we may have to further divide by 16.
	 */
	if (((BRG_UART_CLK / rate) - 1) < 4096)
		out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
	else
		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
			 CPM_BRG_EN | CPM_BRG_DIV16);
}

struct cpm_ioport16 {
	__be16 dir, par, odr_sor, dat, intr;
	__be16 res[3];
};

struct cpm_ioport32b {
	__be32 dir, par, odr, dat;
};

struct cpm_ioport32e {
	__be32 dir, par, sor, odr, dat;
};

static void __init cpm1_set_pin32(int port, int pin, int flags)
{
	struct cpm_ioport32e __iomem *iop;
	pin = 1 << (31 - pin);

	if (port == CPM_PORTB)
		iop = (struct cpm_ioport32e __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pbdir;
	else
		iop = (struct cpm_ioport32e __iomem *)
		      &mpc8xx_immr->im_cpm.cp_pedir;

	if (flags & CPM_PIN_OUTPUT)
		setbits32(&iop->dir, pin);
	else
		clrbits32(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits32(&iop->par, pin);
	else
		clrbits32(&iop->par, pin);

	if (port == CPM_PORTB) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
		else
			clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
	}

	if (port == CPM_PORTE) {
		if (flags & CPM_PIN_SECONDARY)
			setbits32(&iop->sor, pin);
		else
			clrbits32(&iop->sor, pin);

		if (flags & CPM_PIN_OPENDRAIN)
			setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
		else
			clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
	}
}

static void __init cpm1_set_pin16(int port, int pin, int flags)
{
	struct cpm_ioport16 __iomem *iop =
		(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;

	pin = 1 << (15 - pin);

	if (port != 0)
		iop += port - 1;

	if (flags & CPM_PIN_OUTPUT)
		setbits16(&iop->dir, pin);
	else
		clrbits16(&iop->dir, pin);

	if (!(flags & CPM_PIN_GPIO))
		setbits16(&iop->par, pin);
	else
		clrbits16(&iop->par, pin);

	if (port == CPM_PORTA) {
		if (flags & CPM_PIN_OPENDRAIN)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
	}
	if (port == CPM_PORTC) {
		if (flags & CPM_PIN_SECONDARY)
			setbits16(&iop->odr_sor, pin);
		else
			clrbits16(&iop->odr_sor, pin);
		if (flags & CPM_PIN_FALLEDGE)
			setbits16(&iop->intr, pin);
		else
			clrbits16(&iop->intr, pin);
	}
}

void __init cpm1_set_pin(enum cpm_port port, int pin, int flags)
{
	if (port == CPM_PORTB || port == CPM_PORTE)
		cpm1_set_pin32(port, pin, flags);
	else
		cpm1_set_pin16(port, pin, flags);
}
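
/*
 * Route a clock source (a BRG or an external CLKx pin) to an SCC or SMC.
 * Each SCC has an eight-bit field in SICR whose low six bits hold
 * separate three-bit receive and transmit clock selectors; each SMC has
 * a single three-bit selector in SIMODE. Per the table below, e.g.
 * cpm1_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RTX) feeds BRG1 to both
 * the SCC1 receiver and transmitter.
 */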
int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
	int shift;
	int i, bits = 0;
	u32 __iomem *reg;
	u32 mask = 7;

	u8 clk_map[][3] = {
		{CPM_CLK_SCC1, CPM_BRG1, 0},
		{CPM_CLK_SCC1, CPM_BRG2, 1},
		{CPM_CLK_SCC1, CPM_BRG3, 2},
		{CPM_CLK_SCC1, CPM_BRG4, 3},
		{CPM_CLK_SCC1, CPM_CLK1, 4},
		{CPM_CLK_SCC1, CPM_CLK2, 5},
		{CPM_CLK_SCC1, CPM_CLK3, 6},
		{CPM_CLK_SCC1, CPM_CLK4, 7},

		{CPM_CLK_SCC2, CPM_BRG1, 0},
		{CPM_CLK_SCC2, CPM_BRG2, 1},
		{CPM_CLK_SCC2, CPM_BRG3, 2},
		{CPM_CLK_SCC2, CPM_BRG4, 3},
		{CPM_CLK_SCC2, CPM_CLK1, 4},
		{CPM_CLK_SCC2, CPM_CLK2, 5},
		{CPM_CLK_SCC2, CPM_CLK3, 6},
		{CPM_CLK_SCC2, CPM_CLK4, 7},

		{CPM_CLK_SCC3, CPM_BRG1, 0},
		{CPM_CLK_SCC3, CPM_BRG2, 1},
		{CPM_CLK_SCC3, CPM_BRG3, 2},
		{CPM_CLK_SCC3, CPM_BRG4, 3},
		{CPM_CLK_SCC3, CPM_CLK5, 4},
		{CPM_CLK_SCC3, CPM_CLK6, 5},
		{CPM_CLK_SCC3, CPM_CLK7, 6},
		{CPM_CLK_SCC3, CPM_CLK8, 7},

		{CPM_CLK_SCC4, CPM_BRG1, 0},
		{CPM_CLK_SCC4, CPM_BRG2, 1},
		{CPM_CLK_SCC4, CPM_BRG3, 2},
		{CPM_CLK_SCC4, CPM_BRG4, 3},
		{CPM_CLK_SCC4, CPM_CLK5, 4},
		{CPM_CLK_SCC4, CPM_CLK6, 5},
		{CPM_CLK_SCC4, CPM_CLK7, 6},
		{CPM_CLK_SCC4, CPM_CLK8, 7},

		{CPM_CLK_SMC1, CPM_BRG1, 0},
		{CPM_CLK_SMC1, CPM_BRG2, 1},
		{CPM_CLK_SMC1, CPM_BRG3, 2},
		{CPM_CLK_SMC1, CPM_BRG4, 3},
		{CPM_CLK_SMC1, CPM_CLK1, 4},
		{CPM_CLK_SMC1, CPM_CLK2, 5},
		{CPM_CLK_SMC1, CPM_CLK3, 6},
		{CPM_CLK_SMC1, CPM_CLK4, 7},

		{CPM_CLK_SMC2, CPM_BRG1, 0},
		{CPM_CLK_SMC2, CPM_BRG2, 1},
		{CPM_CLK_SMC2, CPM_BRG3, 2},
		{CPM_CLK_SMC2, CPM_BRG4, 3},
		{CPM_CLK_SMC2, CPM_CLK5, 4},
		{CPM_CLK_SMC2, CPM_CLK6, 5},
		{CPM_CLK_SMC2, CPM_CLK7, 6},
		{CPM_CLK_SMC2, CPM_CLK8, 7},
	};

	switch (target) {
	case CPM_CLK_SCC1:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 0;
		break;

	case CPM_CLK_SCC2:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 8;
		break;

	case CPM_CLK_SCC3:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 16;
		break;

	case CPM_CLK_SCC4:
		reg = &mpc8xx_immr->im_cpm.cp_sicr;
		shift = 24;
		break;

	case CPM_CLK_SMC1:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 12;
		break;

	case CPM_CLK_SMC2:
		reg = &mpc8xx_immr->im_cpm.cp_simode;
		shift = 28;
		break;

	default:
		printk(KERN_ERR "cpm1_clk_setup: invalid clock target\n");
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
			bits = clk_map[i][2];
			break;
		}
	}

	if (i == ARRAY_SIZE(clk_map)) {
		printk(KERN_ERR "cpm1_clk_setup: invalid clock combination\n");
		return -EINVAL;
	}

	bits <<= shift;
	mask <<= shift;

	if (reg == &mpc8xx_immr->im_cpm.cp_sicr) {
		if (mode == CPM_CLK_RTX) {
			bits |= bits << 3;
			mask |= mask << 3;
		} else if (mode == CPM_CLK_RX) {
			bits <<= 3;
			mask <<= 3;
		}
	}

	out_be32(reg, (in_be32(reg) & ~mask) | bits);

	return 0;
}

/*
 * GPIO LIB API implementation
 */
#ifdef CONFIG_8xx_GPIO

struct cpm1_gpio16_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u16 cpdata;

	/* IRQ associated with Pins when relevant */
	int irq[16];
};
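
/*
 * Note on the shadow: the port data register can reflect the sampled pin
 * levels rather than the last value written (e.g. for lines configured
 * as inputs), so a plain read-modify-write could latch unintended values
 * into other output bits. save_regs() snapshots the register once at
 * registration time, and every set operation updates the shadow and
 * writes it back in one go.
 */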
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm1_gpio16_chip *cpm1_gc =
		container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;

	cpm1_gc->cpdata = in_be16(&iop->dat);
}

static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	u16 pin_mask;

	pin_mask = 1 << (15 - gpio);

	return !!(in_be16(&iop->dat) & pin_mask);
}

static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
			      int value)
{
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;

	if (value)
		cpm1_gc->cpdata |= pin_mask;
	else
		cpm1_gc->cpdata &= ~pin_mask;

	out_be16(&iop->dat, cpm1_gc->cpdata);
}

static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	__cpm1_gpio16_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}

static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);

	return cpm1_gc->irq[gpio] ? : -ENXIO;
}

static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	setbits16(&iop->dir, pin_mask);
	__cpm1_gpio16_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u16 pin_mask = 1 << (15 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	clrbits16(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

int cpm1_gpiochip_add16(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct cpm1_gpio16_chip *cpm1_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	u16 mask;

	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
	if (!cpm1_gc)
		return -ENOMEM;

	spin_lock_init(&cpm1_gc->lock);

	if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
		int i, j;

		for (i = 0, j = 0; i < 16; i++)
			if (mask & (1 << (15 - i)))
				cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
	}

	mm_gc = &cpm1_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = cpm1_gpio16_save_regs;
	gc->ngpio = 16;
	gc->direction_input = cpm1_gpio16_dir_in;
	gc->direction_output = cpm1_gpio16_dir_out;
	gc->get = cpm1_gpio16_get;
	gc->set = cpm1_gpio16_set;
	gc->to_irq = cpm1_gpio16_to_irq;
	gc->parent = dev;
	gc->owner = THIS_MODULE;

	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
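
/*
 * Ports B and E are 32 bits wide. The 32-bit chip below mirrors the
 * 16-bit one above, except that per-pin interrupts (and hence to_irq)
 * are only wired up for the 16-bit ports in this driver.
 */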
struct cpm1_gpio32_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u32 cpdata;
};

static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm1_gpio32_chip *cpm1_gc =
		container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;

	cpm1_gc->cpdata = in_be32(&iop->dat);
}

static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	u32 pin_mask;

	pin_mask = 1 << (31 - gpio);

	return !!(in_be32(&iop->dat) & pin_mask);
}

static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
			      int value)
{
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;

	if (value)
		cpm1_gc->cpdata |= pin_mask;
	else
		cpm1_gc->cpdata &= ~pin_mask;

	out_be32(&iop->dat, cpm1_gc->cpdata);
}

static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	__cpm1_gpio32_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}

static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	setbits32(&iop->dir, pin_mask);
	__cpm1_gpio32_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm1_gc->lock, flags);

	clrbits32(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm1_gc->lock, flags);

	return 0;
}

int cpm1_gpiochip_add32(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct cpm1_gpio32_chip *cpm1_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;

	cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
	if (!cpm1_gc)
		return -ENOMEM;

	spin_lock_init(&cpm1_gc->lock);

	mm_gc = &cpm1_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = cpm1_gpio32_save_regs;
	gc->ngpio = 32;
	gc->direction_input = cpm1_gpio32_dir_in;
	gc->direction_output = cpm1_gpio32_dir_out;
	gc->get = cpm1_gpio32_get;
	gc->set = cpm1_gpio32_set;
	gc->parent = dev;
	gc->owner = THIS_MODULE;

	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}

#endif /* CONFIG_8xx_GPIO */