// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */

#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "kcs_bmc_device.h"


#define DEVICE_NAME     "ast-kcs-bmc"

#define KCS_CHANNEL_MAX 4

/*
 * Field class descriptions
 *
 * LPCyE	Enable LPC channel y
 * IBFIEy	Input Buffer Full IRQ Enable for LPC channel y
 * IRQxEy	Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
 * IDyIRQX	Use the specified 4-bit SerIRQ for LPC channel y
 * SELyIRQX	SerIRQ polarity for LPC channel y (low: 0, high: 1)
 * IRQXEy	Assert the SerIRQ specified in IDyIRQX for LPC channel y
 */

#define LPC_TYIRQX_LOW          0b00
#define LPC_TYIRQX_HIGH         0b01
#define LPC_TYIRQX_RSVD         0b10
#define LPC_TYIRQX_RISING       0b11

#define LPC_HICR0               0x000
#define   LPC_HICR0_LPC3E       BIT(7)
#define   LPC_HICR0_LPC2E       BIT(6)
#define   LPC_HICR0_LPC1E       BIT(5)
#define LPC_HICR2               0x008
#define   LPC_HICR2_IBFIE3      BIT(3)
#define   LPC_HICR2_IBFIE2      BIT(2)
#define   LPC_HICR2_IBFIE1      BIT(1)
#define LPC_HICR4               0x010
#define   LPC_HICR4_LADR12AS    BIT(7)
#define   LPC_HICR4_KCSENBL     BIT(2)
#define LPC_SIRQCR0             0x070
/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
#define   LPC_SIRQCR0_IRQ12E1   BIT(1)
#define   LPC_SIRQCR0_IRQ1E1    BIT(0)
#define LPC_HICR5               0x080
#define   LPC_HICR5_ID3IRQX_MASK        GENMASK(23, 20)
#define   LPC_HICR5_ID3IRQX_SHIFT       20
#define   LPC_HICR5_ID2IRQX_MASK        GENMASK(19, 16)
#define   LPC_HICR5_ID2IRQX_SHIFT       16
#define   LPC_HICR5_SEL3IRQX    BIT(15)
#define   LPC_HICR5_IRQXE3      BIT(14)
#define   LPC_HICR5_SEL2IRQX    BIT(13)
#define   LPC_HICR5_IRQXE2      BIT(12)
#define LPC_LADR3H              0x014
#define LPC_LADR3L              0x018
#define LPC_LADR12H             0x01C
#define LPC_LADR12L             0x020
#define LPC_IDR1                0x024
#define LPC_IDR2                0x028
#define LPC_IDR3                0x02C
#define LPC_ODR1                0x030
#define LPC_ODR2                0x034
#define LPC_ODR3                0x038
#define LPC_STR1                0x03C
#define LPC_STR2                0x040
#define LPC_STR3                0x044
#define LPC_HICRB               0x100
#define   LPC_HICRB_EN16LADR2   BIT(5)
#define   LPC_HICRB_EN16LADR1   BIT(4)
#define   LPC_HICRB_IBFIE4      BIT(1)
#define   LPC_HICRB_LPC4E       BIT(0)
#define LPC_HICRC               0x104
#define   LPC_HICRC_ID4IRQX_MASK        GENMASK(7, 4)
#define   LPC_HICRC_ID4IRQX_SHIFT       4
#define   LPC_HICRC_TY4IRQX_MASK        GENMASK(3, 2)
#define   LPC_HICRC_TY4IRQX_SHIFT       2
#define   LPC_HICRC_OBF4_AUTO_CLR       BIT(1)
#define   LPC_HICRC_IRQXE4      BIT(0)
#define LPC_LADR4               0x110
#define LPC_IDR4                0x114
#define LPC_ODR4                0x118
#define LPC_STR4                0x11C
#define LPC_LSADR12             0x120
#define   LPC_LSADR12_LSADR2_MASK       GENMASK(31, 16)
#define   LPC_LSADR12_LSADR2_SHIFT      16
#define   LPC_LSADR12_LSADR1_MASK       GENMASK(15, 0)
#define   LPC_LSADR12_LSADR1_SHIFT      0

#define OBE_POLL_PERIOD         (HZ / 2)

enum aspeed_kcs_irq_mode {
        aspeed_kcs_irq_none,
        aspeed_kcs_irq_serirq,
};

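/*
 * Per-channel driver state: the syscon regmap shared with the parent LPC
 * device, the optional upstream (BMC-to-host) SerIRQ configuration, and a
 * timer used to poll for output-buffer-empty, as the hardware provides no
 * OBE interrupt.
 */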
struct aspeed_kcs_bmc {
        struct kcs_bmc_device kcs_bmc;

        struct regmap *map;

        struct {
                enum aspeed_kcs_irq_mode mode;
                int id;
        } upstream_irq;

        struct {
                spinlock_t lock;
                bool remove;
                struct timer_list timer;
        } obe;
};

struct aspeed_kcs_of_ops {
        int (*get_channel)(struct platform_device *pdev);
        int (*get_io_address)(struct platform_device *pdev, u32 addrs[2]);
};

static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
        return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
}

static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
        u32 val = 0;
        int rc;

        rc = regmap_read(priv->map, reg, &val);
        WARN(rc != 0, "regmap_read() failed: %d\n", rc);

        return rc == 0 ? (u8) val : 0;
}

static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
        int rc;

        rc = regmap_write(priv->map, reg, data);
        WARN(rc != 0, "regmap_write() failed: %d\n", rc);

        /* Trigger the upstream IRQ on ODR writes, if enabled */

        switch (reg) {
        case LPC_ODR1:
        case LPC_ODR2:
        case LPC_ODR3:
        case LPC_ODR4:
                break;
        default:
                return;
        }

        if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
                return;

        switch (kcs_bmc->channel) {
        case 1:
                switch (priv->upstream_irq.id) {
                case 12:
                        regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
                                           LPC_SIRQCR0_IRQ12E1);
                        break;
                case 1:
                        regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
                                           LPC_SIRQCR0_IRQ1E1);
                        break;
                default:
                        break;
                }
                break;
        case 2:
                regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
                break;
        case 3:
                regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
                break;
        case 4:
                regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
                break;
        default:
                break;
        }
}

static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
        int rc;

        rc = regmap_update_bits(priv->map, reg, mask, val);
        WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
}

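/*
 * The host I/O address(es) for a channel come from the devicetree: the v2
 * binding uses "aspeed,lpc-io-reg" (data address, optionally followed by a
 * separate status address), the v1 binding uses "kcs_addr". A minimal,
 * illustrative v2 example for KCS3 at its default address:
 *
 *     aspeed,lpc-io-reg = <0xca2>;
 *
 * When only the data address is given, the status address is inferred by
 * the hardware per the rules below.
 */
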
/*
 * AST_usrGuide_KCS.pdf
 * 2. Background:
 *   D denotes the Data register and C the Cmd/Status register; the default
 *   address rules are:
 *     A. KCS1 / KCS2 (D / C: X / X+4)
 *        D / C : CA0h / CA4h
 *        D / C : CA8h / CACh
 *     B. KCS3 (D / C: XX2h / XX3h)
 *        D / C : CA2h / CA3h
 *        D / C : CB2h / CB3h
 *     C. KCS4 (D / C: X / X+1)
 *        D / C : CA4h / CA5h
 */
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

        if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
                return -EINVAL;

        switch (priv->kcs_bmc.channel) {
        case 1:
                regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
                regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
                regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
                if (nr_addrs == 2) {
                        regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
                                           addrs[1] << LPC_LSADR12_LSADR1_SHIFT);

                        regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
                                           LPC_HICRB_EN16LADR1);
                }
                break;

        case 2:
                regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
                regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
                regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
                if (nr_addrs == 2) {
                        regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
                                           addrs[1] << LPC_LSADR12_LSADR2_SHIFT);

                        regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
                                           LPC_HICRB_EN16LADR2);
                }
                break;

        case 3:
                if (nr_addrs == 2) {
                        dev_err(priv->kcs_bmc.dev,
                                "Channel 3 only supports inferred status IO address\n");
                        return -EINVAL;
                }

                regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
                regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
                break;

        case 4:
                if (nr_addrs == 1)
                        regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
                else
                        regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);

                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
{
        switch (dt_type) {
        case IRQ_TYPE_EDGE_RISING:
                return LPC_TYIRQX_RISING;
        case IRQ_TYPE_LEVEL_HIGH:
                return LPC_TYIRQX_HIGH;
        case IRQ_TYPE_LEVEL_LOW:
                return LPC_TYIRQX_LOW;
        default:
                return -EINVAL;
        }
}

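/*
 * The upstream (BMC-to-host) SerIRQ is described by the devicetree
 * "aspeed,lpc-interrupts" property as a <SerIRQ number, trigger type> pair,
 * e.g. (illustrative only):
 *
 *     aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
 *
 * Channels 2 and 3 only support level triggers; channel 4 additionally
 * supports IRQ_TYPE_EDGE_RISING. Channel 1 can only assert SerIRQ 1 or 12,
 * via the deprecated IRQ1E1/IRQ12E1 enables.
 */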
static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
{
        unsigned int mask, val, hw_type;
        int ret;

        if (id > 15)
                return -EINVAL;

        ret = aspeed_kcs_map_serirq_type(dt_type);
        if (ret < 0)
                return ret;
        hw_type = ret;

        priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
        priv->upstream_irq.id = id;

        switch (priv->kcs_bmc.channel) {
        case 1:
                /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
                break;
        case 2:
                if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
                        return -EINVAL;

                mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
                val = (id << LPC_HICR5_ID2IRQX_SHIFT);
                val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
                regmap_update_bits(priv->map, LPC_HICR5, mask, val);

                break;
        case 3:
                if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
                        return -EINVAL;

                mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
                val = (id << LPC_HICR5_ID3IRQX_SHIFT);
                val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
                regmap_update_bits(priv->map, LPC_HICR5, mask, val);

                break;
        case 4:
                mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
                val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
                regmap_update_bits(priv->map, LPC_HICRC, mask, val);
                break;
        default:
                dev_warn(priv->kcs_bmc.dev,
                         "SerIRQ configuration not supported on KCS channel %d\n",
                         priv->kcs_bmc.channel);
                return -EINVAL;
        }

        return 0;
}

static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

        switch (kcs_bmc->channel) {
        case 1:
                regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
                return;
        case 2:
                regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
                return;
        case 3:
                regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
                regmap_update_bits(priv->map, LPC_HICR4,
                                   LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
                return;
        case 4:
                regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
                return;
        default:
                pr_warn("%s: Unsupported channel: %d\n", __func__, kcs_bmc->channel);
                return;
        }
}

static void aspeed_kcs_check_obe(struct timer_list *timer)
{
        struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
        unsigned long flags;
        u8 str;

        spin_lock_irqsave(&priv->obe.lock, flags);
        if (priv->obe.remove) {
                spin_unlock_irqrestore(&priv->obe.lock, flags);
                return;
        }

        str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
        if (str & KCS_BMC_STR_OBF) {
                mod_timer(timer, jiffies + OBE_POLL_PERIOD);
                spin_unlock_irqrestore(&priv->obe.lock, flags);
                return;
        }
        spin_unlock_irqrestore(&priv->obe.lock, flags);

        kcs_bmc_handle_event(&priv->kcs_bmc);
}

static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
{
        struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

        /* We don't have an OBE IRQ, emulate it */
        if (mask & KCS_BMC_EVENT_TYPE_OBE) {
                if (KCS_BMC_EVENT_TYPE_OBE & state)
                        mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
                else
                        del_timer(&priv->obe.timer);
        }

        if (mask & KCS_BMC_EVENT_TYPE_IBF) {
                const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);

                switch (kcs_bmc->channel) {
                case 1:
                        regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
                                           enable * LPC_HICR2_IBFIE1);
                        return;
                case 2:
                        regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
                                           enable * LPC_HICR2_IBFIE2);
                        return;
                case 3:
                        regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
                                           enable * LPC_HICR2_IBFIE3);
                        return;
                case 4:
                        regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
                                           enable * LPC_HICRB_IBFIE4);
                        return;
                default:
                        pr_warn("%s: Unsupported channel: %d\n", __func__, kcs_bmc->channel);
                        return;
                }
        }
}

static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
        .irq_mask_update = aspeed_kcs_irq_mask_update,
        .io_inputb = aspeed_kcs_inb,
        .io_outputb = aspeed_kcs_outb,
        .io_updateb = aspeed_kcs_updateb,
};

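/*
 * Downstream (host-to-BMC) interrupt handler: IBF events raised by the LPC
 * controller are handed to the generic kcs_bmc core for servicing.
 */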
static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
{
        struct kcs_bmc_device *kcs_bmc = arg;

        return kcs_bmc_handle_event(kcs_bmc);
}

static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
                                            struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int irq;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
                                dev_name(dev), kcs_bmc);
}

static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
        { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
        { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
        { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
        { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};

static int aspeed_kcs_of_v1_get_channel(struct platform_device *pdev)
{
        struct device_node *np;
        u32 channel;
        int rc;

        np = pdev->dev.of_node;

        rc = of_property_read_u32(np, "kcs_chan", &channel);
        if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
                dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
                return -EINVAL;
        }

        return channel;
}

static int
aspeed_kcs_of_v1_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
        int rc;

        rc = of_property_read_u32(pdev->dev.of_node, "kcs_addr", addrs);
        if (rc || addrs[0] > 0xffff) {
                dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
                return -EINVAL;
        }

        return 1;
}

static int aspeed_kcs_of_v2_get_channel(struct platform_device *pdev)
{
        struct device_node *np;
        struct kcs_ioreg ioreg;
        const __be32 *reg;
        int i;

        np = pdev->dev.of_node;

        /* Don't translate addresses, we want offsets for the regmaps */
        reg = of_get_address(np, 0, NULL, NULL);
        if (!reg)
                return -EINVAL;
        ioreg.idr = be32_to_cpup(reg);

        reg = of_get_address(np, 1, NULL, NULL);
        if (!reg)
                return -EINVAL;
        ioreg.odr = be32_to_cpup(reg);

        reg = of_get_address(np, 2, NULL, NULL);
        if (!reg)
                return -EINVAL;
        ioreg.str = be32_to_cpup(reg);

        for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
                if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
                        return i + 1;
        }

        return -EINVAL;
}

static int
aspeed_kcs_of_v2_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
        int rc;

        rc = of_property_read_variable_u32_array(pdev->dev.of_node,
                                                 "aspeed,lpc-io-reg",
                                                 addrs, 1, 2);
        if (rc < 0) {
                dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
                return rc;
        }

        if (addrs[0] > 0xffff) {
                dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
                return -EINVAL;
        }

        if (rc == 2 && addrs[1] > 0xffff) {
                dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
                return -EINVAL;
        }

        return rc;
}

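/*
 * Probe flow: verify the parent LPC node uses a *-lpc-v2 binding (so the
 * register offsets above are valid), discover the channel and host I/O
 * address via the binding-specific ops, grab the parent syscon regmap,
 * program the addresses, wire up the downstream and (optionally) upstream
 * IRQs, and finally register the channel with the kcs_bmc core.
 */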
static int aspeed_kcs_probe(struct platform_device *pdev)
{
        const struct aspeed_kcs_of_ops *ops;
        struct kcs_bmc_device *kcs_bmc;
        struct aspeed_kcs_bmc *priv;
        struct device_node *np;
        bool have_upstream_irq;
        u32 upstream_irq[2];
        int rc, channel;
        int nr_addrs;
        u32 addrs[2];

        np = pdev->dev.of_node->parent;
        if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
            !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
            !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
                dev_err(&pdev->dev, "unsupported LPC device binding\n");
                return -ENODEV;
        }

        ops = of_device_get_match_data(&pdev->dev);
        if (!ops)
                return -EINVAL;

        channel = ops->get_channel(pdev);
        if (channel < 0)
                return channel;

        nr_addrs = ops->get_io_address(pdev, addrs);
        if (nr_addrs < 0)
                return nr_addrs;

        np = pdev->dev.of_node;
        rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
        if (rc && rc != -EINVAL)
                return -EINVAL;

        have_upstream_irq = !rc;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        kcs_bmc = &priv->kcs_bmc;
        kcs_bmc->dev = &pdev->dev;
        kcs_bmc->channel = channel;
        kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
        kcs_bmc->ops = &aspeed_kcs_ops;

        priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
        if (IS_ERR(priv->map)) {
                dev_err(&pdev->dev, "Couldn't get regmap\n");
                return -ENODEV;
        }

        spin_lock_init(&priv->obe.lock);
        priv->obe.remove = false;
        timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);

        rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
        if (rc)
                return rc;

        /* Host to BMC IRQ */
        rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
        if (rc)
                return rc;

        /* BMC to Host IRQ */
        if (have_upstream_irq) {
                rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
                if (rc < 0)
                        return rc;
        } else {
                priv->upstream_irq.mode = aspeed_kcs_irq_none;
        }

        platform_set_drvdata(pdev, priv);

        aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
        aspeed_kcs_enable_channel(kcs_bmc, true);

        rc = kcs_bmc_add_device(&priv->kcs_bmc);
        if (rc) {
                dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
                return rc;
        }

        dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
                 kcs_bmc->channel, addrs[0]);

        return 0;
}

static int aspeed_kcs_remove(struct platform_device *pdev)
{
        struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
        struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;

        kcs_bmc_remove_device(kcs_bmc);

        aspeed_kcs_enable_channel(kcs_bmc, false);
        aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);

        /* Make sure the OBE poll timer is properly dead */
        spin_lock_irq(&priv->obe.lock);
        priv->obe.remove = true;
        spin_unlock_irq(&priv->obe.lock);
        del_timer_sync(&priv->obe.timer);

        return 0;
}

static const struct aspeed_kcs_of_ops of_v1_ops = {
        .get_channel = aspeed_kcs_of_v1_get_channel,
        .get_io_address = aspeed_kcs_of_v1_get_io_address,
};

static const struct aspeed_kcs_of_ops of_v2_ops = {
        .get_channel = aspeed_kcs_of_v2_get_channel,
        .get_io_address = aspeed_kcs_of_v2_get_io_address,
};

static const struct of_device_id ast_kcs_bmc_match[] = {
        { .compatible = "aspeed,ast2400-kcs-bmc", .data = &of_v1_ops },
        { .compatible = "aspeed,ast2500-kcs-bmc", .data = &of_v1_ops },
        { .compatible = "aspeed,ast2400-kcs-bmc-v2", .data = &of_v2_ops },
        { .compatible = "aspeed,ast2500-kcs-bmc-v2", .data = &of_v2_ops },
        { }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);

static struct platform_driver ast_kcs_bmc_driver = {
        .driver = {
                .name = DEVICE_NAME,
                .of_match_table = ast_kcs_bmc_match,
        },
        .probe = aspeed_kcs_probe,
        .remove = aspeed_kcs_remove,
};
module_platform_driver(ast_kcs_bmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");