/*
 * System Control and Power Interface (SCPI) Message Protocol driver
 *
 * SCPI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/spinlock.h>

#define CMD_ID_SHIFT		0
#define CMD_ID_MASK		0x7f
#define CMD_TOKEN_ID_SHIFT	8
#define CMD_TOKEN_ID_MASK	0xff
#define CMD_DATA_SIZE_SHIFT	16
#define CMD_DATA_SIZE_MASK	0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz)			\
	((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) |	\
	(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token)			\
	((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))

#define CMD_SIZE(cmd)	(((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK	(CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd)	((cmd) & CMD_UNIQ_MASK)

#define SCPI_SLOT		0

#define MAX_DVFS_DOMAINS	8
#define MAX_DVFS_OPPS		8
#define DVFS_LATENCY(hdr)	(le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr)	((le32_to_cpu(hdr) >> 8) & 0xff)

#define PROTOCOL_REV_MINOR_BITS	16
#define PROTOCOL_REV_MINOR_MASK	((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x)	((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x)	((x) & PROTOCOL_REV_MINOR_MASK)

#define FW_REV_MAJOR_BITS	24
#define FW_REV_MINOR_BITS	16
#define FW_REV_PATCH_MASK	((1U << FW_REV_MINOR_BITS) - 1)
#define FW_REV_MINOR_MASK	((1U << FW_REV_MAJOR_BITS) - 1)
#define FW_REV_MAJOR(x)		((x) >> FW_REV_MAJOR_BITS)
#define FW_REV_MINOR(x)		(((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x)		((x) & FW_REV_PATCH_MASK)

#define MAX_RX_TIMEOUT		(msecs_to_jiffies(20))
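/*
 * Command word layout, for illustration (the values below are a worked
 * example, not taken from any particular platform):
 *
 *	PACK_SCPI_CMD(SCPI_CMD_GET_CLOCK_VALUE, 2)
 *	  = (0x10 << CMD_ID_SHIFT) | (2 << CMD_DATA_SIZE_SHIFT)
 *	  = 0x00020010
 *
 *	ADD_SCPI_TOKEN(cmd, 0xab) then fills bits [15:8]:
 *	  0x00020010 -> 0x0002ab10
 *
 * CMD_XTRACT_UNIQ() keeps only the token and command id fields; together
 * they uniquely identify an outstanding request when the matching
 * response arrives (see scpi_process_cmd() below).
 */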
enum scpi_error_codes {
	SCPI_SUCCESS = 0, /* Success */
	SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
	SCPI_ERR_ALIGN = 2, /* Invalid alignment */
	SCPI_ERR_SIZE = 3, /* Invalid size */
	SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
	SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
	SCPI_ERR_RANGE = 6, /* Value out of range */
	SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
	SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
	SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
	SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
	SCPI_ERR_DEVICE = 11, /* Device error */
	SCPI_ERR_BUSY = 12, /* Device busy */
	SCPI_ERR_MAX
};

enum scpi_std_cmd {
	SCPI_CMD_INVALID		= 0x00,
	SCPI_CMD_SCPI_READY		= 0x01,
	SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
	SCPI_CMD_SET_CSS_PWR_STATE	= 0x03,
	SCPI_CMD_GET_CSS_PWR_STATE	= 0x04,
	SCPI_CMD_SET_SYS_PWR_STATE	= 0x05,
	SCPI_CMD_SET_CPU_TIMER		= 0x06,
	SCPI_CMD_CANCEL_CPU_TIMER	= 0x07,
	SCPI_CMD_DVFS_CAPABILITIES	= 0x08,
	SCPI_CMD_GET_DVFS_INFO		= 0x09,
	SCPI_CMD_SET_DVFS		= 0x0a,
	SCPI_CMD_GET_DVFS		= 0x0b,
	SCPI_CMD_GET_DVFS_STAT		= 0x0c,
	SCPI_CMD_CLOCK_CAPABILITIES	= 0x0d,
	SCPI_CMD_GET_CLOCK_INFO		= 0x0e,
	SCPI_CMD_SET_CLOCK_VALUE	= 0x0f,
	SCPI_CMD_GET_CLOCK_VALUE	= 0x10,
	SCPI_CMD_PSU_CAPABILITIES	= 0x11,
	SCPI_CMD_GET_PSU_INFO		= 0x12,
	SCPI_CMD_SET_PSU		= 0x13,
	SCPI_CMD_GET_PSU		= 0x14,
	SCPI_CMD_SENSOR_CAPABILITIES	= 0x15,
	SCPI_CMD_SENSOR_INFO		= 0x16,
	SCPI_CMD_SENSOR_VALUE		= 0x17,
	SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x18,
	SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x19,
	SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1a,
	SCPI_CMD_SET_DEVICE_PWR_STATE	= 0x1b,
	SCPI_CMD_GET_DEVICE_PWR_STATE	= 0x1c,
	SCPI_CMD_COUNT
};

struct scpi_xfer {
	u32 slot; /* has to be first element */
	u32 cmd;
	u32 status;
	const void *tx_buf;
	void *rx_buf;
	unsigned int tx_len;
	unsigned int rx_len;
	struct list_head node;
	struct completion done;
};

struct scpi_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *tx_payload;
	void __iomem *rx_payload;
	struct list_head rx_pending;
	struct list_head xfers_list;
	struct scpi_xfer *xfers;
	spinlock_t rx_lock; /* locking for the rx pending list */
	struct mutex xfers_lock;
	u8 token;
};

struct scpi_drvinfo {
	u32 protocol_version;
	u32 firmware_version;
	int num_chans;
	atomic_t next_chan;
	struct scpi_ops *scpi_ops;
	struct scpi_chan *channels;
	struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
};

/*
 * The SCP firmware only executes in little-endian mode, so any buffers
 * shared through SCPI should have their contents converted to little-endian
 */
struct scpi_shared_mem {
	__le32 command;
	__le32 status;
	u8 payload[0];
} __packed;

struct scp_capabilities {
	__le32 protocol_version;
	__le32 event_version;
	__le32 platform_version;
	__le32 commands[4];
} __packed;

struct clk_get_info {
	__le16 id;
	__le16 flags;
	__le32 min_rate;
	__le32 max_rate;
	u8 name[20];
} __packed;

struct clk_get_value {
	__le32 rate;
} __packed;

struct clk_set_value {
	__le16 id;
	__le16 reserved;
	__le32 rate;
} __packed;

struct dvfs_info {
	__le32 header;
	struct {
		__le32 freq;
		__le32 m_volt;
	} opps[MAX_DVFS_OPPS];
} __packed;

struct dvfs_get {
	u8 index;
} __packed;

struct dvfs_set {
	u8 domain;
	u8 index;
} __packed;

struct sensor_capabilities {
	__le16 sensors;
} __packed;

struct _scpi_sensor_info {
	__le16 sensor_id;
	u8 class;
	u8 trigger_type;
	char name[20];
};

struct sensor_value {
	__le32 val;
} __packed;
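/*
 * A minimal sketch of one request/response through the shared memory
 * area (layout per struct scpi_shared_mem above; the concrete values
 * are an illustration, not dumped from real hardware):
 *
 *	AP writes (tx payload half):
 *	  command = 0x0002ab10	GET_CLOCK_VALUE, token 0xab, 2 data bytes
 *	  payload = 34 12	little-endian clock id 0x1234
 *
 *	SCP replies (rx payload half):
 *	  command = 0x0004ab10	same id and token, 4 data bytes
 *	  status  = 0x00000000	SCPI_SUCCESS
 *	  payload = 00 e1 f5 05	little-endian 100000000 Hz (0x05f5e100)
 *
 * The token/id match is what routes the reply back to the waiting
 * scpi_xfer in scpi_process_cmd().
 */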
static struct scpi_drvinfo *scpi_info;

static int scpi_linux_errmap[SCPI_ERR_MAX] = {
	/* better than switch case as long as return value is continuous */
	0, /* SCPI_SUCCESS */
	-EINVAL, /* SCPI_ERR_PARAM */
	-ENOEXEC, /* SCPI_ERR_ALIGN */
	-EMSGSIZE, /* SCPI_ERR_SIZE */
	-EINVAL, /* SCPI_ERR_HANDLER */
	-EACCES, /* SCPI_ERR_ACCESS */
	-ERANGE, /* SCPI_ERR_RANGE */
	-ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
	-ENOMEM, /* SCPI_ERR_NOMEM */
	-EINVAL, /* SCPI_ERR_PWRSTATE */
	-EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
	-EIO, /* SCPI_ERR_DEVICE */
	-EBUSY, /* SCPI_ERR_BUSY */
};

static inline int scpi_to_linux_errno(int errno)
{
	if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
		return scpi_linux_errmap[errno];
	return -EIO;
}

static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
{
	unsigned long flags;
	struct scpi_xfer *t, *match = NULL;

	spin_lock_irqsave(&ch->rx_lock, flags);
	if (list_empty(&ch->rx_pending)) {
		spin_unlock_irqrestore(&ch->rx_lock, flags);
		return;
	}

	list_for_each_entry(t, &ch->rx_pending, node)
		if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
			list_del(&t->node);
			match = t;
			break;
		}
	/* check if wait_for_completion is in progress or timed-out */
	if (match && !completion_done(&match->done)) {
		struct scpi_shared_mem *mem = ch->rx_payload;
		unsigned int len = min(match->rx_len, CMD_SIZE(cmd));

		match->status = le32_to_cpu(mem->status);
		memcpy_fromio(match->rx_buf, mem->payload, len);
		if (match->rx_len > len)
			memset(match->rx_buf + len, 0, match->rx_len - len);
		complete(&match->done);
	}
	spin_unlock_irqrestore(&ch->rx_lock, flags);
}

static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = ch->rx_payload;
	u32 cmd = le32_to_cpu(mem->command);

	scpi_process_cmd(ch, cmd);
}

static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
	unsigned long flags;
	struct scpi_xfer *t = msg;
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;

	if (t->tx_buf)
		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
	if (t->rx_buf) {
		if (!(++ch->token))
			++ch->token;
		ADD_SCPI_TOKEN(t->cmd, ch->token);
		spin_lock_irqsave(&ch->rx_lock, flags);
		list_add_tail(&t->node, &ch->rx_pending);
		spin_unlock_irqrestore(&ch->rx_lock, flags);
	}
	mem->command = cpu_to_le32(t->cmd);
}

static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
{
	struct scpi_xfer *t;

	mutex_lock(&ch->xfers_lock);
	if (list_empty(&ch->xfers_list)) {
		mutex_unlock(&ch->xfers_lock);
		return NULL;
	}
	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
	list_del(&t->node);
	mutex_unlock(&ch->xfers_lock);
	return t;
}

static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
{
	mutex_lock(&ch->xfers_lock);
	list_add_tail(&t->node, &ch->xfers_list);
	mutex_unlock(&ch->xfers_lock);
}
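/*
 * scpi_send_message() below ties the pieces together. The sequence,
 * sketched in order:
 *
 *	1. pick a channel round-robin and grab a free scpi_xfer slot
 *	2. mbox_send_message() -> scpi_tx_prepare() stamps a fresh token,
 *	   queues the xfer on rx_pending and fills the tx shared memory
 *	3. the SCP's reply raises the mailbox rx callback, which matches
 *	   the token in scpi_process_cmd() and completes the xfer
 *	4. on timeout, scpi_process_cmd() is invoked with our own cmd
 *	   word so the stale entry is removed from rx_pending
 */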
static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
			     void *rx_buf, unsigned int rx_len)
{
	int ret;
	u8 chan;
	struct scpi_xfer *msg;
	struct scpi_chan *scpi_chan;

	chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
	scpi_chan = scpi_info->channels + chan;

	msg = get_scpi_xfer(scpi_chan);
	if (!msg)
		return -ENOMEM;

	msg->slot = BIT(SCPI_SLOT);
	msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
	msg->tx_buf = tx_buf;
	msg->tx_len = tx_len;
	msg->rx_buf = rx_buf;
	msg->rx_len = rx_len;
	init_completion(&msg->done);

	ret = mbox_send_message(scpi_chan->chan, msg);
	if (ret < 0 || !rx_buf)
		goto out;

	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		/*
		 * first status word; scpi_process_cmd() has already
		 * converted it to CPU endianness, so no le32_to_cpu() here
		 */
		ret = msg->status;
out:
	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
		scpi_process_cmd(scpi_chan, msg->cmd);

	put_scpi_xfer(msg, scpi_chan);
	/* SCPI error codes > 0, translate them to Linux scale */
	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}

static u32 scpi_get_version(void)
{
	return scpi_info->protocol_version;
}

static int
scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
{
	int ret;
	struct clk_get_info clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	if (!ret) {
		*min = le32_to_cpu(clk.min_rate);
		*max = le32_to_cpu(clk.max_rate);
	}
	return ret;
}

static unsigned long scpi_clk_get_val(u16 clk_id)
{
	int ret;
	struct clk_get_value clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	return ret ? ret : le32_to_cpu(clk.rate);
}

static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
	int stat;
	struct clk_set_value clk = {
		.id = cpu_to_le16(clk_id),
		.rate = cpu_to_le32(rate)
	};

	return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
				 &stat, sizeof(stat));
}

static int scpi_dvfs_get_idx(u8 domain)
{
	int ret;
	struct dvfs_get dvfs;

	ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
				&dvfs, sizeof(dvfs));
	return ret ? ret : dvfs.index;
}

static int scpi_dvfs_set_idx(u8 domain, u8 index)
{
	int stat;
	struct dvfs_set dvfs = {domain, index};

	return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
				 &stat, sizeof(stat));
}
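/*
 * Worked example of the GET_DVFS_INFO header decode (an illustration,
 * not real firmware output): for header = 0x012c0300,
 *
 *	DVFS_OPP_COUNT(header) = (0x012c0300 >> 8) & 0xff = 3 OPPs
 *	DVFS_LATENCY(header)   =  0x012c0300 >> 16	  = 300 us
 *
 * scpi_dvfs_get_info() below converts that latency to nanoseconds and
 * sorts the OPP table by ascending frequency.
 */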
static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scpi_opp *t1 = opp1, *t2 = opp2;

	return t1->freq - t2->freq;
}

static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
	struct scpi_dvfs_info *info;
	struct scpi_opp *opp;
	struct dvfs_info buf;
	int ret, i;

	if (domain >= MAX_DVFS_DOMAINS)
		return ERR_PTR(-EINVAL);

	if (scpi_info->dvfs[domain]) /* data already populated */
		return scpi_info->dvfs[domain];

	ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
				&buf, sizeof(buf));
	if (ret)
		return ERR_PTR(ret);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->count = DVFS_OPP_COUNT(buf.header);
	info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */

	info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
	if (!info->opps) {
		kfree(info);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
		opp->freq = le32_to_cpu(buf.opps[i].freq);
		opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
	}

	sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);

	scpi_info->dvfs[domain] = info;
	return info;
}

static int scpi_sensor_get_capability(u16 *sensors)
{
	struct sensor_capabilities cap_buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
				sizeof(cap_buf));
	if (!ret)
		*sensors = le16_to_cpu(cap_buf.sensors);

	return ret;
}

static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
{
	__le16 id = cpu_to_le16(sensor_id);
	struct _scpi_sensor_info _info;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
				&_info, sizeof(_info));
	if (!ret) {
		memcpy(info, &_info, sizeof(*info));
		info->sensor_id = le16_to_cpu(_info.sensor_id);
	}

	return ret;
}

static int scpi_sensor_get_value(u16 sensor, u32 *val)
{
	__le16 id = cpu_to_le16(sensor); /* SCP expects a little-endian id */
	struct sensor_value buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
				&buf, sizeof(buf));
	if (!ret)
		*val = le32_to_cpu(buf.val);

	return ret;
}

static struct scpi_ops scpi_ops = {
	.get_version = scpi_get_version,
	.clk_get_range = scpi_clk_get_range,
	.clk_get_val = scpi_clk_get_val,
	.clk_set_val = scpi_clk_set_val,
	.dvfs_get_idx = scpi_dvfs_get_idx,
	.dvfs_set_idx = scpi_dvfs_set_idx,
	.dvfs_get_info = scpi_dvfs_get_info,
	.sensor_get_capability = scpi_sensor_get_capability,
	.sensor_get_info = scpi_sensor_get_info,
	.sensor_get_value = scpi_sensor_get_value,
};

struct scpi_ops *get_scpi_ops(void)
{
	return scpi_info ? scpi_info->scpi_ops : NULL;
}
EXPORT_SYMBOL_GPL(get_scpi_ops);
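/*
 * Minimal consumer-side sketch (the sensor id 0 is hypothetical; real
 * users discover ids via sensor_get_capability()/sensor_get_info()):
 *
 *	struct scpi_ops *ops = get_scpi_ops();
 *	u32 temp;
 *
 *	if (!ops)
 *		return -EPROBE_DEFER;
 *	ret = ops->sensor_get_value(0, &temp);
 *
 * A NULL return means this driver has not probed (or has been removed),
 * so consumers typically defer their own probe until the ops appear.
 */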
static int scpi_init_versions(struct scpi_drvinfo *info)
{
	int ret;
	struct scp_capabilities caps;

	ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
				&caps, sizeof(caps));
	if (!ret) {
		info->protocol_version = le32_to_cpu(caps.protocol_version);
		info->firmware_version = le32_to_cpu(caps.platform_version);
	}
	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d\n",
		       PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		       PROTOCOL_REV_MINOR(scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d.%d\n",
		       FW_REV_MAJOR(scpi_info->firmware_version),
		       FW_REV_MINOR(scpi_info->firmware_version),
		       FW_REV_PATCH(scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

static void
scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
{
	int i;

	for (i = 0; i < count && pchan->chan; i++, pchan++) {
		mbox_free_channel(pchan->chan);
		devm_kfree(dev, pchan->xfers);
		devm_iounmap(dev, pchan->rx_payload);
	}
}

static int scpi_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct scpi_drvinfo *info = platform_get_drvdata(pdev);

	scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */

	of_platform_depopulate(dev);
	sysfs_remove_groups(&dev->kobj, versions_groups);
	scpi_free_channels(dev, info->channels, info->num_chans);
	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
		kfree(info->dvfs[i]->opps);
		kfree(info->dvfs[i]);
	}
	devm_kfree(dev, info->channels);
	devm_kfree(dev, info);

	return 0;
}

#define MAX_SCPI_XFERS		10
static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
{
	int i;
	struct scpi_xfer *xfers;

	xfers = devm_kzalloc(dev, MAX_SCPI_XFERS * sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	ch->xfers = xfers;
	for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++)
		list_add_tail(&xfers->node, &ch->xfers_list);
	return 0;
}
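/*
 * scpi_probe() expects a device tree node along these lines (a typical
 * example; the mailbox and shmem labels are platform-specific):
 *
 *	scpi {
 *		compatible = "arm,scpi";
 *		mboxes = <&mhu 1>;
 *		shmem = <&cpu_scp_hpri>;
 *	};
 *
 * One "shmem" region is expected per mailbox channel; the lower half of
 * each region carries SCP->AP replies and the upper half AP->SCP
 * requests (see the rx_payload/tx_payload split below).
 */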
static int scpi_probe(struct platform_device *pdev)
{
	int count, idx, ret;
	struct resource res;
	struct scpi_chan *scpi_chan;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
	if (!scpi_info)
		return -ENOMEM;

	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	if (count < 0) {
		dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
		return -ENODEV;
	}

	scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
	if (!scpi_chan)
		return -ENOMEM;

	for (idx = 0; idx < count; idx++) {
		resource_size_t size;
		struct scpi_chan *pchan = scpi_chan + idx;
		struct mbox_client *cl = &pchan->cl;
		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);

		ret = of_address_to_resource(shmem, 0, &res);
		of_node_put(shmem); /* drop the ref of_parse_phandle took */
		if (ret) {
			dev_err(dev, "failed to get SCPI payload mem resource\n");
			goto err;
		}

		size = resource_size(&res);
		pchan->rx_payload = devm_ioremap(dev, res.start, size);
		if (!pchan->rx_payload) {
			dev_err(dev, "failed to ioremap SCPI payload\n");
			ret = -EADDRNOTAVAIL;
			goto err;
		}
		pchan->tx_payload = pchan->rx_payload + (size >> 1);

		cl->dev = dev;
		cl->rx_callback = scpi_handle_remote_msg;
		cl->tx_prepare = scpi_tx_prepare;
		cl->tx_block = true;
		cl->tx_tout = 50;
		cl->knows_txdone = false; /* controller can't ack */

		INIT_LIST_HEAD(&pchan->rx_pending);
		INIT_LIST_HEAD(&pchan->xfers_list);
		spin_lock_init(&pchan->rx_lock);
		mutex_init(&pchan->xfers_lock);

		ret = scpi_alloc_xfer_list(dev, pchan);
		if (!ret) {
			pchan->chan = mbox_request_channel(cl, idx);
			if (!IS_ERR(pchan->chan))
				continue;
			ret = PTR_ERR(pchan->chan);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "failed to get channel%d err %d\n",
					idx, ret);
		}
err:
		scpi_free_channels(dev, scpi_chan, idx);
		scpi_info = NULL;
		return ret;
	}

	scpi_info->channels = scpi_chan;
	scpi_info->num_chans = count;
	platform_set_drvdata(pdev, scpi_info);

	ret = scpi_init_versions(scpi_info);
	if (ret) {
		dev_err(dev, "incorrect or no SCP firmware found\n");
		scpi_remove(pdev);
		return ret;
	}

	_dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
		  PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		  PROTOCOL_REV_MINOR(scpi_info->protocol_version),
		  FW_REV_MAJOR(scpi_info->firmware_version),
		  FW_REV_MINOR(scpi_info->firmware_version),
		  FW_REV_PATCH(scpi_info->firmware_version));
	scpi_info->scpi_ops = &scpi_ops;

	ret = sysfs_create_groups(&dev->kobj, versions_groups);
	if (ret)
		dev_err(dev, "unable to create sysfs version group\n");

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
}

static const struct of_device_id scpi_of_match[] = {
	{.compatible = "arm,scpi"},
	{},
};

MODULE_DEVICE_TABLE(of, scpi_of_match);

static struct platform_driver scpi_driver = {
	.driver = {
		.name = "scpi_protocol",
		.of_match_table = scpi_of_match,
	},
	.probe = scpi_probe,
	.remove = scpi_remove,
};
module_platform_driver(scpi_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
MODULE_LICENSE("GPL v2");