/*
 * System Control and Power Interface (SCPI) Message Protocol driver
 *
 * The SCPI Message Protocol is used between the System Control Processor
 * (SCP) and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * The SCP offers control and management of core/cluster power states,
 * DVFS of various power domains including the cores/clusters,
 * configuration of certain system clocks, thermal sensors and much more.
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/spinlock.h>

#define CMD_ID_SHIFT		0
#define CMD_ID_MASK		0x7f
#define CMD_TOKEN_ID_SHIFT	8
#define CMD_TOKEN_ID_MASK	0xff
#define CMD_DATA_SIZE_SHIFT	16
#define CMD_DATA_SIZE_MASK	0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz)			\
	((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) |	\
	(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token)			\
	((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))

#define CMD_SIZE(cmd)	(((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK	(CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd)	((cmd) & CMD_UNIQ_MASK)

#define SCPI_SLOT		0

#define MAX_DVFS_DOMAINS	8
#define MAX_DVFS_OPPS		8
#define DVFS_LATENCY(hdr)	(le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr)	((le32_to_cpu(hdr) >> 8) & 0xff)

#define PROTOCOL_REV_MINOR_BITS	16
#define PROTOCOL_REV_MINOR_MASK	((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x)	((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x)	((x) & PROTOCOL_REV_MINOR_MASK)

#define FW_REV_MAJOR_BITS	24
#define FW_REV_MINOR_BITS	16
#define FW_REV_PATCH_MASK	((1U << FW_REV_MINOR_BITS) - 1)
#define FW_REV_MINOR_MASK	((1U << FW_REV_MAJOR_BITS) - 1)
#define FW_REV_MAJOR(x)		((x) >> FW_REV_MAJOR_BITS)
#define FW_REV_MINOR(x)		(((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x)		((x) & FW_REV_PATCH_MASK)

#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))
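
/*
 * Layout of the 32-bit command word built by PACK_SCPI_CMD() and
 * ADD_SCPI_TOKEN() above, as derived from the shift/mask macros:
 *
 *	[6:0]	command ID
 *	[15:8]	token, used to match a response to its request
 *	[24:16]	payload data size in bytes
 *
 * For example, PACK_SCPI_CMD(SCPI_CMD_GET_CLOCK_VALUE, 2) yields
 * 0x00020010: command 0x10 with a 2-byte payload and the token still
 * zero; the token is patched in later by scpi_tx_prepare().
 */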

enum scpi_error_codes {
	SCPI_SUCCESS = 0, /* Success */
	SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
	SCPI_ERR_ALIGN = 2, /* Invalid alignment */
	SCPI_ERR_SIZE = 3, /* Invalid size */
	SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
	SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
	SCPI_ERR_RANGE = 6, /* Value out of range */
	SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
	SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
	SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
	SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
	SCPI_ERR_DEVICE = 11, /* Device error */
	SCPI_ERR_BUSY = 12, /* Device busy */
	SCPI_ERR_MAX
};

enum scpi_std_cmd {
	SCPI_CMD_INVALID		= 0x00,
	SCPI_CMD_SCPI_READY		= 0x01,
	SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
	SCPI_CMD_SET_CSS_PWR_STATE	= 0x03,
	SCPI_CMD_GET_CSS_PWR_STATE	= 0x04,
	SCPI_CMD_SET_SYS_PWR_STATE	= 0x05,
	SCPI_CMD_SET_CPU_TIMER		= 0x06,
	SCPI_CMD_CANCEL_CPU_TIMER	= 0x07,
	SCPI_CMD_DVFS_CAPABILITIES	= 0x08,
	SCPI_CMD_GET_DVFS_INFO		= 0x09,
	SCPI_CMD_SET_DVFS		= 0x0a,
	SCPI_CMD_GET_DVFS		= 0x0b,
	SCPI_CMD_GET_DVFS_STAT		= 0x0c,
	SCPI_CMD_CLOCK_CAPABILITIES	= 0x0d,
	SCPI_CMD_GET_CLOCK_INFO		= 0x0e,
	SCPI_CMD_SET_CLOCK_VALUE	= 0x0f,
	SCPI_CMD_GET_CLOCK_VALUE	= 0x10,
	SCPI_CMD_PSU_CAPABILITIES	= 0x11,
	SCPI_CMD_GET_PSU_INFO		= 0x12,
	SCPI_CMD_SET_PSU		= 0x13,
	SCPI_CMD_GET_PSU		= 0x14,
	SCPI_CMD_SENSOR_CAPABILITIES	= 0x15,
	SCPI_CMD_SENSOR_INFO		= 0x16,
	SCPI_CMD_SENSOR_VALUE		= 0x17,
	SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x18,
	SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x19,
	SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1a,
	SCPI_CMD_SET_DEVICE_PWR_STATE	= 0x1b,
	SCPI_CMD_GET_DEVICE_PWR_STATE	= 0x1c,
	SCPI_CMD_COUNT
};

struct scpi_xfer {
	u32 slot; /* has to be first element */
	u32 cmd;
	u32 status;
	const void *tx_buf;
	void *rx_buf;
	unsigned int tx_len;
	unsigned int rx_len;
	struct list_head node;
	struct completion done;
};

struct scpi_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *tx_payload;
	void __iomem *rx_payload;
	struct list_head rx_pending;
	struct list_head xfers_list;
	struct scpi_xfer *xfers;
	spinlock_t rx_lock; /* locking for the rx pending list */
	struct mutex xfers_lock;
	u8 token;
};

struct scpi_drvinfo {
	u32 protocol_version;
	u32 firmware_version;
	int num_chans;
	atomic_t next_chan;
	struct scpi_ops *scpi_ops;
	struct scpi_chan *channels;
	struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
};

/*
 * The SCP firmware only executes in little-endian mode, so any buffers
 * shared through SCPI should have their contents converted to little-endian
 */
struct scpi_shared_mem {
	__le32 command;
	__le32 status;
	u8 payload[0];
} __packed;

struct scp_capabilities {
	__le32 protocol_version;
	__le32 event_version;
	__le32 platform_version;
	__le32 commands[4];
} __packed;

struct clk_get_info {
	__le16 id;
	__le16 flags;
	__le32 min_rate;
	__le32 max_rate;
	u8 name[20];
} __packed;

struct clk_get_value {
	__le32 rate;
} __packed;

struct clk_set_value {
	__le16 id;
	__le16 reserved;
	__le32 rate;
} __packed;
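
/*
 * The header word of a GET_DVFS_INFO reply encodes, per the
 * DVFS_OPP_COUNT() and DVFS_LATENCY() macros above, the number of
 * operating points in bits [15:8] and the DVFS transition latency
 * (in microseconds) in bits [31:16]; bits [7:0] are not decoded by
 * this driver.
 */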

struct dvfs_info {
	__le32 header;
	struct {
		__le32 freq;
		__le32 m_volt;
	} opps[MAX_DVFS_OPPS];
} __packed;

struct dvfs_get {
	u8 index;
} __packed;

struct dvfs_set {
	u8 domain;
	u8 index;
} __packed;

struct sensor_capabilities {
	__le16 sensors;
} __packed;

struct _scpi_sensor_info {
	__le16 sensor_id;
	u8 class;
	u8 trigger_type;
	char name[20];
} __packed;

struct sensor_value {
	__le32 lo_val;
	__le32 hi_val;
} __packed;

static struct scpi_drvinfo *scpi_info;

static int scpi_linux_errmap[SCPI_ERR_MAX] = {
	/* better than switch case as long as return value is continuous */
	0, /* SCPI_SUCCESS */
	-EINVAL, /* SCPI_ERR_PARAM */
	-ENOEXEC, /* SCPI_ERR_ALIGN */
	-EMSGSIZE, /* SCPI_ERR_SIZE */
	-EINVAL, /* SCPI_ERR_HANDLER */
	-EACCES, /* SCPI_ERR_ACCESS */
	-ERANGE, /* SCPI_ERR_RANGE */
	-ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
	-ENOMEM, /* SCPI_ERR_NOMEM */
	-EINVAL, /* SCPI_ERR_PWRSTATE */
	-EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
	-EIO, /* SCPI_ERR_DEVICE */
	-EBUSY, /* SCPI_ERR_BUSY */
};

static inline int scpi_to_linux_errno(int errno)
{
	if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
		return scpi_linux_errmap[errno];
	return -EIO;
}

static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
{
	unsigned long flags;
	struct scpi_xfer *t, *match = NULL;

	spin_lock_irqsave(&ch->rx_lock, flags);
	if (list_empty(&ch->rx_pending)) {
		spin_unlock_irqrestore(&ch->rx_lock, flags);
		return;
	}

	/* find the pending request whose command ID and token match */
	list_for_each_entry(t, &ch->rx_pending, node)
		if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
			list_del(&t->node);
			match = t;
			break;
		}
	/* check if wait_for_completion is in progress or timed-out */
	if (match && !completion_done(&match->done)) {
		struct scpi_shared_mem *mem = ch->rx_payload;
		unsigned int len = min(match->rx_len, CMD_SIZE(cmd));

		match->status = le32_to_cpu(mem->status);
		memcpy_fromio(match->rx_buf, mem->payload, len);
		if (match->rx_len > len)
			memset(match->rx_buf + len, 0, match->rx_len - len);
		complete(&match->done);
	}
	spin_unlock_irqrestore(&ch->rx_lock, flags);
}

static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = ch->rx_payload;
	u32 cmd = le32_to_cpu(mem->command);

	scpi_process_cmd(ch, cmd);
}

static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
	unsigned long flags;
	struct scpi_xfer *t = msg;
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;

	if (t->tx_buf)
		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
	if (t->rx_buf) {
		/* tag the request with the next non-zero token */
		if (!(++ch->token))
			++ch->token;
		ADD_SCPI_TOKEN(t->cmd, ch->token);
		spin_lock_irqsave(&ch->rx_lock, flags);
		list_add_tail(&t->node, &ch->rx_pending);
		spin_unlock_irqrestore(&ch->rx_lock, flags);
	}
	mem->command = cpu_to_le32(t->cmd);
}

static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
{
	struct scpi_xfer *t;

	mutex_lock(&ch->xfers_lock);
	if (list_empty(&ch->xfers_list)) {
		mutex_unlock(&ch->xfers_lock);
		return NULL;
	}
	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
	list_del(&t->node);
	mutex_unlock(&ch->xfers_lock);
	return t;
}

static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
{
	mutex_lock(&ch->xfers_lock);
	list_add_tail(&t->node, &ch->xfers_list);
	mutex_unlock(&ch->xfers_lock);
}
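
/*
 * Send an SCPI command and, if rx_buf is supplied, block (with a 30ms
 * timeout) until the matching response arrives. Channels are picked
 * round-robin via the next_chan counter, so concurrent callers spread
 * across all available mailbox channels.
 */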

static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
			     void *rx_buf, unsigned int rx_len)
{
	int ret;
	u8 chan;
	struct scpi_xfer *msg;
	struct scpi_chan *scpi_chan;

	chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
	scpi_chan = scpi_info->channels + chan;

	msg = get_scpi_xfer(scpi_chan);
	if (!msg)
		return -ENOMEM;

	msg->slot = BIT(SCPI_SLOT);
	msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
	msg->tx_buf = tx_buf;
	msg->tx_len = tx_len;
	msg->rx_buf = rx_buf;
	msg->rx_len = rx_len;
	init_completion(&msg->done);

	ret = mbox_send_message(scpi_chan->chan, msg);
	if (ret < 0 || !rx_buf)
		goto out;

	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		/* first status word */
		ret = msg->status;
out:
	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
		scpi_process_cmd(scpi_chan, msg->cmd);

	put_scpi_xfer(msg, scpi_chan);
	/* SCPI error codes are > 0; translate them to the Linux scale */
	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}

static u32 scpi_get_version(void)
{
	return scpi_info->protocol_version;
}

static int
scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
{
	int ret;
	struct clk_get_info clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	if (!ret) {
		*min = le32_to_cpu(clk.min_rate);
		*max = le32_to_cpu(clk.max_rate);
	}
	return ret;
}

static unsigned long scpi_clk_get_val(u16 clk_id)
{
	int ret;
	struct clk_get_value clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	return ret ? ret : le32_to_cpu(clk.rate);
}
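
/*
 * For SET commands the reply payload is just a status word, which the
 * callers below discard into a dummy 'stat'; a non-NULL rx_buf is still
 * needed because scpi_send_message() only waits for a reply when one
 * is supplied.
 */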

static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
	int stat;
	struct clk_set_value clk = {
		.id = cpu_to_le16(clk_id),
		.rate = cpu_to_le32(rate)
	};

	return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
				 &stat, sizeof(stat));
}

static int scpi_dvfs_get_idx(u8 domain)
{
	int ret;
	struct dvfs_get dvfs;

	ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
				&dvfs, sizeof(dvfs));
	return ret ? ret : dvfs.index;
}

static int scpi_dvfs_set_idx(u8 domain, u8 index)
{
	int stat;
	struct dvfs_set dvfs = {domain, index};

	return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
				 &stat, sizeof(stat));
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scpi_opp *t1 = opp1, *t2 = opp2;

	return t1->freq - t2->freq;
}

static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
	struct scpi_dvfs_info *info;
	struct scpi_opp *opp;
	struct dvfs_info buf;
	int ret, i;

	if (domain >= MAX_DVFS_DOMAINS)
		return ERR_PTR(-EINVAL);

	if (scpi_info->dvfs[domain])	/* data already populated */
		return scpi_info->dvfs[domain];

	ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
				&buf, sizeof(buf));
	if (ret)
		return ERR_PTR(ret);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->count = DVFS_OPP_COUNT(buf.header);
	info->latency = DVFS_LATENCY(buf.header) * 1000; /* us to ns */

	info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
	if (!info->opps) {
		kfree(info);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
		opp->freq = le32_to_cpu(buf.opps[i].freq);
		opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
	}

	/* keep the OPPs sorted by ascending frequency */
	sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);

	scpi_info->dvfs[domain] = info;
	return info;
}

static int scpi_sensor_get_capability(u16 *sensors)
{
	struct sensor_capabilities cap_buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
				sizeof(cap_buf));
	if (!ret)
		*sensors = le16_to_cpu(cap_buf.sensors);

	return ret;
}

static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
{
	__le16 id = cpu_to_le16(sensor_id);
	struct _scpi_sensor_info _info;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
				&_info, sizeof(_info));
	if (!ret) {
		memcpy(info, &_info, sizeof(*info));
		info->sensor_id = le16_to_cpu(_info.sensor_id);
	}

	return ret;
}

static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
	__le16 id = cpu_to_le16(sensor);
	struct sensor_value buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
				&buf, sizeof(buf));
	if (!ret)
		*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
			le32_to_cpu(buf.lo_val);

	return ret;
}

static struct scpi_ops scpi_ops = {
	.get_version = scpi_get_version,
	.clk_get_range = scpi_clk_get_range,
	.clk_get_val = scpi_clk_get_val,
	.clk_set_val = scpi_clk_set_val,
	.dvfs_get_idx = scpi_dvfs_get_idx,
	.dvfs_set_idx = scpi_dvfs_set_idx,
	.dvfs_get_info = scpi_dvfs_get_info,
	.sensor_get_capability = scpi_sensor_get_capability,
	.sensor_get_info = scpi_sensor_get_info,
	.sensor_get_value = scpi_sensor_get_value,
};
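
/*
 * Consumer drivers retrieve these ops via get_scpi_ops() below. A
 * minimal sketch (hypothetical caller, clock ID 0 chosen purely for
 * illustration):
 *
 *	struct scpi_ops *ops = get_scpi_ops();
 *
 *	if (ops) {
 *		unsigned long rate = ops->clk_get_val(0);
 *
 *		ops->clk_set_val(0, rate);
 *	}
 *
 * get_scpi_ops() returns NULL until this driver has probed, so
 * consumers should treat NULL as a cue to defer probing.
 */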

struct scpi_ops *get_scpi_ops(void)
{
	return scpi_info ? scpi_info->scpi_ops : NULL;
}
EXPORT_SYMBOL_GPL(get_scpi_ops);

static int scpi_init_versions(struct scpi_drvinfo *info)
{
	int ret;
	struct scp_capabilities caps;

	ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
				&caps, sizeof(caps));
	if (!ret) {
		info->protocol_version = le32_to_cpu(caps.protocol_version);
		info->firmware_version = le32_to_cpu(caps.platform_version);
	}
	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d\n",
		       PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		       PROTOCOL_REV_MINOR(scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d.%d\n",
		       FW_REV_MAJOR(scpi_info->firmware_version),
		       FW_REV_MINOR(scpi_info->firmware_version),
		       FW_REV_PATCH(scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

static void
scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
{
	int i;

	for (i = 0; i < count && pchan->chan; i++, pchan++) {
		mbox_free_channel(pchan->chan);
		devm_kfree(dev, pchan->xfers);
		devm_iounmap(dev, pchan->rx_payload);
	}
}

static int scpi_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct scpi_drvinfo *info = platform_get_drvdata(pdev);

	scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */

	of_platform_depopulate(dev);
	sysfs_remove_groups(&dev->kobj, versions_groups);
	scpi_free_channels(dev, info->channels, info->num_chans);
	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
		kfree(info->dvfs[i]->opps);
		kfree(info->dvfs[i]);
	}
	devm_kfree(dev, info->channels);
	devm_kfree(dev, info);

	return 0;
}

#define MAX_SCPI_XFERS		10
static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
{
	int i;
	struct scpi_xfer *xfers;

	xfers = devm_kzalloc(dev, MAX_SCPI_XFERS * sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	ch->xfers = xfers;
	for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++)
		list_add_tail(&xfers->node, &ch->xfers_list);
	return 0;
}
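
/*
 * Probe sets up one scpi_chan per "mboxes" entry: each channel gets a
 * pool of MAX_SCPI_XFERS transfer descriptors and ioremaps its "shmem"
 * region, the lower half of which carries SCP-to-AP (rx) traffic and
 * the upper half AP-to-SCP (tx) traffic, per the tx_payload assignment
 * below.
 */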

static int scpi_probe(struct platform_device *pdev)
{
	int count, idx, ret;
	struct resource res;
	struct scpi_chan *scpi_chan;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
	if (!scpi_info)
		return -ENOMEM;

	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	if (count < 0) {
		dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
		return -ENODEV;
	}

	scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
	if (!scpi_chan)
		return -ENOMEM;

	for (idx = 0; idx < count; idx++) {
		resource_size_t size;
		struct scpi_chan *pchan = scpi_chan + idx;
		struct mbox_client *cl = &pchan->cl;
		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);

		ret = of_address_to_resource(shmem, 0, &res);
		of_node_put(shmem);
		if (ret) {
			dev_err(dev, "failed to get SCPI payload mem resource\n");
			goto err;
		}

		size = resource_size(&res);
		pchan->rx_payload = devm_ioremap(dev, res.start, size);
		if (!pchan->rx_payload) {
			dev_err(dev, "failed to ioremap SCPI payload\n");
			ret = -EADDRNOTAVAIL;
			goto err;
		}
		pchan->tx_payload = pchan->rx_payload + (size >> 1);

		cl->dev = dev;
		cl->rx_callback = scpi_handle_remote_msg;
		cl->tx_prepare = scpi_tx_prepare;
		cl->tx_block = true;
		cl->tx_tout = 20;
		cl->knows_txdone = false; /* controller can't ack */

		INIT_LIST_HEAD(&pchan->rx_pending);
		INIT_LIST_HEAD(&pchan->xfers_list);
		spin_lock_init(&pchan->rx_lock);
		mutex_init(&pchan->xfers_lock);

		ret = scpi_alloc_xfer_list(dev, pchan);
		if (!ret) {
			pchan->chan = mbox_request_channel(cl, idx);
			if (!IS_ERR(pchan->chan))
				continue;
			ret = PTR_ERR(pchan->chan);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "failed to get channel%d err %d\n",
					idx, ret);
		}
err:
		scpi_free_channels(dev, scpi_chan, idx);
		scpi_info = NULL;
		return ret;
	}

	scpi_info->channels = scpi_chan;
	scpi_info->num_chans = count;
	platform_set_drvdata(pdev, scpi_info);

	ret = scpi_init_versions(scpi_info);
	if (ret) {
		dev_err(dev, "incorrect or no SCP firmware found\n");
		scpi_remove(pdev);
		return ret;
	}

	dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
		 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		 PROTOCOL_REV_MINOR(scpi_info->protocol_version),
		 FW_REV_MAJOR(scpi_info->firmware_version),
		 FW_REV_MINOR(scpi_info->firmware_version),
		 FW_REV_PATCH(scpi_info->firmware_version));
	scpi_info->scpi_ops = &scpi_ops;

	ret = sysfs_create_groups(&dev->kobj, versions_groups);
	if (ret)
		dev_err(dev, "unable to create sysfs version group\n");

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
}

static const struct of_device_id scpi_of_match[] = {
	{.compatible = "arm,scpi"},
	{},
};

MODULE_DEVICE_TABLE(of, scpi_of_match);

static struct platform_driver scpi_driver = {
	.driver = {
		.name = "scpi_protocol",
		.of_match_table = scpi_of_match,
	},
	.probe = scpi_probe,
	.remove = scpi_remove,
};
module_platform_driver(scpi_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
MODULE_LICENSE("GPL v2");