1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 2 * 3 * Description: CoreSight Embedded Trace Buffer driver 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 and 7 * only version 2 as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 */ 14 15 #include <asm/local.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/types.h> 19 #include <linux/device.h> 20 #include <linux/io.h> 21 #include <linux/err.h> 22 #include <linux/fs.h> 23 #include <linux/miscdevice.h> 24 #include <linux/uaccess.h> 25 #include <linux/slab.h> 26 #include <linux/spinlock.h> 27 #include <linux/pm_runtime.h> 28 #include <linux/seq_file.h> 29 #include <linux/coresight.h> 30 #include <linux/amba/bus.h> 31 #include <linux/clk.h> 32 #include <linux/circ_buf.h> 33 #include <linux/mm.h> 34 #include <linux/perf_event.h> 35 36 #include <asm/local.h> 37 38 #include "coresight-priv.h" 39 40 #define ETB_RAM_DEPTH_REG 0x004 41 #define ETB_STATUS_REG 0x00c 42 #define ETB_RAM_READ_DATA_REG 0x010 43 #define ETB_RAM_READ_POINTER 0x014 44 #define ETB_RAM_WRITE_POINTER 0x018 45 #define ETB_TRG 0x01c 46 #define ETB_CTL_REG 0x020 47 #define ETB_RWD_REG 0x024 48 #define ETB_FFSR 0x300 49 #define ETB_FFCR 0x304 50 #define ETB_ITMISCOP0 0xee0 51 #define ETB_ITTRFLINACK 0xee4 52 #define ETB_ITTRFLIN 0xee8 53 #define ETB_ITATBDATA0 0xeeC 54 #define ETB_ITATBCTR2 0xef0 55 #define ETB_ITATBCTR1 0xef4 56 #define ETB_ITATBCTR0 0xef8 57 58 /* register description */ 59 /* STS - 0x00C */ 60 #define ETB_STATUS_RAM_FULL BIT(0) 61 /* CTL - 0x020 */ 62 #define ETB_CTL_CAPT_EN BIT(0) 63 /* FFCR - 0x304 */ 64 #define ETB_FFCR_EN_FTC BIT(0) 65 #define ETB_FFCR_FON_MAN BIT(6) 66 #define ETB_FFCR_STOP_FI BIT(12) 67 #define ETB_FFCR_STOP_TRIGGER BIT(13) 68 69 #define ETB_FFCR_BIT 6 70 #define ETB_FFSR_BIT 1 71 #define ETB_FRAME_SIZE_WORDS 4 72 73 /** 74 * struct cs_buffer - keep track of a recording session' specifics 75 * @cur: index of the current buffer 76 * @nr_pages: max number of pages granted to us 77 * @offset: offset within the current buffer 78 * @data_size: how much we collected in this run 79 * @lost: other than zero if we had a HW buffer wrap around 80 * @snapshot: is this run in snapshot mode 81 * @data_pages: a handle the ring buffer 82 */ 83 struct cs_buffers { 84 unsigned int cur; 85 unsigned int nr_pages; 86 unsigned long offset; 87 local_t data_size; 88 local_t lost; 89 bool snapshot; 90 void **data_pages; 91 }; 92 93 /** 94 * struct etb_drvdata - specifics associated to an ETB component 95 * @base: memory mapped base address for this component. 96 * @dev: the device entity associated to this component. 97 * @atclk: optional clock for the core parts of the ETB. 98 * @csdev: component vitals needed by the framework. 99 * @miscdev: specifics to handle "/dev/xyz.etb" entry. 100 * @spinlock: only one at a time pls. 101 * @reading: synchronise user space access to etb buffer. 102 * @mode: this ETB is being used. 103 * @buf: area of memory where ETB buffer content gets sent. 104 * @buffer_depth: size of @buf. 105 * @trigger_cntr: amount of words to store after a trigger. 106 */ 107 struct etb_drvdata { 108 void __iomem *base; 109 struct device *dev; 110 struct clk *atclk; 111 struct coresight_device *csdev; 112 struct miscdevice miscdev; 113 spinlock_t spinlock; 114 local_t reading; 115 local_t mode; 116 u8 *buf; 117 u32 buffer_depth; 118 u32 trigger_cntr; 119 }; 120 121 static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata) 122 { 123 u32 depth = 0; 124 125 pm_runtime_get_sync(drvdata->dev); 126 127 /* RO registers don't need locking */ 128 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); 129 130 pm_runtime_put(drvdata->dev); 131 return depth; 132 } 133 134 static void etb_enable_hw(struct etb_drvdata *drvdata) 135 { 136 int i; 137 u32 depth; 138 139 CS_UNLOCK(drvdata->base); 140 141 depth = drvdata->buffer_depth; 142 /* reset write RAM pointer address */ 143 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); 144 /* clear entire RAM buffer */ 145 for (i = 0; i < depth; i++) 146 writel_relaxed(0x0, drvdata->base + ETB_RWD_REG); 147 148 /* reset write RAM pointer address */ 149 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); 150 /* reset read RAM pointer address */ 151 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); 152 153 writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG); 154 writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER, 155 drvdata->base + ETB_FFCR); 156 /* ETB trace capture enable */ 157 writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG); 158 159 CS_LOCK(drvdata->base); 160 } 161 162 static int etb_enable(struct coresight_device *csdev, u32 mode) 163 { 164 u32 val; 165 unsigned long flags; 166 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 167 168 val = local_cmpxchg(&drvdata->mode, 169 CS_MODE_DISABLED, mode); 170 /* 171 * When accessing from Perf, a HW buffer can be handled 172 * by a single trace entity. In sysFS mode many tracers 173 * can be logging to the same HW buffer. 174 */ 175 if (val == CS_MODE_PERF) 176 return -EBUSY; 177 178 /* Nothing to do, the tracer is already enabled. */ 179 if (val == CS_MODE_SYSFS) 180 goto out; 181 182 spin_lock_irqsave(&drvdata->spinlock, flags); 183 etb_enable_hw(drvdata); 184 spin_unlock_irqrestore(&drvdata->spinlock, flags); 185 186 out: 187 dev_info(drvdata->dev, "ETB enabled\n"); 188 return 0; 189 } 190 191 static void etb_disable_hw(struct etb_drvdata *drvdata) 192 { 193 u32 ffcr; 194 195 CS_UNLOCK(drvdata->base); 196 197 ffcr = readl_relaxed(drvdata->base + ETB_FFCR); 198 /* stop formatter when a stop has completed */ 199 ffcr |= ETB_FFCR_STOP_FI; 200 writel_relaxed(ffcr, drvdata->base + ETB_FFCR); 201 /* manually generate a flush of the system */ 202 ffcr |= ETB_FFCR_FON_MAN; 203 writel_relaxed(ffcr, drvdata->base + ETB_FFCR); 204 205 if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) { 206 dev_err(drvdata->dev, 207 "timeout observed when probing at offset %#x\n", 208 ETB_FFCR); 209 } 210 211 /* disable trace capture */ 212 writel_relaxed(0x0, drvdata->base + ETB_CTL_REG); 213 214 if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) { 215 dev_err(drvdata->dev, 216 "timeout observed when probing at offset %#x\n", 217 ETB_FFCR); 218 } 219 220 CS_LOCK(drvdata->base); 221 } 222 223 static void etb_dump_hw(struct etb_drvdata *drvdata) 224 { 225 int i; 226 u8 *buf_ptr; 227 u32 read_data, depth; 228 u32 read_ptr, write_ptr; 229 u32 frame_off, frame_endoff; 230 231 CS_UNLOCK(drvdata->base); 232 233 read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); 234 write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); 235 236 frame_off = write_ptr % ETB_FRAME_SIZE_WORDS; 237 frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off; 238 if (frame_off) { 239 dev_err(drvdata->dev, 240 "write_ptr: %lu not aligned to formatter frame size\n", 241 (unsigned long)write_ptr); 242 dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n", 243 (unsigned long)frame_off, (unsigned long)frame_endoff); 244 write_ptr += frame_endoff; 245 } 246 247 if ((readl_relaxed(drvdata->base + ETB_STATUS_REG) 248 & ETB_STATUS_RAM_FULL) == 0) 249 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); 250 else 251 writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER); 252 253 depth = drvdata->buffer_depth; 254 buf_ptr = drvdata->buf; 255 for (i = 0; i < depth; i++) { 256 read_data = readl_relaxed(drvdata->base + 257 ETB_RAM_READ_DATA_REG); 258 *buf_ptr++ = read_data >> 0; 259 *buf_ptr++ = read_data >> 8; 260 *buf_ptr++ = read_data >> 16; 261 *buf_ptr++ = read_data >> 24; 262 } 263 264 if (frame_off) { 265 buf_ptr -= (frame_endoff * 4); 266 for (i = 0; i < frame_endoff; i++) { 267 *buf_ptr++ = 0x0; 268 *buf_ptr++ = 0x0; 269 *buf_ptr++ = 0x0; 270 *buf_ptr++ = 0x0; 271 } 272 } 273 274 writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); 275 276 CS_LOCK(drvdata->base); 277 } 278 279 static void etb_disable(struct coresight_device *csdev) 280 { 281 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 282 unsigned long flags; 283 284 spin_lock_irqsave(&drvdata->spinlock, flags); 285 etb_disable_hw(drvdata); 286 etb_dump_hw(drvdata); 287 spin_unlock_irqrestore(&drvdata->spinlock, flags); 288 289 local_set(&drvdata->mode, CS_MODE_DISABLED); 290 291 dev_info(drvdata->dev, "ETB disabled\n"); 292 } 293 294 static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu, 295 void **pages, int nr_pages, bool overwrite) 296 { 297 int node; 298 struct cs_buffers *buf; 299 300 if (cpu == -1) 301 cpu = smp_processor_id(); 302 node = cpu_to_node(cpu); 303 304 buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); 305 if (!buf) 306 return NULL; 307 308 buf->snapshot = overwrite; 309 buf->nr_pages = nr_pages; 310 buf->data_pages = pages; 311 312 return buf; 313 } 314 315 static void etb_free_buffer(void *config) 316 { 317 struct cs_buffers *buf = config; 318 319 kfree(buf); 320 } 321 322 static int etb_set_buffer(struct coresight_device *csdev, 323 struct perf_output_handle *handle, 324 void *sink_config) 325 { 326 int ret = 0; 327 unsigned long head; 328 struct cs_buffers *buf = sink_config; 329 330 /* wrap head around to the amount of space we have */ 331 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); 332 333 /* find the page to write to */ 334 buf->cur = head / PAGE_SIZE; 335 336 /* and offset within that page */ 337 buf->offset = head % PAGE_SIZE; 338 339 local_set(&buf->data_size, 0); 340 341 return ret; 342 } 343 344 static unsigned long etb_reset_buffer(struct coresight_device *csdev, 345 struct perf_output_handle *handle, 346 void *sink_config, bool *lost) 347 { 348 unsigned long size = 0; 349 struct cs_buffers *buf = sink_config; 350 351 if (buf) { 352 /* 353 * In snapshot mode ->data_size holds the new address of the 354 * ring buffer's head. The size itself is the whole address 355 * range since we want the latest information. 356 */ 357 if (buf->snapshot) 358 handle->head = local_xchg(&buf->data_size, 359 buf->nr_pages << PAGE_SHIFT); 360 361 /* 362 * Tell the tracer PMU how much we got in this run and if 363 * something went wrong along the way. Nobody else can use 364 * this cs_buffers instance until we are done. As such 365 * resetting parameters here and squaring off with the ring 366 * buffer API in the tracer PMU is fine. 367 */ 368 *lost = !!local_xchg(&buf->lost, 0); 369 size = local_xchg(&buf->data_size, 0); 370 } 371 372 return size; 373 } 374 375 static void etb_update_buffer(struct coresight_device *csdev, 376 struct perf_output_handle *handle, 377 void *sink_config) 378 { 379 int i, cur; 380 u8 *buf_ptr; 381 u32 read_ptr, write_ptr, capacity; 382 u32 status, read_data, to_read; 383 unsigned long offset; 384 struct cs_buffers *buf = sink_config; 385 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 386 387 if (!buf) 388 return; 389 390 capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; 391 392 CS_UNLOCK(drvdata->base); 393 etb_disable_hw(drvdata); 394 395 /* unit is in words, not bytes */ 396 read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); 397 write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); 398 399 /* 400 * Entries should be aligned to the frame size. If they are not 401 * go back to the last alignement point to give decoding tools a 402 * chance to fix things. 403 */ 404 if (write_ptr % ETB_FRAME_SIZE_WORDS) { 405 dev_err(drvdata->dev, 406 "write_ptr: %lu not aligned to formatter frame size\n", 407 (unsigned long)write_ptr); 408 409 write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1); 410 local_inc(&buf->lost); 411 } 412 413 /* 414 * Get a hold of the status register and see if a wrap around 415 * has occurred. If so adjust things accordingly. Otherwise 416 * start at the beginning and go until the write pointer has 417 * been reached. 418 */ 419 status = readl_relaxed(drvdata->base + ETB_STATUS_REG); 420 if (status & ETB_STATUS_RAM_FULL) { 421 local_inc(&buf->lost); 422 to_read = capacity; 423 read_ptr = write_ptr; 424 } else { 425 to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth); 426 to_read *= ETB_FRAME_SIZE_WORDS; 427 } 428 429 /* 430 * Make sure we don't overwrite data that hasn't been consumed yet. 431 * It is entirely possible that the HW buffer has more data than the 432 * ring buffer can currently handle. If so adjust the start address 433 * to take only the last traces. 434 * 435 * In snapshot mode we are looking to get the latest traces only and as 436 * such, we don't care about not overwriting data that hasn't been 437 * processed by user space. 438 */ 439 if (!buf->snapshot && to_read > handle->size) { 440 u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); 441 442 /* The new read pointer must be frame size aligned */ 443 to_read -= handle->size & mask; 444 /* 445 * Move the RAM read pointer up, keeping in mind that 446 * everything is in frame size units. 447 */ 448 read_ptr = (write_ptr + drvdata->buffer_depth) - 449 to_read / ETB_FRAME_SIZE_WORDS; 450 /* Wrap around if need be*/ 451 read_ptr &= ~(drvdata->buffer_depth - 1); 452 /* let the decoder know we've skipped ahead */ 453 local_inc(&buf->lost); 454 } 455 456 /* finally tell HW where we want to start reading from */ 457 writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); 458 459 cur = buf->cur; 460 offset = buf->offset; 461 for (i = 0; i < to_read; i += 4) { 462 buf_ptr = buf->data_pages[cur] + offset; 463 read_data = readl_relaxed(drvdata->base + 464 ETB_RAM_READ_DATA_REG); 465 *buf_ptr++ = read_data >> 0; 466 *buf_ptr++ = read_data >> 8; 467 *buf_ptr++ = read_data >> 16; 468 *buf_ptr++ = read_data >> 24; 469 470 offset += 4; 471 if (offset >= PAGE_SIZE) { 472 offset = 0; 473 cur++; 474 /* wrap around at the end of the buffer */ 475 cur &= buf->nr_pages - 1; 476 } 477 } 478 479 /* reset ETB buffer for next run */ 480 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); 481 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); 482 483 /* 484 * In snapshot mode all we have to do is communicate to 485 * perf_aux_output_end() the address of the current head. In full 486 * trace mode the same function expects a size to move rb->aux_head 487 * forward. 488 */ 489 if (buf->snapshot) 490 local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); 491 else 492 local_add(to_read, &buf->data_size); 493 494 etb_enable_hw(drvdata); 495 CS_LOCK(drvdata->base); 496 } 497 498 static const struct coresight_ops_sink etb_sink_ops = { 499 .enable = etb_enable, 500 .disable = etb_disable, 501 .alloc_buffer = etb_alloc_buffer, 502 .free_buffer = etb_free_buffer, 503 .set_buffer = etb_set_buffer, 504 .reset_buffer = etb_reset_buffer, 505 .update_buffer = etb_update_buffer, 506 }; 507 508 static const struct coresight_ops etb_cs_ops = { 509 .sink_ops = &etb_sink_ops, 510 }; 511 512 static void etb_dump(struct etb_drvdata *drvdata) 513 { 514 unsigned long flags; 515 516 spin_lock_irqsave(&drvdata->spinlock, flags); 517 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { 518 etb_disable_hw(drvdata); 519 etb_dump_hw(drvdata); 520 etb_enable_hw(drvdata); 521 } 522 spin_unlock_irqrestore(&drvdata->spinlock, flags); 523 524 dev_info(drvdata->dev, "ETB dumped\n"); 525 } 526 527 static int etb_open(struct inode *inode, struct file *file) 528 { 529 struct etb_drvdata *drvdata = container_of(file->private_data, 530 struct etb_drvdata, miscdev); 531 532 if (local_cmpxchg(&drvdata->reading, 0, 1)) 533 return -EBUSY; 534 535 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); 536 return 0; 537 } 538 539 static ssize_t etb_read(struct file *file, char __user *data, 540 size_t len, loff_t *ppos) 541 { 542 u32 depth; 543 struct etb_drvdata *drvdata = container_of(file->private_data, 544 struct etb_drvdata, miscdev); 545 546 etb_dump(drvdata); 547 548 depth = drvdata->buffer_depth; 549 if (*ppos + len > depth * 4) 550 len = depth * 4 - *ppos; 551 552 if (copy_to_user(data, drvdata->buf + *ppos, len)) { 553 dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__); 554 return -EFAULT; 555 } 556 557 *ppos += len; 558 559 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n", 560 __func__, len, (int)(depth * 4 - *ppos)); 561 return len; 562 } 563 564 static int etb_release(struct inode *inode, struct file *file) 565 { 566 struct etb_drvdata *drvdata = container_of(file->private_data, 567 struct etb_drvdata, miscdev); 568 local_set(&drvdata->reading, 0); 569 570 dev_dbg(drvdata->dev, "%s: released\n", __func__); 571 return 0; 572 } 573 574 static const struct file_operations etb_fops = { 575 .owner = THIS_MODULE, 576 .open = etb_open, 577 .read = etb_read, 578 .release = etb_release, 579 .llseek = no_llseek, 580 }; 581 582 static ssize_t status_show(struct device *dev, 583 struct device_attribute *attr, char *buf) 584 { 585 unsigned long flags; 586 u32 etb_rdr, etb_sr, etb_rrp, etb_rwp; 587 u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr; 588 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); 589 590 pm_runtime_get_sync(drvdata->dev); 591 spin_lock_irqsave(&drvdata->spinlock, flags); 592 CS_UNLOCK(drvdata->base); 593 594 etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); 595 etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG); 596 etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); 597 etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); 598 etb_trg = readl_relaxed(drvdata->base + ETB_TRG); 599 etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG); 600 etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR); 601 etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR); 602 603 CS_LOCK(drvdata->base); 604 spin_unlock_irqrestore(&drvdata->spinlock, flags); 605 606 pm_runtime_put(drvdata->dev); 607 608 return sprintf(buf, 609 "Depth:\t\t0x%x\n" 610 "Status:\t\t0x%x\n" 611 "RAM read ptr:\t0x%x\n" 612 "RAM wrt ptr:\t0x%x\n" 613 "Trigger cnt:\t0x%x\n" 614 "Control:\t0x%x\n" 615 "Flush status:\t0x%x\n" 616 "Flush ctrl:\t0x%x\n", 617 etb_rdr, etb_sr, etb_rrp, etb_rwp, 618 etb_trg, etb_cr, etb_ffsr, etb_ffcr); 619 620 return -EINVAL; 621 } 622 static DEVICE_ATTR_RO(status); 623 624 static ssize_t trigger_cntr_show(struct device *dev, 625 struct device_attribute *attr, char *buf) 626 { 627 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); 628 unsigned long val = drvdata->trigger_cntr; 629 630 return sprintf(buf, "%#lx\n", val); 631 } 632 633 static ssize_t trigger_cntr_store(struct device *dev, 634 struct device_attribute *attr, 635 const char *buf, size_t size) 636 { 637 int ret; 638 unsigned long val; 639 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); 640 641 ret = kstrtoul(buf, 16, &val); 642 if (ret) 643 return ret; 644 645 drvdata->trigger_cntr = val; 646 return size; 647 } 648 static DEVICE_ATTR_RW(trigger_cntr); 649 650 static struct attribute *coresight_etb_attrs[] = { 651 &dev_attr_trigger_cntr.attr, 652 &dev_attr_status.attr, 653 NULL, 654 }; 655 ATTRIBUTE_GROUPS(coresight_etb); 656 657 static int etb_probe(struct amba_device *adev, const struct amba_id *id) 658 { 659 int ret; 660 void __iomem *base; 661 struct device *dev = &adev->dev; 662 struct coresight_platform_data *pdata = NULL; 663 struct etb_drvdata *drvdata; 664 struct resource *res = &adev->res; 665 struct coresight_desc *desc; 666 struct device_node *np = adev->dev.of_node; 667 668 if (np) { 669 pdata = of_get_coresight_platform_data(dev, np); 670 if (IS_ERR(pdata)) 671 return PTR_ERR(pdata); 672 adev->dev.platform_data = pdata; 673 } 674 675 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); 676 if (!drvdata) 677 return -ENOMEM; 678 679 drvdata->dev = &adev->dev; 680 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ 681 if (!IS_ERR(drvdata->atclk)) { 682 ret = clk_prepare_enable(drvdata->atclk); 683 if (ret) 684 return ret; 685 } 686 dev_set_drvdata(dev, drvdata); 687 688 /* validity for the resource is already checked by the AMBA core */ 689 base = devm_ioremap_resource(dev, res); 690 if (IS_ERR(base)) 691 return PTR_ERR(base); 692 693 drvdata->base = base; 694 695 spin_lock_init(&drvdata->spinlock); 696 697 drvdata->buffer_depth = etb_get_buffer_depth(drvdata); 698 pm_runtime_put(&adev->dev); 699 700 if (drvdata->buffer_depth & 0x80000000) 701 return -EINVAL; 702 703 drvdata->buf = devm_kzalloc(dev, 704 drvdata->buffer_depth * 4, GFP_KERNEL); 705 if (!drvdata->buf) { 706 dev_err(dev, "Failed to allocate %u bytes for buffer data\n", 707 drvdata->buffer_depth * 4); 708 return -ENOMEM; 709 } 710 711 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 712 if (!desc) 713 return -ENOMEM; 714 715 desc->type = CORESIGHT_DEV_TYPE_SINK; 716 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; 717 desc->ops = &etb_cs_ops; 718 desc->pdata = pdata; 719 desc->dev = dev; 720 desc->groups = coresight_etb_groups; 721 drvdata->csdev = coresight_register(desc); 722 if (IS_ERR(drvdata->csdev)) 723 return PTR_ERR(drvdata->csdev); 724 725 drvdata->miscdev.name = pdata->name; 726 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR; 727 drvdata->miscdev.fops = &etb_fops; 728 ret = misc_register(&drvdata->miscdev); 729 if (ret) 730 goto err_misc_register; 731 732 dev_info(dev, "ETB initialized\n"); 733 return 0; 734 735 err_misc_register: 736 coresight_unregister(drvdata->csdev); 737 return ret; 738 } 739 740 #ifdef CONFIG_PM 741 static int etb_runtime_suspend(struct device *dev) 742 { 743 struct etb_drvdata *drvdata = dev_get_drvdata(dev); 744 745 if (drvdata && !IS_ERR(drvdata->atclk)) 746 clk_disable_unprepare(drvdata->atclk); 747 748 return 0; 749 } 750 751 static int etb_runtime_resume(struct device *dev) 752 { 753 struct etb_drvdata *drvdata = dev_get_drvdata(dev); 754 755 if (drvdata && !IS_ERR(drvdata->atclk)) 756 clk_prepare_enable(drvdata->atclk); 757 758 return 0; 759 } 760 #endif 761 762 static const struct dev_pm_ops etb_dev_pm_ops = { 763 SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL) 764 }; 765 766 static struct amba_id etb_ids[] = { 767 { 768 .id = 0x0003b907, 769 .mask = 0x0003ffff, 770 }, 771 { 0, 0}, 772 }; 773 774 static struct amba_driver etb_driver = { 775 .drv = { 776 .name = "coresight-etb10", 777 .owner = THIS_MODULE, 778 .pm = &etb_dev_pm_ops, 779 .suppress_bind_attrs = true, 780 781 }, 782 .probe = etb_probe, 783 .id_table = etb_ids, 784 }; 785 builtin_amba_driver(etb_driver); 786