// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;
	int i;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				goto done;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
			drvdata->len += 4;
		}
	}
done:
	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
	return;
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating the @bufpp on where to
 * find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to available size @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

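/*
 * tmc_enable_etf_sink_sysfs: configure the ETB/ETF as a sink for a sysFS
 * session. The trace buffer is allocated lazily, outside of the spinlock,
 * and is reused (or zeroed) on subsequent runs until it is read and
 * released via tmc_read_unprepare_etb().
 */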
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

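/*
 * tmc_enable_etf_sink_perf: configure the ETB/ETF as a sink for a perf
 * session. Unlike the sysFS path no buffer is allocated here - trace data
 * is copied straight to the perf ring buffer by tmc_update_etf_buffer().
 */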
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink. There
	 * is also no need to continue if the ETB/ETF is already operated
	 * from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
	int ret;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etb_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	drvdata->mode = CS_MODE_SYSFS;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

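/*
 * tmc_set_etf_buffer: derive the starting page and offset in the perf ring
 * buffer from the handle's head pointer before trace collection begins.
 */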
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = sink_config;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head. The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way. Nobody else can use
		 * this cs_buffers instance until we are done. As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}

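/*
 * tmc_update_etf_buffer: stop the TMC and drain its internal RAM buffer
 * into the perf ring buffer. If the internal buffer wrapped around, or if
 * it holds more data than the ring buffer can take, the oldest trace data
 * is dropped and the AUX record is flagged as truncated.
 */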
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every 32-bit word to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head. In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

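/*
 * tmc_read_unprepare_etb: called once userspace is done reading the trace
 * buffer through the character device. Re-arm the ETB/ETF if a sysFS
 * session is still active, otherwise hand the buffer back for freeing.
 */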
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}