1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright(C) 2016 Linaro Limited. All rights reserved. 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 5 */ 6 7 #include <linux/circ_buf.h> 8 #include <linux/coresight.h> 9 #include <linux/perf_event.h> 10 #include <linux/slab.h> 11 #include "coresight-priv.h" 12 #include "coresight-tmc.h" 13 14 static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) 15 { 16 CS_UNLOCK(drvdata->base); 17 18 /* Wait for TMCSReady bit to be set */ 19 tmc_wait_for_tmcready(drvdata); 20 21 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); 22 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | 23 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | 24 TMC_FFCR_TRIGON_TRIGIN, 25 drvdata->base + TMC_FFCR); 26 27 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); 28 tmc_enable_hw(drvdata); 29 30 CS_LOCK(drvdata->base); 31 } 32 33 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) 34 { 35 bool lost = false; 36 char *bufp; 37 const u32 *barrier; 38 u32 read_data, status; 39 int i; 40 41 /* 42 * Get a hold of the status register and see if a wrap around 43 * has occurred. 44 */ 45 status = readl_relaxed(drvdata->base + TMC_STS); 46 if (status & TMC_STS_FULL) 47 lost = true; 48 49 bufp = drvdata->buf; 50 drvdata->len = 0; 51 barrier = barrier_pkt; 52 while (1) { 53 for (i = 0; i < drvdata->memwidth; i++) { 54 read_data = readl_relaxed(drvdata->base + TMC_RRD); 55 if (read_data == 0xFFFFFFFF) 56 return; 57 58 if (lost && *barrier) { 59 read_data = *barrier; 60 barrier++; 61 } 62 63 memcpy(bufp, &read_data, 4); 64 bufp += 4; 65 drvdata->len += 4; 66 } 67 } 68 } 69 70 static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) 71 { 72 CS_UNLOCK(drvdata->base); 73 74 tmc_flush_and_stop(drvdata); 75 /* 76 * When operating in sysFS mode the content of the buffer needs to be 77 * read before the TMC is disabled. 78 */ 79 if (drvdata->mode == CS_MODE_SYSFS) 80 tmc_etb_dump_hw(drvdata); 81 tmc_disable_hw(drvdata); 82 83 CS_LOCK(drvdata->base); 84 } 85 86 static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) 87 { 88 CS_UNLOCK(drvdata->base); 89 90 /* Wait for TMCSReady bit to be set */ 91 tmc_wait_for_tmcready(drvdata); 92 93 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); 94 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, 95 drvdata->base + TMC_FFCR); 96 writel_relaxed(0x0, drvdata->base + TMC_BUFWM); 97 tmc_enable_hw(drvdata); 98 99 CS_LOCK(drvdata->base); 100 } 101 102 static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) 103 { 104 CS_UNLOCK(drvdata->base); 105 106 tmc_flush_and_stop(drvdata); 107 tmc_disable_hw(drvdata); 108 109 CS_LOCK(drvdata->base); 110 } 111 112 static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) 113 { 114 int ret = 0; 115 bool used = false; 116 char *buf = NULL; 117 unsigned long flags; 118 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 119 120 /* 121 * If we don't have a buffer release the lock and allocate memory. 122 * Otherwise keep the lock and move along. 123 */ 124 spin_lock_irqsave(&drvdata->spinlock, flags); 125 if (!drvdata->buf) { 126 spin_unlock_irqrestore(&drvdata->spinlock, flags); 127 128 /* Allocating the memory here while outside of the spinlock */ 129 buf = kzalloc(drvdata->size, GFP_KERNEL); 130 if (!buf) 131 return -ENOMEM; 132 133 /* Let's try again */ 134 spin_lock_irqsave(&drvdata->spinlock, flags); 135 } 136 137 if (drvdata->reading) { 138 ret = -EBUSY; 139 goto out; 140 } 141 142 /* 143 * In sysFS mode we can have multiple writers per sink. Since this 144 * sink is already enabled no memory is needed and the HW need not be 145 * touched. 146 */ 147 if (drvdata->mode == CS_MODE_SYSFS) 148 goto out; 149 150 /* 151 * If drvdata::buf isn't NULL, memory was allocated for a previous 152 * trace run but wasn't read. If so simply zero-out the memory. 153 * Otherwise use the memory allocated above. 154 * 155 * The memory is freed when users read the buffer using the 156 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for 157 * details. 158 */ 159 if (drvdata->buf) { 160 memset(drvdata->buf, 0, drvdata->size); 161 } else { 162 used = true; 163 drvdata->buf = buf; 164 } 165 166 drvdata->mode = CS_MODE_SYSFS; 167 tmc_etb_enable_hw(drvdata); 168 out: 169 spin_unlock_irqrestore(&drvdata->spinlock, flags); 170 171 /* Free memory outside the spinlock if need be */ 172 if (!used) 173 kfree(buf); 174 175 return ret; 176 } 177 178 static int tmc_enable_etf_sink_perf(struct coresight_device *csdev) 179 { 180 int ret = 0; 181 unsigned long flags; 182 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 183 184 spin_lock_irqsave(&drvdata->spinlock, flags); 185 if (drvdata->reading) { 186 ret = -EINVAL; 187 goto out; 188 } 189 190 /* 191 * In Perf mode there can be only one writer per sink. There 192 * is also no need to continue if the ETB/ETR is already operated 193 * from sysFS. 194 */ 195 if (drvdata->mode != CS_MODE_DISABLED) { 196 ret = -EINVAL; 197 goto out; 198 } 199 200 drvdata->mode = CS_MODE_PERF; 201 tmc_etb_enable_hw(drvdata); 202 out: 203 spin_unlock_irqrestore(&drvdata->spinlock, flags); 204 205 return ret; 206 } 207 208 static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) 209 { 210 int ret; 211 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 212 213 switch (mode) { 214 case CS_MODE_SYSFS: 215 ret = tmc_enable_etf_sink_sysfs(csdev); 216 break; 217 case CS_MODE_PERF: 218 ret = tmc_enable_etf_sink_perf(csdev); 219 break; 220 /* We shouldn't be here */ 221 default: 222 ret = -EINVAL; 223 break; 224 } 225 226 if (ret) 227 return ret; 228 229 dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); 230 return 0; 231 } 232 233 static void tmc_disable_etf_sink(struct coresight_device *csdev) 234 { 235 unsigned long flags; 236 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 237 238 spin_lock_irqsave(&drvdata->spinlock, flags); 239 if (drvdata->reading) { 240 spin_unlock_irqrestore(&drvdata->spinlock, flags); 241 return; 242 } 243 244 /* Disable the TMC only if it needs to */ 245 if (drvdata->mode != CS_MODE_DISABLED) { 246 tmc_etb_disable_hw(drvdata); 247 drvdata->mode = CS_MODE_DISABLED; 248 } 249 250 spin_unlock_irqrestore(&drvdata->spinlock, flags); 251 252 dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); 253 } 254 255 static int tmc_enable_etf_link(struct coresight_device *csdev, 256 int inport, int outport) 257 { 258 unsigned long flags; 259 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 260 261 spin_lock_irqsave(&drvdata->spinlock, flags); 262 if (drvdata->reading) { 263 spin_unlock_irqrestore(&drvdata->spinlock, flags); 264 return -EBUSY; 265 } 266 267 tmc_etf_enable_hw(drvdata); 268 drvdata->mode = CS_MODE_SYSFS; 269 spin_unlock_irqrestore(&drvdata->spinlock, flags); 270 271 dev_info(drvdata->dev, "TMC-ETF enabled\n"); 272 return 0; 273 } 274 275 static void tmc_disable_etf_link(struct coresight_device *csdev, 276 int inport, int outport) 277 { 278 unsigned long flags; 279 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 280 281 spin_lock_irqsave(&drvdata->spinlock, flags); 282 if (drvdata->reading) { 283 spin_unlock_irqrestore(&drvdata->spinlock, flags); 284 return; 285 } 286 287 tmc_etf_disable_hw(drvdata); 288 drvdata->mode = CS_MODE_DISABLED; 289 spin_unlock_irqrestore(&drvdata->spinlock, flags); 290 291 dev_info(drvdata->dev, "TMC-ETF disabled\n"); 292 } 293 294 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, 295 void **pages, int nr_pages, bool overwrite) 296 { 297 int node; 298 struct cs_buffers *buf; 299 300 if (cpu == -1) 301 cpu = smp_processor_id(); 302 node = cpu_to_node(cpu); 303 304 /* Allocate memory structure for interaction with Perf */ 305 buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); 306 if (!buf) 307 return NULL; 308 309 buf->snapshot = overwrite; 310 buf->nr_pages = nr_pages; 311 buf->data_pages = pages; 312 313 return buf; 314 } 315 316 static void tmc_free_etf_buffer(void *config) 317 { 318 struct cs_buffers *buf = config; 319 320 kfree(buf); 321 } 322 323 static int tmc_set_etf_buffer(struct coresight_device *csdev, 324 struct perf_output_handle *handle, 325 void *sink_config) 326 { 327 int ret = 0; 328 unsigned long head; 329 struct cs_buffers *buf = sink_config; 330 331 /* wrap head around to the amount of space we have */ 332 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); 333 334 /* find the page to write to */ 335 buf->cur = head / PAGE_SIZE; 336 337 /* and offset within that page */ 338 buf->offset = head % PAGE_SIZE; 339 340 local_set(&buf->data_size, 0); 341 342 return ret; 343 } 344 345 static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, 346 struct perf_output_handle *handle, 347 void *sink_config) 348 { 349 long size = 0; 350 struct cs_buffers *buf = sink_config; 351 352 if (buf) { 353 /* 354 * In snapshot mode ->data_size holds the new address of the 355 * ring buffer's head. The size itself is the whole address 356 * range since we want the latest information. 357 */ 358 if (buf->snapshot) 359 handle->head = local_xchg(&buf->data_size, 360 buf->nr_pages << PAGE_SHIFT); 361 /* 362 * Tell the tracer PMU how much we got in this run and if 363 * something went wrong along the way. Nobody else can use 364 * this cs_buffers instance until we are done. As such 365 * resetting parameters here and squaring off with the ring 366 * buffer API in the tracer PMU is fine. 367 */ 368 size = local_xchg(&buf->data_size, 0); 369 } 370 371 return size; 372 } 373 374 static void tmc_update_etf_buffer(struct coresight_device *csdev, 375 struct perf_output_handle *handle, 376 void *sink_config) 377 { 378 bool lost = false; 379 int i, cur; 380 const u32 *barrier; 381 u32 *buf_ptr; 382 u64 read_ptr, write_ptr; 383 u32 status, to_read; 384 unsigned long offset; 385 struct cs_buffers *buf = sink_config; 386 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 387 388 if (!buf) 389 return; 390 391 /* This shouldn't happen */ 392 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) 393 return; 394 395 CS_UNLOCK(drvdata->base); 396 397 tmc_flush_and_stop(drvdata); 398 399 read_ptr = tmc_read_rrp(drvdata); 400 write_ptr = tmc_read_rwp(drvdata); 401 402 /* 403 * Get a hold of the status register and see if a wrap around 404 * has occurred. If so adjust things accordingly. 405 */ 406 status = readl_relaxed(drvdata->base + TMC_STS); 407 if (status & TMC_STS_FULL) { 408 lost = true; 409 to_read = drvdata->size; 410 } else { 411 to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); 412 } 413 414 /* 415 * The TMC RAM buffer may be bigger than the space available in the 416 * perf ring buffer (handle->size). If so advance the RRP so that we 417 * get the latest trace data. 418 */ 419 if (to_read > handle->size) { 420 u32 mask = 0; 421 422 /* 423 * The value written to RRP must be byte-address aligned to 424 * the width of the trace memory databus _and_ to a frame 425 * boundary (16 byte), whichever is the biggest. For example, 426 * for 32-bit, 64-bit and 128-bit wide trace memory, the four 427 * LSBs must be 0s. For 256-bit wide trace memory, the five 428 * LSBs must be 0s. 429 */ 430 switch (drvdata->memwidth) { 431 case TMC_MEM_INTF_WIDTH_32BITS: 432 case TMC_MEM_INTF_WIDTH_64BITS: 433 case TMC_MEM_INTF_WIDTH_128BITS: 434 mask = GENMASK(31, 5); 435 break; 436 case TMC_MEM_INTF_WIDTH_256BITS: 437 mask = GENMASK(31, 6); 438 break; 439 } 440 441 /* 442 * Make sure the new size is aligned in accordance with the 443 * requirement explained above. 444 */ 445 to_read = handle->size & mask; 446 /* Move the RAM read pointer up */ 447 read_ptr = (write_ptr + drvdata->size) - to_read; 448 /* Make sure we are still within our limits */ 449 if (read_ptr > (drvdata->size - 1)) 450 read_ptr -= drvdata->size; 451 /* Tell the HW */ 452 tmc_write_rrp(drvdata, read_ptr); 453 lost = true; 454 } 455 456 if (lost) 457 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); 458 459 cur = buf->cur; 460 offset = buf->offset; 461 barrier = barrier_pkt; 462 463 /* for every byte to read */ 464 for (i = 0; i < to_read; i += 4) { 465 buf_ptr = buf->data_pages[cur] + offset; 466 *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); 467 468 if (lost && *barrier) { 469 *buf_ptr = *barrier; 470 barrier++; 471 } 472 473 offset += 4; 474 if (offset >= PAGE_SIZE) { 475 offset = 0; 476 cur++; 477 /* wrap around at the end of the buffer */ 478 cur &= buf->nr_pages - 1; 479 } 480 } 481 482 /* 483 * In snapshot mode all we have to do is communicate to 484 * perf_aux_output_end() the address of the current head. In full 485 * trace mode the same function expects a size to move rb->aux_head 486 * forward. 487 */ 488 if (buf->snapshot) 489 local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); 490 else 491 local_add(to_read, &buf->data_size); 492 493 CS_LOCK(drvdata->base); 494 } 495 496 static const struct coresight_ops_sink tmc_etf_sink_ops = { 497 .enable = tmc_enable_etf_sink, 498 .disable = tmc_disable_etf_sink, 499 .alloc_buffer = tmc_alloc_etf_buffer, 500 .free_buffer = tmc_free_etf_buffer, 501 .set_buffer = tmc_set_etf_buffer, 502 .reset_buffer = tmc_reset_etf_buffer, 503 .update_buffer = tmc_update_etf_buffer, 504 }; 505 506 static const struct coresight_ops_link tmc_etf_link_ops = { 507 .enable = tmc_enable_etf_link, 508 .disable = tmc_disable_etf_link, 509 }; 510 511 const struct coresight_ops tmc_etb_cs_ops = { 512 .sink_ops = &tmc_etf_sink_ops, 513 }; 514 515 const struct coresight_ops tmc_etf_cs_ops = { 516 .sink_ops = &tmc_etf_sink_ops, 517 .link_ops = &tmc_etf_link_ops, 518 }; 519 520 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) 521 { 522 enum tmc_mode mode; 523 int ret = 0; 524 unsigned long flags; 525 526 /* config types are set a boot time and never change */ 527 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && 528 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) 529 return -EINVAL; 530 531 spin_lock_irqsave(&drvdata->spinlock, flags); 532 533 if (drvdata->reading) { 534 ret = -EBUSY; 535 goto out; 536 } 537 538 /* There is no point in reading a TMC in HW FIFO mode */ 539 mode = readl_relaxed(drvdata->base + TMC_MODE); 540 if (mode != TMC_MODE_CIRCULAR_BUFFER) { 541 ret = -EINVAL; 542 goto out; 543 } 544 545 /* Don't interfere if operated from Perf */ 546 if (drvdata->mode == CS_MODE_PERF) { 547 ret = -EINVAL; 548 goto out; 549 } 550 551 /* If drvdata::buf is NULL the trace data has been read already */ 552 if (drvdata->buf == NULL) { 553 ret = -EINVAL; 554 goto out; 555 } 556 557 /* Disable the TMC if need be */ 558 if (drvdata->mode == CS_MODE_SYSFS) 559 tmc_etb_disable_hw(drvdata); 560 561 drvdata->reading = true; 562 out: 563 spin_unlock_irqrestore(&drvdata->spinlock, flags); 564 565 return ret; 566 } 567 568 int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) 569 { 570 char *buf = NULL; 571 enum tmc_mode mode; 572 unsigned long flags; 573 574 /* config types are set a boot time and never change */ 575 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && 576 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) 577 return -EINVAL; 578 579 spin_lock_irqsave(&drvdata->spinlock, flags); 580 581 /* There is no point in reading a TMC in HW FIFO mode */ 582 mode = readl_relaxed(drvdata->base + TMC_MODE); 583 if (mode != TMC_MODE_CIRCULAR_BUFFER) { 584 spin_unlock_irqrestore(&drvdata->spinlock, flags); 585 return -EINVAL; 586 } 587 588 /* Re-enable the TMC if need be */ 589 if (drvdata->mode == CS_MODE_SYSFS) { 590 /* 591 * The trace run will continue with the same allocated trace 592 * buffer. As such zero-out the buffer so that we don't end 593 * up with stale data. 594 * 595 * Since the tracer is still enabled drvdata::buf 596 * can't be NULL. 597 */ 598 memset(drvdata->buf, 0, drvdata->size); 599 tmc_etb_enable_hw(drvdata); 600 } else { 601 /* 602 * The ETB/ETF is not tracing and the buffer was just read. 603 * As such prepare to free the trace buffer. 604 */ 605 buf = drvdata->buf; 606 drvdata->buf = NULL; 607 } 608 609 drvdata->reading = false; 610 spin_unlock_irqrestore(&drvdata->spinlock, flags); 611 612 /* 613 * Free allocated memory outside of the spinlock. There is no need 614 * to assert the validity of 'buf' since calling kfree(NULL) is safe. 615 */ 616 kfree(buf); 617 618 return 0; 619 } 620