/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

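/*
 * Drain the TMC RAM via the RAM Read Data (RRD) register, one memory
 * width's worth of 32-bit words at a time. A read from RRD returns
 * 0xFFFFFFFF once the trace memory has been emptied, which is what the
 * loop below uses as its stop condition.
 */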
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data;
	int i;

	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
			drvdata->len += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

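/*
 * Note on locking: the trace buffer is allocated with the drvdata
 * spinlock dropped since kzalloc(GFP_KERNEL) may sleep. The lock is then
 * taken again and the allocation handed over to drvdata::buf only if
 * another thread didn't install a buffer in the interim; an unused
 * allocation is freed on the way out.
 */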
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink. There is also
	 * no need to continue if the ETB/ETF is already operated from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

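/*
 * Example of enabling this sink from user space in sysFS mode (the
 * device name is platform specific, "20010000.etf" being used here for
 * illustration only):
 *
 *   echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
 *
 * In Perf mode the sink is enabled by the coresight framework when a
 * cs_etm trace session starts.
 */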
static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
	int ret;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be disabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etb_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	drvdata->mode = CS_MODE_SYSFS;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

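/*
 * Worked example for the head arithmetic below (hypothetical values):
 * with nr_pages = 4 and a 4K PAGE_SIZE the AUX buffer spans 16384 bytes,
 * so a handle->head of 20500 wraps to 20500 & 16383 = 4116, which lands
 * in page 1 at offset 20. Perf AUX buffers are always a power of two
 * pages long, which is what makes this masking (and the one in
 * tmc_update_etf_buffer()) valid.
 */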
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = sink_config;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head. The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way. Nobody else can use
		 * this cs_buffers instance until we are done. As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}

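/*
 * Worked example for the read pointer adjustment performed below when
 * the TMC RAM holds more trace data than the perf ring buffer can
 * accommodate (hypothetical values): with a 4K deep TMC RAM
 * (drvdata->size = 0x1000), write_ptr = 0x200 and to_read trimmed down
 * to 0x800, the new read pointer becomes (0x200 + 0x1000) - 0x800 =
 * 0xa00, i.e. 0x800 bytes behind the write pointer modulo the buffer
 * size.
 */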
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* read the trace data from the RRD register, one word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head. In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.set_buffer = tmc_set_etf_buffer,
	.reset_buffer = tmc_reset_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf can't be
		 * NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}