/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data;
	int i;

	bufp = drvdata->buf;
	while (1) {
		/* Drain the trace RAM one memory-width burst at a time */
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			/* The TMC returns all 1s once the buffer is empty */
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

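/*
 * A note on configurations, summarising the helpers above: the same TMC IP
 * block can be built as an ETB (the internal RAM terminates the trace path
 * and is used as a circular buffer) or as an ETF (the internal RAM acts as
 * a hardware FIFO, with trace flowing through to another sink).  The
 * tmc_etb_*_hw() helpers program TMC_MODE_CIRCULAR_BUFFER while the
 * tmc_etf_*_hw() helpers program TMC_MODE_HARDWARE_FIFO accordingly.
 */
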
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/* This shouldn't be happening */
	if (WARN_ON(mode != CS_MODE_SYSFS))
		return -EINVAL;

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory here, with the spinlock released */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	val = local_xchg(&drvdata->mode, mode);
	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (val == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && buf)
		kfree(buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");

	return ret;
}

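/*
 * Usage sketch (not kernel code): in sysFS mode the path above is typically
 * driven from user space as follows.  The device name "20010000.etf" is only
 * an example - actual names are platform specific:
 *
 *	echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
 *	echo 1 > /sys/bus/coresight/devices/<source>/enable_source
 *	[run the workload]
 *	echo 0 > /sys/bus/coresight/devices/<source>/enable_source
 *	dd if=/dev/20010000.etf of=trace.bin
 *
 * Reading the /dev node goes through tmc_read_prepare_etb() and
 * tmc_read_unprepare_etb() below, which is also when drvdata::buf is freed.
 */
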
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
{
	int ret = 0;
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/* This shouldn't be happening */
	if (WARN_ON(mode != CS_MODE_PERF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	val = local_xchg(&drvdata->mode, mode);
	/*
	 * In Perf mode there can be only one writer per sink. There is also
	 * no need to continue if the ETB/ETF is already operated from sysFS.
	 */
	if (val != CS_MODE_DISABLED) {
		ret = -EBUSY;
		goto out;
	}

	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etf_sink_sysfs(csdev, mode);
	case CS_MODE_PERF:
		return tmc_enable_etf_sink_perf(csdev, mode);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
	/* Disable the TMC only if it needs to */
	if (val != CS_MODE_DISABLED)
		tmc_etb_disable_hw(drvdata);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_SYSFS);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_DISABLED);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = sink_config;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

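/*
 * Worked example for tmc_set_etf_buffer() above, with made-up numbers: with
 * nr_pages = 4 and PAGE_SIZE = 4096 the AUX area spans 16384 bytes, so a
 * handle->head of 20000 wraps to 20000 & (16384 - 1) = 3616, giving
 * buf->cur = 0 and buf->offset = 3616.  The masking is only valid because
 * the perf core guarantees the AUX area is a power of 2 pages in size.
 */
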
static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config, bool *lost)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head. The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way. Nobody else can use
		 * this cs_buffers instance until we are done. As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		*lost = !!local_xchg(&buf->lost, 0);
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}

static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* Copy the trace data out, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head. In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}

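/*
 * Worked example for the RRP adjustment above, with made-up numbers: if
 * drvdata->size = 8192, handle->size = 4100 and the memory width is 64 bits,
 * then to_read = 4100 & GENMASK(31, 4) = 4096.  With write_ptr = 1000 the
 * new read_ptr = (1000 + 8192) - 4096 = 5096, which is still inside the
 * trace RAM, so no wrap correction is needed and exactly the newest 4096
 * bytes are copied out.
 */
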
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	long val;
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	val = local_read(&drvdata->mode);
	/* Don't interfere if operated from Perf */
	if (val == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (val == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

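/*
 * A note on usage, paraphrasing the code above: tmc_read_prepare_etb() and
 * tmc_read_unprepare_etb() bracket a user space read of the trace buffer -
 * they are invoked when the /dev node backed by this driver is opened and
 * released.  While drvdata->reading is true every other operation on the
 * sink bails out with -EBUSY (or is simply ignored), so a reader cannot
 * race with a new trace session.
 */
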
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}
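
/*
 * Usage sketch for the Perf path (not kernel code): when a perf session
 * targets this sink, the core calls alloc_buffer()/set_buffer() before the
 * run, update_buffer() to drain the trace RAM into the AUX area, then
 * reset_buffer()/free_buffer() when the run winds down.  From user space
 * this is typically driven by something like:
 *
 *	perf record -e cs_etm// --per-thread <workload>
 *
 * with a perf tool built with CoreSight support; the exact event syntax,
 * including explicit sink selection, depends on the perf tools version.
 */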