// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC HHA uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* HHA register definition */
#define HHA_INT_MASK		0x0804
#define HHA_INT_STATUS		0x0808
#define HHA_INT_CLEAR		0x080C
#define HHA_VERSION		0x1cf0
#define HHA_PERF_CTRL		0x1E00
#define HHA_EVENT_CTRL		0x1E04
#define HHA_EVENT_TYPE0		0x1E80
/*
 * Each counter is 48-bits and [48:63] are reserved
 * which are Read-As-Zero and Writes-Ignored.
 */
#define HHA_CNT0_LOWER		0x1F00

/* HHA has 16-counters */
#define HHA_NR_COUNTERS		0x10

/* Global counting enable bit in HHA_PERF_CTRL */
#define HHA_PERF_CTRL_EN	0x1
/* 8-bit event code mask/"no event" value used in HHA_EVENT_TYPEx fields */
#define HHA_EVTYPE_NONE		0xff

/*
 * Select the counter register offset using the counter index
 * each counter is 48-bits.
45 */ 46 static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx) 47 { 48 return (HHA_CNT0_LOWER + (cntr_idx * 8)); 49 } 50 51 static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu, 52 struct hw_perf_event *hwc) 53 { 54 u32 idx = hwc->idx; 55 56 if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { 57 dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); 58 return 0; 59 } 60 61 /* Read 64 bits and like L3C, top 16 bits are RAZ */ 62 return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); 63 } 64 65 static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu, 66 struct hw_perf_event *hwc, u64 val) 67 { 68 u32 idx = hwc->idx; 69 70 if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { 71 dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); 72 return; 73 } 74 75 /* Write 64 bits and like L3C, top 16 bits are WI */ 76 writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); 77 } 78 79 static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, 80 u32 type) 81 { 82 u32 reg, reg_idx, shift, val; 83 84 /* 85 * Select the appropriate event select register(HHA_EVENT_TYPEx). 86 * There are 4 event select registers for the 16 hardware counters. 87 * Event code is 8-bits and for the first 4 hardware counters, 88 * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters, 89 * HHA_EVENT_TYPE1 is chosen and so on. 90 */ 91 reg = HHA_EVENT_TYPE0 + 4 * (idx / 4); 92 reg_idx = idx % 4; 93 shift = 8 * reg_idx; 94 95 /* Write event code to HHA_EVENT_TYPEx register */ 96 val = readl(hha_pmu->base + reg); 97 val &= ~(HHA_EVTYPE_NONE << shift); 98 val |= (type << shift); 99 writel(val, hha_pmu->base + reg); 100 } 101 102 static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu) 103 { 104 u32 val; 105 106 /* 107 * Set perf_enable bit in HHA_PERF_CTRL to start event 108 * counting for all enabled counters. 
109 */ 110 val = readl(hha_pmu->base + HHA_PERF_CTRL); 111 val |= HHA_PERF_CTRL_EN; 112 writel(val, hha_pmu->base + HHA_PERF_CTRL); 113 } 114 115 static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu) 116 { 117 u32 val; 118 119 /* 120 * Clear perf_enable bit in HHA_PERF_CTRL to stop event 121 * counting for all enabled counters. 122 */ 123 val = readl(hha_pmu->base + HHA_PERF_CTRL); 124 val &= ~(HHA_PERF_CTRL_EN); 125 writel(val, hha_pmu->base + HHA_PERF_CTRL); 126 } 127 128 static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu, 129 struct hw_perf_event *hwc) 130 { 131 u32 val; 132 133 /* Enable counter index in HHA_EVENT_CTRL register */ 134 val = readl(hha_pmu->base + HHA_EVENT_CTRL); 135 val |= (1 << hwc->idx); 136 writel(val, hha_pmu->base + HHA_EVENT_CTRL); 137 } 138 139 static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu, 140 struct hw_perf_event *hwc) 141 { 142 u32 val; 143 144 /* Clear counter index in HHA_EVENT_CTRL register */ 145 val = readl(hha_pmu->base + HHA_EVENT_CTRL); 146 val &= ~(1 << hwc->idx); 147 writel(val, hha_pmu->base + HHA_EVENT_CTRL); 148 } 149 150 static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu, 151 struct hw_perf_event *hwc) 152 { 153 u32 val; 154 155 /* Write 0 to enable interrupt */ 156 val = readl(hha_pmu->base + HHA_INT_MASK); 157 val &= ~(1 << hwc->idx); 158 writel(val, hha_pmu->base + HHA_INT_MASK); 159 } 160 161 static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu, 162 struct hw_perf_event *hwc) 163 { 164 u32 val; 165 166 /* Write 1 to mask interrupt */ 167 val = readl(hha_pmu->base + HHA_INT_MASK); 168 val |= (1 << hwc->idx); 169 writel(val, hha_pmu->base + HHA_INT_MASK); 170 } 171 172 static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id) 173 { 174 struct hisi_pmu *hha_pmu = dev_id; 175 struct perf_event *event; 176 unsigned long overflown; 177 int idx; 178 179 /* Read HHA_INT_STATUS register */ 180 overflown = readl(hha_pmu->base + HHA_INT_STATUS); 
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find the counter index which overflowed if the bit was set
	 * and handle it
	 */
	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
		/* Write 1 to clear the IRQ status flag */
		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);

		/* Get the corresponding event struct */
		event = hha_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		/* Fold the overflowed count into the event, then re-arm */
		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}

/*
 * Request the counter-overflow interrupt for this HHA instance.
 * The IRQ affinity is managed by the driver (see the
 * irq_set_affinity_hint() teardown calls), hence IRQF_NOBALANCING.
 */
static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
				 struct platform_device *pdev)
{
	int irq, ret;

	/* Read and init IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), hha_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Fail to request IRQ:%d ret:%d\n", irq, ret);
		return ret;
	}

	hha_pmu->irq = irq;

	return 0;
}

static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
	{ "HISI0243", },
	{},
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);

/*
 * Gather the static identification data for this HHA PMU: the ACPI _UID
 * (used as index_id), the SCCL id device property, and the mapped
 * register base.
 */
static int hisi_hha_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	unsigned long long id;
	acpi_status status;

	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
				       "_UID", NULL, &id);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	hha_pmu->index_id = id;

	/*
	 * Use SCCL_ID and UID to identify the HHA PMU, while
	 * SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &hha_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
		return -EINVAL;
	}
	/* HHA PMUs only share the same SCCL */
	hha_pmu->ccl_id = -1;

	hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hha_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
		return PTR_ERR(hha_pmu->base);
	}

	/* Hardware version register, exported via the identifier sysfs attr */
	hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION);

	return 0;
}

/* The event code occupies bits [7:0] of the perf event config */
static struct attribute *hisi_hha_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_format_group = {
	.name = "format",
	.attrs = hisi_hha_pmu_format_attr,
};

/* Named events exported under sysfs "events"; values are event codes */
static struct attribute *hisi_hha_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_ops_num,		0x00),
	HISI_PMU_EVENT_ATTR(rx_outer,		0x01),
	HISI_PMU_EVENT_ATTR(rx_sccl,		0x02),
	HISI_PMU_EVENT_ATTR(rx_ccix,		0x03),
	HISI_PMU_EVENT_ATTR(rx_wbi,		0x04),
	HISI_PMU_EVENT_ATTR(rx_wbip,		0x05),
	HISI_PMU_EVENT_ATTR(rx_wtistash,	0x11),
	HISI_PMU_EVENT_ATTR(rd_ddr_64b,		0x1c),
	HISI_PMU_EVENT_ATTR(wr_ddr_64b,		0x1d),
	HISI_PMU_EVENT_ATTR(rd_ddr_128b,	0x1e),
	HISI_PMU_EVENT_ATTR(wr_ddr_128b,	0x1f),
	HISI_PMU_EVENT_ATTR(spill_num,		0x20),
	HISI_PMU_EVENT_ATTR(spill_success,	0x21),
	HISI_PMU_EVENT_ATTR(bi_num,		0x23),
	HISI_PMU_EVENT_ATTR(mediated_num,	0x32),
	HISI_PMU_EVENT_ATTR(tx_snp_num,		0x33),
	HISI_PMU_EVENT_ATTR(tx_snp_outer,	0x34),
	HISI_PMU_EVENT_ATTR(tx_snp_ccix,	0x35),
	HISI_PMU_EVENT_ATTR(rx_snprspdata,	0x38),
	HISI_PMU_EVENT_ATTR(rx_snprsp_outer,	0x3c),
	HISI_PMU_EVENT_ATTR(sdir-lookup,	0x40),
	HISI_PMU_EVENT_ATTR(edir-lookup,	0x41),
	HISI_PMU_EVENT_ATTR(sdir-hit,		0x42),
	HISI_PMU_EVENT_ATTR(edir-hit,		0x43),
	HISI_PMU_EVENT_ATTR(sdir-home-migrate,	0x4c),
	HISI_PMU_EVENT_ATTR(edir-home-migrate,	0x4d),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_events_group = {
	.name = "events",
	.attrs = hisi_hha_pmu_events_attr,
};

/* Exposes the CPU currently collecting events for this uncore PMU */
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
	.attrs = hisi_hha_pmu_cpumask_attrs,
};

/* Exposes the HHA_VERSION value read in hisi_hha_pmu_init_data() */
static struct device_attribute hisi_hha_pmu_identifier_attr =
	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
	&hisi_hha_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group hisi_hha_pmu_identifier_group = {
	.attrs = hisi_hha_pmu_identifier_attrs,
};

static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
	&hisi_hha_pmu_format_group,
	&hisi_hha_pmu_events_group,
	&hisi_hha_pmu_cpumask_attr_group,
	&hisi_hha_pmu_identifier_group,
	NULL,
};

/* HHA-specific hooks plugged into the common hisi_uncore_pmu core */
static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
	.write_evtype		= hisi_hha_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_hha_pmu_start_counters,
	.stop_counters		= hisi_hha_pmu_stop_counters,
	.enable_counter		= hisi_hha_pmu_enable_counter,
	.disable_counter	= hisi_hha_pmu_disable_counter,
	.enable_counter_int	= hisi_hha_pmu_enable_counter_int,
	.disable_counter_int	= hisi_hha_pmu_disable_counter_int,
	.write_counter		= hisi_hha_pmu_write_counter,
	.read_counter		= hisi_hha_pmu_read_counter,
};

/* Initialize the hisi_pmu instance: data, IRQ, then static fields */
static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	int ret;

	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
	if (ret)
		return ret;

	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
if (ret) 370 return ret; 371 372 hha_pmu->num_counters = HHA_NR_COUNTERS; 373 hha_pmu->counter_bits = 48; 374 hha_pmu->ops = &hisi_uncore_hha_ops; 375 hha_pmu->dev = &pdev->dev; 376 hha_pmu->on_cpu = -1; 377 hha_pmu->check_event = 0x65; 378 379 return 0; 380 } 381 382 static int hisi_hha_pmu_probe(struct platform_device *pdev) 383 { 384 struct hisi_pmu *hha_pmu; 385 char *name; 386 int ret; 387 388 hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL); 389 if (!hha_pmu) 390 return -ENOMEM; 391 392 platform_set_drvdata(pdev, hha_pmu); 393 394 ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu); 395 if (ret) 396 return ret; 397 398 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, 399 &hha_pmu->node); 400 if (ret) { 401 dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); 402 return ret; 403 } 404 405 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", 406 hha_pmu->sccl_id, hha_pmu->index_id); 407 hha_pmu->pmu = (struct pmu) { 408 .name = name, 409 .module = THIS_MODULE, 410 .task_ctx_nr = perf_invalid_context, 411 .event_init = hisi_uncore_pmu_event_init, 412 .pmu_enable = hisi_uncore_pmu_enable, 413 .pmu_disable = hisi_uncore_pmu_disable, 414 .add = hisi_uncore_pmu_add, 415 .del = hisi_uncore_pmu_del, 416 .start = hisi_uncore_pmu_start, 417 .stop = hisi_uncore_pmu_stop, 418 .read = hisi_uncore_pmu_read, 419 .attr_groups = hisi_hha_pmu_attr_groups, 420 .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 421 }; 422 423 ret = perf_pmu_register(&hha_pmu->pmu, name, -1); 424 if (ret) { 425 dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); 426 cpuhp_state_remove_instance_nocalls( 427 CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); 428 irq_set_affinity_hint(hha_pmu->irq, NULL); 429 } 430 431 return ret; 432 } 433 434 static int hisi_hha_pmu_remove(struct platform_device *pdev) 435 { 436 struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); 437 438 perf_pmu_unregister(&hha_pmu->pmu); 439 
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
					    &hha_pmu->node);
	irq_set_affinity_hint(hha_pmu->irq, NULL);

	return 0;
}

static struct platform_driver hisi_hha_pmu_driver = {
	.driver = {
		.name = "hisi_hha_pmu",
		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
		/* Manual unbind would leave perf state dangling; forbid it */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_hha_pmu_probe,
	.remove = hisi_hha_pmu_remove,
};

/*
 * Module init: set up the dynamic CPU hotplug state shared by all HHA
 * instances, then register the platform driver.  The hotplug state is
 * torn down again if driver registration fails.
 */
static int __init hisi_hha_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				      "AP_PERF_ARM_HISI_HHA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_hha_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);

	return ret;
}
module_init(hisi_hha_pmu_module_init);

/* Module exit: unregister the driver, then remove the hotplug state */
static void __exit hisi_hha_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_hha_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");