1 // SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB 2 /* 3 * Mellanox BlueField Performance Monitoring Counters driver 4 * 5 * This driver provides a sysfs interface for monitoring 6 * performance statistics in BlueField SoC. 7 * 8 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 9 */ 10 11 #include <linux/acpi.h> 12 #include <linux/arm-smccc.h> 13 #include <linux/bitfield.h> 14 #include <linux/errno.h> 15 #include <linux/hwmon.h> 16 #include <linux/platform_device.h> 17 #include <linux/string.h> 18 #include <uapi/linux/psci.h> 19 20 #define MLXBF_PMC_WRITE_REG_32 0x82000009 21 #define MLXBF_PMC_READ_REG_32 0x8200000A 22 #define MLXBF_PMC_WRITE_REG_64 0x8200000B 23 #define MLXBF_PMC_READ_REG_64 0x8200000C 24 #define MLXBF_PMC_SIP_SVC_UID 0x8200ff01 25 #define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03 26 #define MLXBF_PMC_SVC_REQ_MAJOR 0 27 #define MLXBF_PMC_SVC_MIN_MINOR 3 28 29 #define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4 30 31 #define MLXBF_PMC_EVENT_SET_BF1 0 32 #define MLXBF_PMC_EVENT_SET_BF2 1 33 #define MLXBF_PMC_EVENT_INFO_LEN 100 34 35 #define MLXBF_PMC_MAX_BLOCKS 30 36 #define MLXBF_PMC_MAX_ATTRS 30 37 #define MLXBF_PMC_INFO_SZ 4 38 #define MLXBF_PMC_REG_SIZE 8 39 #define MLXBF_PMC_L3C_REG_SIZE 4 40 41 #define MLXBF_PMC_TYPE_COUNTER 1 42 #define MLXBF_PMC_TYPE_REGISTER 0 43 44 #define MLXBF_PMC_PERFCTL 0 45 #define MLXBF_PMC_PERFEVT 1 46 #define MLXBF_PMC_PERFACC0 4 47 48 #define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0) 49 #define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1) 50 #define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2) 51 #define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5) 52 53 #define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16) 54 #define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20) 55 #define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24) 56 #define MLXBF_PMC_PERFCTL_AD0 BIT(27) 57 #define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28) 58 #define MLXBF_PMC_PERFCTL_EB0 BIT(30) 59 #define MLXBF_PMC_PERFCTL_EN0 BIT(31) 60 61 #define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24) 62 63 #define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0 64 #define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10 65 #define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14 66 #define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40 67 #define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60 68 69 #define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0) 70 #define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1) 71 #define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0) 72 #define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8) 73 #define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16) 74 #define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24) 75 76 #define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0) 77 78 #define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0) 79 #define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0) 80 81 /** 82 * struct mlxbf_pmc_attribute - Structure to hold attribute and block info 83 * for each sysfs entry 84 * @dev_attr: Device attribute struct 85 * @index: index to identify counter number within a block 86 * @nr: block number to which the sysfs belongs 87 */ 88 struct mlxbf_pmc_attribute { 89 struct device_attribute dev_attr; 90 int index; 91 int nr; 92 }; 93 94 /** 95 * struct mlxbf_pmc_block_info - Structure to hold info for each HW block 96 * 97 * @mmio_base: The VA at which the PMC block is mapped 98 * @blk_size: Size of each mapped region 99 * @counters: Number of counters in the block 100 * @type: Type of counters in the block 101 * @attr_counter: Attributes for "counter" sysfs files 102 * @attr_event: Attributes for "event" sysfs files 103 * @attr_event_list: Attributes for "event_list" sysfs files 104 * @attr_enable: Attributes for "enable" sysfs files 105 * @block_attr: All attributes needed for the block 106 * @block_attr_grp: Attribute group for the block 107 */ 108 struct mlxbf_pmc_block_info { 109 void __iomem *mmio_base; 110 size_t blk_size; 111 size_t counters; 112 int type; 113 struct mlxbf_pmc_attribute *attr_counter; 114 struct mlxbf_pmc_attribute *attr_event; 115 struct mlxbf_pmc_attribute attr_event_list; 116 struct mlxbf_pmc_attribute attr_enable; 117 struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS]; 118 struct attribute_group block_attr_grp; 119 }; 120 121 /** 122 * struct mlxbf_pmc_context - Structure to hold PMC context info 123 * 124 * @pdev: The kernel structure representing the device 125 * @total_blocks: Total number of blocks 126 * @tile_count: Number of tiles in the system 127 * @hwmon_dev: Hwmon device for bfperf 128 * @block_name: Block name 129 * @block: Block info 130 * @groups: Attribute groups from each block 131 * @svc_sreg_support: Whether SMCs are used to access performance registers 132 * @sreg_tbl_perf: Secure register access table number 133 * @event_set: Event set to use 134 */ 135 struct mlxbf_pmc_context { 136 struct platform_device *pdev; 137 uint32_t total_blocks; 138 uint32_t tile_count; 139 struct device *hwmon_dev; 140 const char *block_name[MLXBF_PMC_MAX_BLOCKS]; 141 struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS]; 142 const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS]; 143 bool svc_sreg_support; 144 uint32_t sreg_tbl_perf; 145 unsigned int event_set; 146 }; 147 148 /** 149 * struct mlxbf_pmc_events - Structure to hold supported events for each block 150 * @evt_num: Event number used to program counters 151 * @evt_name: Name of the event 152 */ 153 struct mlxbf_pmc_events { 154 int evt_num; 155 char *evt_name; 156 }; 157 158 static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = { 159 { 0x0, "IN_P_PKT_CNT" }, 160 { 0x10, "IN_NP_PKT_CNT" }, 161 { 0x18, "IN_C_PKT_CNT" }, 162 { 0x20, "OUT_P_PKT_CNT" }, 163 { 0x28, "OUT_NP_PKT_CNT" }, 164 { 0x30, "OUT_C_PKT_CNT" }, 165 { 0x38, "IN_P_BYTE_CNT" }, 166 { 0x40, "IN_NP_BYTE_CNT" }, 167 { 0x48, "IN_C_BYTE_CNT" }, 168 { 0x50, "OUT_P_BYTE_CNT" }, 169 { 0x58, "OUT_NP_BYTE_CNT" }, 170 { 0x60, "OUT_C_BYTE_CNT" }, 171 }; 172 173 static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = { 174 { 0x0, "AW_REQ" }, 175 { 0x1, "AW_BEATS" }, 176 { 0x2, "AW_TRANS" }, 177 { 0x3, "AW_RESP" }, 178 { 0x4, "AW_STL" }, 179 { 0x5, "AW_LAT" }, 180 { 0x6, "AW_REQ_TBU" }, 181 { 0x8, "AR_REQ" }, 182 { 0x9, "AR_BEATS" }, 183 { 0xa, "AR_TRANS" }, 184 { 0xb, "AR_STL" }, 185 { 0xc, "AR_LAT" }, 186 { 0xd, "AR_REQ_TBU" }, 187 { 0xe, "TBU_MISS" }, 188 { 0xf, "TX_DAT_AF" }, 189 { 0x10, "RX_DAT_AF" }, 190 { 0x11, "RETRYQ_CRED" }, 191 }; 192 193 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = { 194 { 0x0, "DISABLE" }, 195 { 0xa0, "TPIO_DATA_BEAT" }, 196 { 0xa1, "TDMA_DATA_BEAT" }, 197 { 0xa2, "MAP_DATA_BEAT" }, 198 { 0xa3, "TXMSG_DATA_BEAT" }, 199 { 0xa4, "TPIO_DATA_PACKET" }, 200 { 0xa5, "TDMA_DATA_PACKET" }, 201 { 0xa6, "MAP_DATA_PACKET" }, 202 { 0xa7, "TXMSG_DATA_PACKET" }, 203 { 0xa8, "TDMA_RT_AF" }, 204 { 0xa9, "TDMA_PBUF_MAC_AF" }, 205 { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" }, 206 { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" }, 207 { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" }, 208 { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" }, 209 { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" }, 210 { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" }, 211 { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" }, 212 { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" }, 213 { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" }, 214 { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" }, 215 }; 216 217 static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = { 218 { 0x0, "DISABLE" }, 219 { 0xa0, "TPIO_DATA_BEAT" }, 220 { 0xa1, "TDMA_DATA_BEAT" }, 221 { 0xa2, "MAP_DATA_BEAT" }, 222 { 0xa3, "TXMSG_DATA_BEAT" }, 223 { 0xa4, "TPIO_DATA_PACKET" }, 224 { 0xa5, "TDMA_DATA_PACKET" }, 225 { 0xa6, "MAP_DATA_PACKET" }, 226 { 0xa7, "TXMSG_DATA_PACKET" }, 227 { 0xa8, "TDMA_RT_AF" }, 228 { 0xa9, "TDMA_PBUF_MAC_AF" }, 229 { 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" }, 230 { 0xab, "TRIO_MAP_CPL_BUF_EMPTY" }, 231 { 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" }, 232 { 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" }, 233 { 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" }, 234 { 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" }, 235 { 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" }, 236 { 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" }, 237 { 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" }, 238 { 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" }, 239 { 0xb4, "TRIO_RING_TX_FLIT_CH0" }, 240 { 0xb5, "TRIO_RING_TX_FLIT_CH1" }, 241 { 0xb6, "TRIO_RING_TX_FLIT_CH2" }, 242 { 0xb7, "TRIO_RING_TX_FLIT_CH3" }, 243 { 0xb8, "TRIO_RING_TX_FLIT_CH4" }, 244 { 0xb9, "TRIO_RING_RX_FLIT_CH0" }, 245 { 0xba, "TRIO_RING_RX_FLIT_CH1" }, 246 { 0xbb, "TRIO_RING_RX_FLIT_CH2" }, 247 { 0xbc, "TRIO_RING_RX_FLIT_CH3" }, 248 }; 249 250 static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = { 251 { 0x0, "DISABLE" }, 252 { 0x100, "ECC_SINGLE_ERROR_CNT" }, 253 { 0x104, "ECC_DOUBLE_ERROR_CNT" }, 254 { 0x114, "SERR_INJ" }, 255 { 0x118, "DERR_INJ" }, 256 { 0x124, "ECC_SINGLE_ERROR_0" }, 257 { 0x164, "ECC_DOUBLE_ERROR_0" }, 258 { 0x340, "DRAM_ECC_COUNT" }, 259 { 0x344, "DRAM_ECC_INJECT" }, 260 { 0x348, "DRAM_ECC_ERROR" }, 261 }; 262 263 static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = { 264 { 0x0, "DISABLE" }, 265 { 0xc0, "RXREQ_MSS" }, 266 { 0xc1, "RXDAT_MSS" }, 267 { 0xc2, "TXRSP_MSS" }, 268 { 0xc3, "TXDAT_MSS" }, 269 }; 270 271 static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = { 272 { 0x0, "DISABLE" }, 273 { 0x45, "HNF_REQUESTS" }, 274 { 0x46, "HNF_REJECTS" }, 275 { 0x47, "ALL_BUSY" }, 276 { 0x48, "MAF_BUSY" }, 277 { 0x49, "MAF_REQUESTS" }, 278 { 0x4a, "RNF_REQUESTS" }, 279 { 0x4b, "REQUEST_TYPE" }, 280 { 0x4c, "MEMORY_READS" }, 281 { 0x4d, "MEMORY_WRITES" }, 282 { 0x4e, "VICTIM_WRITE" }, 283 { 0x4f, "POC_FULL" }, 284 { 0x50, "POC_FAIL" }, 285 { 0x51, "POC_SUCCESS" }, 286 { 0x52, "POC_WRITES" }, 287 { 0x53, "POC_READS" }, 288 { 0x54, "FORWARD" }, 289 { 0x55, "RXREQ_HNF" }, 290 { 0x56, "RXRSP_HNF" }, 291 { 0x57, "RXDAT_HNF" }, 292 { 0x58, "TXREQ_HNF" }, 293 { 0x59, "TXRSP_HNF" }, 294 { 0x5a, "TXDAT_HNF" }, 295 { 0x5b, "TXSNP_HNF" }, 296 { 0x5c, "INDEX_MATCH" }, 297 { 0x5d, "A72_ACCESS" }, 298 { 0x5e, "IO_ACCESS" }, 299 { 0x5f, "TSO_WRITE" }, 300 { 0x60, "TSO_CONFLICT" }, 301 { 0x61, "DIR_HIT" }, 302 { 0x62, "HNF_ACCEPTS" }, 303 { 0x63, "REQ_BUF_EMPTY" }, 304 { 0x64, "REQ_BUF_IDLE_MAF" }, 305 { 0x65, "TSO_NOARB" }, 306 { 0x66, "TSO_NOARB_CYCLES" }, 307 { 0x67, "MSS_NO_CREDIT" }, 308 { 0x68, "TXDAT_NO_LCRD" }, 309 { 0x69, "TXSNP_NO_LCRD" }, 310 { 0x6a, "TXRSP_NO_LCRD" }, 311 { 0x6b, "TXREQ_NO_LCRD" }, 312 { 0x6c, "TSO_CL_MATCH" }, 313 { 0x6d, "MEMORY_READS_BYPASS" }, 314 { 0x6e, "TSO_NOARB_TIMEOUT" }, 315 { 0x6f, "ALLOCATE" }, 316 { 0x70, "VICTIM" }, 317 { 0x71, "A72_WRITE" }, 318 { 0x72, "A72_READ" }, 319 { 0x73, "IO_WRITE" }, 320 { 0x74, "IO_READ" }, 321 { 0x75, "TSO_REJECT" }, 322 { 0x80, "TXREQ_RN" }, 323 { 0x81, "TXRSP_RN" }, 324 { 0x82, "TXDAT_RN" }, 325 { 0x83, "RXSNP_RN" }, 326 { 0x84, "RXRSP_RN" }, 327 { 0x85, "RXDAT_RN" }, 328 }; 329 330 static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = { 331 { 0x0, "DISABLE" }, 332 { 0x12, "CDN_REQ" }, 333 { 0x13, "DDN_REQ" }, 334 { 0x14, "NDN_REQ" }, 335 { 0x15, "CDN_DIAG_N_OUT_OF_CRED" }, 336 { 0x16, "CDN_DIAG_S_OUT_OF_CRED" }, 337 { 0x17, "CDN_DIAG_E_OUT_OF_CRED" }, 338 { 0x18, "CDN_DIAG_W_OUT_OF_CRED" }, 339 { 0x19, "CDN_DIAG_C_OUT_OF_CRED" }, 340 { 0x1a, "CDN_DIAG_N_EGRESS" }, 341 { 0x1b, "CDN_DIAG_S_EGRESS" }, 342 { 0x1c, "CDN_DIAG_E_EGRESS" }, 343 { 0x1d, "CDN_DIAG_W_EGRESS" }, 344 { 0x1e, "CDN_DIAG_C_EGRESS" }, 345 { 0x1f, "CDN_DIAG_N_INGRESS" }, 346 { 0x20, "CDN_DIAG_S_INGRESS" }, 347 { 0x21, "CDN_DIAG_E_INGRESS" }, 348 { 0x22, "CDN_DIAG_W_INGRESS" }, 349 { 0x23, "CDN_DIAG_C_INGRESS" }, 350 { 0x24, "CDN_DIAG_CORE_SENT" }, 351 { 0x25, "DDN_DIAG_N_OUT_OF_CRED" }, 352 { 0x26, "DDN_DIAG_S_OUT_OF_CRED" }, 353 { 0x27, "DDN_DIAG_E_OUT_OF_CRED" }, 354 { 0x28, "DDN_DIAG_W_OUT_OF_CRED" }, 355 { 0x29, "DDN_DIAG_C_OUT_OF_CRED" }, 356 { 0x2a, "DDN_DIAG_N_EGRESS" }, 357 { 0x2b, "DDN_DIAG_S_EGRESS" }, 358 { 0x2c, "DDN_DIAG_E_EGRESS" }, 359 { 0x2d, "DDN_DIAG_W_EGRESS" }, 360 { 0x2e, "DDN_DIAG_C_EGRESS" }, 361 { 0x2f, "DDN_DIAG_N_INGRESS" }, 362 { 0x30, "DDN_DIAG_S_INGRESS" }, 363 { 0x31, "DDN_DIAG_E_INGRESS" }, 364 { 0x32, "DDN_DIAG_W_INGRESS" }, 365 { 0x33, "DDN_DIAG_C_INGRESS" }, 366 { 0x34, "DDN_DIAG_CORE_SENT" }, 367 { 0x35, "NDN_DIAG_N_OUT_OF_CRED" }, 368 { 0x36, "NDN_DIAG_S_OUT_OF_CRED" }, 369 { 0x37, "NDN_DIAG_E_OUT_OF_CRED" }, 370 { 0x38, "NDN_DIAG_W_OUT_OF_CRED" }, 371 { 0x39, "NDN_DIAG_C_OUT_OF_CRED" }, 372 { 0x3a, "NDN_DIAG_N_EGRESS" }, 373 { 0x3b, "NDN_DIAG_S_EGRESS" }, 374 { 0x3c, "NDN_DIAG_E_EGRESS" }, 375 { 0x3d, "NDN_DIAG_W_EGRESS" }, 376 { 0x3e, "NDN_DIAG_C_EGRESS" }, 377 { 0x3f, "NDN_DIAG_N_INGRESS" }, 378 { 0x40, "NDN_DIAG_S_INGRESS" }, 379 { 0x41, "NDN_DIAG_E_INGRESS" }, 380 { 0x42, "NDN_DIAG_W_INGRESS" }, 381 { 0x43, "NDN_DIAG_C_INGRESS" }, 382 { 0x44, "NDN_DIAG_CORE_SENT" }, 383 }; 384 385 static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = { 386 { 0x00, "DISABLE" }, 387 { 0x01, "CYCLES" }, 388 { 0x02, "TOTAL_RD_REQ_IN" }, 389 { 0x03, "TOTAL_WR_REQ_IN" }, 390 { 0x04, "TOTAL_WR_DBID_ACK" }, 391 { 0x05, "TOTAL_WR_DATA_IN" }, 392 { 0x06, "TOTAL_WR_COMP" }, 393 { 0x07, "TOTAL_RD_DATA_OUT" }, 394 { 0x08, "TOTAL_CDN_REQ_IN_BANK0" }, 395 { 0x09, "TOTAL_CDN_REQ_IN_BANK1" }, 396 { 0x0a, "TOTAL_DDN_REQ_IN_BANK0" }, 397 { 0x0b, "TOTAL_DDN_REQ_IN_BANK1" }, 398 { 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" }, 399 { 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" }, 400 { 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" }, 401 { 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" }, 402 { 0x10, "TOTAL_EMEM_RD_REQ_BANK0" }, 403 { 0x11, "TOTAL_EMEM_RD_REQ_BANK1" }, 404 { 0x12, "TOTAL_EMEM_WR_REQ_BANK0" }, 405 { 0x13, "TOTAL_EMEM_WR_REQ_BANK1" }, 406 { 0x14, "TOTAL_RD_REQ_OUT" }, 407 { 0x15, "TOTAL_WR_REQ_OUT" }, 408 { 0x16, "TOTAL_RD_RES_IN" }, 409 { 0x17, "HITS_BANK0" }, 410 { 0x18, "HITS_BANK1" }, 411 { 0x19, "MISSES_BANK0" }, 412 { 0x1a, "MISSES_BANK1" }, 413 { 0x1b, "ALLOCATIONS_BANK0" }, 414 { 0x1c, "ALLOCATIONS_BANK1" }, 415 { 0x1d, "EVICTIONS_BANK0" }, 416 { 0x1e, "EVICTIONS_BANK1" }, 417 { 0x1f, "DBID_REJECT" }, 418 { 0x20, "WRDB_REJECT_BANK0" }, 419 { 0x21, "WRDB_REJECT_BANK1" }, 420 { 0x22, "CMDQ_REJECT_BANK0" }, 421 { 0x23, "CMDQ_REJECT_BANK1" }, 422 { 0x24, "COB_REJECT_BANK0" }, 423 { 0x25, "COB_REJECT_BANK1" }, 424 { 0x26, "TRB_REJECT_BANK0" }, 425 { 0x27, "TRB_REJECT_BANK1" }, 426 { 0x28, "TAG_REJECT_BANK0" }, 427 { 0x29, "TAG_REJECT_BANK1" }, 428 { 0x2a, "ANY_REJECT_BANK0" }, 429 { 0x2b, "ANY_REJECT_BANK1" }, 430 }; 431 432 static struct mlxbf_pmc_context *pmc; 433 434 /* UUID used to probe ATF service. */ 435 static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4"; 436 437 /* Calls an SMC to access a performance register */ 438 static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command, 439 uint64_t *result) 440 { 441 struct arm_smccc_res res; 442 int status, err = 0; 443 444 arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0, 445 0, &res); 446 447 status = res.a0; 448 449 switch (status) { 450 case PSCI_RET_NOT_SUPPORTED: 451 err = -EINVAL; 452 break; 453 case MLXBF_PMC_SMCCC_ACCESS_VIOLATION: 454 err = -EACCES; 455 break; 456 default: 457 *result = res.a1; 458 break; 459 } 460 461 return err; 462 } 463 464 /* Read from a performance counter */ 465 static int mlxbf_pmc_read(void __iomem *addr, uint32_t command, 466 uint64_t *result) 467 { 468 if (pmc->svc_sreg_support) 469 return mlxbf_pmc_secure_read(addr, command, result); 470 471 if (command == MLXBF_PMC_READ_REG_32) 472 *result = readl(addr); 473 else 474 *result = readq(addr); 475 476 return 0; 477 } 478 479 /* Convenience function for 32-bit reads */ 480 static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result) 481 { 482 uint64_t read_out; 483 int status; 484 485 status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out); 486 if (status) 487 return status; 488 *result = (uint32_t)read_out; 489 490 return 0; 491 } 492 493 /* Calls an SMC to access a performance register */ 494 static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command, 495 uint64_t value) 496 { 497 struct arm_smccc_res res; 498 int status, err = 0; 499 500 arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0, 501 0, 0, &res); 502 503 status = res.a0; 504 505 switch (status) { 506 case PSCI_RET_NOT_SUPPORTED: 507 err = -EINVAL; 508 break; 509 case MLXBF_PMC_SMCCC_ACCESS_VIOLATION: 510 err = -EACCES; 511 break; 512 } 513 514 return err; 515 } 516 517 /* Write to a performance counter */ 518 static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value) 519 { 520 if (pmc->svc_sreg_support) 521 return mlxbf_pmc_secure_write(addr, command, value); 522 523 if (command == MLXBF_PMC_WRITE_REG_32) 524 writel(value, addr); 525 else 526 writeq(value, addr); 527 528 return 0; 529 } 530 531 /* Check if the register offset is within the mapped region for the block */ 532 static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset) 533 { 534 if ((offset >= 0) && !(offset % MLXBF_PMC_REG_SIZE) && 535 (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size)) 536 return true; /* inside the mapped PMC space */ 537 538 return false; 539 } 540 541 /* Get the event list corresponding to a certain block */ 542 static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, 543 int *size) 544 { 545 const struct mlxbf_pmc_events *events; 546 547 if (strstr(blk, "tilenet")) { 548 events = mlxbf_pmc_hnfnet_events; 549 *size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events); 550 } else if (strstr(blk, "tile")) { 551 events = mlxbf_pmc_hnf_events; 552 *size = ARRAY_SIZE(mlxbf_pmc_hnf_events); 553 } else if (strstr(blk, "triogen")) { 554 events = mlxbf_pmc_smgen_events; 555 *size = ARRAY_SIZE(mlxbf_pmc_smgen_events); 556 } else if (strstr(blk, "trio")) { 557 switch (pmc->event_set) { 558 case MLXBF_PMC_EVENT_SET_BF1: 559 events = mlxbf_pmc_trio_events_1; 560 *size = ARRAY_SIZE(mlxbf_pmc_trio_events_1); 561 break; 562 case MLXBF_PMC_EVENT_SET_BF2: 563 events = mlxbf_pmc_trio_events_2; 564 *size = ARRAY_SIZE(mlxbf_pmc_trio_events_2); 565 break; 566 default: 567 events = NULL; 568 *size = 0; 569 break; 570 } 571 } else if (strstr(blk, "mss")) { 572 events = mlxbf_pmc_mss_events; 573 *size = ARRAY_SIZE(mlxbf_pmc_mss_events); 574 } else if (strstr(blk, "ecc")) { 575 events = mlxbf_pmc_ecc_events; 576 *size = ARRAY_SIZE(mlxbf_pmc_ecc_events); 577 } else if (strstr(blk, "pcie")) { 578 events = mlxbf_pmc_pcie_events; 579 *size = ARRAY_SIZE(mlxbf_pmc_pcie_events); 580 } else if (strstr(blk, "l3cache")) { 581 events = mlxbf_pmc_l3c_events; 582 *size = ARRAY_SIZE(mlxbf_pmc_l3c_events); 583 } else if (strstr(blk, "gic")) { 584 events = mlxbf_pmc_smgen_events; 585 *size = ARRAY_SIZE(mlxbf_pmc_smgen_events); 586 } else if (strstr(blk, "smmu")) { 587 events = mlxbf_pmc_smgen_events; 588 *size = ARRAY_SIZE(mlxbf_pmc_smgen_events); 589 } else { 590 events = NULL; 591 *size = 0; 592 } 593 594 return events; 595 } 596 597 /* Get the event number given the name */ 598 static int mlxbf_pmc_get_event_num(const char *blk, const char *evt) 599 { 600 const struct mlxbf_pmc_events *events; 601 int i, size; 602 603 events = mlxbf_pmc_event_list(blk, &size); 604 if (!events) 605 return -EINVAL; 606 607 for (i = 0; i < size; ++i) { 608 if (!strcmp(evt, events[i].evt_name)) 609 return events[i].evt_num; 610 } 611 612 return -ENODEV; 613 } 614 615 /* Get the event number given the name */ 616 static char *mlxbf_pmc_get_event_name(const char *blk, int evt) 617 { 618 const struct mlxbf_pmc_events *events; 619 int i, size; 620 621 events = mlxbf_pmc_event_list(blk, &size); 622 if (!events) 623 return NULL; 624 625 for (i = 0; i < size; ++i) { 626 if (evt == events[i].evt_num) 627 return events[i].evt_name; 628 } 629 630 return NULL; 631 } 632 633 /* Method to enable/disable/reset l3cache counters */ 634 static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset) 635 { 636 uint32_t perfcnt_cfg = 0; 637 638 if (enable) 639 perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN; 640 if (reset) 641 perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST; 642 643 return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + 644 MLXBF_PMC_L3C_PERF_CNT_CFG, 645 MLXBF_PMC_WRITE_REG_32, perfcnt_cfg); 646 } 647 648 /* Method to handle l3cache counter programming */ 649 static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num, 650 uint32_t evt) 651 { 652 uint32_t perfcnt_sel_1 = 0; 653 uint32_t perfcnt_sel = 0; 654 uint32_t *wordaddr; 655 void __iomem *pmcaddr; 656 int ret; 657 658 /* Disable all counters before programming them */ 659 if (mlxbf_pmc_config_l3_counters(blk_num, false, false)) 660 return -EINVAL; 661 662 /* Select appropriate register information */ 663 switch (cnt_num) { 664 case 0 ... 3: 665 pmcaddr = pmc->block[blk_num].mmio_base + 666 MLXBF_PMC_L3C_PERF_CNT_SEL; 667 wordaddr = &perfcnt_sel; 668 break; 669 case 4: 670 pmcaddr = pmc->block[blk_num].mmio_base + 671 MLXBF_PMC_L3C_PERF_CNT_SEL_1; 672 wordaddr = &perfcnt_sel_1; 673 break; 674 default: 675 return -EINVAL; 676 } 677 678 ret = mlxbf_pmc_readl(pmcaddr, wordaddr); 679 if (ret) 680 return ret; 681 682 switch (cnt_num) { 683 case 0: 684 perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0; 685 perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, 686 evt); 687 break; 688 case 1: 689 perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1; 690 perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, 691 evt); 692 break; 693 case 2: 694 perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2; 695 perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, 696 evt); 697 break; 698 case 3: 699 perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3; 700 perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, 701 evt); 702 break; 703 case 4: 704 perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4; 705 perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4, 706 evt); 707 break; 708 default: 709 return -EINVAL; 710 } 711 712 return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr); 713 } 714 715 /* Method to program a counter to monitor an event */ 716 static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num, 717 uint32_t evt, bool is_l3) 718 { 719 uint64_t perfctl, perfevt, perfmon_cfg; 720 721 if (cnt_num >= pmc->block[blk_num].counters) 722 return -ENODEV; 723 724 if (is_l3) 725 return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt); 726 727 /* Configure the counter */ 728 perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1); 729 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0); 730 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1); 731 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0); 732 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0); 733 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0); 734 perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0); 735 736 perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl); 737 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, 738 MLXBF_PMC_PERFCTL); 739 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); 740 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1); 741 742 if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + 743 cnt_num * MLXBF_PMC_REG_SIZE, 744 MLXBF_PMC_WRITE_REG_64, perfmon_cfg)) 745 return -EFAULT; 746 747 /* Select the event */ 748 perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt); 749 750 perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt); 751 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, 752 MLXBF_PMC_PERFEVT); 753 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); 754 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1); 755 756 if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + 757 cnt_num * MLXBF_PMC_REG_SIZE, 758 MLXBF_PMC_WRITE_REG_64, perfmon_cfg)) 759 return -EFAULT; 760 761 /* Clear the accumulator */ 762 perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, 763 MLXBF_PMC_PERFACC0); 764 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); 765 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1); 766 767 if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + 768 cnt_num * MLXBF_PMC_REG_SIZE, 769 MLXBF_PMC_WRITE_REG_64, perfmon_cfg)) 770 return -EFAULT; 771 772 return 0; 773 } 774 775 /* Method to handle l3 counter reads */ 776 static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num, 777 uint64_t *result) 778 { 779 uint32_t perfcnt_low = 0, perfcnt_high = 0; 780 uint64_t value; 781 int status = 0; 782 783 status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + 784 MLXBF_PMC_L3C_PERF_CNT_LOW + 785 cnt_num * MLXBF_PMC_L3C_REG_SIZE, 786 &perfcnt_low); 787 788 if (status) 789 return status; 790 791 status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + 792 MLXBF_PMC_L3C_PERF_CNT_HIGH + 793 cnt_num * MLXBF_PMC_L3C_REG_SIZE, 794 &perfcnt_high); 795 796 if (status) 797 return status; 798 799 value = perfcnt_high; 800 value = value << 32; 801 value |= perfcnt_low; 802 *result = value; 803 804 return 0; 805 } 806 807 /* Method to read the counter value */ 808 static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3, 809 uint64_t *result) 810 { 811 uint32_t perfcfg_offset, perfval_offset; 812 uint64_t perfmon_cfg; 813 int status; 814 815 if (cnt_num >= pmc->block[blk_num].counters) 816 return -EINVAL; 817 818 if (is_l3) 819 return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result); 820 821 perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE; 822 perfval_offset = perfcfg_offset + 823 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE; 824 825 /* Set counter in "read" mode */ 826 perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, 827 MLXBF_PMC_PERFACC0); 828 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); 829 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0); 830 831 status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset, 832 MLXBF_PMC_WRITE_REG_64, perfmon_cfg); 833 834 if (status) 835 return status; 836 837 /* Get the counter value */ 838 return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset, 839 MLXBF_PMC_READ_REG_64, result); 840 } 841 842 /* Method to read L3 block event */ 843 static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num, 844 uint64_t *result) 845 { 846 uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0; 847 uint32_t *wordaddr; 848 void __iomem *pmcaddr; 849 uint64_t evt; 850 851 /* Select appropriate register information */ 852 switch (cnt_num) { 853 case 0 ... 3: 854 pmcaddr = pmc->block[blk_num].mmio_base + 855 MLXBF_PMC_L3C_PERF_CNT_SEL; 856 wordaddr = &perfcnt_sel; 857 break; 858 case 4: 859 pmcaddr = pmc->block[blk_num].mmio_base + 860 MLXBF_PMC_L3C_PERF_CNT_SEL_1; 861 wordaddr = &perfcnt_sel_1; 862 break; 863 default: 864 return -EINVAL; 865 } 866 867 if (mlxbf_pmc_readl(pmcaddr, wordaddr)) 868 return -EINVAL; 869 870 /* Read from appropriate register field for the counter */ 871 switch (cnt_num) { 872 case 0: 873 evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel); 874 break; 875 case 1: 876 evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel); 877 break; 878 case 2: 879 evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel); 880 break; 881 case 3: 882 evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel); 883 break; 884 case 4: 885 evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4, 886 perfcnt_sel_1); 887 break; 888 default: 889 return -EINVAL; 890 } 891 *result = evt; 892 893 return 0; 894 } 895 896 /* Method to find the event currently being monitored by a counter */ 897 static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3, 898 uint64_t *result) 899 { 900 uint32_t perfcfg_offset, perfval_offset; 901 uint64_t perfmon_cfg, perfevt; 902 903 if (cnt_num >= pmc->block[blk_num].counters) 904 return -EINVAL; 905 906 if (is_l3) 907 return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result); 908 909 perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE; 910 perfval_offset = perfcfg_offset + 911 pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE; 912 913 /* Set counter in "read" mode */ 914 perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, 915 MLXBF_PMC_PERFEVT); 916 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1); 917 perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0); 918 919 if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset, 920 MLXBF_PMC_WRITE_REG_64, perfmon_cfg)) 921 return -EFAULT; 922 923 /* Get the event number */ 924 if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset, 925 MLXBF_PMC_READ_REG_64, &perfevt)) 926 return -EFAULT; 927 928 *result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt); 929 930 return 0; 931 } 932 933 /* Method to read a register */ 934 static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result) 935 { 936 uint32_t ecc_out; 937 938 if (strstr(pmc->block_name[blk_num], "ecc")) { 939 if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset, 940 &ecc_out)) 941 return -EFAULT; 942 943 *result = ecc_out; 944 return 0; 945 } 946 947 if (mlxbf_pmc_valid_range(blk_num, offset)) 948 return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset, 949 MLXBF_PMC_READ_REG_64, result); 950 951 return -EINVAL; 952 } 953 954 /* Method to write to a register */ 955 static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data) 956 { 957 if (strstr(pmc->block_name[blk_num], "ecc")) { 958 return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset, 959 MLXBF_PMC_WRITE_REG_32, data); 960 } 961 962 if (mlxbf_pmc_valid_range(blk_num, offset)) 963 return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset, 964 MLXBF_PMC_WRITE_REG_64, data); 965 966 return -EINVAL; 967 } 968 969 /* Show function for "counter" sysfs files */ 970 static ssize_t mlxbf_pmc_counter_show(struct device *dev, 971 struct device_attribute *attr, char *buf) 972 { 973 struct mlxbf_pmc_attribute *attr_counter = container_of( 974 attr, struct mlxbf_pmc_attribute, dev_attr); 975 int blk_num, cnt_num, offset; 976 bool is_l3 = false; 977 uint64_t value; 978 979 blk_num = attr_counter->nr; 980 cnt_num = attr_counter->index; 981 982 if (strstr(pmc->block_name[blk_num], "l3cache")) 983 is_l3 = true; 984 985 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) { 986 if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value)) 987 return -EINVAL; 988 } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) { 989 offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], 990 attr->attr.name); 991 if (offset < 0) 992 return -EINVAL; 993 if (mlxbf_pmc_read_reg(blk_num, offset, &value)) 994 return -EINVAL; 995 } else 996 return -EINVAL; 997 998 return sysfs_emit(buf, "0x%llx\n", value); 999 } 1000 1001 /* Store function for "counter" sysfs files */ 1002 static ssize_t mlxbf_pmc_counter_store(struct device *dev, 1003 struct device_attribute *attr, 1004 const char *buf, size_t count) 1005 { 1006 struct mlxbf_pmc_attribute *attr_counter = container_of( 1007 attr, struct mlxbf_pmc_attribute, dev_attr); 1008 int blk_num, cnt_num, offset, err, data; 1009 bool is_l3 = false; 1010 uint64_t evt_num; 1011 1012 blk_num = attr_counter->nr; 1013 cnt_num = attr_counter->index; 1014 1015 err = kstrtoint(buf, 0, &data); 1016 if (err < 0) 1017 return err; 1018 1019 /* Allow non-zero writes only to the ecc regs */ 1020 if (!(strstr(pmc->block_name[blk_num], "ecc")) && data) 1021 return -EINVAL; 1022 1023 /* Do not allow writes to the L3C regs */ 1024 if (strstr(pmc->block_name[blk_num], "l3cache")) 1025 return -EINVAL; 1026 1027 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) { 1028 err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num); 1029 if (err) 1030 return err; 1031 err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, 1032 is_l3); 1033 if (err) 1034 return err; 1035 } else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) { 1036 offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], 1037 attr->attr.name); 1038 if (offset < 0) 1039 return -EINVAL; 1040 err = mlxbf_pmc_write_reg(blk_num, offset, data); 1041 if (err) 1042 return err; 1043 } else 1044 return -EINVAL; 1045 1046 return count; 1047 } 1048 1049 /* Show function for "event" sysfs files */ 1050 static ssize_t mlxbf_pmc_event_show(struct device *dev, 1051 struct device_attribute *attr, char *buf) 1052 { 1053 struct mlxbf_pmc_attribute *attr_event = container_of( 1054 attr, struct mlxbf_pmc_attribute, dev_attr); 1055 int blk_num, cnt_num, err; 1056 bool is_l3 = false; 1057 uint64_t evt_num; 1058 char *evt_name; 1059 1060 blk_num = attr_event->nr; 1061 cnt_num = attr_event->index; 1062 1063 if (strstr(pmc->block_name[blk_num], "l3cache")) 1064 is_l3 = true; 1065 1066 err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num); 1067 if (err) 1068 return sysfs_emit(buf, "No event being monitored\n"); 1069 1070 evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num); 1071 if (!evt_name) 1072 return -EINVAL; 1073 1074 return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name); 1075 } 1076 1077 /* Store function for "event" sysfs files */ 1078 static ssize_t mlxbf_pmc_event_store(struct device *dev, 1079 struct device_attribute *attr, 1080 const char *buf, size_t count) 1081 { 1082 struct mlxbf_pmc_attribute *attr_event = container_of( 1083 attr, struct mlxbf_pmc_attribute, dev_attr); 1084 int blk_num, cnt_num, evt_num, err; 1085 bool is_l3 = false; 1086 1087 blk_num = attr_event->nr; 1088 cnt_num = attr_event->index; 1089 1090 if (isalpha(buf[0])) { 1091 evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], 1092 buf); 1093 if (evt_num < 0) 1094 return -EINVAL; 1095 } else { 1096 err = kstrtoint(buf, 0, &evt_num); 1097 if (err < 0) 1098 return err; 1099 } 1100 1101 if (strstr(pmc->block_name[blk_num], "l3cache")) 1102 is_l3 = true; 1103 1104 err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3); 1105 if (err) 1106 return err; 1107 1108 return count; 1109 } 1110 1111 /* Show function for "event_list" sysfs files */ 1112 static ssize_t mlxbf_pmc_event_list_show(struct device *dev, 1113 struct device_attribute *attr, 1114 char *buf) 1115 { 1116 struct mlxbf_pmc_attribute *attr_event_list = container_of( 1117 attr, struct mlxbf_pmc_attribute, dev_attr); 1118 int blk_num, i, size, len = 0, ret = 0; 1119 const struct mlxbf_pmc_events *events; 1120 char e_info[MLXBF_PMC_EVENT_INFO_LEN]; 1121 1122 blk_num = attr_event_list->nr; 1123 1124 events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size); 1125 if (!events) 1126 return -EINVAL; 1127 1128 for (i = 0, buf[0] = '\0'; i < size; ++i) { 1129 len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n", 1130 events[i].evt_num, events[i].evt_name); 1131 if (len >= PAGE_SIZE) 1132 break; 1133 strcat(buf, e_info); 1134 ret = len; 1135 } 1136 1137 return ret; 1138 } 1139 1140 /* Show function for "enable" sysfs files - only for l3cache */ 1141 static ssize_t mlxbf_pmc_enable_show(struct device *dev, 1142 struct device_attribute *attr, char *buf) 1143 { 1144 struct mlxbf_pmc_attribute *attr_enable = container_of( 1145 attr, struct mlxbf_pmc_attribute, dev_attr); 1146 uint32_t perfcnt_cfg; 1147 int blk_num, value; 1148 1149 blk_num = attr_enable->nr; 1150 1151 if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + 1152 MLXBF_PMC_L3C_PERF_CNT_CFG, 1153 &perfcnt_cfg)) 1154 return -EINVAL; 1155 1156 value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg); 1157 1158 return sysfs_emit(buf, "%d\n", value); 1159 } 1160 1161 /* Store function for "enable" sysfs files - only for l3cache */ 1162 static ssize_t mlxbf_pmc_enable_store(struct device *dev, 1163 struct device_attribute *attr, 1164 const char *buf, size_t count) 1165 { 1166 struct mlxbf_pmc_attribute *attr_enable = container_of( 1167 attr, struct mlxbf_pmc_attribute, dev_attr); 1168 int err, en, blk_num; 1169 1170 blk_num = attr_enable->nr; 1171 1172 err = kstrtoint(buf, 0, &en); 1173 if (err < 0) 1174 return err; 1175 1176 if (!en) { 1177 err = mlxbf_pmc_config_l3_counters(blk_num, false, false); 1178 if (err) 1179 return err; 1180 } else if (en == 1) { 1181 err = mlxbf_pmc_config_l3_counters(blk_num, false, true); 1182 if (err) 1183 return err; 1184 err = mlxbf_pmc_config_l3_counters(blk_num, true, false); 1185 if (err) 1186 return err; 1187 } else 1188 return -EINVAL; 1189 1190 return count; 1191 } 1192 1193 /* Populate attributes for blocks with counters to monitor performance */ 1194 static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num) 1195 { 1196 struct mlxbf_pmc_attribute *attr; 1197 int i = 0, j = 0; 1198 1199 /* "event_list" sysfs to list events supported by the block */ 1200 attr = &pmc->block[blk_num].attr_event_list; 1201 attr->dev_attr.attr.mode = 0444; 1202 attr->dev_attr.show = mlxbf_pmc_event_list_show; 1203 attr->nr = blk_num; 1204 attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list"); 1205 pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr; 1206 attr = NULL; 1207 1208 /* "enable" sysfs to start/stop the counters. Only in L3C blocks */ 1209 if (strstr(pmc->block_name[blk_num], "l3cache")) { 1210 attr = &pmc->block[blk_num].attr_enable; 1211 attr->dev_attr.attr.mode = 0644; 1212 attr->dev_attr.show = mlxbf_pmc_enable_show; 1213 attr->dev_attr.store = mlxbf_pmc_enable_store; 1214 attr->nr = blk_num; 1215 attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, 1216 "enable"); 1217 pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr; 1218 attr = NULL; 1219 } 1220 1221 pmc->block[blk_num].attr_counter = devm_kcalloc( 1222 dev, pmc->block[blk_num].counters, 1223 sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL); 1224 if (!pmc->block[blk_num].attr_counter) 1225 return -ENOMEM; 1226 1227 pmc->block[blk_num].attr_event = devm_kcalloc( 1228 dev, pmc->block[blk_num].counters, 1229 sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL); 1230 if (!pmc->block[blk_num].attr_event) 1231 return -ENOMEM; 1232 1233 /* "eventX" and "counterX" sysfs to program and read counter values */ 1234 for (j = 0; j < pmc->block[blk_num].counters; ++j) { 1235 attr = &pmc->block[blk_num].attr_counter[j]; 1236 attr->dev_attr.attr.mode = 0644; 1237 attr->dev_attr.show = mlxbf_pmc_counter_show; 1238 attr->dev_attr.store = mlxbf_pmc_counter_store; 1239 attr->index = j; 1240 attr->nr = blk_num; 1241 attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, 1242 "counter%d", j); 1243 pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr; 1244 attr = NULL; 1245 1246 attr = &pmc->block[blk_num].attr_event[j]; 1247 attr->dev_attr.attr.mode = 0644; 1248 attr->dev_attr.show = mlxbf_pmc_event_show; 1249 attr->dev_attr.store = mlxbf_pmc_event_store; 1250 attr->index = j; 1251 attr->nr = blk_num; 1252 attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, 1253 "event%d", j); 1254 pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr; 1255 attr = NULL; 1256 } 1257 1258 return 0; 1259 } 1260 1261 /* Populate attributes for blocks with registers to monitor performance */ 1262 static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num) 1263 { 1264 struct mlxbf_pmc_attribute *attr; 1265 const struct mlxbf_pmc_events *events; 1266 int i = 0, j = 0; 1267 1268 events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j); 1269 if (!events) 1270 return -EINVAL; 1271 1272 pmc->block[blk_num].attr_event = devm_kcalloc( 1273 dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL); 1274 if (!pmc->block[blk_num].attr_event) 1275 return -ENOMEM; 1276 1277 while (j > 0) { 1278 --j; 1279 attr = &pmc->block[blk_num].attr_event[j]; 1280 attr->dev_attr.attr.mode = 0644; 1281 attr->dev_attr.show = mlxbf_pmc_counter_show; 1282 attr->dev_attr.store = mlxbf_pmc_counter_store; 1283 attr->nr = blk_num; 1284 attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, 1285 events[j].evt_name); 1286 pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr; 1287 attr = NULL; 1288 i++; 1289 } 1290 1291 return 0; 1292 } 1293 1294 /* Helper to create the bfperf sysfs sub-directories and files */ 1295 static int mlxbf_pmc_create_groups(struct device *dev, int blk_num) 1296 { 1297 int err; 1298 1299 /* Populate attributes based on counter type */ 1300 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) 1301 err = mlxbf_pmc_init_perftype_counter(dev, blk_num); 1302 else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) 1303 err = mlxbf_pmc_init_perftype_reg(dev, blk_num); 1304 else 1305 err = -EINVAL; 1306 1307 if (err) 1308 return err; 1309 1310 /* Add a new attribute_group for the block */ 1311 pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr; 1312 pmc->block[blk_num].block_attr_grp.name = devm_kasprintf( 1313 dev, GFP_KERNEL, pmc->block_name[blk_num]); 1314 pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp; 1315 1316 return 0; 1317 } 1318 1319 static bool mlxbf_pmc_guid_match(const guid_t *guid, 1320 const struct arm_smccc_res *res) 1321 { 1322 guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2, 1323 res->a2 >> 8, res->a2 >> 16, res->a2 >> 24, 1324 res->a3, res->a3 >> 8, res->a3 >> 16, 1325 res->a3 >> 24); 1326 1327 return guid_equal(guid, &id); 1328 } 1329 1330 /* Helper to map the Performance Counters from the varios blocks */ 1331 static int mlxbf_pmc_map_counters(struct device *dev) 1332 { 1333 uint64_t info[MLXBF_PMC_INFO_SZ]; 1334 int i, tile_num, ret; 1335 1336 for (i = 0; i < pmc->total_blocks; ++i) { 1337 if (strstr(pmc->block_name[i], "tile")) { 1338 if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1) 1339 return -EINVAL; 1340 1341 if (tile_num >= pmc->tile_count) 1342 continue; 1343 } 1344 ret = device_property_read_u64_array(dev, pmc->block_name[i], 1345 info, MLXBF_PMC_INFO_SZ); 1346 if (ret) 1347 return ret; 1348 1349 /* 1350 * Do not remap if the proper SMC calls are supported, 1351 * since the SMC calls expect physical addresses. 1352 */ 1353 if (pmc->svc_sreg_support) 1354 pmc->block[i].mmio_base = (void __iomem *)info[0]; 1355 else 1356 pmc->block[i].mmio_base = 1357 devm_ioremap(dev, info[0], info[1]); 1358 1359 pmc->block[i].blk_size = info[1]; 1360 pmc->block[i].counters = info[2]; 1361 pmc->block[i].type = info[3]; 1362 1363 if (!pmc->block[i].mmio_base) 1364 return -ENOMEM; 1365 1366 ret = mlxbf_pmc_create_groups(dev, i); 1367 if (ret) 1368 return ret; 1369 } 1370 1371 return 0; 1372 } 1373 1374 static int mlxbf_pmc_probe(struct platform_device *pdev) 1375 { 1376 struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev); 1377 const char *hid = acpi_device_hid(acpi_dev); 1378 struct device *dev = &pdev->dev; 1379 struct arm_smccc_res res; 1380 guid_t guid; 1381 int ret; 1382 1383 /* Ensure we have the UUID we expect for this service. */ 1384 arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res); 1385 guid_parse(mlxbf_pmc_svc_uuid_str, &guid); 1386 if (!mlxbf_pmc_guid_match(&guid, &res)) 1387 return -ENODEV; 1388 1389 pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL); 1390 if (!pmc) 1391 return -ENOMEM; 1392 1393 /* 1394 * ACPI indicates whether we use SMCs to access registers or not. 1395 * If sreg_tbl_perf is not present, just assume we're not using SMCs. 1396 */ 1397 ret = device_property_read_u32(dev, "sec_reg_block", 1398 &pmc->sreg_tbl_perf); 1399 if (ret) { 1400 pmc->svc_sreg_support = false; 1401 } else { 1402 /* 1403 * Check service version to see if we actually do support the 1404 * needed SMCs. If we have the calls we need, mark support for 1405 * them in the pmc struct. 1406 */ 1407 arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, 1408 &res); 1409 if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR && 1410 res.a1 >= MLXBF_PMC_SVC_MIN_MINOR) 1411 pmc->svc_sreg_support = true; 1412 else 1413 return -EINVAL; 1414 } 1415 1416 if (!strcmp(hid, "MLNXBFD0")) 1417 pmc->event_set = MLXBF_PMC_EVENT_SET_BF1; 1418 else if (!strcmp(hid, "MLNXBFD1")) 1419 pmc->event_set = MLXBF_PMC_EVENT_SET_BF2; 1420 else 1421 return -ENODEV; 1422 1423 ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks); 1424 if (ret) 1425 return ret; 1426 1427 ret = device_property_read_string_array(dev, "block_name", 1428 pmc->block_name, 1429 pmc->total_blocks); 1430 if (ret != pmc->total_blocks) 1431 return -EFAULT; 1432 1433 ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count); 1434 if (ret) 1435 return ret; 1436 1437 pmc->pdev = pdev; 1438 1439 ret = mlxbf_pmc_map_counters(dev); 1440 if (ret) 1441 return ret; 1442 1443 pmc->hwmon_dev = devm_hwmon_device_register_with_groups( 1444 dev, "bfperf", pmc, pmc->groups); 1445 platform_set_drvdata(pdev, pmc); 1446 1447 return 0; 1448 } 1449 1450 static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = { { "MLNXBFD0", 0 }, 1451 { "MLNXBFD1", 0 }, 1452 {}, }; 1453 1454 MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids); 1455 static struct platform_driver pmc_driver = { 1456 .driver = { .name = "mlxbf-pmc", 1457 .acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), }, 1458 .probe = mlxbf_pmc_probe, 1459 }; 1460 1461 module_platform_driver(pmc_driver); 1462 1463 MODULE_AUTHOR("Shravan Kumar Ramani <sramani@mellanox.com>"); 1464 MODULE_DESCRIPTION("Mellanox PMC driver"); 1465 MODULE_LICENSE("Dual BSD/GPL"); 1466