// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20
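/*
 * Editorial sketch, not driver code: per-box MSR addresses are the box 0
 * address plus a fixed stride, so Cbo N's filter register would be
 * computed roughly as below (this mirrors what snbep_cbox_hw_config()
 * later does with box->pmu->pmu_idx):
 *
 *	unsigned int cbox_filter_msr(int cbox)
 *	{
 *		return SNBEP_C0_MSR_PMON_BOX_FILTER +
 *		       SNBEP_CBO_MSR_OFFSET * cbox;
 *	}
 *
 * e.g. Cbo 2's filter lives at 0xd14 + 2 * 0x20 = 0xd54.
 */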
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)	((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  | Default | Description
 * | [63]  |    00h  | VALID - When set, indicates the CPU bus
 *                     numbers have been initialized. (RO)
 * |[62:48]|    ---  | Reserved
 * |[47:40]|    00h  | BUS_NUM_5 - Return the bus number BIOS assigned
 *                     CPUBUSNO(5). (RO)
 * |[39:32]|    00h  | BUS_NUM_4 - Return the bus number BIOS assigned
 *                     CPUBUSNO(4). (RO)
 * |[31:24]|    00h  | BUS_NUM_3 - Return the bus number BIOS assigned
 *                     CPUBUSNO(3). (RO)
 * |[23:16]|    00h  | BUS_NUM_2 - Return the bus number BIOS assigned
 *                     CPUBUSNO(2). (RO)
 * |[15:8] |    00h  | BUS_NUM_1 - Return the bus number BIOS assigned
 *                     CPUBUSNO(1). (RO)
 * | [7:0] |    00h  | BUS_NUM_0 - Return the bus number BIOS assigned
 *                     CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
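/*
 * Editorial sketch, not driver code: given the layout above, bus number i
 * can be pulled out of the MSR value with the stride, roughly:
 *
 *	u64 msr_val;
 *	u8 bus;
 *
 *	rdmsrl(SKX_MSR_CPU_BUS_NUMBER, msr_val);
 *	if (msr_val & SKX_MSR_CPU_BUS_VALID_BIT)
 *		bus = (msr_val >> (i * BUS_NUM_STRIDE)) & 0xff;
 *
 * This mirrors how the SKX/SNR/ICX mapping code walks CPUBUSNO(0..5).
 */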
/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID	0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)	(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
#define SNR_IMC_MMIO_PMON_CTL0		0x40
#define SNR_IMC_MMIO_PMON_CTR0		0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL	0x22800
#define SNR_IMC_MMIO_OFFSET		0x4000
#define SNR_IMC_MMIO_SIZE		0x4000
#define SNR_IMC_MMIO_BASE_OFFSET	0xd0
#define SNR_IMC_MMIO_BASE_MASK		0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET	0xd8
#define SNR_IMC_MMIO_MEM0_MASK		0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0		0xb68
#define ICX_C34_MSR_PMON_CTL0		0xb61
#define ICX_C34_MSR_PMON_BOX_CTL	0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0	0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0		0xa58
#define ICX_IIO_MSR_PMON_CTR0		0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL	0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0		0xa4d
#define ICX_IRP0_MSR_PMON_CTR0		0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL	0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0	0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0	0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL	0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0		0x350
#define ICX_UPI_PCI_PMON_CTR0		0x320
#define ICX_UPI_PCI_PMON_BOX_CTL	0x318
#define ICX_UPI_CTL_UMASK_EXT		0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0		0xd8
#define ICX_M3UPI_PCI_PMON_CTR0		0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL	0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN		3
#define ICX_IMC_MEM_STRIDE		0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT		0xffffff

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN		(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
					 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID	0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0	0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
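/*
 * Editorial sketch, not driver code: the PMON counters behind PCI config
 * space are up to 48 bits wide, but config space is accessed one dword at
 * a time, so a full counter value is assembled from two reads:
 *
 *	u64 count = 0;
 *
 *	pci_read_config_dword(pdev, ctr_base, (u32 *)&count);
 *	pci_read_config_dword(pdev, ctr_base + 4, (u32 *)&count + 1);
 *
 * On x86 the first read fills bits 0-31 and the second bits 32-63, which
 * is exactly what snbep_uncore_pci_read_counter() above does with
 * hwc->event_base.
 */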
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};
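/*
 * Editorial note on the constraint tables below: UNCORE_EVENT_CONSTRAINT()
 * pairs an event code with a bitmask of the counters allowed to count it.
 * For example, in snbep_uncore_cbox_constraints:
 *
 *	UNCORE_EVENT_CONSTRAINT(0x01, 0x1)	event 0x01 -> counter 0 only
 *	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe)	event 0x1f -> counters 1-3
 */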
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
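/*
 * Editorial note on the extra-reg table below: each entry matches an
 * event/umask encoding against .config_mask and, on a hit, ORs .idx into
 * the set of Cbox filter fields that event needs. E.g. the
 * (0x0334, 0xffff, 0x4) entry says event 0x34 with umask 0x03 needs the
 * STATE filter field (bit 2 of the field set understood by
 * snbep_cbox_filter_mask()).
 */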
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
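/*
 * Editorial note: the shared filter register is managed with one atomic_t
 * that packs a 6-bit reference count per filter field, extracted with
 * __BITS_VALUE(), e.g. for field i:
 *
 *	int refs = __BITS_VALUE(atomic_read(&er->ref), i, 6);
 *
 * Taking a field adds 1 << (i * 6); releasing subtracts the same amount,
 * as in snbep_cbox_put_constraint() above.
 */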
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
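/*
 * Editorial note: PCU band events 0xb-0xe each use one byte of the shared
 * filter MSR as an occupancy threshold. snbep_pcu_alter_er() above can
 * re-home an event to a free byte lane: moving from band idx to new_idx
 * shifts the filter value by 8 * (new_idx - idx) bits and bumps the event
 * select by (new_idx - idx), since the four band events are numbered
 * consecutively.
 */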
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
					       (u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
					       (u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
					       (u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
					       (u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			/* get the Node ID of the local register */
			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
			if (err)
				break;
			nodeid = config & NODE_ID_MASK;
			/* get the Node ID mapping */
			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register
			 * maps to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == ((config >> (3 * i)) & 0x7)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
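/*
 * Editorial sketch, not driver code: the GIDNIDMAP register packs eight
 * 3-bit node IDs, one per group ID. The loop above recovers the group ID
 * (and hence the die) for a given node ID roughly like:
 *
 *	for (i = 0; i < 8; i++)
 *		if (nodeid == ((config >> (3 * i)) & 0x7))
 *			break;	// i is the matching group/die index
 */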
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
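/*
 * Note: the 64-bit IVT Cbox filter value is split across two MSRs, as the
 * pair of wrmsrl()s above shows: the low dword goes to the filter register
 * itself and the high dword to a second filter register located six MSR
 * addresses above it (reg1->reg + 6).
 */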
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box = ivbep_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = ivbep_cbox_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = ivbep_cbox_hw_config,
	.get_constraint = ivbep_cbox_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 15,
	.perf_ctr_bits = 44,
	.event_ctl = SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
	.event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = snbep_uncore_cbox_constraints,
	.ops = &ivbep_uncore_cbox_ops,
	.format_group = &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = snbep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &ivbep_uncore_pcu_ops,
	.format_group = &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}

static struct intel_uncore_type ivbep_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};

static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}

static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}

static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
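/*
 * Because the IRP control/counter registers are irregularly spaced (see the
 * ivbep_uncore_irp_ctls/_ctrs tables above), the common PCI callbacks cannot
 * derive them from a base plus a per-counter stride; the IRP-specific
 * callbacks index the tables by hwc->idx instead, and the 64-bit counter
 * value is assembled from two 32-bit config-space reads into the low and
 * high halves of 'count'.
 */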
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box = ivbep_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = ivbep_uncore_irp_disable_event,
	.enable_event = ivbep_uncore_irp_enable_event,
	.read_counter = ivbep_uncore_irp_read_counter,
};

static struct intel_uncore_type ivbep_uncore_irp = {
	.name = "irp",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.event_mask = IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &ivbep_uncore_irp_ops,
	.format_group = &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box = ivbep_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = snbep_uncore_pci_disable_event,
	.enable_event = snbep_qpi_enable_event,
	.read_counter = snbep_uncore_pci_read_counter,
	.hw_config = snbep_qpi_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &ivbep_uncore_qpi_ops,
	.format_group = &ivbep_uncore_qpi_format_group,
};
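/*
 * Note: the QPI boxes reuse the SNB-EP hw_config/enable_event callbacks
 * (defined earlier in this file), which program the packet match/mask
 * registers through separate "filter" PCI functions. That is why devices
 * 0xe86/0xe96 appear in the ID table below as UNCORE_EXTRA_PCI_DEV entries
 * rather than as PMON boxes of their own.
 */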
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 2,
	.perf_ctr_bits = 44,
	.constraints = snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivbep_uncore_pci_driver = {
	.name = "ivbep_uncore",
	.id_table = ivbep_uncore_pci_ids,
};

int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

static struct intel_uncore_type knl_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
	.event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}

static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
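/*
 * Note: unlike the IVT/HSX Cbox code, knl_cha_hw_config() above always ORs
 * the remote-node, local-node and NNC filter bits into the filter value on
 * top of whatever config1 selects, presumably so that an otherwise
 * unfiltered event counts traffic from all sources by default.
 */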
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box = snbep_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = hswep_cbox_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = knl_cha_hw_config,
	.get_constraint = knl_cha_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type knl_uncore_cha = {
	.name = "cha",
	.num_counters = 4,
	.num_boxes = 38,
	.perf_ctr_bits = 48,
	.event_ctl = HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
	.event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = KNL_CHA_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = knl_uncore_cha_constraints,
	.ops = &knl_uncore_cha_ops,
	.format_group = &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}

static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}

static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box = snbep_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = knl_uncore_imc_enable_box,
	.read_counter = snbep_uncore_pci_read_counter,
	.enable_event = knl_uncore_imc_enable_event,
	.disable_event = snbep_uncore_pci_disable_event,
};
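/*
 * Note: on KNL the IMC/EDC boxes are enabled by clearing the box control
 * register outright (knl_uncore_imc_enable_box() writes 0), and the fixed
 * counter has its own enable bit (KNL_PMON_FIXED_CTL_EN) distinct from the
 * generic SNBEP_PMON_CTL_EN used for the programmable counters.
 */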
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name = "imc_uclk",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl = KNL_UCLK_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops = &knl_uncore_imc_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 6,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops = &knl_uncore_imc_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name = "edc_uclk",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl = KNL_UCLK_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops = &knl_uncore_imc_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name = "edc_eclk",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops = &knl_uncore_imc_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type knl_uncore_m2pcie = {
	.name = "m2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.constraints = knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

static struct intel_uncore_type knl_uncore_irp = {
	.name = "irp",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
	.ops = &snbep_uncore_pci_ops,
	.format_group = &knl_uncore_irp_format_group,
};

enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
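/*
 * Because instances share device IDs, the table below uses
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx), which additionally encodes
 * the expected PCI device/function numbers so the probe code can tell the
 * instances apart; the ID-only UNCORE_PCI_DEV_DATA() form is still used for
 * the unique M2PCIe and IRP devices.
 */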
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name = "knl_uncore",
	.id_table = knl_uncore_pci_ids,
};

int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = hswep_ubox_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

static struct intel_uncore_type hswep_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs = 1,
	.ops = &hswep_uncore_ubox_ops,
	.format_group = &hswep_uncore_ubox_format_group,
};
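/*
 * Note: the HSX Ubox has a single filter register shared by its counters.
 * hswep_ubox_hw_config() unconditionally claims it (idx 0) and the generic
 * uncore_get_constraint()/uncore_put_constraint() helpers arbitrate access,
 * which is why hswep_uncore_ubox above sets .num_shared_regs = 1.
 */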
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};

static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
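/*
 * Note: HSX places the two Cbox filter MSRs at adjacent addresses, so the
 * high dword of the filter is written to reg1->reg + 1 here, rather than
 * the +6 spacing used by the IVT Cbox earlier in this file.
 */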
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box = snbep_uncore_msr_init_box,
	.disable_box = snbep_uncore_msr_disable_box,
	.enable_box = snbep_uncore_msr_enable_box,
	.disable_event = snbep_uncore_msr_disable_event,
	.enable_event = hswep_cbox_enable_event,
	.read_counter = uncore_msr_read_counter,
	.hw_config = hswep_cbox_hw_config,
	.get_constraint = hswep_cbox_get_constraint,
	.put_constraint = snbep_cbox_put_constraint,
};

static struct intel_uncore_type hswep_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 18,
	.perf_ctr_bits = 48,
	.event_ctl = HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
	.event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = hswep_uncore_cbox_constraints,
	.ops = &hswep_uncore_cbox_ops,
	.format_group = &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box = hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

static struct intel_uncore_type hswep_uncore_sbox = {
	.name = "sbox",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 44,
	.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset = HSWEP_SBOX_MSR_OFFSET,
	.ops = &hswep_uncore_sbox_msr_ops,
	.format_group = &hswep_uncore_sbox_format_group,
};

static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/* each occupancy band filter is a byte-wide field */
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config = hswep_pcu_hw_config,
	.get_constraint = snbep_pcu_get_constraint,
	.put_constraint = snbep_pcu_put_constraint,
};

static struct intel_uncore_type hswep_uncore_pcu = {
	.name = "pcu",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &hswep_uncore_pcu_ops,
	.format_group = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};

#define HSWEP_PCU_DID			0x2fc0
#define HSWEP_PCU_CAPID4_OFFSET		0x94
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)

static bool hswep_has_limit_sbox(unsigned int device)
{
	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	u32 capid4;

	if (!dev)
		return false;

	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
	pci_dev_put(dev);
	if (!hswep_get_chop(capid4))
		return true;

	return false;
}

void hswep_uncore_cpu_init(void)
{
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}

static struct intel_uncore_type hswep_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_type hswep_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}

static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box = snbep_uncore_pci_init_box,
	.disable_box = snbep_uncore_pci_disable_box,
	.enable_box = snbep_uncore_pci_enable_box,
	.disable_event = ivbep_uncore_irp_disable_event,
	.enable_event = ivbep_uncore_irp_enable_event,
	.read_counter = hswep_uncore_irp_read_counter,
};

static struct intel_uncore_type hswep_uncore_irp = {
	.name = "irp",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &hswep_uncore_irp_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct intel_uncore_type hswep_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_qpi_ops,
	.format_group = &snbep_uncore_qpi_format_group,
};

static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.constraints = hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 3,
	.perf_ctr_bits = 44,
	.constraints = hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name = "hswep_uncore",
	.id_table = hswep_uncore_pci_ids,
};

int hswep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = hswep_pci_uncores;
	uncore_pci_driver = &hswep_uncore_pci_driver;
	return 0;
}
/* end of Haswell-EP uncore support */

/* BDX uncore support */

static struct intel_uncore_type bdx_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs = 1,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_ubox_format_group,
};

static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 24,
	.perf_ctr_bits = 48,
	.event_ctl = HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
	.event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset = HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs = 1,
	.constraints = bdx_uncore_cbox_constraints,
	.ops = &hswep_uncore_cbox_ops,
	.format_group = &hswep_uncore_cbox_format_group,
};

static struct intel_uncore_type bdx_uncore_sbox = {
	.name = "sbox",
	.num_counters = 4,
	.num_boxes = 4,
	.perf_ctr_bits = 48,
	.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
	.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset = HSWEP_SBOX_MSR_OFFSET,
	.ops = &hswep_uncore_sbox_msr_ops,
	.format_group = &hswep_uncore_sbox_format_group,
};

#define BDX_MSR_UNCORE_SBOX	3

static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};

/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
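/*
 * In EVENT_CONSTRAINT(0x80, 0xe, 0x80) above, the third argument masks the
 * event code down to the 'Use Occupancy' bit (0x80), and the counter mask
 * 0xe restricts any event with that bit set to counters 1-3, keeping it off
 * counter 0 as the comment describes.
 */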
#define BDX_PCU_DID			0x6fc0

void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}

static struct intel_uncore_type bdx_uncore_ha = {
	.name = "ha",
	.num_counters = 4,
	.num_boxes = 2,
	.perf_ctr_bits = 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_imc = {
	.name = "imc",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs = hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type bdx_uncore_irp = {
	.name = "irp",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.ops = &hswep_uncore_irp_ops,
	.format_group = &snbep_uncore_format_group,
};

static struct intel_uncore_type bdx_uncore_qpi = {
	.name = "qpi",
	.num_counters = 4,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.perf_ctr = SNBEP_PCI_PMON_CTR0,
	.event_ctl = SNBEP_PCI_PMON_CTL0,
	.event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl = SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs = 1,
	.ops = &snbep_uncore_qpi_ops,
	.format_group = &snbep_uncore_qpi_format_group,
};

static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name = "r2pcie",
	.num_counters = 4,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.constraints = bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name = "r3qpi",
	.num_counters = 3,
	.num_boxes = 3,
	.perf_ctr_bits = 48,
	.constraints = bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name = "bdx_uncore",
	.id_table = bdx_uncore_pci_ids,
};

int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}

/* end of BDX uncore support */

/* SKX uncore support */

static struct intel_uncore_type skx_uncore_ubox = {
	.name = "ubox",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.perf_ctr = HSWEP_U_MSR_PMON_CTR0,
	.event_ctl = HSWEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &ivbep_uncore_msr_ops,
	.format_group = &ivbep_uncore_ubox_format_group,
};

static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

static struct extra_reg
skx_uncore_cha_extra_regs[] = { 3564 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 3565 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 3566 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 3567 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 3568 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4), 3569 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), 3570 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), 3571 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), 3572 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3), 3573 EVENT_EXTRA_END 3574 }; 3575 3576 static u64 skx_cha_filter_mask(int fields) 3577 { 3578 u64 mask = 0; 3579 3580 if (fields & 0x1) 3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID; 3582 if (fields & 0x2) 3583 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; 3584 if (fields & 0x4) 3585 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; 3586 if (fields & 0x8) { 3587 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM; 3588 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC; 3589 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC; 3590 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM; 3591 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM; 3592 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0; 3593 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1; 3594 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC; 3595 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC; 3596 } 3597 return mask; 3598 } 3599 3600 static struct event_constraint * 3601 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 3602 { 3603 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask); 3604 } 3605 3606 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 3607 { 3608 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 3609 struct extra_reg *er; 3610 int idx = 0; 3611 3612 for (er = skx_uncore_cha_extra_regs; er->msr; er++) { 3613 if (er->event != (event->hw.config & er->config_mask)) 3614 continue; 3615 idx |= er->idx; 3616 } 3617 3618 if (idx) { 3619 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + 3620 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 3621 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx); 3622 reg1->idx = idx; 3623 } 3624 return 0; 3625 } 3626 3627 static struct intel_uncore_ops skx_uncore_chabox_ops = { 3628 /* There is no frz_en for chabox ctl */ 3629 .init_box = ivbep_uncore_msr_init_box, 3630 .disable_box = snbep_uncore_msr_disable_box, 3631 .enable_box = snbep_uncore_msr_enable_box, 3632 .disable_event = snbep_uncore_msr_disable_event, 3633 .enable_event = hswep_cbox_enable_event, 3634 .read_counter = uncore_msr_read_counter, 3635 .hw_config = skx_cha_hw_config, 3636 .get_constraint = skx_cha_get_constraint, 3637 .put_constraint = snbep_cbox_put_constraint, 3638 }; 3639 3640 static struct intel_uncore_type skx_uncore_chabox = { 3641 .name = "cha", 3642 .num_counters = 4, 3643 .perf_ctr_bits = 48, 3644 .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 3645 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 3646 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 3647 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 3648 .msr_offset = HSWEP_CBO_MSR_OFFSET, 3649 .num_shared_regs = 1, 3650 .constraints = skx_uncore_chabox_constraints, 3651 .ops = &skx_uncore_chabox_ops, 3652 .format_group = &skx_uncore_chabox_format_group, 3653 }; 3654 3655 static struct attribute *skx_uncore_iio_formats_attr[] = { 3656 &format_attr_event.attr, 3657 &format_attr_umask.attr, 3658 &format_attr_edge.attr, 3659 &format_attr_inv.attr, 3660 &format_attr_thresh9.attr, 3661 &format_attr_ch_mask.attr, 3662 &format_attr_fc_mask.attr, 3663 NULL, 3664 }; 3665 3666 static const struct attribute_group 
skx_uncore_iio_format_group = { 3667 .name = "format", 3668 .attrs = skx_uncore_iio_formats_attr, 3669 }; 3670 3671 static struct event_constraint skx_uncore_iio_constraints[] = { 3672 UNCORE_EVENT_CONSTRAINT(0x83, 0x3), 3673 UNCORE_EVENT_CONSTRAINT(0x88, 0xc), 3674 UNCORE_EVENT_CONSTRAINT(0x95, 0xc), 3675 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), 3676 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), 3677 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), 3678 EVENT_CONSTRAINT_END 3679 }; 3680 3681 static void skx_iio_enable_event(struct intel_uncore_box *box, 3682 struct perf_event *event) 3683 { 3684 struct hw_perf_event *hwc = &event->hw; 3685 3686 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 3687 } 3688 3689 static struct intel_uncore_ops skx_uncore_iio_ops = { 3690 .init_box = ivbep_uncore_msr_init_box, 3691 .disable_box = snbep_uncore_msr_disable_box, 3692 .enable_box = snbep_uncore_msr_enable_box, 3693 .disable_event = snbep_uncore_msr_disable_event, 3694 .enable_event = skx_iio_enable_event, 3695 .read_counter = uncore_msr_read_counter, 3696 }; 3697 3698 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die) 3699 { 3700 return pmu->type->topology[die].configuration >> 3701 (pmu->pmu_idx * BUS_NUM_STRIDE); 3702 } 3703 3704 static umode_t 3705 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, 3706 int die, int zero_bus_pmu) 3707 { 3708 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); 3709 3710 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode; 3711 } 3712 3713 static umode_t 3714 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) 3715 { 3716 /* Root bus 0x00 is valid only for pmu_idx = 0. */ 3717 return pmu_iio_mapping_visible(kobj, attr, die, 0); 3718 } 3719 3720 static ssize_t skx_iio_mapping_show(struct device *dev, 3721 struct device_attribute *attr, char *buf) 3722 { 3723 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); 3724 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); 3725 long die = (long)ea->var; 3726 3727 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment, 3728 skx_iio_stack(pmu, die)); 3729 } 3730 3731 static int skx_msr_cpu_bus_read(int cpu, u64 *topology) 3732 { 3733 u64 msr_value; 3734 3735 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) || 3736 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT)) 3737 return -ENXIO; 3738 3739 *topology = msr_value; 3740 3741 return 0; 3742 } 3743 3744 static int die_to_cpu(int die) 3745 { 3746 int res = 0, cpu, current_die; 3747 /* 3748 * Using cpus_read_lock() to ensure cpu is not going down between 3749 * looking at cpu_online_mask. 
3750 */ 3751 cpus_read_lock(); 3752 for_each_online_cpu(cpu) { 3753 current_die = topology_logical_die_id(cpu); 3754 if (current_die == die) { 3755 res = cpu; 3756 break; 3757 } 3758 } 3759 cpus_read_unlock(); 3760 return res; 3761 } 3762 3763 static int skx_iio_get_topology(struct intel_uncore_type *type) 3764 { 3765 int die, ret = -EPERM; 3766 3767 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), 3768 GFP_KERNEL); 3769 if (!type->topology) 3770 return -ENOMEM; 3771 3772 for (die = 0; die < uncore_max_dies(); die++) { 3773 ret = skx_msr_cpu_bus_read(die_to_cpu(die), 3774 &type->topology[die].configuration); 3775 if (ret) 3776 break; 3777 3778 ret = uncore_die_to_segment(die); 3779 if (ret < 0) 3780 break; 3781 3782 type->topology[die].segment = ret; 3783 } 3784 3785 if (ret < 0) { 3786 kfree(type->topology); 3787 type->topology = NULL; 3788 } 3789 3790 return ret; 3791 } 3792 3793 static struct attribute_group skx_iio_mapping_group = { 3794 .is_visible = skx_iio_mapping_visible, 3795 }; 3796 3797 static const struct attribute_group *skx_iio_attr_update[] = { 3798 &skx_iio_mapping_group, 3799 NULL, 3800 }; 3801 3802 static int 3803 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) 3804 { 3805 char buf[64]; 3806 int ret; 3807 long die = -1; 3808 struct attribute **attrs = NULL; 3809 struct dev_ext_attribute *eas = NULL; 3810 3811 ret = type->get_topology(type); 3812 if (ret < 0) 3813 goto clear_attr_update; 3814 3815 ret = -ENOMEM; 3816 3817 /* One more for NULL. */ 3818 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL); 3819 if (!attrs) 3820 goto clear_topology; 3821 3822 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL); 3823 if (!eas) 3824 goto clear_attrs; 3825 3826 for (die = 0; die < uncore_max_dies(); die++) { 3827 sprintf(buf, "die%ld", die); 3828 sysfs_attr_init(&eas[die].attr.attr); 3829 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL); 3830 if (!eas[die].attr.attr.name) 3831 goto err; 3832 eas[die].attr.attr.mode = 0444; 3833 eas[die].attr.show = skx_iio_mapping_show; 3834 eas[die].attr.store = NULL; 3835 eas[die].var = (void *)die; 3836 attrs[die] = &eas[die].attr.attr; 3837 } 3838 ag->attrs = attrs; 3839 3840 return 0; 3841 err: 3842 for (; die >= 0; die--) 3843 kfree(eas[die].attr.attr.name); 3844 kfree(eas); 3845 clear_attrs: 3846 kfree(attrs); 3847 clear_topology: 3848 kfree(type->topology); 3849 clear_attr_update: 3850 type->attr_update = NULL; 3851 return ret; 3852 } 3853 3854 static void 3855 pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag) 3856 { 3857 struct attribute **attr = ag->attrs; 3858 3859 if (!attr) 3860 return; 3861 3862 for (; *attr; attr++) 3863 kfree((*attr)->name); 3864 kfree(attr_to_ext_attr(*ag->attrs)); 3865 kfree(ag->attrs); 3866 ag->attrs = NULL; 3867 kfree(type->topology); 3868 } 3869 3870 static int skx_iio_set_mapping(struct intel_uncore_type *type) 3871 { 3872 return pmu_iio_set_mapping(type, &skx_iio_mapping_group); 3873 } 3874 3875 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) 3876 { 3877 pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group); 3878 } 3879 3880 static struct intel_uncore_type skx_uncore_iio = { 3881 .name = "iio", 3882 .num_counters = 4, 3883 .num_boxes = 6, 3884 .perf_ctr_bits = 48, 3885 .event_ctl = SKX_IIO0_MSR_PMON_CTL0, 3886 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, 3887 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, 3888 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, 3889 .box_ctl = 
SKX_IIO0_MSR_PMON_BOX_CTL, 3890 .msr_offset = SKX_IIO_MSR_OFFSET, 3891 .constraints = skx_uncore_iio_constraints, 3892 .ops = &skx_uncore_iio_ops, 3893 .format_group = &skx_uncore_iio_format_group, 3894 .attr_update = skx_iio_attr_update, 3895 .get_topology = skx_iio_get_topology, 3896 .set_mapping = skx_iio_set_mapping, 3897 .cleanup_mapping = skx_iio_cleanup_mapping, 3898 }; 3899 3900 enum perf_uncore_iio_freerunning_type_id { 3901 SKX_IIO_MSR_IOCLK = 0, 3902 SKX_IIO_MSR_BW = 1, 3903 SKX_IIO_MSR_UTIL = 2, 3904 3905 SKX_IIO_FREERUNNING_TYPE_MAX, 3906 }; 3907 3908 3909 static struct freerunning_counters skx_iio_freerunning[] = { 3910 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 }, 3911 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 }, 3912 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 }, 3913 }; 3914 3915 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { 3916 /* Free-Running IO CLOCKS Counter */ 3917 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), 3918 /* Free-Running IIO BANDWIDTH Counters */ 3919 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), 3920 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), 3921 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), 3922 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), 3923 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), 3924 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), 3925 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), 3926 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), 3927 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), 3928 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), 3929 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), 3930 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), 3931 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"), 3932 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), 3933 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), 3934 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"), 3935 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), 3936 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), 3937 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"), 3938 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), 3939 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), 3940 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"), 3941 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), 3942 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), 3943 /* Free-running IIO UTILIZATION Counters */ 3944 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"), 3945 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"), 3946 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"), 3947 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"), 3948 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"), 3949 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"), 3950 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"), 3951 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"), 3952 { /* end: all zeroes */ }, 3953 }; 3954 3955 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { 3956 .read_counter = uncore_msr_read_counter, 3957 .hw_config = uncore_freerunning_hw_config, 3958 }; 3959 3960 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { 3961 &format_attr_event.attr, 3962 &format_attr_umask.attr, 3963 
NULL, 3964 }; 3965 3966 static const struct attribute_group skx_uncore_iio_freerunning_format_group = { 3967 .name = "format", 3968 .attrs = skx_uncore_iio_freerunning_formats_attr, 3969 }; 3970 3971 static struct intel_uncore_type skx_uncore_iio_free_running = { 3972 .name = "iio_free_running", 3973 .num_counters = 17, 3974 .num_boxes = 6, 3975 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX, 3976 .freerunning = skx_iio_freerunning, 3977 .ops = &skx_uncore_iio_freerunning_ops, 3978 .event_descs = skx_uncore_iio_freerunning_events, 3979 .format_group = &skx_uncore_iio_freerunning_format_group, 3980 }; 3981 3982 static struct attribute *skx_uncore_formats_attr[] = { 3983 &format_attr_event.attr, 3984 &format_attr_umask.attr, 3985 &format_attr_edge.attr, 3986 &format_attr_inv.attr, 3987 &format_attr_thresh8.attr, 3988 NULL, 3989 }; 3990 3991 static const struct attribute_group skx_uncore_format_group = { 3992 .name = "format", 3993 .attrs = skx_uncore_formats_attr, 3994 }; 3995 3996 static struct intel_uncore_type skx_uncore_irp = { 3997 .name = "irp", 3998 .num_counters = 2, 3999 .num_boxes = 6, 4000 .perf_ctr_bits = 48, 4001 .event_ctl = SKX_IRP0_MSR_PMON_CTL0, 4002 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, 4003 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4004 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL, 4005 .msr_offset = SKX_IRP_MSR_OFFSET, 4006 .ops = &skx_uncore_iio_ops, 4007 .format_group = &skx_uncore_format_group, 4008 }; 4009 4010 static struct attribute *skx_uncore_pcu_formats_attr[] = { 4011 &format_attr_event.attr, 4012 &format_attr_umask.attr, 4013 &format_attr_edge.attr, 4014 &format_attr_inv.attr, 4015 &format_attr_thresh8.attr, 4016 &format_attr_occ_invert.attr, 4017 &format_attr_occ_edge_det.attr, 4018 &format_attr_filter_band0.attr, 4019 &format_attr_filter_band1.attr, 4020 &format_attr_filter_band2.attr, 4021 &format_attr_filter_band3.attr, 4022 NULL, 4023 }; 4024 4025 static struct attribute_group skx_uncore_pcu_format_group = { 4026 .name = "format", 4027 .attrs = skx_uncore_pcu_formats_attr, 4028 }; 4029 4030 static struct intel_uncore_ops skx_uncore_pcu_ops = { 4031 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 4032 .hw_config = hswep_pcu_hw_config, 4033 .get_constraint = snbep_pcu_get_constraint, 4034 .put_constraint = snbep_pcu_put_constraint, 4035 }; 4036 4037 static struct intel_uncore_type skx_uncore_pcu = { 4038 .name = "pcu", 4039 .num_counters = 4, 4040 .num_boxes = 1, 4041 .perf_ctr_bits = 48, 4042 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, 4043 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, 4044 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 4045 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 4046 .num_shared_regs = 1, 4047 .ops = &skx_uncore_pcu_ops, 4048 .format_group = &skx_uncore_pcu_format_group, 4049 }; 4050 4051 static struct intel_uncore_type *skx_msr_uncores[] = { 4052 &skx_uncore_ubox, 4053 &skx_uncore_chabox, 4054 &skx_uncore_iio, 4055 &skx_uncore_iio_free_running, 4056 &skx_uncore_irp, 4057 &skx_uncore_pcu, 4058 NULL, 4059 }; 4060 4061 /* 4062 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6 4063 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083. 
4064 */ 4065 #define SKX_CAPID6 0x9c 4066 #define SKX_CHA_BIT_MASK GENMASK(27, 0) 4067 4068 static int skx_count_chabox(void) 4069 { 4070 struct pci_dev *dev = NULL; 4071 u32 val = 0; 4072 4073 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); 4074 if (!dev) 4075 goto out; 4076 4077 pci_read_config_dword(dev, SKX_CAPID6, &val); 4078 val &= SKX_CHA_BIT_MASK; 4079 out: 4080 pci_dev_put(dev); 4081 return hweight32(val); 4082 } 4083 4084 void skx_uncore_cpu_init(void) 4085 { 4086 skx_uncore_chabox.num_boxes = skx_count_chabox(); 4087 uncore_msr_uncores = skx_msr_uncores; 4088 } 4089 4090 static struct intel_uncore_type skx_uncore_imc = { 4091 .name = "imc", 4092 .num_counters = 4, 4093 .num_boxes = 6, 4094 .perf_ctr_bits = 48, 4095 .fixed_ctr_bits = 48, 4096 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 4097 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 4098 .event_descs = hswep_uncore_imc_events, 4099 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4100 .event_ctl = SNBEP_PCI_PMON_CTL0, 4101 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4102 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4103 .ops = &ivbep_uncore_pci_ops, 4104 .format_group = &skx_uncore_format_group, 4105 }; 4106 4107 static struct attribute *skx_upi_uncore_formats_attr[] = { 4108 &format_attr_event.attr, 4109 &format_attr_umask_ext.attr, 4110 &format_attr_edge.attr, 4111 &format_attr_inv.attr, 4112 &format_attr_thresh8.attr, 4113 NULL, 4114 }; 4115 4116 static const struct attribute_group skx_upi_uncore_format_group = { 4117 .name = "format", 4118 .attrs = skx_upi_uncore_formats_attr, 4119 }; 4120 4121 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box) 4122 { 4123 struct pci_dev *pdev = box->pci_dev; 4124 4125 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4126 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 4127 } 4128 4129 static struct intel_uncore_ops skx_upi_uncore_pci_ops = { 4130 .init_box = skx_upi_uncore_pci_init_box, 4131 .disable_box = snbep_uncore_pci_disable_box, 4132 .enable_box = snbep_uncore_pci_enable_box, 4133 .disable_event = snbep_uncore_pci_disable_event, 4134 .enable_event = snbep_uncore_pci_enable_event, 4135 .read_counter = snbep_uncore_pci_read_counter, 4136 }; 4137 4138 static struct intel_uncore_type skx_uncore_upi = { 4139 .name = "upi", 4140 .num_counters = 4, 4141 .num_boxes = 3, 4142 .perf_ctr_bits = 48, 4143 .perf_ctr = SKX_UPI_PCI_PMON_CTR0, 4144 .event_ctl = SKX_UPI_PCI_PMON_CTL0, 4145 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4146 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT, 4147 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, 4148 .ops = &skx_upi_uncore_pci_ops, 4149 .format_group = &skx_upi_uncore_format_group, 4150 }; 4151 4152 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box) 4153 { 4154 struct pci_dev *pdev = box->pci_dev; 4155 4156 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4157 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 4158 } 4159 4160 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = { 4161 .init_box = skx_m2m_uncore_pci_init_box, 4162 .disable_box = snbep_uncore_pci_disable_box, 4163 .enable_box = snbep_uncore_pci_enable_box, 4164 .disable_event = snbep_uncore_pci_disable_event, 4165 .enable_event = snbep_uncore_pci_enable_event, 4166 .read_counter = snbep_uncore_pci_read_counter, 4167 }; 4168 4169 static struct intel_uncore_type skx_uncore_m2m = { 4170 .name = "m2m", 4171 .num_counters = 4, 4172 .num_boxes = 2, 4173 .perf_ctr_bits = 48, 4174 .perf_ctr = SKX_M2M_PCI_PMON_CTR0, 4175 .event_ctl = 
SKX_M2M_PCI_PMON_CTL0, 4176 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4177 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL, 4178 .ops = &skx_m2m_uncore_pci_ops, 4179 .format_group = &skx_uncore_format_group, 4180 }; 4181 4182 static struct event_constraint skx_uncore_m2pcie_constraints[] = { 4183 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 4184 EVENT_CONSTRAINT_END 4185 }; 4186 4187 static struct intel_uncore_type skx_uncore_m2pcie = { 4188 .name = "m2pcie", 4189 .num_counters = 4, 4190 .num_boxes = 4, 4191 .perf_ctr_bits = 48, 4192 .constraints = skx_uncore_m2pcie_constraints, 4193 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4194 .event_ctl = SNBEP_PCI_PMON_CTL0, 4195 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4196 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4197 .ops = &ivbep_uncore_pci_ops, 4198 .format_group = &skx_uncore_format_group, 4199 }; 4200 4201 static struct event_constraint skx_uncore_m3upi_constraints[] = { 4202 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), 4203 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), 4204 UNCORE_EVENT_CONSTRAINT(0x40, 0x7), 4205 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), 4206 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), 4207 UNCORE_EVENT_CONSTRAINT(0x50, 0x7), 4208 UNCORE_EVENT_CONSTRAINT(0x51, 0x7), 4209 UNCORE_EVENT_CONSTRAINT(0x52, 0x7), 4210 EVENT_CONSTRAINT_END 4211 }; 4212 4213 static struct intel_uncore_type skx_uncore_m3upi = { 4214 .name = "m3upi", 4215 .num_counters = 3, 4216 .num_boxes = 3, 4217 .perf_ctr_bits = 48, 4218 .constraints = skx_uncore_m3upi_constraints, 4219 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4220 .event_ctl = SNBEP_PCI_PMON_CTL0, 4221 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4222 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4223 .ops = &ivbep_uncore_pci_ops, 4224 .format_group = &skx_uncore_format_group, 4225 }; 4226 4227 enum { 4228 SKX_PCI_UNCORE_IMC, 4229 SKX_PCI_UNCORE_M2M, 4230 SKX_PCI_UNCORE_UPI, 4231 SKX_PCI_UNCORE_M2PCIE, 4232 SKX_PCI_UNCORE_M3UPI, 4233 }; 4234 4235 static struct intel_uncore_type *skx_pci_uncores[] = { 4236 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc, 4237 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m, 4238 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi, 4239 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie, 4240 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi, 4241 NULL, 4242 }; 4243 4244 static const struct pci_device_id skx_uncore_pci_ids[] = { 4245 { /* MC0 Channel 0 */ 4246 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 4247 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0), 4248 }, 4249 { /* MC0 Channel 1 */ 4250 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 4251 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1), 4252 }, 4253 { /* MC0 Channel 2 */ 4254 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 4255 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2), 4256 }, 4257 { /* MC1 Channel 0 */ 4258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 4259 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3), 4260 }, 4261 { /* MC1 Channel 1 */ 4262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 4263 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4), 4264 }, 4265 { /* MC1 Channel 2 */ 4266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 4267 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5), 4268 }, 4269 { /* M2M0 */ 4270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 4271 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0), 4272 }, 4273 { /* M2M1 */ 4274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 4275 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1), 4276 }, 4277 { /* UPI0 Link 0 */ 4278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
0x2058), 4279 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0), 4280 }, 4281 { /* UPI0 Link 1 */ 4282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 4283 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1), 4284 }, 4285 { /* UPI1 Link 2 */ 4286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 4287 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2), 4288 }, 4289 { /* M2PCIe 0 */ 4290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4291 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0), 4292 }, 4293 { /* M2PCIe 1 */ 4294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4295 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1), 4296 }, 4297 { /* M2PCIe 2 */ 4298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4299 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2), 4300 }, 4301 { /* M2PCIe 3 */ 4302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4303 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3), 4304 }, 4305 { /* M3UPI0 Link 0 */ 4306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), 4307 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0), 4308 }, 4309 { /* M3UPI0 Link 1 */ 4310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E), 4311 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1), 4312 }, 4313 { /* M3UPI1 Link 2 */ 4314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), 4315 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2), 4316 }, 4317 { /* end: all zeroes */ } 4318 }; 4319 4320 4321 static struct pci_driver skx_uncore_pci_driver = { 4322 .name = "skx_uncore", 4323 .id_table = skx_uncore_pci_ids, 4324 }; 4325 4326 int skx_uncore_pci_init(void) 4327 { 4328 /* need to double check pci address */ 4329 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false); 4330 4331 if (ret) 4332 return ret; 4333 4334 uncore_pci_uncores = skx_pci_uncores; 4335 uncore_pci_driver = &skx_uncore_pci_driver; 4336 return 0; 4337 } 4338 4339 /* end of SKX uncore support */ 4340 4341 /* SNR uncore support */ 4342 4343 static struct intel_uncore_type snr_uncore_ubox = { 4344 .name = "ubox", 4345 .num_counters = 2, 4346 .num_boxes = 1, 4347 .perf_ctr_bits = 48, 4348 .fixed_ctr_bits = 48, 4349 .perf_ctr = SNR_U_MSR_PMON_CTR0, 4350 .event_ctl = SNR_U_MSR_PMON_CTL0, 4351 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4352 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR, 4353 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL, 4354 .ops = &ivbep_uncore_msr_ops, 4355 .format_group = &ivbep_uncore_format_group, 4356 }; 4357 4358 static struct attribute *snr_uncore_cha_formats_attr[] = { 4359 &format_attr_event.attr, 4360 &format_attr_umask_ext2.attr, 4361 &format_attr_edge.attr, 4362 &format_attr_tid_en.attr, 4363 &format_attr_inv.attr, 4364 &format_attr_thresh8.attr, 4365 &format_attr_filter_tid5.attr, 4366 NULL, 4367 }; 4368 static const struct attribute_group snr_uncore_chabox_format_group = { 4369 .name = "format", 4370 .attrs = snr_uncore_cha_formats_attr, 4371 }; 4372 4373 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 4374 { 4375 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 4376 4377 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 + 4378 box->pmu->type->msr_offset * box->pmu->pmu_idx; 4379 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; 4380 reg1->idx = 0; 4381 4382 return 0; 4383 } 4384 4385 static void snr_cha_enable_event(struct intel_uncore_box *box, 4386 struct perf_event *event) 4387 { 4388 struct 
hw_perf_event *hwc = &event->hw; 4389 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 4390 4391 if (reg1->idx != EXTRA_REG_NONE) 4392 wrmsrl(reg1->reg, reg1->config); 4393 4394 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 4395 } 4396 4397 static struct intel_uncore_ops snr_uncore_chabox_ops = { 4398 .init_box = ivbep_uncore_msr_init_box, 4399 .disable_box = snbep_uncore_msr_disable_box, 4400 .enable_box = snbep_uncore_msr_enable_box, 4401 .disable_event = snbep_uncore_msr_disable_event, 4402 .enable_event = snr_cha_enable_event, 4403 .read_counter = uncore_msr_read_counter, 4404 .hw_config = snr_cha_hw_config, 4405 }; 4406 4407 static struct intel_uncore_type snr_uncore_chabox = { 4408 .name = "cha", 4409 .num_counters = 4, 4410 .num_boxes = 6, 4411 .perf_ctr_bits = 48, 4412 .event_ctl = SNR_CHA_MSR_PMON_CTL0, 4413 .perf_ctr = SNR_CHA_MSR_PMON_CTR0, 4414 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL, 4415 .msr_offset = HSWEP_CBO_MSR_OFFSET, 4416 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 4417 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, 4418 .ops = &snr_uncore_chabox_ops, 4419 .format_group = &snr_uncore_chabox_format_group, 4420 }; 4421 4422 static struct attribute *snr_uncore_iio_formats_attr[] = { 4423 &format_attr_event.attr, 4424 &format_attr_umask.attr, 4425 &format_attr_edge.attr, 4426 &format_attr_inv.attr, 4427 &format_attr_thresh9.attr, 4428 &format_attr_ch_mask2.attr, 4429 &format_attr_fc_mask2.attr, 4430 NULL, 4431 }; 4432 4433 static const struct attribute_group snr_uncore_iio_format_group = { 4434 .name = "format", 4435 .attrs = snr_uncore_iio_formats_attr, 4436 }; 4437 4438 static umode_t 4439 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) 4440 { 4441 /* Root bus 0x00 is valid only for pmu_idx = 1. */ 4442 return pmu_iio_mapping_visible(kobj, attr, die, 1); 4443 } 4444 4445 static struct attribute_group snr_iio_mapping_group = { 4446 .is_visible = snr_iio_mapping_visible, 4447 }; 4448 4449 static const struct attribute_group *snr_iio_attr_update[] = { 4450 &snr_iio_mapping_group, 4451 NULL, 4452 }; 4453 4454 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping) 4455 { 4456 u32 sad_cfg; 4457 int die, stack_id, ret = -EPERM; 4458 struct pci_dev *dev = NULL; 4459 4460 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), 4461 GFP_KERNEL); 4462 if (!type->topology) 4463 return -ENOMEM; 4464 4465 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) { 4466 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg); 4467 if (ret) { 4468 ret = pcibios_err_to_errno(ret); 4469 break; 4470 } 4471 4472 die = uncore_pcibus_to_dieid(dev->bus); 4473 stack_id = SAD_CONTROL_STACK_ID(sad_cfg); 4474 if (die < 0 || stack_id >= type->num_boxes) { 4475 ret = -EPERM; 4476 break; 4477 } 4478 4479 /* Convert stack id from SAD_CONTROL to PMON notation. 
*/ 4480 stack_id = sad_pmon_mapping[stack_id]; 4481 4482 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number; 4483 type->topology[die].segment = pci_domain_nr(dev->bus); 4484 } 4485 4486 if (ret) { 4487 kfree(type->topology); 4488 type->topology = NULL; 4489 } 4490 4491 return ret; 4492 } 4493 4494 /* 4495 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON 4496 */ 4497 enum { 4498 SNR_QAT_PMON_ID, 4499 SNR_CBDMA_DMI_PMON_ID, 4500 SNR_NIS_PMON_ID, 4501 SNR_DLB_PMON_ID, 4502 SNR_PCIE_GEN3_PMON_ID 4503 }; 4504 4505 static u8 snr_sad_pmon_mapping[] = { 4506 SNR_CBDMA_DMI_PMON_ID, 4507 SNR_PCIE_GEN3_PMON_ID, 4508 SNR_DLB_PMON_ID, 4509 SNR_NIS_PMON_ID, 4510 SNR_QAT_PMON_ID 4511 }; 4512 4513 static int snr_iio_get_topology(struct intel_uncore_type *type) 4514 { 4515 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping); 4516 } 4517 4518 static int snr_iio_set_mapping(struct intel_uncore_type *type) 4519 { 4520 return pmu_iio_set_mapping(type, &snr_iio_mapping_group); 4521 } 4522 4523 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type) 4524 { 4525 pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group); 4526 } 4527 4528 static struct intel_uncore_type snr_uncore_iio = { 4529 .name = "iio", 4530 .num_counters = 4, 4531 .num_boxes = 5, 4532 .perf_ctr_bits = 48, 4533 .event_ctl = SNR_IIO_MSR_PMON_CTL0, 4534 .perf_ctr = SNR_IIO_MSR_PMON_CTR0, 4535 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4536 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, 4537 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, 4538 .msr_offset = SNR_IIO_MSR_OFFSET, 4539 .ops = &ivbep_uncore_msr_ops, 4540 .format_group = &snr_uncore_iio_format_group, 4541 .attr_update = snr_iio_attr_update, 4542 .get_topology = snr_iio_get_topology, 4543 .set_mapping = snr_iio_set_mapping, 4544 .cleanup_mapping = snr_iio_cleanup_mapping, 4545 }; 4546 4547 static struct intel_uncore_type snr_uncore_irp = { 4548 .name = "irp", 4549 .num_counters = 2, 4550 .num_boxes = 5, 4551 .perf_ctr_bits = 48, 4552 .event_ctl = SNR_IRP0_MSR_PMON_CTL0, 4553 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0, 4554 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4555 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL, 4556 .msr_offset = SNR_IRP_MSR_OFFSET, 4557 .ops = &ivbep_uncore_msr_ops, 4558 .format_group = &ivbep_uncore_format_group, 4559 }; 4560 4561 static struct intel_uncore_type snr_uncore_m2pcie = { 4562 .name = "m2pcie", 4563 .num_counters = 4, 4564 .num_boxes = 5, 4565 .perf_ctr_bits = 48, 4566 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0, 4567 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0, 4568 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL, 4569 .msr_offset = SNR_M2PCIE_MSR_OFFSET, 4570 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4571 .ops = &ivbep_uncore_msr_ops, 4572 .format_group = &ivbep_uncore_format_group, 4573 }; 4574 4575 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) 4576 { 4577 struct hw_perf_event *hwc = &event->hw; 4578 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 4579 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; 4580 4581 if (ev_sel >= 0xb && ev_sel <= 0xe) { 4582 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER; 4583 reg1->idx = ev_sel - 0xb; 4584 reg1->config = event->attr.config1 & (0xff << reg1->idx); 4585 } 4586 return 0; 4587 } 4588 4589 static struct intel_uncore_ops snr_uncore_pcu_ops = { 4590 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 4591 .hw_config = snr_pcu_hw_config, 4592 .get_constraint = snbep_pcu_get_constraint, 4593 .put_constraint = snbep_pcu_put_constraint, 4594 }; 4595 4596 static struct 
intel_uncore_type snr_uncore_pcu = { 4597 .name = "pcu", 4598 .num_counters = 4, 4599 .num_boxes = 1, 4600 .perf_ctr_bits = 48, 4601 .perf_ctr = SNR_PCU_MSR_PMON_CTR0, 4602 .event_ctl = SNR_PCU_MSR_PMON_CTL0, 4603 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4604 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL, 4605 .num_shared_regs = 1, 4606 .ops = &snr_uncore_pcu_ops, 4607 .format_group = &skx_uncore_pcu_format_group, 4608 }; 4609 4610 enum perf_uncore_snr_iio_freerunning_type_id { 4611 SNR_IIO_MSR_IOCLK, 4612 SNR_IIO_MSR_BW_IN, 4613 4614 SNR_IIO_FREERUNNING_TYPE_MAX, 4615 }; 4616 4617 static struct freerunning_counters snr_iio_freerunning[] = { 4618 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 }, 4619 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 }, 4620 }; 4621 4622 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { 4623 /* Free-Running IIO CLOCKS Counter */ 4624 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), 4625 /* Free-Running IIO BANDWIDTH IN Counters */ 4626 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), 4627 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), 4628 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), 4629 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), 4630 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), 4631 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), 4632 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), 4633 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), 4634 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), 4635 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), 4636 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), 4637 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), 4638 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), 4639 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), 4640 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), 4641 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), 4642 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), 4643 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), 4644 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), 4645 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), 4646 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), 4647 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), 4648 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), 4649 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), 4650 { /* end: all zeroes */ }, 4651 }; 4652 4653 static struct intel_uncore_type snr_uncore_iio_free_running = { 4654 .name = "iio_free_running", 4655 .num_counters = 9, 4656 .num_boxes = 5, 4657 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX, 4658 .freerunning = snr_iio_freerunning, 4659 .ops = &skx_uncore_iio_freerunning_ops, 4660 .event_descs = snr_uncore_iio_freerunning_events, 4661 .format_group = &skx_uncore_iio_freerunning_format_group, 4662 }; 4663 4664 static struct intel_uncore_type *snr_msr_uncores[] = { 4665 &snr_uncore_ubox, 4666 &snr_uncore_chabox, 4667 &snr_uncore_iio, 4668 &snr_uncore_irp, 4669 &snr_uncore_m2pcie, 4670 &snr_uncore_pcu, 4671 &snr_uncore_iio_free_running, 4672 NULL, 4673 }; 4674 4675 void snr_uncore_cpu_init(void) 4676 { 4677 uncore_msr_uncores = snr_msr_uncores; 4678 } 4679 4680 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box) 4681 { 4682 struct pci_dev *pdev = box->pci_dev; 4683 int box_ctl = uncore_pci_box_ctl(box); 4684 4685 
__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4686 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT); 4687 } 4688 4689 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = { 4690 .init_box = snr_m2m_uncore_pci_init_box, 4691 .disable_box = snbep_uncore_pci_disable_box, 4692 .enable_box = snbep_uncore_pci_enable_box, 4693 .disable_event = snbep_uncore_pci_disable_event, 4694 .enable_event = snbep_uncore_pci_enable_event, 4695 .read_counter = snbep_uncore_pci_read_counter, 4696 }; 4697 4698 static struct attribute *snr_m2m_uncore_formats_attr[] = { 4699 &format_attr_event.attr, 4700 &format_attr_umask_ext3.attr, 4701 &format_attr_edge.attr, 4702 &format_attr_inv.attr, 4703 &format_attr_thresh8.attr, 4704 NULL, 4705 }; 4706 4707 static const struct attribute_group snr_m2m_uncore_format_group = { 4708 .name = "format", 4709 .attrs = snr_m2m_uncore_formats_attr, 4710 }; 4711 4712 static struct intel_uncore_type snr_uncore_m2m = { 4713 .name = "m2m", 4714 .num_counters = 4, 4715 .num_boxes = 1, 4716 .perf_ctr_bits = 48, 4717 .perf_ctr = SNR_M2M_PCI_PMON_CTR0, 4718 .event_ctl = SNR_M2M_PCI_PMON_CTL0, 4719 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4720 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, 4721 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, 4722 .ops = &snr_m2m_uncore_pci_ops, 4723 .format_group = &snr_m2m_uncore_format_group, 4724 }; 4725 4726 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) 4727 { 4728 struct pci_dev *pdev = box->pci_dev; 4729 struct hw_perf_event *hwc = &event->hw; 4730 4731 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN)); 4732 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32)); 4733 } 4734 4735 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = { 4736 .init_box = snr_m2m_uncore_pci_init_box, 4737 .disable_box = snbep_uncore_pci_disable_box, 4738 .enable_box = snbep_uncore_pci_enable_box, 4739 .disable_event = snbep_uncore_pci_disable_event, 4740 .enable_event = snr_uncore_pci_enable_event, 4741 .read_counter = snbep_uncore_pci_read_counter, 4742 }; 4743 4744 static struct intel_uncore_type snr_uncore_pcie3 = { 4745 .name = "pcie3", 4746 .num_counters = 4, 4747 .num_boxes = 1, 4748 .perf_ctr_bits = 48, 4749 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, 4750 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, 4751 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, 4752 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, 4753 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, 4754 .ops = &snr_pcie3_uncore_pci_ops, 4755 .format_group = &skx_uncore_iio_format_group, 4756 }; 4757 4758 enum { 4759 SNR_PCI_UNCORE_M2M, 4760 SNR_PCI_UNCORE_PCIE3, 4761 }; 4762 4763 static struct intel_uncore_type *snr_pci_uncores[] = { 4764 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, 4765 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, 4766 NULL, 4767 }; 4768 4769 static const struct pci_device_id snr_uncore_pci_ids[] = { 4770 { /* M2M */ 4771 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), 4772 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0), 4773 }, 4774 { /* end: all zeroes */ } 4775 }; 4776 4777 static struct pci_driver snr_uncore_pci_driver = { 4778 .name = "snr_uncore", 4779 .id_table = snr_uncore_pci_ids, 4780 }; 4781 4782 static const struct pci_device_id snr_uncore_pci_sub_ids[] = { 4783 { /* PCIe3 RP */ 4784 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a), 4785 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0), 4786 }, 4787 { /* end: all zeroes */ } 4788 }; 4789 4790 static struct 
pci_driver snr_uncore_pci_sub_driver = { 4791 .name = "snr_uncore_sub", 4792 .id_table = snr_uncore_pci_sub_ids, 4793 }; 4794 4795 int snr_uncore_pci_init(void) 4796 { 4797 /* SNR UBOX DID */ 4798 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID, 4799 SKX_GIDNIDMAP, true); 4800 4801 if (ret) 4802 return ret; 4803 4804 uncore_pci_uncores = snr_pci_uncores; 4805 uncore_pci_driver = &snr_uncore_pci_driver; 4806 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver; 4807 return 0; 4808 } 4809 4810 #define SNR_MC_DEVICE_ID 0x3451 4811 4812 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id) 4813 { 4814 struct pci_dev *mc_dev = NULL; 4815 int pkg; 4816 4817 while (1) { 4818 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev); 4819 if (!mc_dev) 4820 break; 4821 pkg = uncore_pcibus_to_dieid(mc_dev->bus); 4822 if (pkg == id) 4823 break; 4824 } 4825 return mc_dev; 4826 } 4827 4828 static int snr_uncore_mmio_map(struct intel_uncore_box *box, 4829 unsigned int box_ctl, int mem_offset, 4830 unsigned int device) 4831 { 4832 struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid); 4833 struct intel_uncore_type *type = box->pmu->type; 4834 resource_size_t addr; 4835 u32 pci_dword; 4836 4837 if (!pdev) 4838 return -ENODEV; 4839 4840 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword); 4841 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23; 4842 4843 pci_read_config_dword(pdev, mem_offset, &pci_dword); 4844 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12; 4845 4846 addr += box_ctl; 4847 4848 box->io_addr = ioremap(addr, type->mmio_map_size); 4849 if (!box->io_addr) { 4850 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); 4851 return -EINVAL; 4852 } 4853 4854 return 0; 4855 } 4856 4857 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box, 4858 unsigned int box_ctl, int mem_offset, 4859 unsigned int device) 4860 { 4861 if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device)) 4862 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr); 4863 } 4864 4865 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) 4866 { 4867 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), 4868 SNR_IMC_MMIO_MEM0_OFFSET, 4869 SNR_MC_DEVICE_ID); 4870 } 4871 4872 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box) 4873 { 4874 u32 config; 4875 4876 if (!box->io_addr) 4877 return; 4878 4879 config = readl(box->io_addr); 4880 config |= SNBEP_PMON_BOX_CTL_FRZ; 4881 writel(config, box->io_addr); 4882 } 4883 4884 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box) 4885 { 4886 u32 config; 4887 4888 if (!box->io_addr) 4889 return; 4890 4891 config = readl(box->io_addr); 4892 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 4893 writel(config, box->io_addr); 4894 } 4895 4896 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box, 4897 struct perf_event *event) 4898 { 4899 struct hw_perf_event *hwc = &event->hw; 4900 4901 if (!box->io_addr) 4902 return; 4903 4904 if (!uncore_mmio_is_valid_offset(box, hwc->config_base)) 4905 return; 4906 4907 writel(hwc->config | SNBEP_PMON_CTL_EN, 4908 box->io_addr + hwc->config_base); 4909 } 4910 4911 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box, 4912 struct perf_event *event) 4913 { 4914 struct hw_perf_event *hwc = &event->hw; 4915 4916 if (!box->io_addr) 4917 return; 4918 4919 if (!uncore_mmio_is_valid_offset(box, hwc->config_base)) 4920 return; 4921 4922 writel(hwc->config, box->io_addr + hwc->config_base); 4923 } 4924 4925 
static struct intel_uncore_ops snr_uncore_mmio_ops = { 4926 .init_box = snr_uncore_mmio_init_box, 4927 .exit_box = uncore_mmio_exit_box, 4928 .disable_box = snr_uncore_mmio_disable_box, 4929 .enable_box = snr_uncore_mmio_enable_box, 4930 .disable_event = snr_uncore_mmio_disable_event, 4931 .enable_event = snr_uncore_mmio_enable_event, 4932 .read_counter = uncore_mmio_read_counter, 4933 }; 4934 4935 static struct uncore_event_desc snr_uncore_imc_events[] = { 4936 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), 4937 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"), 4938 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), 4939 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), 4940 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"), 4941 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), 4942 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), 4943 { /* end: all zeroes */ }, 4944 }; 4945 4946 static struct intel_uncore_type snr_uncore_imc = { 4947 .name = "imc", 4948 .num_counters = 4, 4949 .num_boxes = 2, 4950 .perf_ctr_bits = 48, 4951 .fixed_ctr_bits = 48, 4952 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, 4953 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, 4954 .event_descs = snr_uncore_imc_events, 4955 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, 4956 .event_ctl = SNR_IMC_MMIO_PMON_CTL0, 4957 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4958 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL, 4959 .mmio_offset = SNR_IMC_MMIO_OFFSET, 4960 .mmio_map_size = SNR_IMC_MMIO_SIZE, 4961 .ops = &snr_uncore_mmio_ops, 4962 .format_group = &skx_uncore_format_group, 4963 }; 4964 4965 enum perf_uncore_snr_imc_freerunning_type_id { 4966 SNR_IMC_DCLK, 4967 SNR_IMC_DDR, 4968 4969 SNR_IMC_FREERUNNING_TYPE_MAX, 4970 }; 4971 4972 static struct freerunning_counters snr_imc_freerunning[] = { 4973 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, 4974 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 }, 4975 }; 4976 4977 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = { 4978 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), 4979 4980 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"), 4981 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"), 4982 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"), 4983 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"), 4984 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"), 4985 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"), 4986 { /* end: all zeroes */ }, 4987 }; 4988 4989 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = { 4990 .init_box = snr_uncore_mmio_init_box, 4991 .exit_box = uncore_mmio_exit_box, 4992 .read_counter = uncore_mmio_read_counter, 4993 .hw_config = uncore_freerunning_hw_config, 4994 }; 4995 4996 static struct intel_uncore_type snr_uncore_imc_free_running = { 4997 .name = "imc_free_running", 4998 .num_counters = 3, 4999 .num_boxes = 1, 5000 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX, 5001 .mmio_map_size = SNR_IMC_MMIO_SIZE, 5002 .freerunning = snr_imc_freerunning, 5003 .ops = &snr_uncore_imc_freerunning_ops, 5004 .event_descs = snr_uncore_imc_freerunning_events, 5005 .format_group = &skx_uncore_iio_freerunning_format_group, 5006 }; 5007 5008 static struct intel_uncore_type *snr_mmio_uncores[] = { 5009 &snr_uncore_imc, 5010 &snr_uncore_imc_free_running, 5011 NULL, 5012 }; 5013 5014 void snr_uncore_mmio_init(void) 5015 { 5016 uncore_mmio_uncores = snr_mmio_uncores; 5017 } 5018 5019 /* end of SNR uncore support */ 5020 5021 /* ICX uncore support */ 5022 5023 static unsigned 
icx_cha_msr_offsets[] = { 5024 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, 5025 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, 5026 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, 5027 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe, 5028 0x1c, 0x2a, 0x38, 0x46, 5029 }; 5030 5031 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 5032 { 5033 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 5034 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN); 5035 5036 if (tie_en) { 5037 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 + 5038 icx_cha_msr_offsets[box->pmu->pmu_idx]; 5039 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; 5040 reg1->idx = 0; 5041 } 5042 5043 return 0; 5044 } 5045 5046 static struct intel_uncore_ops icx_uncore_chabox_ops = { 5047 .init_box = ivbep_uncore_msr_init_box, 5048 .disable_box = snbep_uncore_msr_disable_box, 5049 .enable_box = snbep_uncore_msr_enable_box, 5050 .disable_event = snbep_uncore_msr_disable_event, 5051 .enable_event = snr_cha_enable_event, 5052 .read_counter = uncore_msr_read_counter, 5053 .hw_config = icx_cha_hw_config, 5054 }; 5055 5056 static struct intel_uncore_type icx_uncore_chabox = { 5057 .name = "cha", 5058 .num_counters = 4, 5059 .perf_ctr_bits = 48, 5060 .event_ctl = ICX_C34_MSR_PMON_CTL0, 5061 .perf_ctr = ICX_C34_MSR_PMON_CTR0, 5062 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL, 5063 .msr_offsets = icx_cha_msr_offsets, 5064 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 5065 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, 5066 .constraints = skx_uncore_chabox_constraints, 5067 .ops = &icx_uncore_chabox_ops, 5068 .format_group = &snr_uncore_chabox_format_group, 5069 }; 5070 5071 static unsigned icx_msr_offsets[] = { 5072 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, 5073 }; 5074 5075 static struct event_constraint icx_uncore_iio_constraints[] = { 5076 UNCORE_EVENT_CONSTRAINT(0x02, 0x3), 5077 UNCORE_EVENT_CONSTRAINT(0x03, 0x3), 5078 UNCORE_EVENT_CONSTRAINT(0x83, 0x3), 5079 UNCORE_EVENT_CONSTRAINT(0x88, 0xc), 5080 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), 5081 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), 5082 UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), 5083 EVENT_CONSTRAINT_END 5084 }; 5085 5086 static umode_t 5087 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) 5088 { 5089 /* Root bus 0x00 is valid only for pmu_idx = 5. 
*/ 5090 return pmu_iio_mapping_visible(kobj, attr, die, 5); 5091 } 5092 5093 static struct attribute_group icx_iio_mapping_group = { 5094 .is_visible = icx_iio_mapping_visible, 5095 }; 5096 5097 static const struct attribute_group *icx_iio_attr_update[] = { 5098 &icx_iio_mapping_group, 5099 NULL, 5100 }; 5101 5102 /* 5103 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON 5104 */ 5105 enum { 5106 ICX_PCIE1_PMON_ID, 5107 ICX_PCIE2_PMON_ID, 5108 ICX_PCIE3_PMON_ID, 5109 ICX_PCIE4_PMON_ID, 5110 ICX_PCIE5_PMON_ID, 5111 ICX_CBDMA_DMI_PMON_ID 5112 }; 5113 5114 static u8 icx_sad_pmon_mapping[] = { 5115 ICX_CBDMA_DMI_PMON_ID, 5116 ICX_PCIE1_PMON_ID, 5117 ICX_PCIE2_PMON_ID, 5118 ICX_PCIE3_PMON_ID, 5119 ICX_PCIE4_PMON_ID, 5120 ICX_PCIE5_PMON_ID, 5121 }; 5122 5123 static int icx_iio_get_topology(struct intel_uncore_type *type) 5124 { 5125 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping); 5126 } 5127 5128 static int icx_iio_set_mapping(struct intel_uncore_type *type) 5129 { 5130 return pmu_iio_set_mapping(type, &icx_iio_mapping_group); 5131 } 5132 5133 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type) 5134 { 5135 pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group); 5136 } 5137 5138 static struct intel_uncore_type icx_uncore_iio = { 5139 .name = "iio", 5140 .num_counters = 4, 5141 .num_boxes = 6, 5142 .perf_ctr_bits = 48, 5143 .event_ctl = ICX_IIO_MSR_PMON_CTL0, 5144 .perf_ctr = ICX_IIO_MSR_PMON_CTR0, 5145 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 5146 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, 5147 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL, 5148 .msr_offsets = icx_msr_offsets, 5149 .constraints = icx_uncore_iio_constraints, 5150 .ops = &skx_uncore_iio_ops, 5151 .format_group = &snr_uncore_iio_format_group, 5152 .attr_update = icx_iio_attr_update, 5153 .get_topology = icx_iio_get_topology, 5154 .set_mapping = icx_iio_set_mapping, 5155 .cleanup_mapping = icx_iio_cleanup_mapping, 5156 }; 5157 5158 static struct intel_uncore_type icx_uncore_irp = { 5159 .name = "irp", 5160 .num_counters = 2, 5161 .num_boxes = 6, 5162 .perf_ctr_bits = 48, 5163 .event_ctl = ICX_IRP0_MSR_PMON_CTL0, 5164 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0, 5165 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 5166 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL, 5167 .msr_offsets = icx_msr_offsets, 5168 .ops = &ivbep_uncore_msr_ops, 5169 .format_group = &ivbep_uncore_format_group, 5170 }; 5171 5172 static struct event_constraint icx_uncore_m2pcie_constraints[] = { 5173 UNCORE_EVENT_CONSTRAINT(0x14, 0x3), 5174 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 5175 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 5176 EVENT_CONSTRAINT_END 5177 }; 5178 5179 static struct intel_uncore_type icx_uncore_m2pcie = { 5180 .name = "m2pcie", 5181 .num_counters = 4, 5182 .num_boxes = 6, 5183 .perf_ctr_bits = 48, 5184 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0, 5185 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0, 5186 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL, 5187 .msr_offsets = icx_msr_offsets, 5188 .constraints = icx_uncore_m2pcie_constraints, 5189 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 5190 .ops = &ivbep_uncore_msr_ops, 5191 .format_group = &ivbep_uncore_format_group, 5192 }; 5193 5194 enum perf_uncore_icx_iio_freerunning_type_id { 5195 ICX_IIO_MSR_IOCLK, 5196 ICX_IIO_MSR_BW_IN, 5197 5198 ICX_IIO_FREERUNNING_TYPE_MAX, 5199 }; 5200 5201 static unsigned icx_iio_clk_freerunning_box_offsets[] = { 5202 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, 5203 }; 5204 5205 static unsigned icx_iio_bw_freerunning_box_offsets[] = { 5206 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0, 5207 }; 5208 
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
 * registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}
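/*
 * Each set bit in the combined CAPID6 (low dword) / CAPID7 (high dword)
 * value corresponds to one enabled CHA, so hweight64() is the CHA count:
 * e.g. caps == 0x3ff would mean ten CHAs. Filling the u64 through a
 * (u32 *) cast relies on x86 being little-endian.
 */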
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};
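/*
 * UNCORE_PCI_DEV_FULL_DATA() in the table below packs the PCI device
 * number, function, uncore type index and box index into driver_data;
 * e.g. the first entry maps device 12, function 0 to box 0 of the M2M
 * PMON type.
 */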
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
				   SNR_MC_DEVICE_ID);
}
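/*
 * pmu_idx enumerates IMC channels across all memory controllers:
 * pmu_idx / ICX_NUMBER_IMC_CHN selects the controller (and thus the
 * ICX_IMC_MEM_STRIDE multiple), pmu_idx % ICX_NUMBER_IMC_CHN the channel
 * within it. If ICX_NUMBER_IMC_CHN were 2, for instance, pmu_idx 3 would
 * map to controller 1, channel 1.
 */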
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
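/*
 * The .scale strings convert raw counts to MiB: 6.103515625e-5 is
 * 64 / 2^20, i.e. each IMC count corresponds to 64 bytes (one cache
 * line). The IIO bandwidth events earlier use 3.814697266e-6 = 4 / 2^20,
 * i.e. 4 bytes per count.
 */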
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */

/* SPR uncore support */

static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config);
}

static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, 0);

	wrmsrl(hwc->config_base, 0);
}

static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
	struct intel_uncore_type *type = box->pmu->type;

	if (tie_en) {
		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box	= intel_generic_uncore_msr_init_box,
	.disable_box	= intel_generic_uncore_msr_disable_box,
	.enable_box	= intel_generic_uncore_msr_enable_box,
	.disable_event	= spr_uncore_msr_disable_event,
	.enable_event	= spr_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
	.hw_config	= spr_cha_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};

static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};

static ssize_t alias_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	char pmu_name[UNCORE_PMU_NAME_LEN];

	uncore_get_alias_name(pmu_name, pmu);
	return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
	&dev_attr_alias.attr,
	NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};
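/*
 * Note that spr_uncore_chabox leaves num_boxes, num_counters and the
 * register offsets unset: on SPR those come from the discovery tables,
 * and uncore_get_uncores() below only overlays the customized fields
 * listed here onto the generic discovered types.
 */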
static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};

static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name = "format",
	.attrs = spr_uncore_raw_formats_attr,
};

#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",
};

static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};

static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (uncore_pmc_fixed(hwc->idx))
		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
	else
		writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box	= intel_generic_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= intel_generic_uncore_mmio_disable_box,
	.enable_box	= intel_generic_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= spr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
};

static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}

static struct intel_uncore_ops spr_uncore_pci_ops = {
	.init_box	= intel_generic_uncore_pci_init_box,
	.disable_box	= intel_generic_uncore_pci_disable_box,
	.enable_box	= intel_generic_uncore_pci_enable_box,
	.disable_event	= intel_generic_uncore_pci_disable_event,
	.enable_event	= spr_uncore_pci_enable_event,
	.read_counter	= intel_generic_uncore_pci_read_counter,
};
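/*
 * spr_uncore_pci_enable_event() writes the 64-bit event config as two
 * dwords, upper half first, since the SPR event encoding (extended umask)
 * exceeds 32 bits; writing the low dword last likely also keeps the
 * enable bit from taking effect before the extended bits are in place.
 */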
"3.814697266e-6"), 5832 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), 5833 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), 5834 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), 5835 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), 5836 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), 5837 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), 5838 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), 5839 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), 5840 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), 5841 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), 5842 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), 5843 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), 5844 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), 5845 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), 5846 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), 5847 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), 5848 /* Free-Running IIO BANDWIDTH OUT Counters */ 5849 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"), 5850 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), 5851 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), 5852 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"), 5853 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), 5854 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), 5855 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"), 5856 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), 5857 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), 5858 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"), 5859 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), 5860 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), 5861 INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"), 5862 INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"), 5863 INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"), 5864 INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"), 5865 INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"), 5866 INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"), 5867 INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"), 5868 INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"), 5869 INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"), 5870 INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"), 5871 INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"), 5872 INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"), 5873 { /* end: all zeroes */ }, 5874 }; 5875 5876 static struct intel_uncore_type spr_uncore_iio_free_running = { 5877 .name = "iio_free_running", 5878 .num_counters = 17, 5879 .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX, 5880 .freerunning = spr_iio_freerunning, 5881 .ops = &skx_uncore_iio_freerunning_ops, 5882 .event_descs = spr_uncore_iio_freerunning_events, 5883 .format_group = &skx_uncore_iio_freerunning_format_group, 5884 }; 5885 5886 enum perf_uncore_spr_imc_freerunning_type_id { 5887 SPR_IMC_DCLK, 5888 SPR_IMC_PQ_CYCLES, 5889 5890 SPR_IMC_FREERUNNING_TYPE_MAX, 5891 }; 5892 5893 static struct freerunning_counters spr_imc_freerunning[] = { 5894 [SPR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 }, 5895 [SPR_IMC_PQ_CYCLES] = { 0x2318, 0x8, 0, 2, 48 }, 5896 }; 5897 5898 static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = { 5899 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"), 5900 5901 
enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,	"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,	"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};

#define SPR_MC_DEVICE_ID	0x3251

static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES	1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES	1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
					struct intel_uncore_type *from_type)
{
	if (!to_type || !from_type)
		return;

	if (from_type->name)
		to_type->name = from_type->name;
	if (from_type->fixed_ctr_bits)
		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
	if (from_type->event_mask)
		to_type->event_mask = from_type->event_mask;
	if (from_type->event_mask_ext)
		to_type->event_mask_ext = from_type->event_mask_ext;
	if (from_type->fixed_ctr)
		to_type->fixed_ctr = from_type->fixed_ctr;
	if (from_type->fixed_ctl)
		to_type->fixed_ctl = from_type->fixed_ctl;
	if (from_type->num_shared_regs)
		to_type->num_shared_regs = from_type->num_shared_regs;
	if (from_type->constraints)
		to_type->constraints = from_type->constraints;
	if (from_type->ops)
		to_type->ops = from_type->ops;
	if (from_type->event_descs)
		to_type->event_descs = from_type->event_descs;
	if (from_type->format_group)
		to_type->format_group = from_type->format_group;
	if (from_type->attr_update)
		to_type->attr_update = from_type->attr_update;
}
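/*
 * Build the per-access-type list: start from the generic types produced
 * by the discovery code, overlay the SPR-specific customizations from
 * spr_uncores[] (indexed by discovery type_id), then append the "extra"
 * types such as the free-running counters that discovery does not report.
 */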
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
			continue;
		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
	for (; *types; types++) {
		if (type_id == (*types)->type_id)
			return *types;
	}

	return NULL;
}

static int uncore_type_max_boxes(struct intel_uncore_type **types,
				 int type_id)
{
	struct intel_uncore_type *type;
	int i, max = 0;

	type = uncore_find_type_by_id(types, type_id);
	if (!type)
		return 0;

	for (i = 0; i < type->num_boxes; i++) {
		if (type->box_ids[i] > max)
			max = type->box_ids[i];
	}

	return max + 1;
}

void spr_uncore_cpu_init(void)
{
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

int spr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
	return 0;
}

void spr_uncore_mmio_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

	if (ret) {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
	} else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
}

/* end of SPR uncore support */