// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
/* Init value for a box control register: reset control and counters, enable freeze */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
/* "TRESH" (sic) is the hardware event-threshold field, bits 31:24 */
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
/* Bits user space may set via the perf "config" attribute on generic boxes */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: the Ubox threshold field is only 5 bits wide */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
/* PCU has no umask field; occupancy select/invert/edge replace it */
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
/* Per-Cbo MSR stride: Cbo N registers live at C0 address + N * offset */
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/* Build an extra_reg entry mapping an event/umask pair to the Cbo filter MSR */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control: IvyTown has no FRZ_EN bit in the init value */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter fields (64-bit filter register, hence the ULL shifts) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract the i-th n-bit-wide field of x (used for packed 6-bit refcounts) */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
						KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
/* Extra config bits live in the upper half of the 64-bit event control */
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID	0x9a2
#define SNR_ICX_SAD_CONTROL_CFG		0x3f4

/* Getting I/O stack id in SAD_COTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)	(((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0 407 408 /* SNR IMC */ 409 #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54 410 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38 411 #define SNR_IMC_MMIO_PMON_CTL0 0x40 412 #define SNR_IMC_MMIO_PMON_CTR0 0x8 413 #define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800 414 #define SNR_IMC_MMIO_OFFSET 0x4000 415 #define SNR_IMC_MMIO_SIZE 0x4000 416 #define SNR_IMC_MMIO_BASE_OFFSET 0xd0 417 #define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF 418 #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8 419 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF 420 421 /* ICX CHA */ 422 #define ICX_C34_MSR_PMON_CTR0 0xb68 423 #define ICX_C34_MSR_PMON_CTL0 0xb61 424 #define ICX_C34_MSR_PMON_BOX_CTL 0xb60 425 #define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65 426 427 /* ICX IIO */ 428 #define ICX_IIO_MSR_PMON_CTL0 0xa58 429 #define ICX_IIO_MSR_PMON_CTR0 0xa51 430 #define ICX_IIO_MSR_PMON_BOX_CTL 0xa50 431 432 /* ICX IRP */ 433 #define ICX_IRP0_MSR_PMON_CTL0 0xa4d 434 #define ICX_IRP0_MSR_PMON_CTR0 0xa4b 435 #define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a 436 437 /* ICX M2PCIE */ 438 #define ICX_M2PCIE_MSR_PMON_CTL0 0xa46 439 #define ICX_M2PCIE_MSR_PMON_CTR0 0xa41 440 #define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40 441 442 /* ICX UPI */ 443 #define ICX_UPI_PCI_PMON_CTL0 0x350 444 #define ICX_UPI_PCI_PMON_CTR0 0x320 445 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318 446 #define ICX_UPI_CTL_UMASK_EXT 0xffffff 447 448 /* ICX M3UPI*/ 449 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8 450 #define ICX_M3UPI_PCI_PMON_CTR0 0xa8 451 #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0 452 453 /* ICX IMC */ 454 #define ICX_NUMBER_IMC_CHN 2 455 #define ICX_IMC_MEM_STRIDE 0x4 456 457 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 458 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); 459 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 460 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); 461 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 462 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55"); 463 
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

/*
 * Freeze all counters in a PCI-based uncore box by setting the FRZ bit in
 * its box control register.  The write is skipped if the config read fails
 * (pci_read_config_dword() returning non-zero).
 */
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

/*
 * Unfreeze a PCI-based uncore box: read-modify-write the box control
 * register with the FRZ bit cleared.
 */
static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

/*
 * Program an event's control register with its configured value plus the
 * per-counter enable bit (SNBEP_PMON_CTL_EN).
 */
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
568 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event) 569 { 570 struct pci_dev *pdev = box->pci_dev; 571 struct hw_perf_event *hwc = &event->hw; 572 573 pci_write_config_dword(pdev, hwc->config_base, hwc->config); 574 } 575 576 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event) 577 { 578 struct pci_dev *pdev = box->pci_dev; 579 struct hw_perf_event *hwc = &event->hw; 580 u64 count = 0; 581 582 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); 583 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); 584 585 return count; 586 } 587 588 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) 589 { 590 struct pci_dev *pdev = box->pci_dev; 591 int box_ctl = uncore_pci_box_ctl(box); 592 593 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT); 594 } 595 596 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box) 597 { 598 u64 config; 599 unsigned msr; 600 601 msr = uncore_msr_box_ctl(box); 602 if (msr) { 603 rdmsrl(msr, config); 604 config |= SNBEP_PMON_BOX_CTL_FRZ; 605 wrmsrl(msr, config); 606 } 607 } 608 609 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) 610 { 611 u64 config; 612 unsigned msr; 613 614 msr = uncore_msr_box_ctl(box); 615 if (msr) { 616 rdmsrl(msr, config); 617 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 618 wrmsrl(msr, config); 619 } 620 } 621 622 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 623 { 624 struct hw_perf_event *hwc = &event->hw; 625 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 626 627 if (reg1->idx != EXTRA_REG_NONE) 628 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0)); 629 630 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 631 } 632 633 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, 634 struct perf_event *event) 635 { 636 struct hw_perf_event *hwc = &event->hw; 637 638 
wrmsrl(hwc->config_base, hwc->config); 639 } 640 641 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) 642 { 643 unsigned msr = uncore_msr_box_ctl(box); 644 645 if (msr) 646 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); 647 } 648 649 static struct attribute *snbep_uncore_formats_attr[] = { 650 &format_attr_event.attr, 651 &format_attr_umask.attr, 652 &format_attr_edge.attr, 653 &format_attr_inv.attr, 654 &format_attr_thresh8.attr, 655 NULL, 656 }; 657 658 static struct attribute *snbep_uncore_ubox_formats_attr[] = { 659 &format_attr_event.attr, 660 &format_attr_umask.attr, 661 &format_attr_edge.attr, 662 &format_attr_inv.attr, 663 &format_attr_thresh5.attr, 664 NULL, 665 }; 666 667 static struct attribute *snbep_uncore_cbox_formats_attr[] = { 668 &format_attr_event.attr, 669 &format_attr_umask.attr, 670 &format_attr_edge.attr, 671 &format_attr_tid_en.attr, 672 &format_attr_inv.attr, 673 &format_attr_thresh8.attr, 674 &format_attr_filter_tid.attr, 675 &format_attr_filter_nid.attr, 676 &format_attr_filter_state.attr, 677 &format_attr_filter_opc.attr, 678 NULL, 679 }; 680 681 static struct attribute *snbep_uncore_pcu_formats_attr[] = { 682 &format_attr_event.attr, 683 &format_attr_occ_sel.attr, 684 &format_attr_edge.attr, 685 &format_attr_inv.attr, 686 &format_attr_thresh5.attr, 687 &format_attr_occ_invert.attr, 688 &format_attr_occ_edge.attr, 689 &format_attr_filter_band0.attr, 690 &format_attr_filter_band1.attr, 691 &format_attr_filter_band2.attr, 692 &format_attr_filter_band3.attr, 693 NULL, 694 }; 695 696 static struct attribute *snbep_uncore_qpi_formats_attr[] = { 697 &format_attr_event_ext.attr, 698 &format_attr_umask.attr, 699 &format_attr_edge.attr, 700 &format_attr_inv.attr, 701 &format_attr_thresh8.attr, 702 &format_attr_match_rds.attr, 703 &format_attr_match_rnid30.attr, 704 &format_attr_match_rnid4.attr, 705 &format_attr_match_dnid.attr, 706 &format_attr_match_mc.attr, 707 &format_attr_match_opc.attr, 708 &format_attr_match_vnw.attr, 709 
&format_attr_match0.attr, 710 &format_attr_match1.attr, 711 &format_attr_mask_rds.attr, 712 &format_attr_mask_rnid30.attr, 713 &format_attr_mask_rnid4.attr, 714 &format_attr_mask_dnid.attr, 715 &format_attr_mask_mc.attr, 716 &format_attr_mask_opc.attr, 717 &format_attr_mask_vnw.attr, 718 &format_attr_mask0.attr, 719 &format_attr_mask1.attr, 720 NULL, 721 }; 722 723 static struct uncore_event_desc snbep_uncore_imc_events[] = { 724 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), 725 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), 726 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), 727 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), 728 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), 729 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), 730 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), 731 { /* end: all zeroes */ }, 732 }; 733 734 static struct uncore_event_desc snbep_uncore_qpi_events[] = { 735 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 736 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 737 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), 738 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), 739 { /* end: all zeroes */ }, 740 }; 741 742 static const struct attribute_group snbep_uncore_format_group = { 743 .name = "format", 744 .attrs = snbep_uncore_formats_attr, 745 }; 746 747 static const struct attribute_group snbep_uncore_ubox_format_group = { 748 .name = "format", 749 .attrs = snbep_uncore_ubox_formats_attr, 750 }; 751 752 static const struct attribute_group snbep_uncore_cbox_format_group = { 753 .name = "format", 754 .attrs = snbep_uncore_cbox_formats_attr, 755 }; 756 757 static const struct attribute_group snbep_uncore_pcu_format_group = { 758 .name = "format", 759 .attrs = snbep_uncore_pcu_formats_attr, 760 }; 761 762 static const struct attribute_group snbep_uncore_qpi_format_group = { 763 .name = "format", 764 
.attrs = snbep_uncore_qpi_formats_attr, 765 }; 766 767 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 768 .disable_box = snbep_uncore_msr_disable_box, \ 769 .enable_box = snbep_uncore_msr_enable_box, \ 770 .disable_event = snbep_uncore_msr_disable_event, \ 771 .enable_event = snbep_uncore_msr_enable_event, \ 772 .read_counter = uncore_msr_read_counter 773 774 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 775 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \ 776 .init_box = snbep_uncore_msr_init_box \ 777 778 static struct intel_uncore_ops snbep_uncore_msr_ops = { 779 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 780 }; 781 782 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ 783 .init_box = snbep_uncore_pci_init_box, \ 784 .disable_box = snbep_uncore_pci_disable_box, \ 785 .enable_box = snbep_uncore_pci_enable_box, \ 786 .disable_event = snbep_uncore_pci_disable_event, \ 787 .read_counter = snbep_uncore_pci_read_counter 788 789 static struct intel_uncore_ops snbep_uncore_pci_ops = { 790 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), 791 .enable_event = snbep_uncore_pci_enable_event, \ 792 }; 793 794 static struct event_constraint snbep_uncore_cbox_constraints[] = { 795 UNCORE_EVENT_CONSTRAINT(0x01, 0x1), 796 UNCORE_EVENT_CONSTRAINT(0x02, 0x3), 797 UNCORE_EVENT_CONSTRAINT(0x04, 0x3), 798 UNCORE_EVENT_CONSTRAINT(0x05, 0x3), 799 UNCORE_EVENT_CONSTRAINT(0x07, 0x3), 800 UNCORE_EVENT_CONSTRAINT(0x09, 0x3), 801 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 802 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 803 UNCORE_EVENT_CONSTRAINT(0x13, 0x3), 804 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), 805 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), 806 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), 807 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), 808 UNCORE_EVENT_CONSTRAINT(0x1f, 0xe), 809 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 810 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 811 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 812 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 813 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 814 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 815 UNCORE_EVENT_CONSTRAINT(0x35, 0x3), 816 
UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 817 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 818 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 819 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 820 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), 821 EVENT_CONSTRAINT_END 822 }; 823 824 static struct event_constraint snbep_uncore_r2pcie_constraints[] = { 825 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 826 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 827 UNCORE_EVENT_CONSTRAINT(0x12, 0x1), 828 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 829 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 830 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 831 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 832 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 833 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 834 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 835 EVENT_CONSTRAINT_END 836 }; 837 838 static struct event_constraint snbep_uncore_r3qpi_constraints[] = { 839 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 840 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 841 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 842 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 843 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 844 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 845 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 846 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 847 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 848 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 849 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 850 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 851 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 852 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3), 853 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), 854 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 855 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 856 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 857 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), 858 UNCORE_EVENT_CONSTRAINT(0x30, 0x3), 859 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 860 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 861 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 862 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 863 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 864 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 865 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 866 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 867 EVENT_CONSTRAINT_END 868 }; 869 870 static struct 
intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

/*
 * Table mapping C-Box event/umask codes to the filter-register field
 * groups (idx bitmask) they require; consumed by snbep_cbox_hw_config().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

/*
 * Release the shared filter-register field groups this event allocated.
 * Each of the 5 field groups has a 6-bit refcount slice inside er->ref.
 */
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	/* fake boxes (event validation) never took real references */
	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

/*
 * Try to claim every filter field group the event needs (reg1->idx bits).
 * A group can be shared as long as all users program identical filter
 * bits under its mask; otherwise the event is rejected with the empty
 * constraint.  cbox_filter_mask translates a field-group bit to the raw
 * filter-register mask, and differs between SNB-EP and IVT.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* already holding this group from a previous attempt */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* free, or in use with a compatible filter value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back any references taken in this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

/* Translate field-group bits to SNB-EP C-Box filter register masks. */
static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

/*
 * Work out which filter field groups the event code needs and latch the
 * per-box filter register address plus the relevant config1 bits.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

/*
 * Shift an event's PCU filter-band byte from its current band slot to
 * new_idx.  If modify is set, also rewrite the event's occ_sel and
 * cached filter state; otherwise just return the would-be config.
 */
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		/* occ_sel moves with the band index */
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

/*
 * Claim one of the four 8-bit band slots in the shared PCU filter
 * register, rotating to an alternative slot if the preferred one is
 * taken with an incompatible value.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* slot free, or already programmed with the same band value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next band slot until we wrap back to the start */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

/* Drop the reference on the PCU filter band slot this event held. */
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

/* Events 0xb-0xe are the four frequency-band events; they use the filter. */
static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

/* Register the SNB-EP MSR uncores; one C-Box exists per core at most. */
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

/* Indices into uncore_extra_pci_dev[].dev[] for the QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

/* Event 0x38 is the QPI match/mask event: latch config1/config2 registers. */
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return
0;
}

/*
 * Program the QPI match/mask registers, which live on a separate
 * "filter" PCI device, then enable the counter on the PMON device.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* filter device may be absent; skip match/mask setup then */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Shared layout of all plain SNB-EP PCI PMON boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI uses its own ops (match/mask filter) and extended event mask. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			/* get the Node ID of the local register */
			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
			if (err)
				break;
			nodeid = config & NODE_ID_MASK;
			/* get the Node ID mapping */
			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register maps
			 * to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == ((config >> (3 * i)) & 0x7)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					/* package not (yet) known; store error marker */
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			/* >8 nodes: derive the die from NUMA/cpu topology */
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

int snbep_uncore_pci_init(void)
{
	/* 0x3ce0 is the SNB-EP UBOX PCI device id */
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

/* IVT reuses SNB-EP box/event handlers but needs its own init value. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct
attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* U-Box has a narrower (5-bit) threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-Box adds the tid_en bit and the extended IVT filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI exposes the full packet match/mask fields via config1/config2. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

/*
 * IVT C-Box event -> filter field-group table (idx bits feed
 * ivbep_cbox_filter_mask(); note IVT has 5 groups vs SNB-EP's 4).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

/* Translate field-group bits to IVT C-Box filter register masks. */
static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		/* opcode group also covers the NC/C6/ISOC qualifier bits */
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

/* Same scheme as snbep_cbox_hw_config, using the IVT extra-reg table. */
static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter is split across two MSRs; the high
		 * half sits 6 MSRs above the low half on IVT — NOTE(review):
		 * intentional, do not "fix" to +1.
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

/* PCU reuses the SNB-EP band-filter constraint handling. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};

/* Register the IvyTown MSR uncores; one C-Box exists per core at most. */
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}

static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type ivbep_uncore_imc = { 1832 .name = "imc", 1833 .num_counters = 4, 1834 .num_boxes = 8, 1835 .perf_ctr_bits = 48, 1836 .fixed_ctr_bits = 48, 1837 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 1838 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 1839 .event_descs = snbep_uncore_imc_events, 1840 IVBEP_UNCORE_PCI_COMMON_INIT(), 1841 }; 1842 1843 /* registers in IRP boxes are not properly aligned */ 1844 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4}; 1845 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0}; 1846 1847 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event) 1848 { 1849 struct pci_dev *pdev = box->pci_dev; 1850 struct hw_perf_event *hwc = &event->hw; 1851 1852 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], 1853 hwc->config | SNBEP_PMON_CTL_EN); 1854 } 1855 1856 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event) 1857 { 1858 struct pci_dev *pdev = box->pci_dev; 1859 struct hw_perf_event *hwc = &event->hw; 1860 1861 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config); 1862 } 1863 1864 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) 1865 { 1866 struct pci_dev *pdev = box->pci_dev; 1867 struct hw_perf_event *hwc = &event->hw; 1868 u64 count = 0; 1869 1870 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); 1871 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); 1872 1873 return count; 1874 } 1875 1876 static struct intel_uncore_ops ivbep_uncore_irp_ops = { 1877 .init_box = ivbep_uncore_pci_init_box, 1878 .disable_box = snbep_uncore_pci_disable_box, 1879 .enable_box = snbep_uncore_pci_enable_box, 1880 .disable_event = ivbep_uncore_irp_disable_event, 1881 .enable_event = ivbep_uncore_irp_enable_event, 1882 .read_counter = ivbep_uncore_irp_read_counter, 1883 }; 1884 
/* IRP (I/O ring port) PMON box; counter offsets come from the arrays above. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* QPI link-layer PMON box. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the packet match/mask registers */
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};

/* Ring-to-PCIe interface PMON box. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Ring-to-QPI interface PMON box. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indices into ivbep_pci_uncores[], referenced by the PCI id table below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};

/*
 * PCI device IDs of the IvyTown uncore PMON units.  driver_data encodes
 * which uncore type the device belongs to and its box index.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};

/* Build the PCI-bus -> physical-package map, then register the PCI uncores. */
int ivbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = ivbep_pci_uncores;
	uncore_pci_driver = &ivbep_uncore_pci_driver;
	return 0;
}
/* end of IvyTown uncore support */

/* KNL uncore support */
static struct attribute
*knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL system configuration controller (ubox) PMON box. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};

static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to a single CHA counter. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Events that require pieces of the CHA filter register (idx is a bitmask). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};

/* Map an extra-reg idx bitmask to the CHA filter bits it occupies. */
static u64 knl_cha_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x4)
		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
	return mask;
}

static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}

/*
 * Collect the filter bits the event needs and program extra_reg to the
 * per-box filter MSR.  The remote/local-node and NNC bits are always set
 * on top of whatever config1 supplies.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}

/* Defined later in this file; KNL reuses the Haswell-EP Cbo enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* KNL caching/home agent PMON box; one per tile pair. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};

static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL power control unit PMON box. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};

static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the MSR-based KNL uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}

static void
knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	/* Writing 0 clears all control bits, which unfreezes the box. */
	pci_write_config_dword(pdev, box_ctl, 0);
}

/*
 * Enable one IMC event.  The fixed (clockticks) counter has its own
 * enable bit, distinct from the generic SNBEP_PMON_CTL_EN.
 */
static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
							== UNCORE_FIXED_EVENT)
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | KNL_PMON_FIXED_CTL_EN);
	else
		pci_write_config_dword(pdev, hwc->config_base,
				       hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/*
 * MC UCLK PMON box.  Despite the _MSR_ register names these boxes are
 * accessed through PCI config space (see knl_uncore_imc_ops).
 */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* MC DCLK (per-channel) PMON box. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) UCLK PMON box. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* EDC (MCDRAM) ECLK PMON box. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* Mesh-to-PCIe interface PMON box. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP PMON box. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};

/* Indices into knl_pci_uncores[], referenced by the PCI id table. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};

/*
 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
*/

/*
 * Because the device IDs are shared, instances are disambiguated by PCI
 * device/function number via UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};

/* Build bus -> package maps for both PMON buses, then register. */
int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}

/* end of KNL uncore support */

/* Haswell-EP uncore support */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};

/* Route config1 (tid/cid filter bits) to the ubox filter MSR. */
static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	reg1->reg =
HSWEP_U_MSR_PMON_FILTER;
	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
	reg1->idx = 0;	/* single shared filter register */
	return 0;
}

static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Haswell-EP ubox (system configuration controller) PMON box. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};

static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Cbo events pinned to particular counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Events (event select + umask) that require pieces of the Cbo filter
 * registers; idx is a bitmask decoded by hswep_cbox_filter_mask().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};

/* Map an extra-reg idx bitmask to the filter bits it occupies. */
static u64 hswep_cbox_filter_mask(int fields)
{
	u64 mask = 0;
	if (fields & 0x1)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		/* opcode matching implies the NC/C6/ISOC qualifier bits */
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}
	return mask;
}

static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}

/*
 * Determine which filter fields the event uses and program extra_reg
 * with the per-box filter MSR address and the masked config1 value.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

/*
 * Enable a Cbo event: write the 64-bit shared filter value into the two
 * consecutive 32-bit filter MSRs first, then set the event control.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1,
filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* Haswell-EP Cbo (LLC slice) PMON box. */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/* accumulate one bit per write; order is significant */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}

static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/* Ring-interconnect SBOX PMON box. */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

/*
 * PCU occupancy events 0xb-0xe take a band filter from config1; the
 * filter byte is selected by the event index.
 * NOTE(review): the mask is (0xff << reg1->idx), i.e. a shift by the
 * index itself rather than idx * 8 — confirm against the PCU filter
 * register layout.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}

static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Haswell-EP power control unit PMON box. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
&hswep_uncore_cbox, 2863 &hswep_uncore_sbox, 2864 &hswep_uncore_pcu, 2865 NULL, 2866 }; 2867 2868 #define HSWEP_PCU_DID 0x2fc0 2869 #define HSWEP_PCU_CAPID4_OFFET 0x94 2870 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3) 2871 2872 static bool hswep_has_limit_sbox(unsigned int device) 2873 { 2874 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); 2875 u32 capid4; 2876 2877 if (!dev) 2878 return false; 2879 2880 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); 2881 if (!hswep_get_chop(capid4)) 2882 return true; 2883 2884 return false; 2885 } 2886 2887 void hswep_uncore_cpu_init(void) 2888 { 2889 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2890 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2891 2892 /* Detect 6-8 core systems with only two SBOXes */ 2893 if (hswep_has_limit_sbox(HSWEP_PCU_DID)) 2894 hswep_uncore_sbox.num_boxes = 2; 2895 2896 uncore_msr_uncores = hswep_msr_uncores; 2897 } 2898 2899 static struct intel_uncore_type hswep_uncore_ha = { 2900 .name = "ha", 2901 .num_counters = 4, 2902 .num_boxes = 2, 2903 .perf_ctr_bits = 48, 2904 SNBEP_UNCORE_PCI_COMMON_INIT(), 2905 }; 2906 2907 static struct uncore_event_desc hswep_uncore_imc_events[] = { 2908 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"), 2909 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), 2910 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"), 2911 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"), 2912 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), 2913 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"), 2914 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"), 2915 { /* end: all zeroes */ }, 2916 }; 2917 2918 static struct intel_uncore_type hswep_uncore_imc = { 2919 .name = "imc", 2920 .num_counters = 4, 2921 .num_boxes = 8, 2922 .perf_ctr_bits = 48, 2923 .fixed_ctr_bits = 48, 2924 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 2925 .fixed_ctl = 
SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 2926 .event_descs = hswep_uncore_imc_events, 2927 SNBEP_UNCORE_PCI_COMMON_INIT(), 2928 }; 2929 2930 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8}; 2931 2932 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event) 2933 { 2934 struct pci_dev *pdev = box->pci_dev; 2935 struct hw_perf_event *hwc = &event->hw; 2936 u64 count = 0; 2937 2938 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count); 2939 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1); 2940 2941 return count; 2942 } 2943 2944 static struct intel_uncore_ops hswep_uncore_irp_ops = { 2945 .init_box = snbep_uncore_pci_init_box, 2946 .disable_box = snbep_uncore_pci_disable_box, 2947 .enable_box = snbep_uncore_pci_enable_box, 2948 .disable_event = ivbep_uncore_irp_disable_event, 2949 .enable_event = ivbep_uncore_irp_enable_event, 2950 .read_counter = hswep_uncore_irp_read_counter, 2951 }; 2952 2953 static struct intel_uncore_type hswep_uncore_irp = { 2954 .name = "irp", 2955 .num_counters = 4, 2956 .num_boxes = 1, 2957 .perf_ctr_bits = 48, 2958 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 2959 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2960 .ops = &hswep_uncore_irp_ops, 2961 .format_group = &snbep_uncore_format_group, 2962 }; 2963 2964 static struct intel_uncore_type hswep_uncore_qpi = { 2965 .name = "qpi", 2966 .num_counters = 4, 2967 .num_boxes = 3, 2968 .perf_ctr_bits = 48, 2969 .perf_ctr = SNBEP_PCI_PMON_CTR0, 2970 .event_ctl = SNBEP_PCI_PMON_CTL0, 2971 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 2972 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 2973 .num_shared_regs = 1, 2974 .ops = &snbep_uncore_qpi_ops, 2975 .format_group = &snbep_uncore_qpi_format_group, 2976 }; 2977 2978 static struct event_constraint hswep_uncore_r2pcie_constraints[] = { 2979 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 2980 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 2981 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 2982 
UNCORE_EVENT_CONSTRAINT(0x23, 0x1), 2983 UNCORE_EVENT_CONSTRAINT(0x24, 0x1), 2984 UNCORE_EVENT_CONSTRAINT(0x25, 0x1), 2985 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 2986 UNCORE_EVENT_CONSTRAINT(0x27, 0x1), 2987 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 2988 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 2989 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1), 2990 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3), 2991 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 2992 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 2993 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 2994 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 2995 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 2996 UNCORE_EVENT_CONSTRAINT(0x35, 0x3), 2997 EVENT_CONSTRAINT_END 2998 }; 2999 3000 static struct intel_uncore_type hswep_uncore_r2pcie = { 3001 .name = "r2pcie", 3002 .num_counters = 4, 3003 .num_boxes = 1, 3004 .perf_ctr_bits = 48, 3005 .constraints = hswep_uncore_r2pcie_constraints, 3006 SNBEP_UNCORE_PCI_COMMON_INIT(), 3007 }; 3008 3009 static struct event_constraint hswep_uncore_r3qpi_constraints[] = { 3010 UNCORE_EVENT_CONSTRAINT(0x01, 0x3), 3011 UNCORE_EVENT_CONSTRAINT(0x07, 0x7), 3012 UNCORE_EVENT_CONSTRAINT(0x08, 0x7), 3013 UNCORE_EVENT_CONSTRAINT(0x09, 0x7), 3014 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), 3015 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7), 3016 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 3017 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 3018 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 3019 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 3020 UNCORE_EVENT_CONSTRAINT(0x14, 0x3), 3021 UNCORE_EVENT_CONSTRAINT(0x15, 0x3), 3022 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), 3023 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 3024 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 3025 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 3026 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 3027 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 3028 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 3029 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 3030 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 3031 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 3032 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 3033 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 3034 UNCORE_EVENT_CONSTRAINT(0x2f, 
0x3), 3035 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 3036 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 3037 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 3038 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 3039 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 3040 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 3041 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 3042 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 3043 EVENT_CONSTRAINT_END 3044 }; 3045 3046 static struct intel_uncore_type hswep_uncore_r3qpi = { 3047 .name = "r3qpi", 3048 .num_counters = 3, 3049 .num_boxes = 3, 3050 .perf_ctr_bits = 44, 3051 .constraints = hswep_uncore_r3qpi_constraints, 3052 SNBEP_UNCORE_PCI_COMMON_INIT(), 3053 }; 3054 3055 enum { 3056 HSWEP_PCI_UNCORE_HA, 3057 HSWEP_PCI_UNCORE_IMC, 3058 HSWEP_PCI_UNCORE_IRP, 3059 HSWEP_PCI_UNCORE_QPI, 3060 HSWEP_PCI_UNCORE_R2PCIE, 3061 HSWEP_PCI_UNCORE_R3QPI, 3062 }; 3063 3064 static struct intel_uncore_type *hswep_pci_uncores[] = { 3065 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha, 3066 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc, 3067 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp, 3068 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi, 3069 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie, 3070 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi, 3071 NULL, 3072 }; 3073 3074 static const struct pci_device_id hswep_uncore_pci_ids[] = { 3075 { /* Home Agent 0 */ 3076 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30), 3077 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0), 3078 }, 3079 { /* Home Agent 1 */ 3080 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38), 3081 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1), 3082 }, 3083 { /* MC0 Channel 0 */ 3084 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0), 3085 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0), 3086 }, 3087 { /* MC0 Channel 1 */ 3088 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1), 3089 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1), 3090 }, 3091 { /* MC0 Channel 2 */ 3092 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4), 3093 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2), 
3094 }, 3095 { /* MC0 Channel 3 */ 3096 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5), 3097 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3), 3098 }, 3099 { /* MC1 Channel 0 */ 3100 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0), 3101 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4), 3102 }, 3103 { /* MC1 Channel 1 */ 3104 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1), 3105 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5), 3106 }, 3107 { /* MC1 Channel 2 */ 3108 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4), 3109 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6), 3110 }, 3111 { /* MC1 Channel 3 */ 3112 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5), 3113 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7), 3114 }, 3115 { /* IRP */ 3116 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39), 3117 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0), 3118 }, 3119 { /* QPI0 Port 0 */ 3120 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32), 3121 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0), 3122 }, 3123 { /* QPI0 Port 1 */ 3124 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33), 3125 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1), 3126 }, 3127 { /* QPI1 Port 2 */ 3128 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a), 3129 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2), 3130 }, 3131 { /* R2PCIe */ 3132 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34), 3133 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0), 3134 }, 3135 { /* R3QPI0 Link 0 */ 3136 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36), 3137 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0), 3138 }, 3139 { /* R3QPI0 Link 1 */ 3140 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37), 3141 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1), 3142 }, 3143 { /* R3QPI1 Link 2 */ 3144 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e), 3145 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2), 3146 }, 3147 { /* QPI Port 0 filter */ 3148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86), 3149 .driver_data 
= UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3150 SNBEP_PCI_QPI_PORT0_FILTER), 3151 }, 3152 { /* QPI Port 1 filter */ 3153 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96), 3154 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3155 SNBEP_PCI_QPI_PORT1_FILTER), 3156 }, 3157 { /* end: all zeroes */ } 3158 }; 3159 3160 static struct pci_driver hswep_uncore_pci_driver = { 3161 .name = "hswep_uncore", 3162 .id_table = hswep_uncore_pci_ids, 3163 }; 3164 3165 int hswep_uncore_pci_init(void) 3166 { 3167 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true); 3168 if (ret) 3169 return ret; 3170 uncore_pci_uncores = hswep_pci_uncores; 3171 uncore_pci_driver = &hswep_uncore_pci_driver; 3172 return 0; 3173 } 3174 /* end of Haswell-EP uncore support */ 3175 3176 /* BDX uncore support */ 3177 3178 static struct intel_uncore_type bdx_uncore_ubox = { 3179 .name = "ubox", 3180 .num_counters = 2, 3181 .num_boxes = 1, 3182 .perf_ctr_bits = 48, 3183 .fixed_ctr_bits = 48, 3184 .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 3185 .event_ctl = HSWEP_U_MSR_PMON_CTL0, 3186 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 3187 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 3188 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 3189 .num_shared_regs = 1, 3190 .ops = &ivbep_uncore_msr_ops, 3191 .format_group = &ivbep_uncore_ubox_format_group, 3192 }; 3193 3194 static struct event_constraint bdx_uncore_cbox_constraints[] = { 3195 UNCORE_EVENT_CONSTRAINT(0x09, 0x3), 3196 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 3197 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 3198 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1), 3199 EVENT_CONSTRAINT_END 3200 }; 3201 3202 static struct intel_uncore_type bdx_uncore_cbox = { 3203 .name = "cbox", 3204 .num_counters = 4, 3205 .num_boxes = 24, 3206 .perf_ctr_bits = 48, 3207 .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 3208 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 3209 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, 3210 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 3211 .msr_offset = HSWEP_CBO_MSR_OFFSET, 
3212 .num_shared_regs = 1, 3213 .constraints = bdx_uncore_cbox_constraints, 3214 .ops = &hswep_uncore_cbox_ops, 3215 .format_group = &hswep_uncore_cbox_format_group, 3216 }; 3217 3218 static struct intel_uncore_type bdx_uncore_sbox = { 3219 .name = "sbox", 3220 .num_counters = 4, 3221 .num_boxes = 4, 3222 .perf_ctr_bits = 48, 3223 .event_ctl = HSWEP_S0_MSR_PMON_CTL0, 3224 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, 3225 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 3226 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, 3227 .msr_offset = HSWEP_SBOX_MSR_OFFSET, 3228 .ops = &hswep_uncore_sbox_msr_ops, 3229 .format_group = &hswep_uncore_sbox_format_group, 3230 }; 3231 3232 #define BDX_MSR_UNCORE_SBOX 3 3233 3234 static struct intel_uncore_type *bdx_msr_uncores[] = { 3235 &bdx_uncore_ubox, 3236 &bdx_uncore_cbox, 3237 &hswep_uncore_pcu, 3238 &bdx_uncore_sbox, 3239 NULL, 3240 }; 3241 3242 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */ 3243 static struct event_constraint bdx_uncore_pcu_constraints[] = { 3244 EVENT_CONSTRAINT(0x80, 0xe, 0x80), 3245 EVENT_CONSTRAINT_END 3246 }; 3247 3248 #define BDX_PCU_DID 0x6fc0 3249 3250 void bdx_uncore_cpu_init(void) 3251 { 3252 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 3253 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 3254 uncore_msr_uncores = bdx_msr_uncores; 3255 3256 /* Detect systems with no SBOXes */ 3257 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID)) 3258 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; 3259 3260 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; 3261 } 3262 3263 static struct intel_uncore_type bdx_uncore_ha = { 3264 .name = "ha", 3265 .num_counters = 4, 3266 .num_boxes = 2, 3267 .perf_ctr_bits = 48, 3268 SNBEP_UNCORE_PCI_COMMON_INIT(), 3269 }; 3270 3271 static struct intel_uncore_type bdx_uncore_imc = { 3272 .name = "imc", 3273 .num_counters = 4, 3274 .num_boxes = 8, 3275 .perf_ctr_bits = 48, 3276 .fixed_ctr_bits = 48, 3277 .fixed_ctr = 
SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 3278 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 3279 .event_descs = hswep_uncore_imc_events, 3280 SNBEP_UNCORE_PCI_COMMON_INIT(), 3281 }; 3282 3283 static struct intel_uncore_type bdx_uncore_irp = { 3284 .name = "irp", 3285 .num_counters = 4, 3286 .num_boxes = 1, 3287 .perf_ctr_bits = 48, 3288 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3289 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 3290 .ops = &hswep_uncore_irp_ops, 3291 .format_group = &snbep_uncore_format_group, 3292 }; 3293 3294 static struct intel_uncore_type bdx_uncore_qpi = { 3295 .name = "qpi", 3296 .num_counters = 4, 3297 .num_boxes = 3, 3298 .perf_ctr_bits = 48, 3299 .perf_ctr = SNBEP_PCI_PMON_CTR0, 3300 .event_ctl = SNBEP_PCI_PMON_CTL0, 3301 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 3302 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 3303 .num_shared_regs = 1, 3304 .ops = &snbep_uncore_qpi_ops, 3305 .format_group = &snbep_uncore_qpi_format_group, 3306 }; 3307 3308 static struct event_constraint bdx_uncore_r2pcie_constraints[] = { 3309 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 3310 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 3311 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 3312 UNCORE_EVENT_CONSTRAINT(0x23, 0x1), 3313 UNCORE_EVENT_CONSTRAINT(0x25, 0x1), 3314 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 3315 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 3316 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 3317 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 3318 EVENT_CONSTRAINT_END 3319 }; 3320 3321 static struct intel_uncore_type bdx_uncore_r2pcie = { 3322 .name = "r2pcie", 3323 .num_counters = 4, 3324 .num_boxes = 1, 3325 .perf_ctr_bits = 48, 3326 .constraints = bdx_uncore_r2pcie_constraints, 3327 SNBEP_UNCORE_PCI_COMMON_INIT(), 3328 }; 3329 3330 static struct event_constraint bdx_uncore_r3qpi_constraints[] = { 3331 UNCORE_EVENT_CONSTRAINT(0x01, 0x7), 3332 UNCORE_EVENT_CONSTRAINT(0x07, 0x7), 3333 UNCORE_EVENT_CONSTRAINT(0x08, 0x7), 3334 UNCORE_EVENT_CONSTRAINT(0x09, 0x7), 3335 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7), 3336 UNCORE_EVENT_CONSTRAINT(0x0e, 
0x7), 3337 UNCORE_EVENT_CONSTRAINT(0x10, 0x3), 3338 UNCORE_EVENT_CONSTRAINT(0x11, 0x3), 3339 UNCORE_EVENT_CONSTRAINT(0x13, 0x1), 3340 UNCORE_EVENT_CONSTRAINT(0x14, 0x3), 3341 UNCORE_EVENT_CONSTRAINT(0x15, 0x3), 3342 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3), 3343 UNCORE_EVENT_CONSTRAINT(0x20, 0x3), 3344 UNCORE_EVENT_CONSTRAINT(0x21, 0x3), 3345 UNCORE_EVENT_CONSTRAINT(0x22, 0x3), 3346 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 3347 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 3348 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 3349 UNCORE_EVENT_CONSTRAINT(0x28, 0x3), 3350 UNCORE_EVENT_CONSTRAINT(0x29, 0x3), 3351 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3), 3352 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3), 3353 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3), 3354 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3), 3355 UNCORE_EVENT_CONSTRAINT(0x33, 0x3), 3356 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 3357 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 3358 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 3359 UNCORE_EVENT_CONSTRAINT(0x38, 0x3), 3360 UNCORE_EVENT_CONSTRAINT(0x39, 0x3), 3361 EVENT_CONSTRAINT_END 3362 }; 3363 3364 static struct intel_uncore_type bdx_uncore_r3qpi = { 3365 .name = "r3qpi", 3366 .num_counters = 3, 3367 .num_boxes = 3, 3368 .perf_ctr_bits = 48, 3369 .constraints = bdx_uncore_r3qpi_constraints, 3370 SNBEP_UNCORE_PCI_COMMON_INIT(), 3371 }; 3372 3373 enum { 3374 BDX_PCI_UNCORE_HA, 3375 BDX_PCI_UNCORE_IMC, 3376 BDX_PCI_UNCORE_IRP, 3377 BDX_PCI_UNCORE_QPI, 3378 BDX_PCI_UNCORE_R2PCIE, 3379 BDX_PCI_UNCORE_R3QPI, 3380 }; 3381 3382 static struct intel_uncore_type *bdx_pci_uncores[] = { 3383 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha, 3384 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc, 3385 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp, 3386 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi, 3387 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie, 3388 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi, 3389 NULL, 3390 }; 3391 3392 static const struct pci_device_id bdx_uncore_pci_ids[] = { 3393 { /* Home Agent 0 */ 3394 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30), 3395 .driver_data = 
UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0), 3396 }, 3397 { /* Home Agent 1 */ 3398 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38), 3399 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1), 3400 }, 3401 { /* MC0 Channel 0 */ 3402 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0), 3403 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0), 3404 }, 3405 { /* MC0 Channel 1 */ 3406 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1), 3407 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1), 3408 }, 3409 { /* MC0 Channel 2 */ 3410 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4), 3411 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2), 3412 }, 3413 { /* MC0 Channel 3 */ 3414 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5), 3415 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3), 3416 }, 3417 { /* MC1 Channel 0 */ 3418 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0), 3419 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4), 3420 }, 3421 { /* MC1 Channel 1 */ 3422 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1), 3423 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5), 3424 }, 3425 { /* MC1 Channel 2 */ 3426 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4), 3427 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6), 3428 }, 3429 { /* MC1 Channel 3 */ 3430 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5), 3431 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7), 3432 }, 3433 { /* IRP */ 3434 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39), 3435 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0), 3436 }, 3437 { /* QPI0 Port 0 */ 3438 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32), 3439 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0), 3440 }, 3441 { /* QPI0 Port 1 */ 3442 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33), 3443 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1), 3444 }, 3445 { /* QPI1 Port 2 */ 3446 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a), 3447 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2), 3448 }, 3449 { /* R2PCIe */ 3450 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34), 3451 
.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0), 3452 }, 3453 { /* R3QPI0 Link 0 */ 3454 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36), 3455 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0), 3456 }, 3457 { /* R3QPI0 Link 1 */ 3458 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37), 3459 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1), 3460 }, 3461 { /* R3QPI1 Link 2 */ 3462 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e), 3463 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2), 3464 }, 3465 { /* QPI Port 0 filter */ 3466 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86), 3467 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3468 SNBEP_PCI_QPI_PORT0_FILTER), 3469 }, 3470 { /* QPI Port 1 filter */ 3471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96), 3472 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3473 SNBEP_PCI_QPI_PORT1_FILTER), 3474 }, 3475 { /* QPI Port 2 filter */ 3476 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), 3477 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3478 BDX_PCI_QPI_PORT2_FILTER), 3479 }, 3480 { /* end: all zeroes */ } 3481 }; 3482 3483 static struct pci_driver bdx_uncore_pci_driver = { 3484 .name = "bdx_uncore", 3485 .id_table = bdx_uncore_pci_ids, 3486 }; 3487 3488 int bdx_uncore_pci_init(void) 3489 { 3490 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true); 3491 3492 if (ret) 3493 return ret; 3494 uncore_pci_uncores = bdx_pci_uncores; 3495 uncore_pci_driver = &bdx_uncore_pci_driver; 3496 return 0; 3497 } 3498 3499 /* end of BDX uncore support */ 3500 3501 /* SKX uncore support */ 3502 3503 static struct intel_uncore_type skx_uncore_ubox = { 3504 .name = "ubox", 3505 .num_counters = 2, 3506 .num_boxes = 1, 3507 .perf_ctr_bits = 48, 3508 .fixed_ctr_bits = 48, 3509 .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 3510 .event_ctl = HSWEP_U_MSR_PMON_CTL0, 3511 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 3512 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 3513 .fixed_ctl = 
HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 3514 .ops = &ivbep_uncore_msr_ops, 3515 .format_group = &ivbep_uncore_ubox_format_group, 3516 }; 3517 3518 static struct attribute *skx_uncore_cha_formats_attr[] = { 3519 &format_attr_event.attr, 3520 &format_attr_umask.attr, 3521 &format_attr_edge.attr, 3522 &format_attr_tid_en.attr, 3523 &format_attr_inv.attr, 3524 &format_attr_thresh8.attr, 3525 &format_attr_filter_tid4.attr, 3526 &format_attr_filter_state5.attr, 3527 &format_attr_filter_rem.attr, 3528 &format_attr_filter_loc.attr, 3529 &format_attr_filter_nm.attr, 3530 &format_attr_filter_all_op.attr, 3531 &format_attr_filter_not_nm.attr, 3532 &format_attr_filter_opc_0.attr, 3533 &format_attr_filter_opc_1.attr, 3534 &format_attr_filter_nc.attr, 3535 &format_attr_filter_isoc.attr, 3536 NULL, 3537 }; 3538 3539 static const struct attribute_group skx_uncore_chabox_format_group = { 3540 .name = "format", 3541 .attrs = skx_uncore_cha_formats_attr, 3542 }; 3543 3544 static struct event_constraint skx_uncore_chabox_constraints[] = { 3545 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 3546 UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 3547 EVENT_CONSTRAINT_END 3548 }; 3549 3550 static struct extra_reg skx_uncore_cha_extra_regs[] = { 3551 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 3552 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 3553 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 3554 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 3555 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4), 3556 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), 3557 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), 3558 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), 3559 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3), 3560 EVENT_EXTRA_END 3561 }; 3562 3563 static u64 skx_cha_filter_mask(int fields) 3564 { 3565 u64 mask = 0; 3566 3567 if (fields & 0x1) 3568 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID; 3569 if (fields & 0x2) 3570 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; 3571 if (fields & 0x4) 3572 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; 
3573 if (fields & 0x8) { 3574 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM; 3575 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC; 3576 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC; 3577 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM; 3578 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM; 3579 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0; 3580 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1; 3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC; 3582 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC; 3583 } 3584 return mask; 3585 } 3586 3587 static struct event_constraint * 3588 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 3589 { 3590 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask); 3591 } 3592 3593 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 3594 { 3595 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 3596 struct extra_reg *er; 3597 int idx = 0; 3598 3599 for (er = skx_uncore_cha_extra_regs; er->msr; er++) { 3600 if (er->event != (event->hw.config & er->config_mask)) 3601 continue; 3602 idx |= er->idx; 3603 } 3604 3605 if (idx) { 3606 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + 3607 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 3608 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx); 3609 reg1->idx = idx; 3610 } 3611 return 0; 3612 } 3613 3614 static struct intel_uncore_ops skx_uncore_chabox_ops = { 3615 /* There is no frz_en for chabox ctl */ 3616 .init_box = ivbep_uncore_msr_init_box, 3617 .disable_box = snbep_uncore_msr_disable_box, 3618 .enable_box = snbep_uncore_msr_enable_box, 3619 .disable_event = snbep_uncore_msr_disable_event, 3620 .enable_event = hswep_cbox_enable_event, 3621 .read_counter = uncore_msr_read_counter, 3622 .hw_config = skx_cha_hw_config, 3623 .get_constraint = skx_cha_get_constraint, 3624 .put_constraint = snbep_cbox_put_constraint, 3625 }; 3626 3627 static struct intel_uncore_type skx_uncore_chabox = { 3628 .name = "cha", 3629 .num_counters = 4, 3630 .perf_ctr_bits = 48, 3631 .event_ctl = 
HSWEP_C0_MSR_PMON_CTL0, 3632 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 3633 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 3634 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 3635 .msr_offset = HSWEP_CBO_MSR_OFFSET, 3636 .num_shared_regs = 1, 3637 .constraints = skx_uncore_chabox_constraints, 3638 .ops = &skx_uncore_chabox_ops, 3639 .format_group = &skx_uncore_chabox_format_group, 3640 }; 3641 3642 static struct attribute *skx_uncore_iio_formats_attr[] = { 3643 &format_attr_event.attr, 3644 &format_attr_umask.attr, 3645 &format_attr_edge.attr, 3646 &format_attr_inv.attr, 3647 &format_attr_thresh9.attr, 3648 &format_attr_ch_mask.attr, 3649 &format_attr_fc_mask.attr, 3650 NULL, 3651 }; 3652 3653 static const struct attribute_group skx_uncore_iio_format_group = { 3654 .name = "format", 3655 .attrs = skx_uncore_iio_formats_attr, 3656 }; 3657 3658 static struct event_constraint skx_uncore_iio_constraints[] = { 3659 UNCORE_EVENT_CONSTRAINT(0x83, 0x3), 3660 UNCORE_EVENT_CONSTRAINT(0x88, 0xc), 3661 UNCORE_EVENT_CONSTRAINT(0x95, 0xc), 3662 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), 3663 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), 3664 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), 3665 EVENT_CONSTRAINT_END 3666 }; 3667 3668 static void skx_iio_enable_event(struct intel_uncore_box *box, 3669 struct perf_event *event) 3670 { 3671 struct hw_perf_event *hwc = &event->hw; 3672 3673 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 3674 } 3675 3676 static struct intel_uncore_ops skx_uncore_iio_ops = { 3677 .init_box = ivbep_uncore_msr_init_box, 3678 .disable_box = snbep_uncore_msr_disable_box, 3679 .enable_box = snbep_uncore_msr_enable_box, 3680 .disable_event = snbep_uncore_msr_disable_event, 3681 .enable_event = skx_iio_enable_event, 3682 .read_counter = uncore_msr_read_counter, 3683 }; 3684 3685 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die) 3686 { 3687 return pmu->type->topology[die].configuration >> 3688 (pmu->pmu_idx * BUS_NUM_STRIDE); 3689 } 3690 3691 static umode_t 3692 
pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, 3693 int die, int zero_bus_pmu) 3694 { 3695 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj)); 3696 3697 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode; 3698 } 3699 3700 static umode_t 3701 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) 3702 { 3703 /* Root bus 0x00 is valid only for pmu_idx = 0. */ 3704 return pmu_iio_mapping_visible(kobj, attr, die, 0); 3705 } 3706 3707 static ssize_t skx_iio_mapping_show(struct device *dev, 3708 struct device_attribute *attr, char *buf) 3709 { 3710 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev); 3711 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr); 3712 long die = (long)ea->var; 3713 3714 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment, 3715 skx_iio_stack(pmu, die)); 3716 } 3717 3718 static int skx_msr_cpu_bus_read(int cpu, u64 *topology) 3719 { 3720 u64 msr_value; 3721 3722 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) || 3723 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT)) 3724 return -ENXIO; 3725 3726 *topology = msr_value; 3727 3728 return 0; 3729 } 3730 3731 static int die_to_cpu(int die) 3732 { 3733 int res = 0, cpu, current_die; 3734 /* 3735 * Using cpus_read_lock() to ensure cpu is not going down between 3736 * looking at cpu_online_mask. 
3737 */ 3738 cpus_read_lock(); 3739 for_each_online_cpu(cpu) { 3740 current_die = topology_logical_die_id(cpu); 3741 if (current_die == die) { 3742 res = cpu; 3743 break; 3744 } 3745 } 3746 cpus_read_unlock(); 3747 return res; 3748 } 3749 3750 static int skx_iio_get_topology(struct intel_uncore_type *type) 3751 { 3752 int die, ret = -EPERM; 3753 3754 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), 3755 GFP_KERNEL); 3756 if (!type->topology) 3757 return -ENOMEM; 3758 3759 for (die = 0; die < uncore_max_dies(); die++) { 3760 ret = skx_msr_cpu_bus_read(die_to_cpu(die), 3761 &type->topology[die].configuration); 3762 if (ret) 3763 break; 3764 3765 ret = uncore_die_to_segment(die); 3766 if (ret < 0) 3767 break; 3768 3769 type->topology[die].segment = ret; 3770 } 3771 3772 if (ret < 0) { 3773 kfree(type->topology); 3774 type->topology = NULL; 3775 } 3776 3777 return ret; 3778 } 3779 3780 static struct attribute_group skx_iio_mapping_group = { 3781 .is_visible = skx_iio_mapping_visible, 3782 }; 3783 3784 static const struct attribute_group *skx_iio_attr_update[] = { 3785 &skx_iio_mapping_group, 3786 NULL, 3787 }; 3788 3789 static int 3790 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) 3791 { 3792 char buf[64]; 3793 int ret; 3794 long die = -1; 3795 struct attribute **attrs = NULL; 3796 struct dev_ext_attribute *eas = NULL; 3797 3798 ret = type->get_topology(type); 3799 if (ret < 0) 3800 goto clear_attr_update; 3801 3802 ret = -ENOMEM; 3803 3804 /* One more for NULL. 
*/ 3805 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL); 3806 if (!attrs) 3807 goto err; 3808 3809 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL); 3810 if (!eas) 3811 goto err; 3812 3813 for (die = 0; die < uncore_max_dies(); die++) { 3814 sprintf(buf, "die%ld", die); 3815 sysfs_attr_init(&eas[die].attr.attr); 3816 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL); 3817 if (!eas[die].attr.attr.name) 3818 goto err; 3819 eas[die].attr.attr.mode = 0444; 3820 eas[die].attr.show = skx_iio_mapping_show; 3821 eas[die].attr.store = NULL; 3822 eas[die].var = (void *)die; 3823 attrs[die] = &eas[die].attr.attr; 3824 } 3825 ag->attrs = attrs; 3826 3827 return 0; 3828 err: 3829 for (; die >= 0; die--) 3830 kfree(eas[die].attr.attr.name); 3831 kfree(eas); 3832 kfree(attrs); 3833 kfree(type->topology); 3834 clear_attr_update: 3835 type->attr_update = NULL; 3836 return ret; 3837 } 3838 3839 static int skx_iio_set_mapping(struct intel_uncore_type *type) 3840 { 3841 return pmu_iio_set_mapping(type, &skx_iio_mapping_group); 3842 } 3843 3844 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type) 3845 { 3846 struct attribute **attr = skx_iio_mapping_group.attrs; 3847 3848 if (!attr) 3849 return; 3850 3851 for (; *attr; attr++) 3852 kfree((*attr)->name); 3853 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs)); 3854 kfree(skx_iio_mapping_group.attrs); 3855 skx_iio_mapping_group.attrs = NULL; 3856 kfree(type->topology); 3857 } 3858 3859 static struct intel_uncore_type skx_uncore_iio = { 3860 .name = "iio", 3861 .num_counters = 4, 3862 .num_boxes = 6, 3863 .perf_ctr_bits = 48, 3864 .event_ctl = SKX_IIO0_MSR_PMON_CTL0, 3865 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, 3866 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, 3867 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, 3868 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL, 3869 .msr_offset = SKX_IIO_MSR_OFFSET, 3870 .constraints = skx_uncore_iio_constraints, 3871 .ops = &skx_uncore_iio_ops, 3872 .format_group = 
&skx_uncore_iio_format_group, 3873 .attr_update = skx_iio_attr_update, 3874 .get_topology = skx_iio_get_topology, 3875 .set_mapping = skx_iio_set_mapping, 3876 .cleanup_mapping = skx_iio_cleanup_mapping, 3877 }; 3878 3879 enum perf_uncore_iio_freerunning_type_id { 3880 SKX_IIO_MSR_IOCLK = 0, 3881 SKX_IIO_MSR_BW = 1, 3882 SKX_IIO_MSR_UTIL = 2, 3883 3884 SKX_IIO_FREERUNNING_TYPE_MAX, 3885 }; 3886 3887 3888 static struct freerunning_counters skx_iio_freerunning[] = { 3889 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 }, 3890 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 }, 3891 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 }, 3892 }; 3893 3894 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = { 3895 /* Free-Running IO CLOCKS Counter */ 3896 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), 3897 /* Free-Running IIO BANDWIDTH Counters */ 3898 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), 3899 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), 3900 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), 3901 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), 3902 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), 3903 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), 3904 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), 3905 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), 3906 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), 3907 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), 3908 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), 3909 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), 3910 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"), 3911 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), 3912 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), 3913 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"), 3914 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), 3915 
INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), 3916 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"), 3917 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), 3918 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), 3919 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"), 3920 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), 3921 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), 3922 /* Free-running IIO UTILIZATION Counters */ 3923 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"), 3924 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"), 3925 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"), 3926 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"), 3927 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"), 3928 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"), 3929 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"), 3930 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"), 3931 { /* end: all zeroes */ }, 3932 }; 3933 3934 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = { 3935 .read_counter = uncore_msr_read_counter, 3936 .hw_config = uncore_freerunning_hw_config, 3937 }; 3938 3939 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = { 3940 &format_attr_event.attr, 3941 &format_attr_umask.attr, 3942 NULL, 3943 }; 3944 3945 static const struct attribute_group skx_uncore_iio_freerunning_format_group = { 3946 .name = "format", 3947 .attrs = skx_uncore_iio_freerunning_formats_attr, 3948 }; 3949 3950 static struct intel_uncore_type skx_uncore_iio_free_running = { 3951 .name = "iio_free_running", 3952 .num_counters = 17, 3953 .num_boxes = 6, 3954 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX, 3955 .freerunning = skx_iio_freerunning, 3956 .ops = &skx_uncore_iio_freerunning_ops, 3957 .event_descs = skx_uncore_iio_freerunning_events, 3958 .format_group = 
&skx_uncore_iio_freerunning_format_group, 3959 }; 3960 3961 static struct attribute *skx_uncore_formats_attr[] = { 3962 &format_attr_event.attr, 3963 &format_attr_umask.attr, 3964 &format_attr_edge.attr, 3965 &format_attr_inv.attr, 3966 &format_attr_thresh8.attr, 3967 NULL, 3968 }; 3969 3970 static const struct attribute_group skx_uncore_format_group = { 3971 .name = "format", 3972 .attrs = skx_uncore_formats_attr, 3973 }; 3974 3975 static struct intel_uncore_type skx_uncore_irp = { 3976 .name = "irp", 3977 .num_counters = 2, 3978 .num_boxes = 6, 3979 .perf_ctr_bits = 48, 3980 .event_ctl = SKX_IRP0_MSR_PMON_CTL0, 3981 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, 3982 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3983 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL, 3984 .msr_offset = SKX_IRP_MSR_OFFSET, 3985 .ops = &skx_uncore_iio_ops, 3986 .format_group = &skx_uncore_format_group, 3987 }; 3988 3989 static struct attribute *skx_uncore_pcu_formats_attr[] = { 3990 &format_attr_event.attr, 3991 &format_attr_umask.attr, 3992 &format_attr_edge.attr, 3993 &format_attr_inv.attr, 3994 &format_attr_thresh8.attr, 3995 &format_attr_occ_invert.attr, 3996 &format_attr_occ_edge_det.attr, 3997 &format_attr_filter_band0.attr, 3998 &format_attr_filter_band1.attr, 3999 &format_attr_filter_band2.attr, 4000 &format_attr_filter_band3.attr, 4001 NULL, 4002 }; 4003 4004 static struct attribute_group skx_uncore_pcu_format_group = { 4005 .name = "format", 4006 .attrs = skx_uncore_pcu_formats_attr, 4007 }; 4008 4009 static struct intel_uncore_ops skx_uncore_pcu_ops = { 4010 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 4011 .hw_config = hswep_pcu_hw_config, 4012 .get_constraint = snbep_pcu_get_constraint, 4013 .put_constraint = snbep_pcu_put_constraint, 4014 }; 4015 4016 static struct intel_uncore_type skx_uncore_pcu = { 4017 .name = "pcu", 4018 .num_counters = 4, 4019 .num_boxes = 1, 4020 .perf_ctr_bits = 48, 4021 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, 4022 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, 4023 .event_mask = 
SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 4024 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 4025 .num_shared_regs = 1, 4026 .ops = &skx_uncore_pcu_ops, 4027 .format_group = &skx_uncore_pcu_format_group, 4028 }; 4029 4030 static struct intel_uncore_type *skx_msr_uncores[] = { 4031 &skx_uncore_ubox, 4032 &skx_uncore_chabox, 4033 &skx_uncore_iio, 4034 &skx_uncore_iio_free_running, 4035 &skx_uncore_irp, 4036 &skx_uncore_pcu, 4037 NULL, 4038 }; 4039 4040 /* 4041 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6 4042 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083. 4043 */ 4044 #define SKX_CAPID6 0x9c 4045 #define SKX_CHA_BIT_MASK GENMASK(27, 0) 4046 4047 static int skx_count_chabox(void) 4048 { 4049 struct pci_dev *dev = NULL; 4050 u32 val = 0; 4051 4052 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); 4053 if (!dev) 4054 goto out; 4055 4056 pci_read_config_dword(dev, SKX_CAPID6, &val); 4057 val &= SKX_CHA_BIT_MASK; 4058 out: 4059 pci_dev_put(dev); 4060 return hweight32(val); 4061 } 4062 4063 void skx_uncore_cpu_init(void) 4064 { 4065 skx_uncore_chabox.num_boxes = skx_count_chabox(); 4066 uncore_msr_uncores = skx_msr_uncores; 4067 } 4068 4069 static struct intel_uncore_type skx_uncore_imc = { 4070 .name = "imc", 4071 .num_counters = 4, 4072 .num_boxes = 6, 4073 .perf_ctr_bits = 48, 4074 .fixed_ctr_bits = 48, 4075 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 4076 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 4077 .event_descs = hswep_uncore_imc_events, 4078 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4079 .event_ctl = SNBEP_PCI_PMON_CTL0, 4080 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4081 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4082 .ops = &ivbep_uncore_pci_ops, 4083 .format_group = &skx_uncore_format_group, 4084 }; 4085 4086 static struct attribute *skx_upi_uncore_formats_attr[] = { 4087 &format_attr_event.attr, 4088 &format_attr_umask_ext.attr, 4089 &format_attr_edge.attr, 4090 &format_attr_inv.attr, 4091 &format_attr_thresh8.attr, 4092 
NULL, 4093 }; 4094 4095 static const struct attribute_group skx_upi_uncore_format_group = { 4096 .name = "format", 4097 .attrs = skx_upi_uncore_formats_attr, 4098 }; 4099 4100 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box) 4101 { 4102 struct pci_dev *pdev = box->pci_dev; 4103 4104 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4105 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 4106 } 4107 4108 static struct intel_uncore_ops skx_upi_uncore_pci_ops = { 4109 .init_box = skx_upi_uncore_pci_init_box, 4110 .disable_box = snbep_uncore_pci_disable_box, 4111 .enable_box = snbep_uncore_pci_enable_box, 4112 .disable_event = snbep_uncore_pci_disable_event, 4113 .enable_event = snbep_uncore_pci_enable_event, 4114 .read_counter = snbep_uncore_pci_read_counter, 4115 }; 4116 4117 static struct intel_uncore_type skx_uncore_upi = { 4118 .name = "upi", 4119 .num_counters = 4, 4120 .num_boxes = 3, 4121 .perf_ctr_bits = 48, 4122 .perf_ctr = SKX_UPI_PCI_PMON_CTR0, 4123 .event_ctl = SKX_UPI_PCI_PMON_CTL0, 4124 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4125 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT, 4126 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, 4127 .ops = &skx_upi_uncore_pci_ops, 4128 .format_group = &skx_upi_uncore_format_group, 4129 }; 4130 4131 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box) 4132 { 4133 struct pci_dev *pdev = box->pci_dev; 4134 4135 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4136 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 4137 } 4138 4139 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = { 4140 .init_box = skx_m2m_uncore_pci_init_box, 4141 .disable_box = snbep_uncore_pci_disable_box, 4142 .enable_box = snbep_uncore_pci_enable_box, 4143 .disable_event = snbep_uncore_pci_disable_event, 4144 .enable_event = snbep_uncore_pci_enable_event, 4145 .read_counter = snbep_uncore_pci_read_counter, 4146 }; 4147 4148 static struct intel_uncore_type 
skx_uncore_m2m = { 4149 .name = "m2m", 4150 .num_counters = 4, 4151 .num_boxes = 2, 4152 .perf_ctr_bits = 48, 4153 .perf_ctr = SKX_M2M_PCI_PMON_CTR0, 4154 .event_ctl = SKX_M2M_PCI_PMON_CTL0, 4155 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4156 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL, 4157 .ops = &skx_m2m_uncore_pci_ops, 4158 .format_group = &skx_uncore_format_group, 4159 }; 4160 4161 static struct event_constraint skx_uncore_m2pcie_constraints[] = { 4162 UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 4163 EVENT_CONSTRAINT_END 4164 }; 4165 4166 static struct intel_uncore_type skx_uncore_m2pcie = { 4167 .name = "m2pcie", 4168 .num_counters = 4, 4169 .num_boxes = 4, 4170 .perf_ctr_bits = 48, 4171 .constraints = skx_uncore_m2pcie_constraints, 4172 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4173 .event_ctl = SNBEP_PCI_PMON_CTL0, 4174 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4175 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4176 .ops = &ivbep_uncore_pci_ops, 4177 .format_group = &skx_uncore_format_group, 4178 }; 4179 4180 static struct event_constraint skx_uncore_m3upi_constraints[] = { 4181 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), 4182 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), 4183 UNCORE_EVENT_CONSTRAINT(0x40, 0x7), 4184 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), 4185 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), 4186 UNCORE_EVENT_CONSTRAINT(0x50, 0x7), 4187 UNCORE_EVENT_CONSTRAINT(0x51, 0x7), 4188 UNCORE_EVENT_CONSTRAINT(0x52, 0x7), 4189 EVENT_CONSTRAINT_END 4190 }; 4191 4192 static struct intel_uncore_type skx_uncore_m3upi = { 4193 .name = "m3upi", 4194 .num_counters = 3, 4195 .num_boxes = 3, 4196 .perf_ctr_bits = 48, 4197 .constraints = skx_uncore_m3upi_constraints, 4198 .perf_ctr = SNBEP_PCI_PMON_CTR0, 4199 .event_ctl = SNBEP_PCI_PMON_CTL0, 4200 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4201 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 4202 .ops = &ivbep_uncore_pci_ops, 4203 .format_group = &skx_uncore_format_group, 4204 }; 4205 4206 enum { 4207 SKX_PCI_UNCORE_IMC, 4208 SKX_PCI_UNCORE_M2M, 4209 SKX_PCI_UNCORE_UPI, 4210 
SKX_PCI_UNCORE_M2PCIE, 4211 SKX_PCI_UNCORE_M3UPI, 4212 }; 4213 4214 static struct intel_uncore_type *skx_pci_uncores[] = { 4215 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc, 4216 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m, 4217 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi, 4218 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie, 4219 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi, 4220 NULL, 4221 }; 4222 4223 static const struct pci_device_id skx_uncore_pci_ids[] = { 4224 { /* MC0 Channel 0 */ 4225 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 4226 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0), 4227 }, 4228 { /* MC0 Channel 1 */ 4229 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 4230 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1), 4231 }, 4232 { /* MC0 Channel 2 */ 4233 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 4234 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2), 4235 }, 4236 { /* MC1 Channel 0 */ 4237 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 4238 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3), 4239 }, 4240 { /* MC1 Channel 1 */ 4241 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 4242 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4), 4243 }, 4244 { /* MC1 Channel 2 */ 4245 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 4246 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5), 4247 }, 4248 { /* M2M0 */ 4249 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 4250 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0), 4251 }, 4252 { /* M2M1 */ 4253 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 4254 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1), 4255 }, 4256 { /* UPI0 Link 0 */ 4257 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 4258 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0), 4259 }, 4260 { /* UPI0 Link 1 */ 4261 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 4262 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1), 4263 }, 4264 { 
/* UPI1 Link 2 */ 4265 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 4266 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2), 4267 }, 4268 { /* M2PCIe 0 */ 4269 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4270 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0), 4271 }, 4272 { /* M2PCIe 1 */ 4273 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4274 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1), 4275 }, 4276 { /* M2PCIe 2 */ 4277 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4278 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2), 4279 }, 4280 { /* M2PCIe 3 */ 4281 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 4282 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3), 4283 }, 4284 { /* M3UPI0 Link 0 */ 4285 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), 4286 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0), 4287 }, 4288 { /* M3UPI0 Link 1 */ 4289 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E), 4290 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1), 4291 }, 4292 { /* M3UPI1 Link 2 */ 4293 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), 4294 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2), 4295 }, 4296 { /* end: all zeroes */ } 4297 }; 4298 4299 4300 static struct pci_driver skx_uncore_pci_driver = { 4301 .name = "skx_uncore", 4302 .id_table = skx_uncore_pci_ids, 4303 }; 4304 4305 int skx_uncore_pci_init(void) 4306 { 4307 /* need to double check pci address */ 4308 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false); 4309 4310 if (ret) 4311 return ret; 4312 4313 uncore_pci_uncores = skx_pci_uncores; 4314 uncore_pci_driver = &skx_uncore_pci_driver; 4315 return 0; 4316 } 4317 4318 /* end of SKX uncore support */ 4319 4320 /* SNR uncore support */ 4321 4322 static struct intel_uncore_type snr_uncore_ubox = { 4323 .name = "ubox", 4324 .num_counters = 2, 4325 .num_boxes = 1, 4326 .perf_ctr_bits = 48, 4327 
.fixed_ctr_bits = 48, 4328 .perf_ctr = SNR_U_MSR_PMON_CTR0, 4329 .event_ctl = SNR_U_MSR_PMON_CTL0, 4330 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4331 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR, 4332 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL, 4333 .ops = &ivbep_uncore_msr_ops, 4334 .format_group = &ivbep_uncore_format_group, 4335 }; 4336 4337 static struct attribute *snr_uncore_cha_formats_attr[] = { 4338 &format_attr_event.attr, 4339 &format_attr_umask_ext2.attr, 4340 &format_attr_edge.attr, 4341 &format_attr_tid_en.attr, 4342 &format_attr_inv.attr, 4343 &format_attr_thresh8.attr, 4344 &format_attr_filter_tid5.attr, 4345 NULL, 4346 }; 4347 static const struct attribute_group snr_uncore_chabox_format_group = { 4348 .name = "format", 4349 .attrs = snr_uncore_cha_formats_attr, 4350 }; 4351 4352 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 4353 { 4354 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 4355 4356 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 + 4357 box->pmu->type->msr_offset * box->pmu->pmu_idx; 4358 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID; 4359 reg1->idx = 0; 4360 4361 return 0; 4362 } 4363 4364 static void snr_cha_enable_event(struct intel_uncore_box *box, 4365 struct perf_event *event) 4366 { 4367 struct hw_perf_event *hwc = &event->hw; 4368 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 4369 4370 if (reg1->idx != EXTRA_REG_NONE) 4371 wrmsrl(reg1->reg, reg1->config); 4372 4373 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 4374 } 4375 4376 static struct intel_uncore_ops snr_uncore_chabox_ops = { 4377 .init_box = ivbep_uncore_msr_init_box, 4378 .disable_box = snbep_uncore_msr_disable_box, 4379 .enable_box = snbep_uncore_msr_enable_box, 4380 .disable_event = snbep_uncore_msr_disable_event, 4381 .enable_event = snr_cha_enable_event, 4382 .read_counter = uncore_msr_read_counter, 4383 .hw_config = snr_cha_hw_config, 4384 }; 4385 4386 static struct intel_uncore_type 
snr_uncore_chabox = { 4387 .name = "cha", 4388 .num_counters = 4, 4389 .num_boxes = 6, 4390 .perf_ctr_bits = 48, 4391 .event_ctl = SNR_CHA_MSR_PMON_CTL0, 4392 .perf_ctr = SNR_CHA_MSR_PMON_CTR0, 4393 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL, 4394 .msr_offset = HSWEP_CBO_MSR_OFFSET, 4395 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 4396 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT, 4397 .ops = &snr_uncore_chabox_ops, 4398 .format_group = &snr_uncore_chabox_format_group, 4399 }; 4400 4401 static struct attribute *snr_uncore_iio_formats_attr[] = { 4402 &format_attr_event.attr, 4403 &format_attr_umask.attr, 4404 &format_attr_edge.attr, 4405 &format_attr_inv.attr, 4406 &format_attr_thresh9.attr, 4407 &format_attr_ch_mask2.attr, 4408 &format_attr_fc_mask2.attr, 4409 NULL, 4410 }; 4411 4412 static const struct attribute_group snr_uncore_iio_format_group = { 4413 .name = "format", 4414 .attrs = snr_uncore_iio_formats_attr, 4415 }; 4416 4417 static umode_t 4418 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die) 4419 { 4420 /* Root bus 0x00 is valid only for pmu_idx = 1. 
*/ 4421 return pmu_iio_mapping_visible(kobj, attr, die, 1); 4422 } 4423 4424 static struct attribute_group snr_iio_mapping_group = { 4425 .is_visible = snr_iio_mapping_visible, 4426 }; 4427 4428 static const struct attribute_group *snr_iio_attr_update[] = { 4429 &snr_iio_mapping_group, 4430 NULL, 4431 }; 4432 4433 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping) 4434 { 4435 u32 sad_cfg; 4436 int die, stack_id, ret = -EPERM; 4437 struct pci_dev *dev = NULL; 4438 4439 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology), 4440 GFP_KERNEL); 4441 if (!type->topology) 4442 return -ENOMEM; 4443 4444 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) { 4445 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg); 4446 if (ret) { 4447 ret = pcibios_err_to_errno(ret); 4448 break; 4449 } 4450 4451 die = uncore_pcibus_to_dieid(dev->bus); 4452 stack_id = SAD_CONTROL_STACK_ID(sad_cfg); 4453 if (die < 0 || stack_id >= type->num_boxes) { 4454 ret = -EPERM; 4455 break; 4456 } 4457 4458 /* Convert stack id from SAD_CONTROL to PMON notation. 
*/ 4459 stack_id = sad_pmon_mapping[stack_id]; 4460 4461 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number; 4462 type->topology[die].segment = pci_domain_nr(dev->bus); 4463 } 4464 4465 if (ret) { 4466 kfree(type->topology); 4467 type->topology = NULL; 4468 } 4469 4470 return ret; 4471 } 4472 4473 /* 4474 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON 4475 */ 4476 enum { 4477 SNR_QAT_PMON_ID, 4478 SNR_CBDMA_DMI_PMON_ID, 4479 SNR_NIS_PMON_ID, 4480 SNR_DLB_PMON_ID, 4481 SNR_PCIE_GEN3_PMON_ID 4482 }; 4483 4484 static u8 snr_sad_pmon_mapping[] = { 4485 SNR_CBDMA_DMI_PMON_ID, 4486 SNR_PCIE_GEN3_PMON_ID, 4487 SNR_DLB_PMON_ID, 4488 SNR_NIS_PMON_ID, 4489 SNR_QAT_PMON_ID 4490 }; 4491 4492 static int snr_iio_get_topology(struct intel_uncore_type *type) 4493 { 4494 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping); 4495 } 4496 4497 static int snr_iio_set_mapping(struct intel_uncore_type *type) 4498 { 4499 return pmu_iio_set_mapping(type, &snr_iio_mapping_group); 4500 } 4501 4502 static struct intel_uncore_type snr_uncore_iio = { 4503 .name = "iio", 4504 .num_counters = 4, 4505 .num_boxes = 5, 4506 .perf_ctr_bits = 48, 4507 .event_ctl = SNR_IIO_MSR_PMON_CTL0, 4508 .perf_ctr = SNR_IIO_MSR_PMON_CTR0, 4509 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4510 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, 4511 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, 4512 .msr_offset = SNR_IIO_MSR_OFFSET, 4513 .ops = &ivbep_uncore_msr_ops, 4514 .format_group = &snr_uncore_iio_format_group, 4515 .attr_update = snr_iio_attr_update, 4516 .get_topology = snr_iio_get_topology, 4517 .set_mapping = snr_iio_set_mapping, 4518 .cleanup_mapping = skx_iio_cleanup_mapping, 4519 }; 4520 4521 static struct intel_uncore_type snr_uncore_irp = { 4522 .name = "irp", 4523 .num_counters = 2, 4524 .num_boxes = 5, 4525 .perf_ctr_bits = 48, 4526 .event_ctl = SNR_IRP0_MSR_PMON_CTL0, 4527 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0, 4528 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 
4529 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL, 4530 .msr_offset = SNR_IRP_MSR_OFFSET, 4531 .ops = &ivbep_uncore_msr_ops, 4532 .format_group = &ivbep_uncore_format_group, 4533 }; 4534 4535 static struct intel_uncore_type snr_uncore_m2pcie = { 4536 .name = "m2pcie", 4537 .num_counters = 4, 4538 .num_boxes = 5, 4539 .perf_ctr_bits = 48, 4540 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0, 4541 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0, 4542 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL, 4543 .msr_offset = SNR_M2PCIE_MSR_OFFSET, 4544 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4545 .ops = &ivbep_uncore_msr_ops, 4546 .format_group = &ivbep_uncore_format_group, 4547 }; 4548 4549 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event) 4550 { 4551 struct hw_perf_event *hwc = &event->hw; 4552 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 4553 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK; 4554 4555 if (ev_sel >= 0xb && ev_sel <= 0xe) { 4556 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER; 4557 reg1->idx = ev_sel - 0xb; 4558 reg1->config = event->attr.config1 & (0xff << reg1->idx); 4559 } 4560 return 0; 4561 } 4562 4563 static struct intel_uncore_ops snr_uncore_pcu_ops = { 4564 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 4565 .hw_config = snr_pcu_hw_config, 4566 .get_constraint = snbep_pcu_get_constraint, 4567 .put_constraint = snbep_pcu_put_constraint, 4568 }; 4569 4570 static struct intel_uncore_type snr_uncore_pcu = { 4571 .name = "pcu", 4572 .num_counters = 4, 4573 .num_boxes = 1, 4574 .perf_ctr_bits = 48, 4575 .perf_ctr = SNR_PCU_MSR_PMON_CTR0, 4576 .event_ctl = SNR_PCU_MSR_PMON_CTL0, 4577 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4578 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL, 4579 .num_shared_regs = 1, 4580 .ops = &snr_uncore_pcu_ops, 4581 .format_group = &skx_uncore_pcu_format_group, 4582 }; 4583 4584 enum perf_uncore_snr_iio_freerunning_type_id { 4585 SNR_IIO_MSR_IOCLK, 4586 SNR_IIO_MSR_BW_IN, 4587 4588 SNR_IIO_FREERUNNING_TYPE_MAX, 4589 }; 4590 4591 static struct 
freerunning_counters snr_iio_freerunning[] = { 4592 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 }, 4593 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 }, 4594 }; 4595 4596 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { 4597 /* Free-Running IIO CLOCKS Counter */ 4598 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), 4599 /* Free-Running IIO BANDWIDTH IN Counters */ 4600 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), 4601 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), 4602 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), 4603 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), 4604 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), 4605 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), 4606 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), 4607 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), 4608 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), 4609 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), 4610 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), 4611 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), 4612 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), 4613 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), 4614 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), 4615 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), 4616 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), 4617 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), 4618 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), 4619 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), 4620 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), 4621 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), 4622 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), 4623 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), 4624 { /* end: all zeroes */ }, 4625 }; 4626 4627 static struct 
intel_uncore_type snr_uncore_iio_free_running = { 4628 .name = "iio_free_running", 4629 .num_counters = 9, 4630 .num_boxes = 5, 4631 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX, 4632 .freerunning = snr_iio_freerunning, 4633 .ops = &skx_uncore_iio_freerunning_ops, 4634 .event_descs = snr_uncore_iio_freerunning_events, 4635 .format_group = &skx_uncore_iio_freerunning_format_group, 4636 }; 4637 4638 static struct intel_uncore_type *snr_msr_uncores[] = { 4639 &snr_uncore_ubox, 4640 &snr_uncore_chabox, 4641 &snr_uncore_iio, 4642 &snr_uncore_irp, 4643 &snr_uncore_m2pcie, 4644 &snr_uncore_pcu, 4645 &snr_uncore_iio_free_running, 4646 NULL, 4647 }; 4648 4649 void snr_uncore_cpu_init(void) 4650 { 4651 uncore_msr_uncores = snr_msr_uncores; 4652 } 4653 4654 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box) 4655 { 4656 struct pci_dev *pdev = box->pci_dev; 4657 int box_ctl = uncore_pci_box_ctl(box); 4658 4659 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 4660 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT); 4661 } 4662 4663 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = { 4664 .init_box = snr_m2m_uncore_pci_init_box, 4665 .disable_box = snbep_uncore_pci_disable_box, 4666 .enable_box = snbep_uncore_pci_enable_box, 4667 .disable_event = snbep_uncore_pci_disable_event, 4668 .enable_event = snbep_uncore_pci_enable_event, 4669 .read_counter = snbep_uncore_pci_read_counter, 4670 }; 4671 4672 static struct attribute *snr_m2m_uncore_formats_attr[] = { 4673 &format_attr_event.attr, 4674 &format_attr_umask_ext3.attr, 4675 &format_attr_edge.attr, 4676 &format_attr_inv.attr, 4677 &format_attr_thresh8.attr, 4678 NULL, 4679 }; 4680 4681 static const struct attribute_group snr_m2m_uncore_format_group = { 4682 .name = "format", 4683 .attrs = snr_m2m_uncore_formats_attr, 4684 }; 4685 4686 static struct intel_uncore_type snr_uncore_m2m = { 4687 .name = "m2m", 4688 .num_counters = 4, 4689 .num_boxes = 1, 4690 .perf_ctr_bits = 48, 4691 
.perf_ctr = SNR_M2M_PCI_PMON_CTR0, 4692 .event_ctl = SNR_M2M_PCI_PMON_CTL0, 4693 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 4694 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT, 4695 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL, 4696 .ops = &snr_m2m_uncore_pci_ops, 4697 .format_group = &snr_m2m_uncore_format_group, 4698 }; 4699 4700 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event) 4701 { 4702 struct pci_dev *pdev = box->pci_dev; 4703 struct hw_perf_event *hwc = &event->hw; 4704 4705 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN)); 4706 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32)); 4707 } 4708 4709 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = { 4710 .init_box = snr_m2m_uncore_pci_init_box, 4711 .disable_box = snbep_uncore_pci_disable_box, 4712 .enable_box = snbep_uncore_pci_enable_box, 4713 .disable_event = snbep_uncore_pci_disable_event, 4714 .enable_event = snr_uncore_pci_enable_event, 4715 .read_counter = snbep_uncore_pci_read_counter, 4716 }; 4717 4718 static struct intel_uncore_type snr_uncore_pcie3 = { 4719 .name = "pcie3", 4720 .num_counters = 4, 4721 .num_boxes = 1, 4722 .perf_ctr_bits = 48, 4723 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0, 4724 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0, 4725 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, 4726 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, 4727 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL, 4728 .ops = &snr_pcie3_uncore_pci_ops, 4729 .format_group = &skx_uncore_iio_format_group, 4730 }; 4731 4732 enum { 4733 SNR_PCI_UNCORE_M2M, 4734 SNR_PCI_UNCORE_PCIE3, 4735 }; 4736 4737 static struct intel_uncore_type *snr_pci_uncores[] = { 4738 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m, 4739 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3, 4740 NULL, 4741 }; 4742 4743 static const struct pci_device_id snr_uncore_pci_ids[] = { 4744 { /* M2M */ 4745 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a), 4746 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 
							0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

/* Devices discovered via a secondary (sub) driver, e.g. PCIe3 root ports. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};

/*
 * Register the SNR PCI uncore PMUs.
 * Returns 0 on success or the error from the bus-to-die mapping setup.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}

/*
 * Find the memory-controller PCI device (DID 0x3451) belonging to die @id.
 * Returns a referenced pci_dev (pci_get_device() takes a reference) or NULL;
 * the caller owns the reference and must pci_dev_put() it.
 */
static struct pci_dev *snr_uncore_get_mc_dev(int id)
{
	struct pci_dev *mc_dev = NULL;
	int pkg;

	while (1) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
		if (!mc_dev)
			break;
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
		if (pkg == id)
			break;
	}
	return mc_dev;
}

/*
 * Map the IMC PMON MMIO region for @box: the base address is assembled
 * from two config-space fields of this die's MC device (bits shifted into
 * place per the SNR register layout), offset by @box_ctl.
 */
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr,
type->mmio_map_size); 4820 if (!box->io_addr) { 4821 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); 4822 return; 4823 } 4824 4825 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr); 4826 } 4827 4828 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box) 4829 { 4830 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), 4831 SNR_IMC_MMIO_MEM0_OFFSET); 4832 } 4833 4834 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box) 4835 { 4836 u32 config; 4837 4838 if (!box->io_addr) 4839 return; 4840 4841 config = readl(box->io_addr); 4842 config |= SNBEP_PMON_BOX_CTL_FRZ; 4843 writel(config, box->io_addr); 4844 } 4845 4846 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box) 4847 { 4848 u32 config; 4849 4850 if (!box->io_addr) 4851 return; 4852 4853 config = readl(box->io_addr); 4854 config &= ~SNBEP_PMON_BOX_CTL_FRZ; 4855 writel(config, box->io_addr); 4856 } 4857 4858 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box, 4859 struct perf_event *event) 4860 { 4861 struct hw_perf_event *hwc = &event->hw; 4862 4863 if (!box->io_addr) 4864 return; 4865 4866 if (!uncore_mmio_is_valid_offset(box, hwc->config_base)) 4867 return; 4868 4869 writel(hwc->config | SNBEP_PMON_CTL_EN, 4870 box->io_addr + hwc->config_base); 4871 } 4872 4873 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box, 4874 struct perf_event *event) 4875 { 4876 struct hw_perf_event *hwc = &event->hw; 4877 4878 if (!box->io_addr) 4879 return; 4880 4881 if (!uncore_mmio_is_valid_offset(box, hwc->config_base)) 4882 return; 4883 4884 writel(hwc->config, box->io_addr + hwc->config_base); 4885 } 4886 4887 static struct intel_uncore_ops snr_uncore_mmio_ops = { 4888 .init_box = snr_uncore_mmio_init_box, 4889 .exit_box = uncore_mmio_exit_box, 4890 .disable_box = snr_uncore_mmio_disable_box, 4891 .enable_box = snr_uncore_mmio_enable_box, 4892 .disable_event = snr_uncore_mmio_disable_event, 4893 .enable_event = 
			  snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* cas_count scale 6.103515625e-5 converts 64-byte CAS transfers to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* SNR IMC uncore PMU, accessed through the MMIO window mapped above. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter offset, box offset, counter offset, #counters, bits } */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* Free-running counters need no enable/disable, only init/read. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Register the SNR MMIO-based uncore PMUs. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

/* Per-CHA MSR offsets; indexed by pmu_idx in icx_cha_hw_config(). */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};

/*
 * When the TID-enable bit is set in the event config, route config1's
 * TID filter bits to this CHA's FILTER0 register via extra_reg.
 */
static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 &
			       SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

/* num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};

/* Per-box MSR offsets shared by the ICX IIO/IRP/M2PCIE types. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};

static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};

/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};

/* Index: SAD_CONTROL_CFG stack id; value: PMON id from the enum above. */
static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};

static int icx_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}

static int icx_iio_set_mapping(struct intel_uncore_type *type)
{
	return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}

static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		=
				  ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* The clk and bw free-running counters use different per-box strides. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
 * registers which located at Device 30, Function 3
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

/*
 * Count enabled CHAs from the CAPID6/CAPID7 bitmask of the device with
 * DID 0x345b. The two dword reads fill the low/high halves of @caps
 * (x86 is little-endian, so (u32 *)&caps + 1 is the high half).
 */
static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}

/* Register the ICX MSR-based uncore PMUs; CHA count comes from fuses. */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

/* ICX M2M reuses the SNR M2M register layout and PCI ops. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]	= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]	= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]	= &icx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

/*
 * Register the ICX PCI uncore PMUs.
 * Returns 0 on success or the error from the bus-to-die mapping setup.
 */
int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

/*
 * ICX IMC boxes are laid out as channels within memory controllers:
 * box control strides by channel within an MC, and the memory window
 * strides by MC (pmu_idx / channels-per-MC).
 */
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
}

/* ICX IMC reuses the SNR MMIO box/event helpers with its own init_box. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter offset, box offset, counter offset, #counters, bits } */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* One free-running box per memory controller; window strides by MC. */
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Register the ICX MMIO-based uncore PMUs. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */