// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021 Intel Corporation

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/peci.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/unaligned.h>

#include "internal.h"

#define PECI_GET_DIB_CMD		0xf7
#define PECI_GET_DIB_WR_LEN		1
#define PECI_GET_DIB_RD_LEN		8

#define PECI_GET_TEMP_CMD		0x01
#define PECI_GET_TEMP_WR_LEN		1
#define PECI_GET_TEMP_RD_LEN		2

#define PECI_RDPKGCFG_CMD		0xa1
#define PECI_RDPKGCFG_WR_LEN		5
#define PECI_RDPKGCFG_RD_LEN_BASE	1
#define PECI_WRPKGCFG_CMD		0xa5
#define PECI_WRPKGCFG_WR_LEN_BASE	6
#define PECI_WRPKGCFG_RD_LEN		1

#define PECI_RDIAMSR_CMD		0xb1
#define PECI_RDIAMSR_WR_LEN		5
#define PECI_RDIAMSR_RD_LEN		9
#define PECI_WRIAMSR_CMD		0xb5
#define PECI_RDIAMSREX_CMD		0xd1
#define PECI_RDIAMSREX_WR_LEN		6
#define PECI_RDIAMSREX_RD_LEN		9

#define PECI_RDPCICFG_CMD		0x61
#define PECI_RDPCICFG_WR_LEN		6
#define PECI_RDPCICFG_RD_LEN		5
#define PECI_RDPCICFG_RD_LEN_MAX	24
#define PECI_WRPCICFG_CMD		0x65

#define PECI_RDPCICFGLOCAL_CMD			0xe1
#define PECI_RDPCICFGLOCAL_WR_LEN		5
#define PECI_RDPCICFGLOCAL_RD_LEN_BASE		1
#define PECI_WRPCICFGLOCAL_CMD			0xe5
#define PECI_WRPCICFGLOCAL_WR_LEN_BASE		6
#define PECI_WRPCICFGLOCAL_RD_LEN		1

#define PECI_ENDPTCFG_TYPE_LOCAL_PCI		0x03
#define PECI_ENDPTCFG_TYPE_PCI			0x04
#define PECI_ENDPTCFG_TYPE_MMIO			0x05
#define PECI_ENDPTCFG_ADDR_TYPE_PCI		0x04
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D		0x05
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q		0x06
#define PECI_RDENDPTCFG_CMD			0xc1
#define PECI_RDENDPTCFG_PCI_WR_LEN		12
#define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE	10
#define PECI_RDENDPTCFG_MMIO_D_WR_LEN		14
#define PECI_RDENDPTCFG_MMIO_Q_WR_LEN		18
#define PECI_RDENDPTCFG_RD_LEN_BASE		1
#define PECI_WRENDPTCFG_CMD			0xc5
#define PECI_WRENDPTCFG_PCI_WR_LEN_BASE		13
#define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE	15
#define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE	19
#define PECI_WRENDPTCFG_RD_LEN			1

/* Device Specific Completion Code (CC) Definition */
#define PECI_CC_SUCCESS				0x40
#define PECI_CC_NEED_RETRY			0x80
#define PECI_CC_OUT_OF_RESOURCE			0x81
#define PECI_CC_UNAVAIL_RESOURCE		0x82
#define PECI_CC_INVALID_REQ			0x90
#define PECI_CC_MCA_ERROR			0x91
#define PECI_CC_CATASTROPHIC_MCA_ERROR		0x93
#define PECI_CC_FATAL_MCA_ERROR			0x94
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB		0x98
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR	0x9B
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA	0x9C

#define PECI_RETRY_BIT			BIT(0)

#define PECI_RETRY_TIMEOUT		msecs_to_jiffies(700)
#define PECI_RETRY_INTERVAL_MIN		msecs_to_jiffies(1)
#define PECI_RETRY_INTERVAL_MAX		msecs_to_jiffies(128)

static u8 peci_request_data_cc(struct peci_request *req)
{
	return req->rx.buf[0];
}

/**
 * peci_request_status() - return -errno based on PECI completion code
 * @req: the PECI request that contains response data with completion code
 *
 * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we
 * don't expect a completion code in the response.
 *
 * Return: -errno
 */
int peci_request_status(struct peci_request *req)
{
	u8 cc = peci_request_data_cc(req);

	if (cc != PECI_CC_SUCCESS)
		dev_dbg(&req->device->dev, "ret: %#02x\n", cc);

	switch (cc) {
	case PECI_CC_SUCCESS:
		return 0;
	case PECI_CC_NEED_RETRY:
	case PECI_CC_OUT_OF_RESOURCE:
	case PECI_CC_UNAVAIL_RESOURCE:
		return -EAGAIN;
	case PECI_CC_INVALID_REQ:
		return -EINVAL;
	case PECI_CC_MCA_ERROR:
	case PECI_CC_CATASTROPHIC_MCA_ERROR:
	case PECI_CC_FATAL_MCA_ERROR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA:
		return -EIO;
	}

	WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc);

	return -EIO;
}
EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI);
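/*
 * Usage sketch (hypothetical caller, not part of this file): a consumer that
 * obtained a completed request from one of the peci_xfer_*() helpers below
 * would typically map the completion code to an errno before reading data:
 *
 *	req = peci_xfer_pkg_cfg_readw(device, index, param);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	ret = peci_request_status(req);
 *	if (!ret)
 *		val = peci_request_data_readw(req);
 *	peci_request_free(req);
 */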
static int peci_request_xfer(struct peci_request *req)
{
	struct peci_device *device = req->device;
	struct peci_controller *controller = to_peci_controller(device->dev.parent);
	int ret;

	mutex_lock(&controller->bus_lock);
	ret = controller->ops->xfer(controller, device->addr, req);
	mutex_unlock(&controller->bus_lock);

	return ret;
}

static int peci_request_xfer_retry(struct peci_request *req)
{
	long wait_interval = PECI_RETRY_INTERVAL_MIN;
	struct peci_device *device = req->device;
	struct peci_controller *controller = to_peci_controller(device->dev.parent);
	unsigned long start = jiffies;
	int ret;

	/* Don't try to use it for ping */
	if (WARN_ON(req->tx.len == 0))
		return 0;

	do {
		ret = peci_request_xfer(req);
		if (ret) {
			dev_dbg(&controller->dev, "xfer error: %d\n", ret);
			return ret;
		}

		if (peci_request_status(req) != -EAGAIN)
			return 0;

		/* Set the retry bit to indicate a retry attempt */
		req->tx.buf[1] |= PECI_RETRY_BIT;

		if (schedule_timeout_interruptible(wait_interval))
			return -ERESTARTSYS;

		wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX);
	} while (time_before(jiffies, start + PECI_RETRY_TIMEOUT));

	dev_dbg(&controller->dev, "request timed out\n");

	return -ETIMEDOUT;
}

/**
 * peci_request_alloc() - allocate &struct peci_request
 * @device: PECI device to which request is going to be sent
 * @tx_len: TX length
 * @rx_len: RX length
 *
 * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise.
 */
struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len)
{
	struct peci_request *req;

	/*
	 * TX and RX buffers are fixed-length members of peci_request. This is
	 * just a warning for developers to make sure to expand the buffers
	 * (or change the allocation method) if we go over the current limit.
	 */
	if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE))
		return NULL;
	/*
	 * The PECI controllers we support now don't do DMA. This should be
	 * converted to the DMA API, to avoid an extra copy, once support for
	 * controllers that allow it is added.
	 */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->device = device;
	req->tx.len = tx_len;
	req->rx.len = rx_len;

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI);

/**
 * peci_request_free() - free peci_request
 * @req: the PECI request to be freed
 */
void peci_request_free(struct peci_request *req)
{
	kfree(req);
}
EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI);
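/*
 * Lifecycle sketch (hypothetical command, made-up SOME_PECI_CMD opcode and
 * lengths): every transfer is bracketed by peci_request_alloc() and
 * peci_request_free(), with the caller encoding the command into the TX
 * buffer in between - the same pattern the peci_xfer_*() helpers below use:
 *
 *	req = peci_request_alloc(device, wr_len, rd_len);
 *	if (!req)
 *		return -ENOMEM;
 *	req->tx.buf[0] = SOME_PECI_CMD;
 *	ret = peci_request_xfer_retry(req);
 *	if (ret) {
 *		peci_request_free(req);
 *		return ret;
 *	}
 *	... consume the response via peci_request_data_*() ...
 *	peci_request_free(req);
 */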
struct peci_request *peci_xfer_get_dib(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_DIB_CMD;

	ret = peci_request_xfer(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI);

struct peci_request *peci_xfer_get_temp(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_TEMP_CMD;

	ret = peci_request_xfer(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI);

static struct peci_request *
__pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDPKGCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = index;
	put_unaligned_le16(param, &req->tx.buf[3]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg)
{
	return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12;
}
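/*
 * Worked example of the encoding above (arbitrary values): bus 0x00,
 * dev 0x1e, func 0, reg 0x84 gives PCI_DEVFN(0x1e, 0) = 0xf0 and
 * PCI_DEVID(0x00, 0xf0) = 0x00f0, so the resulting address is
 * 0x84 | (0xf0 << 12) = 0xf0084 - register in bits [11:0], devfn in
 * bits [19:12], bus in bits [27:20].
 */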
static struct peci_request *
__pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	u32 pci_addr;
	int ret;

	req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN,
				 PECI_RDPCICFGLOCAL_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	pci_addr = __get_pci_addr(bus, dev, func, reg);

	req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD;
	req->tx.buf[1] = 0;
	put_unaligned_le24(pci_addr, &req->tx.buf[2]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

static struct peci_request *
__ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg,
		  u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	u32 pci_addr;
	int ret;

	req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN,
				 PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	pci_addr = __get_pci_addr(bus, dev, func, reg);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = msg_type;
	req->tx.buf[3] = 0; /* Endpoint ID */
	req->tx.buf[4] = 0; /* Reserved */
	req->tx.buf[5] = 0;
	req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
	req->tx.buf[7] = seg; /* PCI Segment */
	put_unaligned_le32(pci_addr, &req->tx.buf[8]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

static struct peci_request *
__ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg,
	       u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO;
	req->tx.buf[3] = 0; /* Endpoint ID */
	req->tx.buf[4] = 0; /* Reserved */
	req->tx.buf[5] = bar;
	req->tx.buf[6] = addr_type;
	req->tx.buf[7] = seg; /* PCI Segment */
	req->tx.buf[8] = PCI_DEVFN(dev, func);
	req->tx.buf[9] = bus; /* PCI Bus */

	if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
		put_unaligned_le32(offset, &req->tx.buf[10]);
	else
		put_unaligned_le64(offset, &req->tx.buf[10]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

u8 peci_request_data_readb(struct peci_request *req)
{
	return req->rx.buf[1];
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI);

u16 peci_request_data_readw(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI);

u32 peci_request_data_readl(struct peci_request *req)
{
	return get_unaligned_le32(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI);

u64 peci_request_data_readq(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI);

u64 peci_request_dib_read(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI);

s16 peci_request_temp_read(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI);

#define __read_pkg_config(x, type) \
struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \
{ \
	return __pkg_cfg_read(device, index, param, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI)

__read_pkg_config(readb, u8);
__read_pkg_config(readw, u16);
__read_pkg_config(readl, u32);
__read_pkg_config(readq, u64);
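/*
 * For reference, __read_pkg_config(readl, u32) above expands to roughly:
 *
 *	struct peci_request *peci_xfer_pkg_cfg_readl(struct peci_device *device,
 *						     u8 index, u16 param)
 *	{
 *		return __pkg_cfg_read(device, index, param, sizeof(u32));
 *	}
 *
 * plus the matching EXPORT_SYMBOL_NS_GPL() entry; the macros below follow
 * the same pattern for the PCI config and MMIO variants.
 */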
#define __read_pci_config_local(x, type) \
struct peci_request * \
peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI)

__read_pci_config_local(readb, u8);
__read_pci_config_local(readw, u16);
__read_pci_config_local(readl, u32);

#define __read_ep_pci_config(x, msg_type, type) \
struct peci_request * \
peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI)

__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8);
__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16);
__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32);
__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8);
__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16);
__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32);

#define __read_ep_mmio(x, y, addr_type, type1, type2) \
struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \
						u8 bus, u8 dev, u8 func, u64 offset) \
{ \
	return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \
			      offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \
			      sizeof(type2)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI)

__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32);
__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);
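/*
 * End-to-end sketch (hypothetical consumer, e.g. a hwmon driver): GetTemp()
 * responses carry no completion code, so the result is decoded directly with
 * peci_request_temp_read() instead of going through peci_request_status().
 * Per the PECI spec, the value is a signed quantity in 1/64 degree C units:
 *
 *	req = peci_xfer_get_temp(device);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	temp = peci_request_temp_read(req);
 *	peci_request_free(req);
 */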