/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;

struct idxd_wq;
struct idxd_dev;

enum idxd_dev_type {
	IDXD_DEV_NONE = -1,
	IDXD_DEV_DSA = 0,
	IDXD_DEV_IAX,
	IDXD_DEV_WQ,
	IDXD_DEV_GROUP,
	IDXD_DEV_ENGINE,
	IDXD_DEV_CDEV,
	IDXD_DEV_MAX_TYPE,
};

struct idxd_dev {
	struct device conf_dev;
	enum idxd_dev_type type;
};

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

struct idxd_device_driver {
	const char *name;
	enum idxd_dev_type *type;
	int (*probe)(struct idxd_dev *idxd_dev);
	void (*remove)(struct idxd_dev *idxd_dev);
	struct device_driver drv;
};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE	-1
struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * descriptors and the irq thread processing error descriptors.
	 */
	spinlock_t list_lock;
	int int_handle;
	struct idxd_wq *wq;
	ioasid_t pasid;
};

struct idxd_group {
	struct idxd_dev idxd_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};

enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct idxd_dev idxd_dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	u32 portal_offset;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct idxd_dev idxd_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	struct idxd_irq_entry *ie;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	dma_addr_t compls_addr;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

struct idxd_engine {
	struct idxd_dev idxd_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

struct idxd_device {
	struct idxd_dev idxd_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u32 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;
	int irq_cnt;
	bool request_int_handles;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	struct idxd_pmu *idxd_pmu;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	struct idxd_wq *wq;
};
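
/*
 * Rough sketch of the kernel-side descriptor lifecycle, assuming the
 * "submission" and "dmaengine" prototypes declared later in this header are
 * used as in the dmaengine path (illustrative only, error handling omitted):
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	...fill in desc->hw (or desc->iax_hw)...
 *	idxd_submit_desc(wq, desc);
 *
 * The interrupt thread later reaps the completion record written at
 * desc->compl_dma and hands the descriptor back through
 * idxd_dma_complete_txd() and idxd_free_desc().
 */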

/*
 * This is a software-defined error for the completion status. We overload
 * an error code that will never appear as a hardware completion status and
 * is only used in the SWERR register.
 */
enum idxd_completion_status {
	IDXD_COMP_DESC_ABORT = 0xff,
};

#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
#define wq_confdev(wq) &wq->idxd_dev.conf_dev
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_idxd(idxd_dev);
}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return idxd_dev_to_wq(idxd_dev);
}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_group, idxd_dev);
}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}

static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
	if (type >= IDXD_DEV_MAX_TYPE) {
		idev->type = IDXD_DEV_NONE;
		return;
	}

	idev->type = type;
}
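
/*
 * The conversion helpers above recover the idxd objects from the embedded
 * struct device handed back by driver-core/sysfs callbacks. Illustrative
 * sketch only (the attribute and its name are hypothetical):
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct idxd_device *idxd = confdev_to_idxd(dev);
 *
 *		return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
 *	}
 */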

extern struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_DSA;
}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_IAX;
}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
	return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
	return idxd_dev->type == IDXD_DEV_WQ;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_KERNEL;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}

#define IDXD_PORTAL_MASK	(PAGE_SIZE - 1)

/*
 * Even though this function can be accessed by multiple threads, it is safe
 * to use. At worst the address gets used more than once before it gets
 * incremented. We don't hit a threshold until iops reaches many millions per
 * second, so the occasional reuse of the same address is tolerable compared
 * to using an atomic variable. This is safe on a system that has atomic
 * load/store for 32bit integers. Given that this is an Intel iEP device,
 * that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
	int ofs = wq->portal_offset;

	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
	return wq->portal + ofs;
}
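
/*
 * Portal layout implied by the helpers above, assuming 4KiB pages: each WQ
 * gets a four-page stride in the portal BAR, with the unlimited portal at
 * page 0 of the stride and the limited portal at page 1 (prot * 0x1000).
 * For example, idxd_get_wq_portal_full_offset(1, IDXD_PORTAL_LIMITED) is
 * ((1 * 4) << 12) + 0x1000 = 0x5000. idxd_wq_portal_addr() then rotates
 * submissions through the 64-byte slots of the mapped portal page
 * (sizeof(struct dsa_raw_desc) is 64), wrapping at IDXD_PORTAL_MASK so that
 * back-to-back submitters tend to hit different cachelines. Note that
 * dedicated WQs are typically written with MOVDIR64B, while shared WQs
 * require ENQCMD/ENQCMDS and a PASID, which is why device_swq_supported()
 * checks both support_enqcmd and device_pasid_enabled().
 */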

static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
	__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver) \
	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
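
/*
 * A sub-driver on dsa_bus_type might be declared and registered roughly as
 * below; this is a hypothetical sketch, not part of this header (the ->type
 * array lists the idxd_dev types the driver binds to and is assumed to be
 * terminated with IDXD_DEV_NONE):
 *
 *	static enum idxd_dev_type my_dev_types[] = {
 *		IDXD_DEV_WQ,
 *		IDXD_DEV_NONE,
 *	};
 *
 *	static struct idxd_device_driver my_idxd_drv = {
 *		.name = "my_wq_drv",
 *		.type = my_dev_types,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_idxd_driver(my_idxd_drv);
 */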

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type, bool free_desc);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

#endif