/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION	"1.00"

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT	50
#define IDXD_DRAIN_TIMEOUT	5000

enum idxd_type {
	IDXD_TYPE_UNKNOWN = -1,
	IDXD_TYPE_DSA = 0,
	IDXD_TYPE_IAX,
	IDXD_TYPE_MAX,
};

#define IDXD_NAME_SIZE		128
#define IDXD_PMU_EVENT_MAX	64

struct idxd_device_driver {
	struct device_driver drv;
};

struct idxd_irq_entry {
	struct idxd_device *idxd;
	int id;
	int vector;
	struct llist_head pending_llist;
	struct list_head work_list;
	/*
	 * Lock to protect access between the irq thread processing
	 * descriptors and the irq thread processing error descriptors.
	 */
	spinlock_t list_lock;
};

struct idxd_group {
	struct device conf_dev;
	struct idxd_device *idxd;
	struct grpcfg grpcfg;
	int id;
	int num_engines;
	int num_wqs;
	bool use_token_limit;
	u8 tokens_allowed;
	u8 tokens_reserved;
	int tc_a;
	int tc_b;
};

struct idxd_pmu {
	struct idxd_device *idxd;

	struct perf_event *event_list[IDXD_PMU_EVENT_MAX];
	int n_events;

	DECLARE_BITMAP(used_mask, IDXD_PMU_EVENT_MAX);

	struct pmu pmu;
	char name[IDXD_NAME_SIZE];
	int cpu;

	int n_counters;
	int counter_width;
	int n_event_categories;

	bool per_counter_caps_supported;
	unsigned long supported_event_categories;

	unsigned long supported_filters;
	int n_filters;

	struct hlist_node cpuhp_node;
};

#define IDXD_MAX_PRIORITY	0xf

enum idxd_wq_state {
	IDXD_WQ_DISABLED = 0,
	IDXD_WQ_ENABLED,
};

enum idxd_wq_flag {
	WQ_FLAG_DEDICATED = 0,
	WQ_FLAG_BLOCK_ON_FAULT,
};
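/*
 * Informational note (from the DSA architecture, not stated in this header):
 * a dedicated WQ (WQ_FLAG_DEDICATED, tested via wq_dedicated() below) is
 * owned by a single client and submitted to with MOVDIR64B, while a shared
 * WQ is submitted to with ENQCMD/ENQCMDS, which carry a PASID and report
 * whether the descriptor was accepted. This is why device_swq_supported()
 * below requires both ENQCMD support and an enabled PASID.
 */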
enum idxd_wq_type {
	IDXD_WQT_NONE = 0,
	IDXD_WQT_KERNEL,
	IDXD_WQT_USER,
};

struct idxd_cdev {
	struct idxd_wq *wq;
	struct cdev cdev;
	struct device dev;
	int minor;
};

#define IDXD_ALLOCATED_BATCH_SIZE	128U
#define WQ_NAME_SIZE	1024
#define WQ_TYPE_SIZE	10

enum idxd_op_type {
	IDXD_OP_BLOCK = 0,
	IDXD_OP_NONBLOCK = 1,
};

enum idxd_complete_type {
	IDXD_COMPLETE_NORMAL = 0,
	IDXD_COMPLETE_ABORT,
	IDXD_COMPLETE_DEV_FAIL,
};

struct idxd_dma_chan {
	struct dma_chan chan;
	struct idxd_wq *wq;
};

struct idxd_wq {
	void __iomem *portal;
	struct percpu_ref wq_active;
	struct completion wq_dead;
	struct device conf_dev;
	struct idxd_cdev *idxd_cdev;
	struct wait_queue_head err_queue;
	struct idxd_device *idxd;
	int id;
	enum idxd_wq_type type;
	struct idxd_group *group;
	int client_count;
	struct mutex wq_lock;	/* mutex for workqueue */
	u32 size;
	u32 threshold;
	u32 priority;
	enum idxd_wq_state state;
	unsigned long flags;
	union wqcfg *wqcfg;
	u32 vec_ptr;		/* interrupt steering */
	struct dsa_hw_desc **hw_descs;
	int num_descs;
	union {
		struct dsa_completion_record *compls;
		struct iax_completion_record *iax_compls;
	};
	void *compls_raw;
	dma_addr_t compls_addr;
	dma_addr_t compls_addr_raw;
	int compls_size;
	struct idxd_desc **descs;
	struct sbitmap_queue sbq;
	struct idxd_dma_chan *idxd_chan;
	char name[WQ_NAME_SIZE + 1];
	u64 max_xfer_bytes;
	u32 max_batch_size;
	bool ats_dis;
};

struct idxd_engine {
	struct device conf_dev;
	int id;
	struct idxd_group *group;
	struct idxd_device *idxd;
};

/* shadow registers */
struct idxd_hw {
	u32 version;
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
};

enum idxd_device_state {
	IDXD_DEV_HALTED = -1,
	IDXD_DEV_DISABLED = 0,
	IDXD_DEV_CONF_READY,
	IDXD_DEV_ENABLED,
};

enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
};

struct idxd_dma_dev {
	struct idxd_device *idxd;
	struct dma_device dma;
};

struct idxd_driver_data {
	const char *name_prefix;
	enum idxd_type type;
	struct device_type *dev_type;
	int compl_size;
	int align;
};

struct idxd_device {
	struct device conf_dev;
	struct idxd_driver_data *data;
	struct list_head list;
	struct idxd_hw hw;
	enum idxd_device_state state;
	unsigned long flags;
	int id;
	int major;
	u8 cmd_status;

	struct pci_dev *pdev;
	void __iomem *reg_base;

	spinlock_t dev_lock;	/* spinlock for device */
	spinlock_t cmd_lock;	/* spinlock for device commands */
	struct completion *cmd_done;
	struct idxd_group **groups;
	struct idxd_wq **wqs;
	struct idxd_engine **engines;

	struct iommu_sva *sva;
	unsigned int pasid;

	int num_groups;

	u32 msix_perm_offset;
	u32 wqcfg_offset;
	u32 grpcfg_offset;
	u32 perfmon_offset;

	u64 max_xfer_bytes;
	u32 max_batch_size;
	int max_groups;
	int max_engines;
	int max_tokens;
	int max_wqs;
	int max_wq_size;
	int token_limit;
	int nr_tokens;		/* non-reserved tokens */
	unsigned int wqcfg_size;

	union sw_err_reg sw_err;
	wait_queue_head_t cmd_waitq;
	int num_wq_irqs;
	struct idxd_irq_entry *irq_entries;

	struct idxd_dma_dev *idxd_dma;
	struct workqueue_struct *wq;
	struct work_struct work;

	int *int_handles;

	struct idxd_pmu *idxd_pmu;
};

/* IDXD software descriptor */
struct idxd_desc {
	union {
		struct dsa_hw_desc *hw;
		struct iax_hw_desc *iax_hw;
	};
	dma_addr_t desc_dma;
	union {
		struct dsa_completion_record *completion;
		struct iax_completion_record *iax_completion;
	};
	dma_addr_t compl_dma;
	struct dma_async_tx_descriptor txd;
	struct llist_node llnode;
	struct list_head list;
	int id;
	int cpu;
	unsigned int vector;
	struct idxd_wq *wq;
};

#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern struct device_type dsa_device_type;
extern struct device_type iax_device_type;
extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct device *dev)
{
	return dev->type == &dsa_device_type;
}

static inline bool is_iax_dev(struct device *dev)
{
	return dev->type == &iax_device_type;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev) || is_iax_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev->type == &idxd_wq_device_type;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static inline bool wq_dedicated(struct idxd_wq *wq)
{
	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool wq_shared(struct idxd_wq *wq)
{
	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}

static inline bool device_swq_supported(struct idxd_device *idxd)
{
	return (support_enqcmd && device_pasid_enabled(idxd));
}

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

enum idxd_interrupt_type {
	IDXD_IRQ_MSIX = 0,
	IDXD_IRQ_IMS,
};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{
	return prot * 0x1000;
}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
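/*
 * Worked example for the portal offset helpers above (assuming 4K pages,
 * i.e. PAGE_SHIFT == 12): each WQ owns four pages of portal space, so for
 * wq_id == 2 the unlimited portal starts at (2 * 4) << 12 = 0x8000 from the
 * base of the device's WQ portal region, and the limited portal sits one
 * page higher at 0x8000 + 0x1000 = 0x9000.
 */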
static inline void idxd_wq_get(struct idxd_wq *wq)
{
	wq->client_count++;
}

static inline void idxd_wq_put(struct idxd_wq *wq)
{
	wq->client_count--;
}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
void idxd_mask_msix_vectors(struct idxd_device *idxd);
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);

/* device control */
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
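/*
 * Rough usage sketch for the submission helpers above (illustrative only,
 * not lifted from the driver); fill_hw_desc() is a hypothetical stand-in
 * for the caller's own setup of desc->hw, including pointing the hardware
 * descriptor's completion address at desc->compl_dma:
 *
 *	struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	fill_hw_desc(desc->hw, desc->compl_dma);
 *	if (idxd_submit_desc(wq, desc) < 0)
 *		idxd_free_desc(wq, desc);
 */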
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
void perfmon_init(void);
void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

#endif