--- idxd.h (d98793b5d4256faae76177178456214f55bc7083)
+++ idxd.h (8e50d392652f20616a136165dff516b86baf5e49)
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _IDXD_H_
 #define _IDXD_H_

 #include <linux/sbitmap.h>
 #include <linux/dmaengine.h>
 #include <linux/percpu-rwsem.h>
--- 45 unchanged lines hidden ---

 enum idxd_wq_state {
 	IDXD_WQ_DISABLED = 0,
 	IDXD_WQ_ENABLED,
 };

 enum idxd_wq_flag {
 	WQ_FLAG_DEDICATED = 0,
+	WQ_FLAG_BLOCK_ON_FAULT,
 };

 enum idxd_wq_type {
 	IDXD_WQT_NONE = 0,
 	IDXD_WQT_KERNEL,
 	IDXD_WQT_USER,
 };

--- 11 unchanged lines hidden ---
 enum idxd_op_type {
 	IDXD_OP_BLOCK = 0,
 	IDXD_OP_NONBLOCK = 1,
 };

 enum idxd_complete_type {
 	IDXD_COMPLETE_NORMAL = 0,
 	IDXD_COMPLETE_ABORT,
+	IDXD_COMPLETE_DEV_FAIL,
 };

 struct idxd_wq {
-	void __iomem *dportal;
+	void __iomem *portal;
 	struct device conf_dev;
 	struct idxd_cdev idxd_cdev;
 	struct idxd_device *idxd;
 	int id;
 	enum idxd_wq_type type;
 	struct idxd_group *group;
 	int client_count;
 	struct mutex wq_lock;	/* mutex for workqueue */
--- 39 unchanged lines hidden ---
 	IDXD_DEV_DISABLED = 0,
 	IDXD_DEV_CONF_READY,
 	IDXD_DEV_ENABLED,
 };

 enum idxd_device_flag {
 	IDXD_FLAG_CONFIGURABLE = 0,
 	IDXD_FLAG_CMD_RUNNING,
+	IDXD_FLAG_PASID_ENABLED,
 };

 struct idxd_device {
 	enum idxd_type type;
 	struct device conf_dev;
 	struct list_head list;
 	struct idxd_hw hw;
 	enum idxd_device_state state;
--- 6 unchanged lines hidden ---
 	void __iomem *reg_base;

 	spinlock_t dev_lock;	/* spinlock for device */
 	struct completion *cmd_done;
 	struct idxd_group *groups;
 	struct idxd_wq *wqs;
 	struct idxd_engine *engines;

+	struct iommu_sva *sva;
+	unsigned int pasid;
+
 	int num_groups;

 	u32 msix_perm_offset;
 	u32 wqcfg_offset;
 	u32 grpcfg_offset;
 	u32 perfmon_offset;

 	u64 max_xfer_bytes;
--- 32 unchanged lines hidden ---
 	struct idxd_wq *wq;
 };

 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

 extern struct bus_type dsa_bus_type;

+extern bool support_enqcmd;
+
 static inline bool wq_dedicated(struct idxd_wq *wq)
 {
 	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
 }

+static inline bool wq_shared(struct idxd_wq *wq)
+{
+	return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+static inline bool device_pasid_enabled(struct idxd_device *idxd)
+{
+	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool device_swq_supported(struct idxd_device *idxd)
+{
+	return (support_enqcmd && device_pasid_enabled(idxd));
+}
+
 enum idxd_portal_prot {
 	IDXD_PORTAL_UNLIMITED = 0,
 	IDXD_PORTAL_LIMITED,
 };

 static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
 {
 	return prot * 0x1000;
--- 52 unchanged lines hidden ---
 /* device control */
 void idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
 void idxd_device_reset(struct idxd_device *idxd);
 void idxd_device_cleanup(struct idxd_device *idxd);
 int idxd_device_config(struct idxd_device *idxd);
 void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
 
 /* work queue control */
 int idxd_wq_alloc_resources(struct idxd_wq *wq);
 void idxd_wq_free_resources(struct idxd_wq *wq);
 int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 void idxd_wq_drain(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
+int idxd_wq_disable_pasid(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);

 /* dmaengine */
 int idxd_register_dma_device(struct idxd_device *idxd);
--- 16 unchanged lines hidden ---
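The additions above plumb PASID and ENQCMD (shared work queue) support into the header: a per-device PASID and `iommu_sva` handle, the `support_enqcmd` flag, the `wq_shared()`/`device_pasid_enabled()`/`device_swq_supported()` helpers, and the new PASID control declarations. As a rough illustration of how those helpers fit together, a minimal sketch follows. `example_enable_kernel_wq()` is a hypothetical function invented here, not part of the driver or this patch; it only uses declarations visible in this header (`wq_shared()`, `wq_dedicated()`, `device_swq_supported()`, `device_pasid_enabled()`, `idxd_wq_enable()`, `idxd_wq_set_pasid()`) and the `wq->idxd` and `idxd->pasid` fields shown above.

	/* Hypothetical sketch only -- not part of idxd.h or the patch above. */
	static int example_enable_kernel_wq(struct idxd_wq *wq)
	{
		struct idxd_device *idxd = wq->idxd;
		int rc;

		/* A shared WQ is only usable with ENQCMD plus a device PASID. */
		if (wq_shared(wq) && !device_swq_supported(idxd))
			return -EOPNOTSUPP;

		rc = idxd_wq_enable(wq);
		if (rc < 0)
			return rc;

		/*
		 * On a PASID-enabled device, a dedicated WQ could have the
		 * device-wide kernel PASID programmed into its configuration.
		 */
		if (wq_dedicated(wq) && device_pasid_enabled(idxd))
			rc = idxd_wq_set_pasid(wq, idxd->pasid);

		return rc;
	}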