idxd.h: diff from 8f47d1a5e545f903cd049c42da31a3be36178447 (old) to 42d279f9137ab7d5503836baec2739284b278d8f (new); lines added in the newer revision are prefixed with "+".
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _IDXD_H_
 #define _IDXD_H_

 #include <linux/sbitmap.h>
 #include <linux/dmaengine.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/wait.h>
+#include <linux/cdev.h>
 #include "registers.h"

 #define IDXD_DRIVER_VERSION "1.00"

 extern struct kmem_cache *idxd_desc_pool;

 #define IDXD_REG_TIMEOUT 50
 #define IDXD_DRAIN_TIMEOUT 5000

--- 40 unchanged lines hidden ---

 enum idxd_wq_flag {
 	WQ_FLAG_DEDICATED = 0,
 };

 enum idxd_wq_type {
 	IDXD_WQT_NONE = 0,
 	IDXD_WQT_KERNEL,
+	IDXD_WQT_USER,
 };

+struct idxd_cdev {
+	struct cdev cdev;
+	struct device *dev;
+	int minor;
+	struct wait_queue_head err_queue;
+};
+
 #define IDXD_ALLOCATED_BATCH_SIZE 128U
 #define WQ_NAME_SIZE 1024
 #define WQ_TYPE_SIZE 10

 enum idxd_op_type {
 	IDXD_OP_BLOCK = 0,
 	IDXD_OP_NONBLOCK = 1,
 };

 enum idxd_complete_type {
 	IDXD_COMPLETE_NORMAL = 0,
 	IDXD_COMPLETE_ABORT,
 };

 struct idxd_wq {
 	void __iomem *dportal;
 	struct device conf_dev;
+	struct idxd_cdev idxd_cdev;
 	struct idxd_device *idxd;
 	int id;
 	enum idxd_wq_type type;
 	struct idxd_group *group;
 	int client_count;
 	struct mutex wq_lock;	/* mutex for workqueue */
 	u32 size;
 	u32 threshold;

--- 47 unchanged lines hidden ---

 struct idxd_device {
 	enum idxd_type type;
 	struct device conf_dev;
 	struct list_head list;
 	struct idxd_hw hw;
 	enum idxd_device_state state;
 	unsigned long flags;
 	int id;
+	int major;

 	struct pci_dev *pdev;
 	void __iomem *reg_base;

 	spinlock_t dev_lock;	/* spinlock for device */
 	struct idxd_group *groups;
 	struct idxd_wq *wqs;
 	struct idxd_engine *engines;

--- 35 unchanged lines hidden ---

 	struct list_head list;
 	int id;
 	struct idxd_wq *wq;
 };

 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)

+extern struct bus_type dsa_bus_type;
+
 static inline bool wq_dedicated(struct idxd_wq *wq)
 {
 	return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
 }

+enum idxd_portal_prot {
+	IDXD_PORTAL_UNLIMITED = 0,
+	IDXD_PORTAL_LIMITED,
+};
+
+static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
+{
+	return prot * 0x1000;
+}
+
+static inline int idxd_get_wq_portal_full_offset(int wq_id,
+						 enum idxd_portal_prot prot)
+{
+	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+}
+
 static inline void idxd_set_type(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;

 	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
 		idxd->type = IDXD_TYPE_DSA;
 	else
 		idxd->type = IDXD_TYPE_UNKNOWN;

--- 16 unchanged lines hidden ---

 const char *idxd_get_dev_name(struct idxd_device *idxd);
 int idxd_register_bus_type(void);
 void idxd_unregister_bus_type(void);
 int idxd_setup_sysfs(struct idxd_device *idxd);
 void idxd_cleanup_sysfs(struct idxd_device *idxd);
 int idxd_register_driver(void);
 void idxd_unregister_driver(void);
+struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

 /* device interrupt control */
 irqreturn_t idxd_irq_handler(int vec, void *data);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
 void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
 void idxd_mask_msix_vectors(struct idxd_device *idxd);

--- 27 unchanged lines hidden ---

 void idxd_unregister_dma_device(struct idxd_device *idxd);
 int idxd_register_dma_channel(struct idxd_wq *wq);
 void idxd_unregister_dma_channel(struct idxd_wq *wq);
 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
 void idxd_dma_complete_txd(struct idxd_desc *desc,
 			   enum idxd_complete_type comp_type);
 dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

+/* cdev */
+int idxd_cdev_register(void);
+void idxd_cdev_remove(void);
+int idxd_cdev_get_major(struct idxd_device *idxd);
+int idxd_wq_add_cdev(struct idxd_wq *wq);
+void idxd_wq_del_cdev(struct idxd_wq *wq);
+
 #endif
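Note (not part of the diff): the additions in the newer revision plumb in a character-device path for user-type work queues (IDXD_WQT_USER, struct idxd_cdev, the major field in struct idxd_device, and the idxd_cdev_*/idxd_wq_*_cdev declarations) alongside helpers that locate a work queue's submission portals. The arithmetic in idxd_get_wq_portal_full_offset() places each work queue in its own four-page window of the portal region, with the limited portal 0x1000 bytes above the unlimited one. Below is a minimal standalone sketch, not taken from the driver, that reproduces that arithmetic; it assumes 4 KiB pages (PAGE_SHIFT == 12), and the helper name wq_portal_full_offset() is illustrative only.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

enum idxd_portal_prot {
	IDXD_PORTAL_UNLIMITED = 0,
	IDXD_PORTAL_LIMITED,
};

/* Mirrors the offset math of idxd_get_wq_portal_full_offset() above. */
static int wq_portal_full_offset(int wq_id, enum idxd_portal_prot prot)
{
	return ((wq_id * 4) << PAGE_SHIFT) + prot * 0x1000;
}

int main(void)
{
	int wq_id;

	for (wq_id = 0; wq_id < 3; wq_id++)
		printf("wq %d: unlimited 0x%05x, limited 0x%05x\n", wq_id,
		       wq_portal_full_offset(wq_id, IDXD_PORTAL_UNLIMITED),
		       wq_portal_full_offset(wq_id, IDXD_PORTAL_LIMITED));
	/*
	 * Prints:
	 *   wq 0: unlimited 0x00000, limited 0x01000
	 *   wq 1: unlimited 0x04000, limited 0x05000
	 *   wq 2: unlimited 0x08000, limited 0x09000
	 */
	return 0;
}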