xref: /openbmc/linux/drivers/s390/cio/qdio.h (revision 75f25bd3)
1 /*
2  * linux/drivers/s390/cio/qdio.h
3  *
4  * Copyright 2000,2009 IBM Corp.
5  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6  *	      Jan Glauber <jang@linux.vnet.ibm.com>
7  */
8 #ifndef _CIO_QDIO_H
9 #define _CIO_QDIO_H
10 
11 #include <asm/page.h>
12 #include <asm/schid.h>
13 #include <asm/debug.h>
14 #include "chsc.h"
15 
/*
 * Timing constants.  The "<< 12" values are delays expressed in TOD-clock
 * units (NOTE(review): presumably 1 us == 1 << 12 TOD units — confirm
 * against the s390 TOD-clock definition).
 */
#define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
#define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
#define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */

/*
 * If an asynchronous HiperSockets queue runs full, waiting for the 10 second
 * timer to take the next initiative to give transmitted skbs back to the
 * stack is too long.  Therefore polling is started once the multicast queue
 * is filled by more than 50 percent.
 */
#define QDIO_IQDIO_POLL_LVL		65	/* HS multicast queue */
28 
/* lifecycle states of a qdio subchannel (struct qdio_irq::state) */
enum qdio_irq_states {
	QDIO_IRQ_STATE_INACTIVE,
	QDIO_IRQ_STATE_ESTABLISHED,
	QDIO_IRQ_STATE_ACTIVE,
	QDIO_IRQ_STATE_STOPPED,
	QDIO_IRQ_STATE_CLEANUP,
	QDIO_IRQ_STATE_ERR,
	NR_QDIO_IRQ_STATES,	/* keep last: number of states */
};
38 
/* used as intparm in do_IO to tell the phases of setup/teardown apart */
#define QDIO_DOING_ESTABLISH	1
#define QDIO_DOING_ACTIVATE	2
#define QDIO_DOING_CLEANUP	3

/*
 * Components of an SLSB entry: the low nibble encodes the buffer state,
 * bit 0x20 the queue type and the two top bits the current owner.
 */
#define SLSB_STATE_NOT_INIT	0x0
#define SLSB_STATE_EMPTY	0x1
#define SLSB_STATE_PRIMED	0x2
#define SLSB_STATE_HALTED	0xe
#define SLSB_STATE_ERROR	0xf
#define SLSB_TYPE_INPUT		0x0
#define SLSB_TYPE_OUTPUT	0x20
#define SLSB_OWNER_PROG		0x80
#define SLSB_OWNER_CU		0x40
53 
/* combined owner/type/state values as they appear in the SLSB */
#define SLSB_P_INPUT_NOT_INIT	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
#define SLSB_P_INPUT_ACK	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x81 */
#define SLSB_CU_INPUT_EMPTY	\
	(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	   /* 0x41 */
#define SLSB_P_INPUT_PRIMED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)	   /* 0x82 */
#define SLSB_P_INPUT_HALTED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)	   /* 0x8e */
#define SLSB_P_INPUT_ERROR	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)	   /* 0x8f */
#define SLSB_P_OUTPUT_NOT_INIT	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)	   /* 0xa1 */
#define SLSB_CU_OUTPUT_PRIMED	\
	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)	   /* 0x62 */
#define SLSB_P_OUTPUT_HALTED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
#define SLSB_P_OUTPUT_ERROR	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)	   /* 0xaf */

/*
 * Local marker value, not a real SLSB state (0xff does not match any
 * owner/type/state combination above) — presumably returned by the
 * buffer-state lookup helpers on failure; confirm against their callers.
 */
#define SLSB_ERROR_DURING_LOOKUP  0xff
78 
/* additional CIWs returned by extended Sense-ID */
#define CIW_TYPE_EQUEUE			0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE			0x4 /* activate QDIO queues */

/* flags for st qdio sch data (CHSC store-subchannel-QDIO-data response) */
#define CHSC_FLAG_QDIO_CAPABILITY	0x80
#define CHSC_FLAG_VALIDITY		0x40

/*
 * qdio adapter-characteristics-1 flags (qdioac); translated into
 * struct siga_flag during setup.
 */
#define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
#define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
#define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
#define AC1_AUTOMATIC_SYNC_ON_THININT	0x08	/* set by hypervisor */
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI	0x04	/* set by hypervisor */
#define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
#define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */

/* SIGA flags (function codes passed to the SIGA instruction) */
#define QDIO_SIGA_WRITE		0x00
#define QDIO_SIGA_READ		0x01
#define QDIO_SIGA_SYNC		0x02
#define QDIO_SIGA_QEBSM_FLAG	0x80
101 
102 #ifdef CONFIG_64BIT
/*
 * do_sqbs - set the state of SBALs on a QEBSM subchannel (SQBS instruction)
 * @token: QEBSM subchannel token, loaded into register 1
 * @state: SLSB state to apply to the buffers
 * @queue: queue number, placed in the upper word of the queue/start operand
 * @start: in: first buffer to process; out: low byte of the updated operand
 * @count: in: buffer count; out: low byte of the updated register 0
 *
 * The raw .insn encoding (RSY format, opcode 0xeb...8a) is used because the
 * assembler may not know the SQBS mnemonic.  The asm("0")/asm("1") register
 * variables pin the operands to r0/r1 as the instruction requires
 * (NOTE(review): per the QEBSM definition — confirm against the z/Arch POP).
 *
 * Returns bits 32-39 of the updated register 0, i.e. the condition-code
 * qualifier (ccq).
 */
static inline int do_sqbs(u64 token, unsigned char state, int queue,
			  int *start, int *count)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;

	asm volatile(
		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
		: "+d" (_ccq), "+d" (_queuestart)
		: "d" ((unsigned long)state), "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;

	return (_ccq >> 32) & 0xff;
}
120 
/*
 * do_eqbs - extract the state of SBALs on a QEBSM subchannel (EQBS insn)
 * @token: QEBSM subchannel token, loaded into register 1
 * @state: out: state of the extracted buffers (low byte of the state operand)
 * @queue: queue number, placed in the upper word of the queue/start operand
 * @start: in: first buffer to query; out: low byte of the updated operand
 * @count: in: buffer count; out: low byte of the updated register 0
 * @ack: when non-zero, sets the top bit of the state operand
 *	 (NOTE(review): presumably requests buffer acknowledgement — confirm)
 *
 * Raw .insn encoding (RRF format, opcode 0xb99c) for the same reason as in
 * do_sqbs.  Returns bits 32-39 of the updated register 0 (ccq).
 */
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
			  int *start, int *count, int ack)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
	unsigned long _state = (unsigned long)ack << 63;

	asm volatile(
		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
		: "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;
	*state = _state & 0xff;

	return (_ccq >> 32) & 0xff;
}
140 #else
141 static inline int do_sqbs(u64 token, unsigned char state, int queue,
142 			  int *start, int *count) { return 0; }
143 static inline int do_eqbs(u64 token, unsigned char *state, int queue,
144 			  int *start, int *count, int ack) { return 0; }
145 #endif /* CONFIG_64BIT */
146 
147 struct qdio_irq;
148 
/*
 * Which SIGA variants the device needs; derived from the qdioac
 * adapter characteristics (see the AC1_* defines above).
 */
struct siga_flag {
	u8 input:1;		/* SIGA-read needed for input queues */
	u8 output:1;		/* SIGA-write needed for output queues */
	u8 sync:1;		/* SIGA-sync needed */
	u8 sync_after_ai:1;	/* sync needed after adapter interrupt */
	u8 sync_out_after_pci:1;/* sync output queues after out-PCI */
	u8:3;			/* unused padding */
} __attribute__ ((packed));
157 
/*
 * CHSC request/response block for store-subchannel-QDIO-data; layout is
 * defined by the CHSC command, hence the packed attribute and the unnamed
 * bit-field padding.
 */
struct chsc_ssqd_area {
	struct chsc_header request;
	u16:10;
	u8 ssid:2;		/* subchannel-set id */
	u8 fmt:4;
	u16 first_sch;		/* first subchannel of the queried range */
	u16:16;
	u16 last_sch;		/* last subchannel of the queried range */
	u32:32;
	struct chsc_header response;
	u32:32;
	struct qdio_ssqd_desc qdio_ssqd;	/* returned SSQD descriptor */
} __attribute__ ((packed));
171 
/*
 * CHSC request/response block to set subchannel indicators for adapter
 * (thin) interrupts; layout is defined by the CHSC command.
 */
struct scssc_area {
	struct chsc_header request;
	u16 operation_code;
	u16:16;
	u32:32;
	u32:32;
	u64 summary_indicator_addr;	/* address of the summary indicator */
	u64 subchannel_indicator_addr;	/* address of the per-device dsci */
	u32 ks:4;		/* NOTE(review): presumably storage key for the
				 * summary indicator — confirm */
	u32 kc:4;		/* NOTE(review): presumably storage key for the
				 * subchannel indicator — confirm */
	u32:21;
	u32 isc:3;		/* interruption subclass */
	u32 word_with_d_bit;
	u32:32;
	struct subchannel_id schid;
	u32 reserved[1004];
	struct chsc_header response;
	u32:32;
} __attribute__ ((packed));
191 
/*
 * Per-device performance counters; only updated when perf_stat_enabled
 * is set for the device (see qperf_inc()).  Cacheline aligned since the
 * counters are bumped from hot interrupt/tasklet paths.
 */
struct qdio_dev_perf_stat {
	/* interrupt-delivery counters */
	unsigned int adapter_int;
	unsigned int qdio_int;
	unsigned int pci_request_int;

	/* tasklet activity */
	unsigned int tasklet_inbound;
	unsigned int tasklet_inbound_resched;
	unsigned int tasklet_inbound_resched2;
	unsigned int tasklet_outbound;

	/* SIGA instruction usage */
	unsigned int siga_read;
	unsigned int siga_write;
	unsigned int siga_sync;

	/* queue-processing counters */
	unsigned int inbound_call;
	unsigned int inbound_handler;
	unsigned int stop_polling;
	unsigned int inbound_queue_full;
	unsigned int outbound_call;
	unsigned int outbound_handler;
	unsigned int outbound_queue_full;
	unsigned int fast_requeue;
	unsigned int target_full;
	unsigned int eqbs;
	unsigned int eqbs_partial;
	unsigned int sqbs;
	unsigned int sqbs_partial;
	unsigned int int_discarded;
} ____cacheline_aligned;
221 
/* per-queue SBAL scan statistics */
struct qdio_queue_perf_stat {
	/*
	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
	 * aka 127 SBALs found.
	 */
	unsigned int nr_sbals[8];
	unsigned int nr_sbal_error;	/* SBALs found in error state */
	unsigned int nr_sbal_nop;	/* scans that found nothing to do */
	unsigned int nr_sbal_total;	/* total SBALs processed */
};
233 
/* bit numbers for qdio_input_q::queue_irq_state (tested via test_bit) */
enum qdio_queue_irq_states {
	QDIO_QUEUE_IRQS_DISABLED,
};
237 
/* state specific to input queues */
struct qdio_input_q {
	/* input buffer acknowledgement flag */
	int polling;
	/* first ACK'ed buffer */
	int ack_start;
	/* how many SBALs are acknowledged with qebsm */
	int ack_count;
	/* last time of noticing incoming data */
	u64 timestamp;
	/* upper-layer polling flag (bits from enum qdio_queue_irq_states) */
	unsigned long queue_irq_state;
	/* callback to start upper-layer polling */
	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
252 
/* state specific to output queues */
struct qdio_output_q {
	/* PCIs (program-controlled interrupts) are enabled for the queue */
	int pci_out_enabled;
	/* timer to check for more outbound work */
	struct timer_list timer;
	/* used SBALs before tasklet schedule */
	int scan_threshold;
};
261 
/*
 * Note on cache alignment: slsb and the write-mostly data are grouped at
 * the beginning; sbal[] is read-only and starts on a new cacheline,
 * followed by the read-mostly fields.
 */
struct qdio_q {
	/* buffer-state block for this queue, shared with the adapter */
	struct slsb slsb;

	/* direction-specific part; presumably selected by is_input_q */
	union {
		struct qdio_input_q in;
		struct qdio_output_q out;
	} u;

	/*
	 * inbound: next buffer the program should check for
	 * outbound: next buffer to check if adapter processed it
	 */
	int first_to_check;

	/* first_to_check of the last time */
	int last_move;

	/* beginning position for calling the program */
	int first_to_kick;

	/* number of buffers in use by the adapter */
	atomic_t nr_buf_used;

	/* error condition during a data transfer */
	unsigned int qdio_error;

	/* deferred-processing tasklet for this queue */
	struct tasklet_struct tasklet;
	/* per-queue scan statistics */
	struct qdio_queue_perf_stat q_stats;

	/* SBAL pointers; read-only after setup, on its own cacheline */
	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;

	/* queue number */
	int nr;

	/* bitmask of queue number */
	int mask;

	/* input or output queue */
	int is_input_q;

	/* list of thinint input queues */
	struct list_head entry;

	/* upper-layer program handler */
	qdio_handler_t (*handler);

	/* debugfs entry for this queue */
	struct dentry *debugfs_q;
	/* the qdio instance this queue belongs to */
	struct qdio_irq *irq_ptr;
	struct sl *sl;
	/*
	 * A page is allocated under this pointer and used for slib and sl.
	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
	 */
	struct slib *slib;
} __attribute__ ((aligned(256)));
321 
/* per-subchannel qdio instance */
struct qdio_irq {
	struct qib qib;		/* queue-information block */
	u32 *dsci;		/* address of device state change indicator */
	struct ccw_device *cdev;	/* associated ccw device */
	struct dentry *debugfs_dev;	/* debugfs directory of this device */
	struct dentry *debugfs_perf;	/* debugfs perf-statistics entry */

	unsigned long int_parm;		/* interruption parameter */
	struct subchannel_id schid;
	unsigned long sch_token;	/* QEBSM facility */

	enum qdio_irq_states state;

	struct siga_flag siga_flag;	/* siga sync information from qdioac */

	int nr_input_qs;	/* number of established input queues */
	int nr_output_qs;	/* number of established output queues */

	/* channel program used for the qdio I/O requests (NOTE(review):
	 * presumably establish/activate, see QDIO_DOING_* — confirm) */
	struct ccw1 ccw;
	struct ciw equeue;	/* CIW for establishing QDIO queues */
	struct ciw aqueue;	/* CIW for activating QDIO queues */

	struct qdio_ssqd_desc ssqd_desc;	/* cached SSQD of the sch */
	/* original cdev interrupt handler (NOTE(review): presumably saved
	 * while qdio owns the device — confirm) */
	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);

	int perf_stat_enabled;	/* collect perf_stat counters when set */

	struct qdr *qdr;	/* queue-descriptor record */
	unsigned long chsc_page;	/* page used for CHSC requests */

	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];

	debug_info_t *debug_area;	/* s390 debug feature area */
	struct mutex setup_mutex;	/* serializes setup/teardown */
	struct qdio_dev_perf_stat perf_stat;	/* see qperf_inc() */
};
359 
/* helper functions; arguments are fully parenthesized for macro hygiene */
#define queue_type(q)	((q)->irq_ptr->qib.qfmt)
#define SCH_NO(q)	((q)->irq_ptr->schid.sch_no)

/* thin-interrupt device: HiperSockets format, or OSA with the
 * adapter-interruption facility available */
#define is_thinint_irq(irq) \
	((irq)->qib.qfmt == QDIO_IQDIO_QFMT || \
	 css_general_characteristics.aif_osa)
367 
/*
 * qperf - access a named per-device performance counter.
 * The previous expansion "perf_stat.(__attr)" was invalid C and could not
 * compile anywhere the macro was actually used.
 */
#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
369 
/*
 * qperf_inc - bump the named perf_stat counter of a queue's device, but
 * only when statistics collection is enabled for that device.  Wrapped in
 * a GNU statement expression so it acts as a single statement; __q is
 * evaluated exactly once, __attr is pasted in as a struct member name.
 */
#define qperf_inc(__q, __attr)						\
({									\
	struct qdio_irq *qdev = (__q)->irq_ptr;				\
	if (qdev->perf_stat_enabled)					\
		(qdev->perf_stat.__attr)++;				\
})
376 
377 static inline void account_sbals_error(struct qdio_q *q, int count)
378 {
379 	q->q_stats.nr_sbal_error += count;
380 	q->q_stats.nr_sbal_total += count;
381 }
382 
383 /* the highest iqdio queue is used for multicast */
384 static inline int multicast_outbound(struct qdio_q *q)
385 {
386 	return (q->irq_ptr->nr_output_qs > 1) &&
387 	       (q->nr == q->irq_ptr->nr_output_qs - 1);
388 }
389 
/* SIGA-requirement helpers; arguments parenthesized for macro hygiene */
#define pci_out_supported(q) \
	((q)->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
/* a non-zero subchannel token means QEBSM is in use */
#define is_qebsm(q)			((q)->irq_ptr->sch_token != 0)

#define need_siga_in(q)			((q)->irq_ptr->siga_flag.input)
#define need_siga_out(q)		((q)->irq_ptr->siga_flag.output)
#define need_siga_sync(q)		(unlikely((q)->irq_ptr->siga_flag.sync))
#define need_siga_sync_after_ai(q)	\
	(unlikely((q)->irq_ptr->siga_flag.sync_after_ai))
#define need_siga_sync_out_after_pci(q)	\
	(unlikely((q)->irq_ptr->siga_flag.sync_out_after_pci))
401 
/*
 * Iterate i/q over the established input (output) queues.  The bound is
 * checked before q is loaded, so input_qs[nr_input_qs] is never read —
 * the old "q = input_qs[++i]" loop increment fetched one element past the
 * established queues, an out-of-bounds read when all array slots are used.
 */
#define for_each_input_queue(irq_ptr, q, i)		\
	for (i = 0; i < (irq_ptr)->nr_input_qs &&	\
		({ q = (irq_ptr)->input_qs[i]; 1; }); i++)
#define for_each_output_queue(irq_ptr, q, i)		\
	for (i = 0; i < (irq_ptr)->nr_output_qs &&	\
		({ q = (irq_ptr)->output_qs[i]; 1; }); i++)
410 
/*
 * Ring arithmetic on buffer numbers, modulo the queue size.  Arguments
 * are parenthesized: the old sub_buf(x, a + b) mis-expanded to x - a + b.
 */
#define prev_buf(bufnr)	\
	(((bufnr) + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
#define next_buf(bufnr)	\
	(((bufnr) + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
	(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec) \
	(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)
419 
/* has the upper layer left queue interrupts enabled (not polling)? */
#define queue_irqs_enabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
/* has the upper layer switched this queue to polling mode? */
#define queue_irqs_disabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
424 
/* index of the indicator shared by all thinint devices (see shared_ind) */
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* indicator array; slot TIQDIO_SHARED_IND is the shared indicator */
extern struct indicator_t *q_indicators;
434 
435 static inline int shared_ind(u32 *dsci)
436 {
437 	return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
438 }
439 
/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
/* @q is presumably the struct qdio_q * cast to unsigned long (tasklet
 * calling convention) — confirm against the caller */
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
451 
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
void qdio_outbound_timer(unsigned long data);
/* common interrupt handler for all qdio subchannels */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
		     int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
			struct subchannel_id *schid,
			struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);

/* read the state of one buffer (presumably for the debugfs views) */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state);
475 #endif /* _CIO_QDIO_H */
476