xref: /openbmc/linux/drivers/s390/block/scm_blk.h (revision 97da55fc)
1 #ifndef SCM_BLK_H
2 #define SCM_BLK_H
3 
4 #include <linux/interrupt.h>
5 #include <linux/spinlock.h>
6 #include <linux/blkdev.h>
7 #include <linux/genhd.h>
8 #include <linux/list.h>
9 
10 #include <asm/debug.h>
11 #include <asm/eadm.h>
12 
/* Minor numbers reserved per SCM disk, i.e. max partitions — presumably
 * handed to alloc_disk(); confirm against scm_blk.c. */
#define SCM_NR_PARTS 8
/* Delay before requeueing a busy request — NOTE(review): unit (jiffies?)
 * is not visible in this header; verify against the user in scm_blk.c. */
#define SCM_QUEUE_DELAY 5
15 
/*
 * Per-device state tying one Storage Class Memory (SCM) device to the
 * block layer.
 */
struct scm_blk_dev {
	struct tasklet_struct tasklet;	/* deferred (softirq) completion work */
	struct request_queue *rq;	/* block layer request queue */
	struct gendisk *gendisk;	/* disk exposed to the block layer */
	struct scm_device *scmdev;	/* underlying EADM SCM device */
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;	/* count of outstanding requests */
	struct list_head finished_requests;	/* completed requests, drained by tasklet — confirm in scm_blk.c */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;	/* clusters reserved on this device */
#endif
};
29 
/*
 * State of a single I/O request submitted to an SCM device.
 */
struct scm_request {
	struct scm_blk_dev *bdev;	/* owning block device */
	struct request *request;	/* block layer request being serviced */
	struct aidaw *aidaw;	/* data address list (see asm/eadm.h) */
	struct aob *aob;	/* asynchronous operation block (see asm/eadm.h) */
	struct list_head list;	/* linkage, e.g. on bdev->finished_requests — confirm in scm_blk.c */
	u8 retries;	/* retry counter — direction (remaining vs. used) set in scm_blk.c */
	int error;	/* completion status; presumably 0 on success */
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	/* Cluster-write bookkeeping, only when cluster support is built in. */
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;	/* linkage on bdev->cluster_list */
		void **buf;	/* per-cluster bounce buffers — verify in scm_blk_cluster.c */
	} cluster;
#endif
};
46 
/*
 * Map a request pointer stored in an aob_rq_header's data member back to
 * the enclosing aob_rq_header.  The argument is parenthesized so that
 * expression arguments (e.g. to_aobrq(p + 1)) cast the whole expression,
 * not just its first operand.
 */
#define to_aobrq(rq) container_of((void *)(rq), struct aob_rq_header, data)
48 
/* Block device setup/teardown and the EADM interrupt entry point. */
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

/* Request completion and requeue helpers. */
void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

/* Driver init/exit — presumably called from module init/exit; confirm. */
int scm_drv_init(void);
void scm_drv_cleanup(void);
58 
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
/* Cluster-write support; implementations live outside this header. */
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
/*
 * No-op stubs for kernels built without cluster write support.  Return
 * values are chosen so callers always take the non-cluster path:
 * allocation trivially succeeds (0), reservation trivially succeeds
 * (true), no request ever needs or is part of a cluster (false), and
 * the cluster size is vacuously valid (true).
 */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
99 
/* s390 debug feature instance shared by the SCM block driver. */
extern debug_info_t *scm_debug;

/* Log a text event at importance level 'imp' to the scm debug feature. */
#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)
105 
106 static inline void SCM_LOG_HEX(int level, void *data, int length)
107 {
108 	if (level > scm_debug->level)
109 		return;
110 	while (length > 0) {
111 		debug_event(scm_debug, level, data, length);
112 		length -= scm_debug->buf_size;
113 		data += scm_debug->buf_size;
114 	}
115 }
116 
117 static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
118 {
119 	struct {
120 		u64 address;
121 		u8 oper_state;
122 		u8 rank;
123 	} __packed data = {
124 		.address = scmdev->address,
125 		.oper_state = scmdev->attrs.oper_state,
126 		.rank = scmdev->attrs.rank,
127 	};
128 
129 	SCM_LOG_HEX(level, &data, sizeof(data));
130 }
131 
132 #endif /* SCM_BLK_H */
133