/* xref: /openbmc/linux/drivers/s390/block/scm_blk.h (revision d0b73b48) */
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5

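/*
 * Per-device state of an SCM block device: the request queue and
 * gendisk exposed to the block layer, the underlying scm_device, and
 * locks/lists used for request bookkeeping.
 */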
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};

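/*
 * State of a single I/O request: links the block layer request to the
 * aob/aidaw used for the asynchronous EADM operation, plus retry count
 * and completion status.
 */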
struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;
	struct aidaw *aidaw;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};

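/*
 * An scm_request is expected to be embedded as the data payload of an
 * aob_rq_header; to_aobrq() maps such a payload pointer back to its
 * enclosing struct aob_rq_header.
 */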
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

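/*
 * Cluster write handling. When CONFIG_SCM_BLOCK_CLUSTER_WRITE is not
 * set, these hooks collapse to the no-op stubs below.
 */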
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else
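/* Stubs so callers need no #ifdefs when cluster writes are compiled out. */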
#define __scm_free_rq_cluster(scmrq) {}
#define __scm_alloc_rq_cluster(scmrq) 0
#define scm_request_cluster_init(scmrq) {}
#define scm_reserve_cluster(scmrq) true
#define scm_release_cluster(scmrq) {}
#define scm_blk_dev_cluster_setup(bdev) {}
#define scm_need_cluster_request(scmrq) false
#define scm_initiate_cluster_request(scmrq) {}
#define scm_cluster_request_irq(scmrq) {}
#define scm_test_cluster_request(scmrq) false
#define scm_cluster_size_valid() true
#endif

extern debug_info_t *scm_debug;

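/*
 * Log a short text message via the scm_debug s390 debug feature at
 * importance level imp. Hypothetical call site: SCM_LOG(2, "dev ready");
 */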
#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)

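/*
 * Log a hex dump of data. A single debug entry holds at most
 * scm_debug->buf_size bytes, so the buffer is emitted in
 * buf_size sized chunks.
 */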
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (level > scm_debug->level)
		return;
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}

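/* Log address, operational state and rank of the given scm_device. */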
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */