1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __BLK_NULL_BLK_H
3 #define __BLK_NULL_BLK_H
4 
5 #undef pr_fmt
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/blkdev.h>
9 #include <linux/slab.h>
10 #include <linux/blk-mq.h>
11 #include <linux/hrtimer.h>
12 #include <linux/configfs.h>
13 #include <linux/badblocks.h>
14 #include <linux/fault-inject.h>
15 #include <linux/spinlock.h>
16 #include <linux/mutex.h>
17 
/*
 * Per-I/O command context. One of these exists per tag in a queue's
 * cmds[] array (see struct nullb_queue).
 */
struct nullb_cmd {
	/* The command is carried either as a request or as a raw bio,
	 * depending on the device queue_mode. */
	union {
		struct request *rq;
		struct bio *bio;
	};
	unsigned int tag;	/* tag index of this command within its queue */
	blk_status_t error;	/* completion status reported to the block layer */
	bool fake_timeout;	/* injected timeout flag — presumably suppresses
				 * normal completion; confirm against users */
	struct nullb_queue *nq;	/* owning queue */
	struct hrtimer timer;	/* used for timer-delayed completions */
};
29 
/*
 * Per-hardware-queue state of a null_blk device.
 */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use tags (used with wait) */
	wait_queue_head_t wait;		/* waiters blocked on tag exhaustion */
	unsigned int queue_depth;	/* number of commands/tags in this queue */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* NOTE(review): appears to select a target
					 * queue on requeue — verify against users */

	/* Commands pending poll completion; protected by poll_lock. */
	struct list_head poll_list;
	spinlock_t poll_lock;

	struct nullb_cmd *cmds;		/* array of queue_depth command contexts */
};
42 
/*
 * State of a single zone of a zoned null_blk device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write zone */
	enum blk_zone_cond cond;	/* current zone condition (empty/open/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length (same units as start/wp: sectors) */
	unsigned int capacity;		/* writable capacity, at most len */
};
62 
/* Queue modes (nullb_device.queue_mode) */
enum {
	NULL_Q_BIO	= 0,	/* bio-based submission */
	NULL_Q_RQ	= 1,	/* legacy request mode */
	NULL_Q_MQ	= 2,	/* blk-mq (multiqueue) */
};
69 
/*
 * Configuration and state of one null_blk device, created and tuned
 * through configfs (see the embedded config_item).
 */
struct nullb_device {
	struct nullb *nullb;	/* runtime instance backing this configuration */
	struct config_item item;	/* configfs hookup for this device */
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage — units not visible
					 * here; compare with cache_size (MB) */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	/* Zoned-device bookkeeping (meaningful only when zoned is true). */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* next candidate for implicit close */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in sectors */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits */
	spinlock_t zone_res_lock;	/* protects the zone resource counters above */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
};
117 
/*
 * Runtime instance of a null block device, built from a nullb_device
 * configuration when the device is powered on.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was built from */
	struct list_head list;		/* linkage into the driver's device list */
	unsigned int index;		/* device index */
	struct request_queue *q;	/* block-layer request queue */
	struct gendisk *disk;		/* associated gendisk */
	struct blk_mq_tag_set *tag_set;	/* tag set in use (may point at __tag_set) */
	struct blk_mq_tag_set __tag_set; /* per-device tag set storage */
	unsigned int queue_depth;	/* depth of each submission queue */
	atomic_long_t cur_bytes;	/* bandwidth-throttle byte budget (see bw_timer) */
	struct hrtimer bw_timer;	/* periodic timer for bandwidth throttling */
	unsigned long cache_flush_pos;	/* cache writeback scan position */
	spinlock_t lock;		/* NOTE(review): what this guards is not
					 * visible in this header — check the .c */

	struct nullb_queue *queues;	/* array of nr_queues per-queue structures */
	unsigned int nr_queues;		/* number of entries in queues[] */
	char disk_name[DISK_NAME_LEN];	/* name of the backing gendisk */
};
136 
137 blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
138 				 sector_t nr_sectors);
139 blk_status_t null_process_cmd(struct nullb_cmd *cmd,
140 			      enum req_opf op, sector_t sector,
141 			      unsigned int nr_sectors);
142 
143 #ifdef CONFIG_BLK_DEV_ZONED
144 int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
145 int null_register_zoned_dev(struct nullb *nullb);
146 void null_free_zoned_dev(struct nullb_device *dev);
147 int null_report_zones(struct gendisk *disk, sector_t sector,
148 		      unsigned int nr_zones, report_zones_cb cb, void *data);
149 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
150 				    enum req_opf op, sector_t sector,
151 				    sector_t nr_sectors);
152 size_t null_zone_valid_read_len(struct nullb *nullb,
153 				sector_t sector, unsigned int len);
154 #else
/*
 * !CONFIG_BLK_DEV_ZONED stub: creating a zoned null_blk device is not
 * possible, so refuse initialization with -EINVAL after logging why.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
/* !CONFIG_BLK_DEV_ZONED stub: no zoned device can exist to register. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* !CONFIG_BLK_DEV_ZONED stub: nothing zoned was allocated, nothing to free. */
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
/* !CONFIG_BLK_DEV_ZONED stub: zoned commands are unsupported. */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_opf op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/*
 * !CONFIG_BLK_DEV_ZONED stub: with no zones there is no write pointer to
 * clamp reads against, so the whole requested length is readable.
 */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
177 #define null_report_zones	NULL
178 #endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */
180