/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

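/*
 * Pairs the per-device sysfs kobject with a completion so that teardown
 * can wait for the last kobject reference to be dropped (see
 * dm_get_completion_from_kobject() below).
 */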
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference (see the usage sketch after this struct).
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t pending[2];
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
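
/*
 * A minimal, hypothetical sketch (not used anywhere in DM core) of the
 * accessor pattern the 'map' comment above refers to: the live table is
 * only dereferenced between dm_get_live_table() and dm_put_live_table(),
 * which take and drop the io_barrier SRCU read lock.  The function name
 * below is made up for illustration.
 */
static inline unsigned int dm_example_num_targets(struct mapped_device *md)
{
	int srcu_idx;
	unsigned int n = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map)
		n = dm_table_get_num_targets(map);
	dm_put_live_table(md, srcu_idx);	/* drops the SRCU read lock */

	return n;
}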
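
/*
 * md_in_flight() returns the number of I/Os currently in flight on @md
 * (reads plus writes).  The disable_* helpers zero the corresponding
 * queue limit once a device reports that WRITE SAME/WRITE ZEROES is not
 * actually supported, so DM stops issuing those ops to it.
 */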
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
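
/*
 * Illustrative sketch only: a kobject release handler can use the helper
 * above to wake whoever is waiting for sysfs teardown to finish.  The
 * function name here is made up; dm-sysfs.c implements its real release
 * handler along these lines.
 */
static inline void dm_example_kobject_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}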
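
/*
 * Reads a module parameter: returns @def when the parameter is 0 and
 * caps the value at @max, writing any correction back.  dm.c wraps it
 * per parameter, e.g. (sketch based on dm.c):
 *
 *	return __dm_get_module_param(&reserved_bio_based_ios,
 *				     RESERVED_BIO_BASED_IOS,
 *				     DM_RESERVED_MAX_IOS);
 */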
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
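
/*
 * Returns true when the result buffer of a target message/status call is
 * full, i.e. no further output fits once the trailing NUL is accounted
 * for.
 */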
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
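
/*
 * Global event counter and waitqueue: dm_issue_global_event() bumps the
 * counter and wakes the queue whenever any mapped device changes, so
 * listeners (e.g. userspace polling the DM control device) can notice.
 */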
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif