/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;
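
	/*
	 * Illustrative sketch (not quoted from dm.c): core code normally
	 * dereferences the live table through the SRCU accessors declared
	 * in dm.h, e.g.
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (map)
	 *		nr_targets = dm_table_get_num_targets(map);
	 *	dm_put_live_table(md, srcu_idx);
	 *
	 * The srcu_idx handed out by dm_get_live_table() must be returned
	 * via dm_put_live_table(); taking suspend_lock is the alternative
	 * for slow paths that may sleep across the access.
	 */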

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	struct dax_device *dax_dev;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;
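
	/*
	 * Sketch of the intended flow (modelled on dm.c's deferred-I/O
	 * handling, not quoted from it): a bio that arrives while the
	 * device is suspended is parked on @deferred under @deferred_lock
	 * and the workqueue is kicked to replay it once I/O resumes:
	 *
	 *	unsigned long flags;
	 *
	 *	spin_lock_irqsave(&md->deferred_lock, flags);
	 *	bio_list_add(&md->deferred, bio);
	 *	spin_unlock_irqrestore(&md->deferred_lock, flags);
	 *	queue_work(md->wq, &md->work);
	 */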

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;
};

void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
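
/*
 * Usage sketch (an assumption about the intended pattern, with a
 * hypothetical function name): the kobject release method completes the
 * embedded completion, so teardown can kobject_put() and then
 * wait_for_completion() on kobj_holder.completion before freeing the
 * mapped_device:
 *
 *	static void example_dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */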

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
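
/*
 * Clamping semantics, as a hedged sketch (the wrapper name and variables
 * are illustrative, not guaranteed to match dm.c): a zero module
 * parameter falls back to @def and anything above @max is capped, so
 * callers wrap it once per parameter:
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 */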

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
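
/*
 * Example use (a hedged sketch; @sz, the emitted key, and the return
 * value are illustrative): a target's status/message handler that
 * formats into @result can stop emitting once the fixed-size buffer is
 * exhausted:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "pending=%u ", pending);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 *
 * The helper reports overflow when there is no room left even for the
 * terminating NUL, or when @maxlen is zero.
 */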

#endif