/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 */
enum policy_operation {
	POLICY_PROMOTE,
	POLICY_DEMOTE,
	POLICY_WRITEBACK
};

/*
 * This is the instruction passed back to the core target: which operation
 * to perform, and the origin/cache blocks it applies to.
 */
struct policy_work {
	enum policy_operation op;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
};

/*
 * The cache policy object.  It is envisaged that this structure will be
 * embedded in a bigger, policy specific structure (ie. use container_of()).
 */
struct dm_cache_policy {
	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * Find the location of a block.
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache (cblock will be set), -ENOENT if not, < 0 for
	 * other errors (-EWOULDBLOCK would be typical).  data_dir should be
	 * READ or WRITE.  fast_copy should be set if migrating this block would
	 * be 'cheap' somehow (eg, discarded data).  background_queued will be
	 * set if a migration has just been queued.
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy, bool *background_queued);

	/*
	 * Sometimes the core target can optimise a migration, eg, the
	 * block may be discarded, or the bio may cover an entire block.
	 * In order to optimise it needs the migration immediately though
	 * so it knows to do something different with the bio.
	 *
	 * This method is optional (policy-internal will fallback to using
	 * lookup).
	 */
	int (*lookup_with_work)(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work);

	/*
	 * Retrieves background work.  Returns -ENODATA when there's no
	 * background work.
	 */
	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result);

	/*
	 * You must pass in the same work pointer that you were given, not
	 * a copy.
	 */
	void (*complete_background_work)(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success);

	/*
	 * Tell the policy that the dirty state of the given cache block
	 * has changed.
	 */
	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, bool dirty,
			    uint32_t hint, bool hint_valid);

	/*
	 * Drops the mapping, irrespective of whether it's clean or dirty.
	 * Returns -ENODATA if cblock is not mapped.
	 */
	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Gets the hint for a given cblock.  Called in a single threaded
	 * context.  So no locking required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 *
	 * This method is optional.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned int maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	/*
	 * Enable or disable the generation of migrations by the policy.
	 */
	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);

	/*
	 * Book keeping ptr for the policy register, not for general use.
	 */
	void *private;
};

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned int version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_POLICY_H */