/*
 * Copyright (C) 2010-2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_THIN_METADATA_H
#define DM_THIN_METADATA_H

#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-metadata.h"

#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/*
 * The metadata device is currently limited in size.
 */
#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))

/*----------------------------------------------------------------*/

/*
 * Thin metadata superblock flags.
 */
#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)

struct dm_pool_metadata;
struct dm_thin_device;

/*
 * Device identifier
 */
typedef uint64_t dm_thin_id;

/*
 * Reopens or creates a new, empty metadata volume.
 */
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device);

int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
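
/*
 * Illustrative open/close sketch (not part of this interface; bdev,
 * data_block_size and the use of IS_ERR() are assumptions about how a
 * caller such as the pool target would drive these functions):
 *
 *	struct dm_pool_metadata *pmd;
 *
 *	pmd = dm_pool_metadata_open(bdev, data_block_size, format_device);
 *	if (IS_ERR(pmd))
 *		return PTR_ERR(pmd);
 *
 *	... use pmd, then ...
 *
 *	r = dm_pool_metadata_close(pmd);
 */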

/*
 * Compat feature flags. Any incompat flags beyond the ones
 * specified below will prevent use of the thin metadata.
 */
#define THIN_FEATURE_COMPAT_SUPP	  0UL
#define THIN_FEATURE_COMPAT_RO_SUPP	  0UL
#define THIN_FEATURE_INCOMPAT_SUPP	  0UL

/*
 * Device creation/deletion.
 */
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev);

/*
 * An internal snapshot.
 *
 * You can only snapshot a quiesced origin, i.e. one that is either
 * suspended or not instanced at all.
 */
int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev,
			dm_thin_id origin);

/*
 * Deletes a virtual device from the metadata. It _is_ safe to call this
 * when that device is open. Operations on that device will just start
 * failing. You still need to call close() on the device.
 */
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev);

/*
 * Commits _all_ metadata changes: device creation, deletion, mapping
 * updates.
 */
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);

/*
 * Discards all uncommitted changes. Rereads the superblock, rolling back
 * to the last good transaction. Thin devices remain open.
 * dm_thin_aborted_changes() tells you if they had uncommitted changes.
 *
 * If this call fails it's only useful to call dm_pool_metadata_close().
 * All other methods will fail with -EINVAL.
 */
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd);
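
/*
 * Hypothetical caller sketch: commit on the normal path and fall back to
 * abort if the commit fails (the recovery policy is an assumption, not
 * part of this interface):
 *
 *	r = dm_pool_commit_metadata(pmd);
 *	if (r < 0) {
 *		dm_pool_abort_metadata(pmd);
 *		... switch the pool into a degraded/read-only mode ...
 *	}
 */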

/*
 * Set/get userspace transaction id.
 */
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id);

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result);

/*
 * Hold/get root for userspace transaction.
 *
 * The metadata snapshot is a copy of the current superblock (minus the
 * space maps). Userland can access the data structures for READ
 * operations only. A small performance hit is incurred by providing this
 * copy of the metadata to userland due to extra copy-on-write operations
 * on the metadata nodes. Release this as soon as you finish with it.
 */
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result);
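
/*
 * Usage sketch (illustrative only): hold a metadata snapshot, report its
 * root block to userland tools, then release it as soon as possible:
 *
 *	dm_block_t held_root;
 *
 *	r = dm_pool_reserve_metadata_snap(pmd);
 *	if (!r) {
 *		r = dm_pool_get_metadata_snap(pmd, &held_root);
 *		... publish held_root to userland ...
 *		dm_pool_release_metadata_snap(pmd);
 *	}
 */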

/*
 * Actions on a single virtual device.
 */

/*
 * Opening the same device more than once will fail with -EBUSY.
 */
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td);

int dm_pool_close_thin_device(struct dm_thin_device *td);

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
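
/*
 * Sketch of the per-device open/close pattern (dev_id is a hypothetical
 * variable; a second open of the same id would return -EBUSY as noted
 * above):
 *
 *	struct dm_thin_device *td;
 *
 *	r = dm_pool_open_thin_device(pmd, dev_id, &td);
 *	if (r)
 *		return r;
 *	... issue lookups/inserts against td ...
 *	dm_pool_close_thin_device(td);
 */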

struct dm_thin_lookup_result {
	dm_block_t block;
	bool shared:1;
};

/*
 * Returns:
 *   -EWOULDBLOCK iff @can_issue_io is not set and the lookup would need
 *     to issue IO to complete
 *   -ENODATA iff that mapping is not present.
 *   0 success
 */
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result);
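
/*
 * Illustrative sketch of handling the return values above; the remap,
 * provision and defer steps are placeholders for caller policy:
 *
 *	struct dm_thin_lookup_result lookup;
 *
 *	r = dm_thin_find_block(td, block, 0, &lookup);
 *	switch (r) {
 *	case 0:
 *		... remap to lookup.block, honouring lookup.shared ...
 *		break;
 *	case -ENODATA:
 *		... unprovisioned: zero reads, provision on write ...
 *		break;
 *	case -EWOULDBLOCK:
 *		... defer to a context that can retry with can_issue_io = 1 ...
 *		break;
 *	default:
 *		... fail the IO ...
 *	}
 */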

/*
 * Retrieve the next run of contiguously mapped blocks. Useful for working
 * out where to break up IO. Returns 0 on success, < 0 on error.
 */
int dm_thin_find_mapped_range(struct dm_thin_device *td,
			      dm_block_t begin, dm_block_t end,
			      dm_block_t *thin_begin, dm_block_t *thin_end,
			      dm_block_t *pool_begin, bool *maybe_shared);
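
/*
 * Hypothetical iteration sketch: walk the mapped runs in [begin, end),
 * e.g. to split a discard into per-run chunks:
 *
 *	dm_block_t t_begin, t_end, p_begin;
 *	bool maybe_shared;
 *
 *	while (begin < end &&
 *	       !dm_thin_find_mapped_range(td, begin, end, &t_begin, &t_end,
 *					  &p_begin, &maybe_shared)) {
 *		... process pool blocks [p_begin, p_begin + (t_end - t_begin)) ...
 *		begin = t_end;
 *	}
 */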

/*
 * Obtain an unused block.
 */
int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);

/*
 * Insert or remove block.
 */
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block);

int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
int dm_thin_remove_range(struct dm_thin_device *td,
			 dm_block_t begin, dm_block_t end);
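
/*
 * Provisioning sketch (illustrative; virt_block is a hypothetical
 * variable): allocate a free data block and map it into the thin device.
 * Nothing is durable until dm_pool_commit_metadata() succeeds.
 *
 *	dm_block_t data_block;
 *
 *	r = dm_pool_alloc_data_block(pmd, &data_block);
 *	if (!r)
 *		r = dm_thin_insert_block(td, virt_block, data_block);
 */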

/*
 * Queries.
 */
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);

bool dm_thin_aborted_changes(struct dm_thin_device *td);

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *highest_mapped);

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result);

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd,
				 dm_block_t *result);

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					   dm_block_t *result);

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result);

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);

int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);

/*
 * Returns -ENOSPC if the new size is too small and already allocated
 * blocks would be lost.
 */
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);

/*
 * Flicks the underlying block manager into read only mode, so you know
 * that nothing is changing.
 */
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);

int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context);
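
/*
 * Registration sketch. The callback type is dm_sm_threshold_fn from
 * persistent-data/dm-space-map.h; the callback body and the names below
 * (metadata_low_callback, pool, threshold_blocks) are assumptions:
 *
 *	static void metadata_low_callback(void *context)
 *	{
 *		struct pool *pool = context;
 *
 *		... warn and/or schedule a metadata resize ...
 *	}
 *
 *	r = dm_pool_register_metadata_threshold(pmd, threshold_blocks,
 *						metadata_low_callback, pool);
 */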

/*
 * Updates the superblock immediately.
 */
int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);

/*
 * Issue any prefetches that may be useful.
 */
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd);

/*----------------------------------------------------------------*/

#endif