#include "kvm/qcow.h"

#include "kvm/disk-image.h"
#include "kvm/read-write.h"
#include "kvm/mutex.h"
#include "kvm/util.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#ifdef CONFIG_HAS_ZLIB
#include <zlib.h>
#endif

#include <linux/err.h>
#include <linux/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

static int update_cluster_refcount(struct qcow *q, u64 clust_idx, u16 append);
static int qcow_write_refcount_table(struct qcow *q);
static u64 qcow_alloc_clusters(struct qcow *q, u64 size, int update_ref);
static void qcow_free_clusters(struct qcow *q, u64 clust_start, u64 size);

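/*
 * Metadata updates (refcount blocks, L2 tables, the L1 table) must be
 * durable on disk before any structure that references them is updated,
 * so every metadata writer below goes through this helper: a full
 * pwrite() followed by fdatasync().
 */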
static inline int qcow_pwrite_sync(int fd,
	void *buf, size_t count, off_t offset)
{
	if (pwrite_in_full(fd, buf, count, offset) < 0)
		return -1;

	return fdatasync(fd);
}

static int l2_table_insert(struct rb_root *root, struct qcow_l2_table *new)
{
	struct rb_node **link = &(root->rb_node), *parent = NULL;
	u64 offset = new->offset;

	/* search the tree */
	while (*link) {
		struct qcow_l2_table *t;

		t = rb_entry(*link, struct qcow_l2_table, node);
		if (!t)
			goto error;

		parent = *link;

		if (t->offset > offset)
			link = &(*link)->rb_left;
		else if (t->offset < offset)
			link = &(*link)->rb_right;
		else
			goto out;
	}

	/* add new node */
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
out:
	return 0;
error:
	return -1;
}

static struct qcow_l2_table *l2_table_lookup(struct rb_root *root, u64 offset)
{
	struct rb_node *link = root->rb_node;

	while (link) {
		struct qcow_l2_table *t;

		t = rb_entry(link, struct qcow_l2_table, node);
		if (!t)
			goto out;

		if (t->offset > offset)
			link = link->rb_left;
		else if (t->offset < offset)
			link = link->rb_right;
		else
			return t;
	}
out:
	return NULL;
}

static void l1_table_free_cache(struct qcow_l1_table *l1t)
{
	struct rb_root *r = &l1t->root;
	struct list_head *pos, *n;
	struct qcow_l2_table *t;

	list_for_each_safe(pos, n, &l1t->lru_list) {
		/* Remove cache table from the list and RB tree */
		list_del(pos);
		t = list_entry(pos, struct qcow_l2_table, list);
		rb_erase(&t->node, r);

		/* Free the cached node */
		free(t);
	}
}

static int qcow_l2_cache_write(struct qcow *q, struct qcow_l2_table *c)
{
	struct qcow_header *header = q->header;
	u64 size;

	if (!c->dirty)
		return 0;

	size = 1 << header->l2_bits;

	if (qcow_pwrite_sync(q->fd, c->table,
			size * sizeof(u64), c->offset) < 0)
		return -1;

	c->dirty = 0;

	return 0;
}

static int cache_table(struct qcow *q, struct qcow_l2_table *c)
{
	struct qcow_l1_table *l1t = &q->table;
	struct rb_root *r = &l1t->root;
	struct qcow_l2_table *lru;

	if (l1t->nr_cached == MAX_CACHE_NODES) {
Prasad Joshi | 3309045 | 2011-06-06 20:58:24 +0100 | [diff] [blame] | 136 | /* |
| 137 | * The node at the head of the list is least recently used |
| 138 | * node. Remove it from the list and replaced with a new node. |
| 139 | */ |
		lru = list_first_entry(&l1t->lru_list, struct qcow_l2_table, list);

		/* Remove the node from the cache */
		rb_erase(&lru->node, r);
		list_del_init(&lru->list);
		l1t->nr_cached--;

		/* Free the evicted node */
		free(lru);
	}

	/* Add the new node to the RB tree: makes searching faster */
	if (l2_table_insert(r, c) < 0)
		goto error;

	/* Add in LRU replacement list */
	list_add_tail(&c->list, &l1t->lru_list);
	l1t->nr_cached++;

	return 0;
error:
	return -1;
}

static struct qcow_l2_table *l2_table_search(struct qcow *q, u64 offset)
{
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;

	l2t = l2_table_lookup(&l1t->root, offset);
	if (!l2t)
		return NULL;

	/* Update the LRU state by moving the searched node to the list tail */
	list_move_tail(&l2t->list, &l1t->lru_list);

	return l2t;
}

/* Allocates a new node for caching L2 table */
static struct qcow_l2_table *new_cache_table(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;
	struct qcow_l2_table *c;
	u64 l2t_sz;
	u64 size;

	l2t_sz = 1 << header->l2_bits;
	size = sizeof(*c) + l2t_sz * sizeof(u64);
	c = calloc(1, size);
	if (!c)
		goto out;

	c->offset = offset;
	RB_CLEAR_NODE(&c->node);
	INIT_LIST_HEAD(&c->list);
out:
	return c;
}

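/*
 * A guest (virtual) offset decomposes into three fields:
 *
 *	l1 index             = offset >> (l2_bits + cluster_bits)
 *	l2 index             = (offset >> cluster_bits) & ((1 << l2_bits) - 1)
 *	intra-cluster offset = offset & ((1 << cluster_bits) - 1)
 *
 * Worked example (hypothetical geometry, not read from any header):
 * with cluster_bits = 16 and l2_bits = 13, offset 0x240012345 yields
 * l1 index 0x12, l2 index 0x1 and intra-cluster offset 0x2345.
 */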
static inline u64 get_l1_index(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return offset >> (header->l2_bits + header->cluster_bits);
}

static inline u64 get_l2_index(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return (offset >> (header->cluster_bits)) & ((1 << header->l2_bits)-1);
}

static inline u64 get_cluster_offset(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return offset & ((1 << header->cluster_bits)-1);
}

static struct qcow_l2_table *qcow_read_l2_table(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;
	struct qcow_l2_table *l2t;
	u64 size;

	size = 1 << header->l2_bits;

	/* search the cache for an entry at this offset */
	l2t = l2_table_search(q, offset);
	if (l2t)
		return l2t;

	/* allocate new node for caching l2 table */
	l2t = new_cache_table(q, offset);
	if (!l2t)
		goto error;

	/* table not cached: read from the disk */
	if (pread_in_full(q->fd, l2t->table, size * sizeof(u64), offset) < 0)
		goto error;

	/* cache the table */
	if (cache_table(q, l2t) < 0)
		goto error;

	return l2t;
error:
	free(l2t);
	return NULL;
}

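/*
 * Compressed clusters are raw deflate streams with no zlib header or
 * trailer; the negative windowBits value passed to inflateInit2()
 * below (-12, i.e. a 4 KiB window) selects exactly that raw format.
 */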
static int qcow_decompress_buffer(u8 *out_buf, int out_buf_size,
	const u8 *buf, int buf_size)
{
#ifdef CONFIG_HAS_ZLIB
	z_stream strm1, *strm = &strm1;
	int ret, out_len;

	memset(strm, 0, sizeof(*strm));

	strm->next_in = (u8 *)buf;
	strm->avail_in = buf_size;
	strm->next_out = out_buf;
	strm->avail_out = out_buf_size;

	ret = inflateInit2(strm, -12);
	if (ret != Z_OK)
		return -1;

	ret = inflate(strm, Z_FINISH);
	out_len = strm->next_out - out_buf;
	if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
		out_len != out_buf_size) {
		inflateEnd(strm);
		return -1;
	}

	inflateEnd(strm);
	return 0;
#else
	return -1;
#endif
}

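/*
 * In a qcow1 L2 entry with QCOW1_OFLAG_COMPRESSED set, the low bits
 * (cluster_offset_mask) hold the file offset of the compressed data;
 * the compressed byte size sits in the high bits and is extracted by
 * shifting right by (63 - cluster_bits) and masking with
 * (cluster_size - 1), which is what the coffset/csize code below does.
 */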
static ssize_t qcow1_read_cluster(struct qcow *q, u64 offset,
	void *dst, u32 dst_len)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;
	u64 clust_offset;
	u64 clust_start;
	u64 l2t_offset;
	size_t length;
	u64 l2t_size;
	u64 l1_idx;
	u64 l2_idx;
	int coffset;
	int csize;

	l1_idx = get_l1_index(q, offset);
	if (l1_idx >= l1t->table_size)
		return -1;

	clust_offset = get_cluster_offset(q, offset);
	if (clust_offset >= q->cluster_size)
		return -1;

	length = q->cluster_size - clust_offset;
	if (length > dst_len)
		length = dst_len;

	mutex_lock(&q->mutex);

	l2t_offset = be64_to_cpu(l1t->l1_table[l1_idx]);
	if (!l2t_offset)
		goto zero_cluster;

	l2t_size = 1 << header->l2_bits;

	/* read and cache level 2 table */
	l2t = qcow_read_l2_table(q, l2t_offset);
	if (!l2t)
		goto out_error;

	l2_idx = get_l2_index(q, offset);
	if (l2_idx >= l2t_size)
		goto out_error;

	clust_start = be64_to_cpu(l2t->table[l2_idx]);
	if (clust_start & QCOW1_OFLAG_COMPRESSED) {
		coffset = clust_start & q->cluster_offset_mask;
		csize = clust_start >> (63 - q->header->cluster_bits);
		csize &= (q->cluster_size - 1);

		if (pread_in_full(q->fd, q->cluster_data, csize,
				coffset) < 0)
			goto out_error;

		if (qcow_decompress_buffer(q->cluster_cache, q->cluster_size,
				q->cluster_data, csize) < 0)
			goto out_error;

		memcpy(dst, q->cluster_cache + clust_offset, length);
		mutex_unlock(&q->mutex);
	} else {
		if (!clust_start)
			goto zero_cluster;

		mutex_unlock(&q->mutex);

		if (pread_in_full(q->fd, dst, length,
				clust_start + clust_offset) < 0)
			return -1;
	}

	return length;

zero_cluster:
	mutex_unlock(&q->mutex);
	memset(dst, 0, length);
	return length;

out_error:
	mutex_unlock(&q->mutex);
	length = -1;
	return -1;
}

static ssize_t qcow2_read_cluster(struct qcow *q, u64 offset,
	void *dst, u32 dst_len)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;
	u64 clust_offset;
	u64 clust_start;
	u64 l2t_offset;
	size_t length;
	u64 l2t_size;
	u64 l1_idx;
	u64 l2_idx;
	int coffset;
	int sector_offset;
	int nb_csectors;
	int csize;

	l1_idx = get_l1_index(q, offset);
	if (l1_idx >= l1t->table_size)
		return -1;

	clust_offset = get_cluster_offset(q, offset);
	if (clust_offset >= q->cluster_size)
		return -1;

	length = q->cluster_size - clust_offset;
	if (length > dst_len)
		length = dst_len;

	mutex_lock(&q->mutex);

	l2t_offset = be64_to_cpu(l1t->l1_table[l1_idx]);

	l2t_offset &= ~QCOW2_OFLAG_COPIED;
	if (!l2t_offset)
		goto zero_cluster;

	l2t_size = 1 << header->l2_bits;

	/* read and cache level 2 table */
	l2t = qcow_read_l2_table(q, l2t_offset);
	if (!l2t)
		goto out_error;

	l2_idx = get_l2_index(q, offset);
	if (l2_idx >= l2t_size)
		goto out_error;

	clust_start = be64_to_cpu(l2t->table[l2_idx]);
	if (clust_start & QCOW2_OFLAG_COMPRESSED) {
		coffset = clust_start & q->cluster_offset_mask;
		nb_csectors = ((clust_start >> q->csize_shift)
			& q->csize_mask) + 1;
		sector_offset = coffset & (SECTOR_SIZE - 1);
		csize = nb_csectors * SECTOR_SIZE - sector_offset;

		if (pread_in_full(q->fd, q->cluster_data,
				nb_csectors * SECTOR_SIZE,
				coffset & ~(SECTOR_SIZE - 1)) < 0) {
			goto out_error;
		}

		if (qcow_decompress_buffer(q->cluster_cache, q->cluster_size,
				q->cluster_data + sector_offset,
				csize) < 0) {
			goto out_error;
		}

		memcpy(dst, q->cluster_cache + clust_offset, length);
		mutex_unlock(&q->mutex);
	} else {
		clust_start &= QCOW2_OFFSET_MASK;
		if (!clust_start)
			goto zero_cluster;

		mutex_unlock(&q->mutex);

		if (pread_in_full(q->fd, dst, length,
				clust_start + clust_offset) < 0)
			return -1;
	}

	return length;

zero_cluster:
	mutex_unlock(&q->mutex);
	memset(dst, 0, length);
	return length;

out_error:
	mutex_unlock(&q->mutex);
	length = -1;
	return -1;
}

static ssize_t qcow_read_sector_single(struct disk_image *disk, u64 sector,
	void *dst, u32 dst_len)
{
	struct qcow *q = disk->priv;
	struct qcow_header *header = q->header;
	u32 nr_read;
	u64 offset;
	char *buf;
	ssize_t nr;

	buf = dst;
	nr_read = 0;

	while (nr_read < dst_len) {
		offset = sector << SECTOR_SHIFT;
		if (offset >= header->size)
			return -1;

		if (q->version == QCOW1_VERSION)
			nr = qcow1_read_cluster(q, offset, buf,
				dst_len - nr_read);
		else
			nr = qcow2_read_cluster(q, offset, buf,
				dst_len - nr_read);

		if (nr <= 0)
			return -1;

		nr_read += nr;
		buf += nr;
		sector += (nr >> SECTOR_SHIFT);
	}

	return dst_len;
}

static ssize_t qcow_read_sector(struct disk_image *disk, u64 sector,
	const struct iovec *iov, int iovcount, void *param)
{
	ssize_t nr, total = 0;

	while (iovcount--) {
		nr = qcow_read_sector_single(disk, sector, iov->iov_base, iov->iov_len);
		if (nr != (ssize_t)iov->iov_len) {
			pr_info("qcow_read_sector error: nr=%ld iov_len=%ld\n", (long)nr, (long)iov->iov_len);
			return -1;
		}

		sector += iov->iov_len >> SECTOR_SHIFT;
		total += nr;
		iov++;
	}

	return total;
}

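/*
 * Refcount blocks are cached with the same scheme as L2 tables above:
 * an RB tree keyed by file offset for lookups plus an LRU list capped
 * at MAX_CACHE_NODES for eviction. Dirty blocks are written back
 * explicitly via write_refcount_block() after each update, not at
 * eviction time.
 */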
static void refcount_table_free_cache(struct qcow_refcount_table *rft)
{
	struct rb_root *r = &rft->root;
	struct list_head *pos, *n;
	struct qcow_refcount_block *t;

	list_for_each_safe(pos, n, &rft->lru_list) {
		list_del(pos);
		t = list_entry(pos, struct qcow_refcount_block, list);
		rb_erase(&t->node, r);

		free(t);
	}
}

static int refcount_block_insert(struct rb_root *root, struct qcow_refcount_block *new)
{
	struct rb_node **link = &(root->rb_node), *parent = NULL;
	u64 offset = new->offset;

	/* search the tree */
	while (*link) {
		struct qcow_refcount_block *t;

		t = rb_entry(*link, struct qcow_refcount_block, node);
		if (!t)
			goto error;

		parent = *link;

		if (t->offset > offset)
			link = &(*link)->rb_left;
		else if (t->offset < offset)
			link = &(*link)->rb_right;
		else
			goto out;
	}

	/* add new node */
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
out:
	return 0;
error:
	return -1;
}

static int write_refcount_block(struct qcow *q, struct qcow_refcount_block *rfb)
{
	if (!rfb->dirty)
		return 0;

	if (qcow_pwrite_sync(q->fd, rfb->entries,
			rfb->size * sizeof(u16), rfb->offset) < 0)
		return -1;

	rfb->dirty = 0;

	return 0;
}

static int cache_refcount_block(struct qcow *q, struct qcow_refcount_block *c)
{
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct rb_root *r = &rft->root;
	struct qcow_refcount_block *lru;

	if (rft->nr_cached == MAX_CACHE_NODES) {
		lru = list_first_entry(&rft->lru_list, struct qcow_refcount_block, list);

		rb_erase(&lru->node, r);
		list_del_init(&lru->list);
		rft->nr_cached--;

		free(lru);
	}

	if (refcount_block_insert(r, c) < 0)
		goto error;

	list_add_tail(&c->list, &rft->lru_list);
	rft->nr_cached++;

	return 0;
error:
	return -1;
}

static struct qcow_refcount_block *new_refcount_block(struct qcow *q, u64 rfb_offset)
{
	struct qcow_refcount_block *rfb;

	rfb = malloc(sizeof *rfb + q->cluster_size);
	if (!rfb)
		return NULL;

	rfb->offset = rfb_offset;
	rfb->size = q->cluster_size / sizeof(u16);
	RB_CLEAR_NODE(&rfb->node);
	INIT_LIST_HEAD(&rfb->list);

	return rfb;
}

static struct qcow_refcount_block *refcount_block_lookup(struct rb_root *root, u64 offset)
{
	struct rb_node *link = root->rb_node;

	while (link) {
		struct qcow_refcount_block *t;

		t = rb_entry(link, struct qcow_refcount_block, node);
		if (!t)
			goto out;

		if (t->offset > offset)
			link = link->rb_left;
		else if (t->offset < offset)
			link = link->rb_right;
		else
			return t;
	}
out:
	return NULL;
}

static struct qcow_refcount_block *refcount_block_search(struct qcow *q, u64 offset)
{
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct qcow_refcount_block *rfb;

	rfb = refcount_block_lookup(&rft->root, offset);
	if (!rfb)
		return NULL;

	/* Update the LRU state by moving the searched node to the list tail */
	list_move_tail(&rfb->list, &rft->lru_list);

	return rfb;
}

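/*
 * Grow the refcount metadata when a cluster index falls past the last
 * allocated refcount block. The new block is allocated with
 * update_ref == 0 and written out before its own refcount is bumped,
 * because update_cluster_refcount() must be able to find the block
 * through the refcount table entry that has just been hooked up.
 */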
static struct qcow_refcount_block *qcow_grow_refcount_block(struct qcow *q,
	u64 clust_idx)
{
	struct qcow_header *header = q->header;
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct qcow_refcount_block *rfb;
	u64 new_block_offset;
	u64 rft_idx;

	rft_idx = clust_idx >> (header->cluster_bits -
		QCOW_REFCOUNT_BLOCK_SHIFT);

	if (rft_idx >= rft->rf_size) {
		pr_warning("Growing the refcount table is not supported");
		return NULL;
	}

	new_block_offset = qcow_alloc_clusters(q, q->cluster_size, 0);
	if (new_block_offset == (u64)-1)
		return NULL;

	rfb = new_refcount_block(q, new_block_offset);
	if (!rfb)
		return NULL;

	memset(rfb->entries, 0x00, q->cluster_size);
	rfb->dirty = 1;

	/* write refcount block */
	if (write_refcount_block(q, rfb) < 0)
		goto free_rfb;

	if (cache_refcount_block(q, rfb) < 0)
		goto free_rfb;

	rft->rf_table[rft_idx] = cpu_to_be64(new_block_offset);
	if (update_cluster_refcount(q, new_block_offset >>
			header->cluster_bits, 1) < 0)
		goto recover_rft;

	if (qcow_write_refcount_table(q) < 0)
		goto recover_rft;

	return rfb;

recover_rft:
	rft->rf_table[rft_idx] = 0;
free_rfb:
	free(rfb);
	return NULL;
}

static struct qcow_refcount_block *qcow_read_refcount_block(struct qcow *q, u64 clust_idx)
{
	struct qcow_header *header = q->header;
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct qcow_refcount_block *rfb;
	u64 rfb_offset;
	u64 rft_idx;

	rft_idx = clust_idx >> (header->cluster_bits - QCOW_REFCOUNT_BLOCK_SHIFT);
	if (rft_idx >= rft->rf_size)
		return ERR_PTR(-ENOSPC);

	rfb_offset = be64_to_cpu(rft->rf_table[rft_idx]);
	if (!rfb_offset)
		return ERR_PTR(-ENOSPC);

	rfb = refcount_block_search(q, rfb_offset);
	if (rfb)
		return rfb;

	rfb = new_refcount_block(q, rfb_offset);
	if (!rfb)
		return NULL;

	if (pread_in_full(q->fd, rfb->entries, rfb->size * sizeof(u16), rfb_offset) < 0)
		goto error_free_rfb;

	if (cache_refcount_block(q, rfb) < 0)
		goto error_free_rfb;

	return rfb;

error_free_rfb:
	free(rfb);

	return NULL;
}

static u16 qcow_get_refcount(struct qcow *q, u64 clust_idx)
{
	struct qcow_refcount_block *rfb = NULL;
	struct qcow_header *header = q->header;
	u64 rfb_idx;

	rfb = qcow_read_refcount_block(q, clust_idx);
	if (PTR_ERR(rfb) == -ENOSPC)
		return 0;
	else if (IS_ERR_OR_NULL(rfb)) {
		pr_warning("Error while reading refcount table");
		return -1;
	}

	rfb_idx = clust_idx & (((1ULL <<
		(header->cluster_bits - QCOW_REFCOUNT_BLOCK_SHIFT)) - 1));

	if (rfb_idx >= rfb->size) {
		pr_warning("L1: refcount block index out of bounds");
		return -1;
	}

	return be16_to_cpu(rfb->entries[rfb_idx]);
}

static int update_cluster_refcount(struct qcow *q, u64 clust_idx, u16 append)
{
	struct qcow_refcount_block *rfb = NULL;
	struct qcow_header *header = q->header;
	u16 refcount;
	u64 rfb_idx;

	rfb = qcow_read_refcount_block(q, clust_idx);
	if (PTR_ERR(rfb) == -ENOSPC) {
		rfb = qcow_grow_refcount_block(q, clust_idx);
		if (!rfb) {
			pr_warning("error while growing refcount table");
			return -1;
		}
	} else if (IS_ERR_OR_NULL(rfb)) {
		pr_warning("error while reading refcount table");
		return -1;
	}

	rfb_idx = clust_idx & (((1ULL <<
		(header->cluster_bits - QCOW_REFCOUNT_BLOCK_SHIFT)) - 1));
	if (rfb_idx >= rfb->size) {
		pr_warning("refcount block index out of bounds");
		return -1;
	}

	refcount = be16_to_cpu(rfb->entries[rfb_idx]) + append;
	rfb->entries[rfb_idx] = cpu_to_be16(refcount);
	rfb->dirty = 1;

	/* write refcount block */
	if (write_refcount_block(q, rfb) < 0) {
		pr_warning("error writing refcount block");
		return -1;
	}

	/* update free_clust_idx since refcount becomes zero */
	if (!refcount && clust_idx < q->free_clust_idx)
		q->free_clust_idx = clust_idx;

	return 0;
}

static void qcow_free_clusters(struct qcow *q, u64 clust_start, u64 size)
{
	struct qcow_header *header = q->header;
	u64 start, end, offset;

	start = clust_start & ~(q->cluster_size - 1);
	end = (clust_start + size - 1) & ~(q->cluster_size - 1);
	for (offset = start; offset <= end; offset += q->cluster_size)
		update_cluster_refcount(q, offset >> header->cluster_bits, -1);
}

/*
 * Allocate a run of clusters large enough to hold 'size' bytes.
 * The scan starts from free_clust_idx, which is initialized to zero
 * and records where the last allocation ended, so repeated
 * allocations do not rescan the whole image.
 */
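/*
 * Example (hypothetical numbers): with 64 KiB clusters
 * (cluster_bits = 16), a request for size = 100 KiB rounds up to
 * clust_num = 2 clusters, and the returned value is the byte offset
 * of the first cluster of the run, i.e. clust_idx << cluster_bits.
 */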
static u64 qcow_alloc_clusters(struct qcow *q, u64 size, int update_ref)
{
	struct qcow_header *header = q->header;
	u16 clust_refcount;
	u32 clust_idx = 0, i;
	u64 clust_num;

	clust_num = (size + (q->cluster_size - 1)) >> header->cluster_bits;

again:
	for (i = 0; i < clust_num; i++) {
		clust_idx = q->free_clust_idx++;
		clust_refcount = qcow_get_refcount(q, clust_idx);
		if (clust_refcount == (u16)-1)
			return -1;
		else if (clust_refcount > 0)
			goto again;
	}

	clust_idx++;

	if (update_ref)
		for (i = 0; i < clust_num; i++)
			if (update_cluster_refcount(q,
					clust_idx - clust_num + i, 1))
				return -1;

	return (clust_idx - clust_num) << header->cluster_bits;
}

static int qcow_write_l1_table(struct qcow *q)
{
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_header *header = q->header;

	if (qcow_pwrite_sync(q->fd, l1t->l1_table,
			l1t->table_size * sizeof(u64),
			header->l1_table_offset) < 0)
		return -1;

	return 0;
}

/*
 * Get the L2 table for an offset. If the table is marked COPIED it is
 * private to this image and can be modified in place. Otherwise
 * allocate a new cluster, copy (or zero) the table into it, point the
 * L1 entry at the copy, and free the old cluster.
 */
static int get_cluster_table(struct qcow *q, u64 offset,
	struct qcow_l2_table **result_l2t, u64 *result_l2_index)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;
	u64 l1t_idx;
	u64 l2t_offset;
	u64 l2t_idx;
	u64 l2t_size;
	u64 l2t_new_offset;

	l2t_size = 1 << header->l2_bits;

	l1t_idx = get_l1_index(q, offset);
	if (l1t_idx >= l1t->table_size)
		return -1;

	l2t_idx = get_l2_index(q, offset);
	if (l2t_idx >= l2t_size)
		return -1;

	l2t_offset = be64_to_cpu(l1t->l1_table[l1t_idx]);
	if (l2t_offset & QCOW2_OFLAG_COPIED) {
		l2t_offset &= ~QCOW2_OFLAG_COPIED;
		l2t = qcow_read_l2_table(q, l2t_offset);
		if (!l2t)
			goto error;
	} else {
		l2t_new_offset = qcow_alloc_clusters(q,
			l2t_size * sizeof(u64), 1);

		if (l2t_new_offset == (u64)-1)
			goto error;

		l2t = new_cache_table(q, l2t_new_offset);
		if (!l2t)
			goto free_cluster;

		if (l2t_offset) {
			l2t = qcow_read_l2_table(q, l2t_offset);
			if (!l2t)
				goto free_cache;
		} else
			memset(l2t->table, 0x00, l2t_size * sizeof(u64));

		/* write l2 table */
		l2t->dirty = 1;
		if (qcow_l2_cache_write(q, l2t) < 0)
			goto free_cache;

		/* cache l2 table */
		if (cache_table(q, l2t))
			goto free_cache;

		/* update the L1 table */
		l1t->l1_table[l1t_idx] = cpu_to_be64(l2t_new_offset
			| QCOW2_OFLAG_COPIED);
		if (qcow_write_l1_table(q)) {
			pr_warning("Update l1 table error");
			goto free_cache;
		}

		/* free old cluster */
		qcow_free_clusters(q, l2t_offset, q->cluster_size);
	}

	*result_l2t = l2t;
	*result_l2_index = l2t_idx;

	return 0;

free_cache:
	free(l2t);

free_cluster:
	qcow_free_clusters(q, l2t_new_offset, q->cluster_size);

error:
	return -1;
}

/*
 * Write data into a cluster. If the cluster is marked COPIED, write in
 * place. Otherwise allocate a new cluster, merge the new data with the
 * original contents (read back, or zeroes if the cluster was unmapped),
 * and write the combined result to the new cluster.
 */
static ssize_t qcow_write_cluster(struct qcow *q, u64 offset,
	void *buf, u32 src_len)
{
	struct qcow_l2_table *l2t;
	u64 clust_new_start;
	u64 clust_start;
	u64 clust_flags;
	u64 clust_off;
	u64 l2t_idx;
	u64 len;

	l2t = NULL;

	clust_off = get_cluster_offset(q, offset);
	if (clust_off >= q->cluster_size)
		return -1;

	len = q->cluster_size - clust_off;
	if (len > src_len)
		len = src_len;

	mutex_lock(&q->mutex);

	if (get_cluster_table(q, offset, &l2t, &l2t_idx)) {
		pr_warning("Get l2 table error");
		goto error;
	}

	clust_start = be64_to_cpu(l2t->table[l2t_idx]);
	clust_flags = clust_start & QCOW2_OFLAGS_MASK;

	clust_start &= QCOW2_OFFSET_MASK;
	if (!(clust_flags & QCOW2_OFLAG_COPIED)) {
		clust_new_start = qcow_alloc_clusters(q, q->cluster_size, 1);
		if (clust_new_start == (u64)-1) {
			pr_warning("Cluster alloc error");
			goto error;
		}

		offset &= ~(q->cluster_size - 1);

		/* if clust_start is not zero, read the original data */
		if (clust_start) {
			mutex_unlock(&q->mutex);
			if (qcow2_read_cluster(q, offset, q->copy_buff,
					q->cluster_size) < 0) {
				pr_warning("Read copy cluster error");
				qcow_free_clusters(q, clust_new_start,
					q->cluster_size);
				return -1;
			}
			mutex_lock(&q->mutex);
		} else
			memset(q->copy_buff, 0x00, q->cluster_size);

		memcpy(q->copy_buff + clust_off, buf, len);

		/* Write actual data */
		if (pwrite_in_full(q->fd, q->copy_buff, q->cluster_size,
				clust_new_start) < 0)
			goto free_cluster;

		/* update l2 table */
		l2t->table[l2t_idx] = cpu_to_be64(clust_new_start
			| QCOW2_OFLAG_COPIED);
		l2t->dirty = 1;

		if (qcow_l2_cache_write(q, l2t))
			goto free_cluster;

		/* free old cluster */
		if (clust_flags & QCOW2_OFLAG_COMPRESSED) {
			int size;
			size = ((clust_start >> q->csize_shift) &
				q->csize_mask) + 1;
			size *= 512;
			clust_start &= q->cluster_offset_mask;
			clust_start &= ~511;

			qcow_free_clusters(q, clust_start, size);
		} else if (clust_start)
			qcow_free_clusters(q, clust_start, q->cluster_size);

	} else {
		/* Write actual data */
		if (pwrite_in_full(q->fd, buf, len,
				clust_start + clust_off) < 0)
			goto error;
	}
	mutex_unlock(&q->mutex);
	return len;

free_cluster:
	qcow_free_clusters(q, clust_new_start, q->cluster_size);

error:
	mutex_unlock(&q->mutex);
	return -1;
}

static ssize_t qcow_write_sector_single(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
	struct qcow *q = disk->priv;
	struct qcow_header *header = q->header;
	u32 nr_written;
	char *buf;
	u64 offset;
	ssize_t nr;

	buf = src;
	nr_written = 0;
	offset = sector << SECTOR_SHIFT;

	while (nr_written < src_len) {
		if (offset >= header->size)
			return -1;

		nr = qcow_write_cluster(q, offset, buf, src_len - nr_written);
		if (nr < 0)
			return -1;

		nr_written += nr;
		buf += nr;
		offset += nr;
	}

	return nr_written;
}

static ssize_t qcow_write_sector(struct disk_image *disk, u64 sector,
	const struct iovec *iov, int iovcount, void *param)
{
	ssize_t nr, total = 0;

	while (iovcount--) {
		nr = qcow_write_sector_single(disk, sector, iov->iov_base, iov->iov_len);
		if (nr != (ssize_t)iov->iov_len) {
			pr_info("qcow_write_sector error: nr=%ld iov_len=%ld\n", (long)nr, (long)iov->iov_len);
			return -1;
		}

		sector += iov->iov_len >> SECTOR_SHIFT;
		iov++;
		total += nr;
	}

	return total;
}

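/*
 * Flush ordering matters: dirty refcount blocks and L2 tables are
 * written back (each with its own sync) before the L1 table, and only
 * then is the whole file fsync()ed, so on-disk metadata never points
 * at clusters whose bookkeeping has not reached the disk.
 */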
static int qcow_disk_flush(struct disk_image *disk)
{
	struct qcow *q = disk->priv;
	struct qcow_refcount_table *rft;
	struct list_head *pos, *n;
	struct qcow_l1_table *l1t;

	l1t = &q->table;
	rft = &q->refcount_table;

	mutex_lock(&q->mutex);

	list_for_each_safe(pos, n, &rft->lru_list) {
		struct qcow_refcount_block *c = list_entry(pos, struct qcow_refcount_block, list);

		if (write_refcount_block(q, c) < 0)
			goto error_unlock;
	}

	list_for_each_safe(pos, n, &l1t->lru_list) {
		struct qcow_l2_table *c = list_entry(pos, struct qcow_l2_table, list);

		if (qcow_l2_cache_write(q, c) < 0)
			goto error_unlock;
	}

	if (qcow_write_l1_table(q) < 0)
Pekka Enberg | a4e4651 | 2011-07-09 10:58:37 +0300 | [diff] [blame] | 1149 | goto error_unlock; |
| 1150 | |
| 1151 | mutex_unlock(&q->mutex); |
Pekka Enberg | 73984b1 | 2011-07-09 10:23:07 +0300 | [diff] [blame] | 1152 | |
Pekka Enberg | 659f418 | 2011-07-09 10:15:12 +0300 | [diff] [blame] | 1153 | return fsync(disk->fd); |
Pekka Enberg | a4e4651 | 2011-07-09 10:58:37 +0300 | [diff] [blame] | 1154 | |
| 1155 | error_unlock: |
| 1156 | mutex_unlock(&q->mutex); |
| 1157 | return -1; |
Pekka Enberg | 659f418 | 2011-07-09 10:15:12 +0300 | [diff] [blame] | 1158 | } |
| 1159 | |
Pekka Enberg | b1c8409 | 2011-05-29 11:57:19 +0300 | [diff] [blame] | 1160 | static int qcow_disk_close(struct disk_image *disk) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1161 | { |
| 1162 | struct qcow *q; |
| 1163 | |
Sasha Levin | 43835ac | 2011-05-11 19:52:56 +0300 | [diff] [blame] | 1164 | if (!disk) |
Asias He | 72133dd | 2011-05-18 16:19:09 +0800 | [diff] [blame] | 1165 | return 0; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1166 | |
Sasha Levin | 43835ac | 2011-05-11 19:52:56 +0300 | [diff] [blame] | 1167 | q = disk->priv; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1168 | |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1169 | refcount_table_free_cache(&q->refcount_table); |
Pekka Enberg | e94cdf0 | 2011-07-24 22:04:01 +0300 | [diff] [blame] | 1170 | l1_table_free_cache(&q->table); |
Lan Tianyu | e184700 | 2011-11-29 15:30:26 +0800 | [diff] [blame] | 1171 | free(q->copy_buff); |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1172 | free(q->cluster_data); |
| 1173 | free(q->cluster_cache); |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1174 | free(q->refcount_table.rf_table); |
Prasad Joshi | 6c6f79b | 2011-04-15 15:18:57 +0100 | [diff] [blame] | 1175 | free(q->table.l1_table); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1176 | free(q->header); |
| 1177 | free(q); |
Asias He | 72133dd | 2011-05-18 16:19:09 +0800 | [diff] [blame] | 1178 | |
| 1179 | return 0; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1180 | } |
| 1181 | |
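/*
 * Read-only images get an ops table with no .write/.flush; writes are
 * then expected to be rejected by the generic disk-image layer
 * (assumption: absent ops are treated as unsupported).
 */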
Pekka Enberg | b1c8409 | 2011-05-29 11:57:19 +0300 | [diff] [blame] | 1182 | static struct disk_image_operations qcow_disk_readonly_ops = { |
Asias He | dcd3cd8 | 2012-06-04 23:25:38 +0800 | [diff] [blame] | 1183 | .read = qcow_read_sector, |
| 1184 | .close = qcow_disk_close, |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1185 | }; |
| 1186 | |
Pekka Enberg | b1c8409 | 2011-05-29 11:57:19 +0300 | [diff] [blame] | 1187 | static struct disk_image_operations qcow_disk_ops = { |
Asias He | dcd3cd8 | 2012-06-04 23:25:38 +0800 | [diff] [blame] | 1188 | .read = qcow_read_sector, |
| 1189 | .write = qcow_write_sector, |
| 1190 | .flush = qcow_disk_flush, |
| 1191 | .close = qcow_disk_close, |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1192 | }; |
| 1193 | |
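/*
 * header->refcount_table_size is in clusters (taken from the on-disk
 * refcount_table_clusters field), so the table holds
 * refcount_table_size * cluster_size / sizeof(u64) entries.
 */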
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1194 | static int qcow_read_refcount_table(struct qcow *q) |
| 1195 | { |
| 1196 | struct qcow_header *header = q->header; |
| 1197 | struct qcow_refcount_table *rft = &q->refcount_table; |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1198 | |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1199 | rft->rf_size = (header->refcount_table_size * q->cluster_size) |
| 1200 | / sizeof(u64); |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1201 | |
| 1202 | rft->rf_table = calloc(rft->rf_size, sizeof(u64)); |
| 1203 | if (!rft->rf_table) |
| 1204 | return -1; |
| 1205 | |
Andre Przywara | 15542ba | 2015-07-17 17:02:07 +0100 | [diff] [blame] | 1206 | rft->root = (struct rb_root) RB_ROOT; |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1207 | INIT_LIST_HEAD(&rft->lru_list); |
| 1208 | |
| 1209 | return pread_in_full(q->fd, rft->rf_table, sizeof(u64) * rft->rf_size, header->refcount_table_offset); |
| 1210 | } |
| 1211 | |
Lan Tianyu | 2d2179c | 2011-12-15 21:40:44 +0800 | [diff] [blame] | 1212 | static int qcow_write_refcount_table(struct qcow *q) |
| 1213 | { |
| 1214 | struct qcow_header *header = q->header; |
| 1215 | struct qcow_refcount_table *rft = &q->refcount_table; |
| 1216 | |
| 1217 | return qcow_pwrite_sync(q->fd, rft->rf_table, |
| 1218 | rft->rf_size * sizeof(u64), header->refcount_table_offset); |
| 1219 | } |
| 1220 | |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1221 | static int qcow_read_l1_table(struct qcow *q) |
| 1222 | { |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1223 | struct qcow_header *header = q->header; |
Pekka Enberg | 473aaa2 | 2011-07-24 12:02:34 +0300 | [diff] [blame] | 1224 | struct qcow_l1_table *table = &q->table; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1225 | |
Asias He | 449ca0a | 2012-06-05 22:42:44 +0800 | [diff] [blame] | 1226 | table->table_size = header->l1_size; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1227 | |
Prasad Joshi | 00adcc1 | 2011-04-15 15:18:56 +0100 | [diff] [blame] | 1228 | table->l1_table = calloc(table->table_size, sizeof(u64)); |
| 1229 | if (!table->l1_table) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1230 | return -1; |
| 1231 | |
Pekka Enberg | 659f418 | 2011-07-09 10:15:12 +0300 | [diff] [blame] | 1232 | return pread_in_full(q->fd, table->l1_table, sizeof(u64) * table->table_size, header->l1_table_offset); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1233 | } |
| 1234 | |
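/*
 * Read the big-endian on-disk QCOW2 header and repackage it into the
 * version-independent struct qcow_header. l2_bits is cluster_bits - 3
 * because each L2 entry is an 8-byte offset, so one cluster holds
 * 2^(cluster_bits - 3) of them.
 */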
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1235 | static void *qcow2_read_header(int fd) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1236 | { |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1237 | struct qcow2_header_disk f_header; |
| 1238 | struct qcow_header *header; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1239 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1240 | header = malloc(sizeof(struct qcow_header)); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1241 | if (!header) |
| 1242 | return NULL; |
| 1243 | |
Prasad Joshi | 0657f33 | 2011-05-06 17:39:46 +0100 | [diff] [blame] | 1244 | if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0) { |
| 1245 | free(header); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1246 | return NULL; |
Prasad Joshi | 0657f33 | 2011-05-06 17:39:46 +0100 | [diff] [blame] | 1247 | } |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1248 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1249 | be32_to_cpus(&f_header.magic); |
| 1250 | be32_to_cpus(&f_header.version); |
| 1251 | be64_to_cpus(&f_header.backing_file_offset); |
| 1252 | be32_to_cpus(&f_header.backing_file_size); |
| 1253 | be32_to_cpus(&f_header.cluster_bits); |
| 1254 | be64_to_cpus(&f_header.size); |
| 1255 | be32_to_cpus(&f_header.crypt_method); |
| 1256 | be32_to_cpus(&f_header.l1_size); |
| 1257 | be64_to_cpus(&f_header.l1_table_offset); |
| 1258 | be64_to_cpus(&f_header.refcount_table_offset); |
| 1259 | be32_to_cpus(&f_header.refcount_table_clusters); |
| 1260 | be32_to_cpus(&f_header.nb_snapshots); |
| 1261 | be64_to_cpus(&f_header.snapshots_offset); |
| 1262 | |
| 1263 | *header = (struct qcow_header) { |
| 1264 | .size = f_header.size, |
| 1265 | .l1_table_offset = f_header.l1_table_offset, |
| 1266 | .l1_size = f_header.l1_size, |
| 1267 | .cluster_bits = f_header.cluster_bits, |
| 1268 | .l2_bits = f_header.cluster_bits - 3, |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1269 | .refcount_table_offset = f_header.refcount_table_offset, |
| 1270 | .refcount_table_size = f_header.refcount_table_clusters, |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1271 | }; |
| 1272 | |
| 1273 | return header; |
| 1274 | } |
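
/*
 * For reference, a guest offset decomposes against the header fields
 * above as follows (sketch, per the standard qcow layout):
 *
 *	l1_idx    = offset >> (cluster_bits + l2_bits);
 *	l2_idx    = (offset >> cluster_bits) & ((1 << l2_bits) - 1);
 *	clust_off = offset & ((1 << cluster_bits) - 1);
 */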
| 1275 | |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1276 | static struct disk_image *qcow2_probe(int fd, bool readonly) |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1277 | { |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1278 | struct disk_image *disk_image; |
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1279 | struct qcow_l1_table *l1t; |
| 1280 | struct qcow_header *h; |
| 1281 | struct qcow *q; |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1282 | |
| 1283 | q = calloc(1, sizeof(struct qcow)); |
| 1284 | if (!q) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1285 | return NULL; |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1286 | |
Pekka Enberg | c0799eb | 2011-07-09 14:04:12 +0300 | [diff] [blame] | 1287 | mutex_init(&q->mutex); |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1288 | q->fd = fd; |
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1289 | |
| 1290 | l1t = &q->table; |
| 1291 | |
Andre Przywara | 15542ba | 2015-07-17 17:02:07 +0100 | [diff] [blame] | 1292 | l1t->root = (struct rb_root) RB_ROOT; |
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1293 | INIT_LIST_HEAD(&l1t->lru_list); |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1294 | |
| 1295 | h = q->header = qcow2_read_header(fd); |
| 1296 | if (!h) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1297 | goto free_qcow; |
| 1298 | |
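	/*
	 * Per the qcow2 format, a compressed cluster descriptor stores
	 * the host offset in its low csize_shift bits and the compressed
	 * size (in 512-byte sectors) in the bits above; csize_mask
	 * extracts that size field.
	 */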
| 1299 | q->version = QCOW2_VERSION; |
| 1300 | q->csize_shift = (62 - (q->header->cluster_bits - 8)); |
| 1301 | q->csize_mask = (1 << (q->header->cluster_bits - 8)) - 1; |
| 1302 | q->cluster_offset_mask = (1LL << q->csize_shift) - 1; |
| 1303 | q->cluster_size = 1 << q->header->cluster_bits; |
| 1304 | |
Lan Tianyu | e184700 | 2011-11-29 15:30:26 +0800 | [diff] [blame] | 1305 | q->copy_buff = malloc(q->cluster_size); |
| 1306 | if (!q->copy_buff) { |
| 1307 | pr_warning("copy buff malloc error"); |
| 1308 | goto free_header; |
| 1309 | } |
| 1310 | |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1311 | q->cluster_data = malloc(q->cluster_size); |
| 1312 | if (!q->cluster_data) { |
		pr_warning("failed to allocate cluster data buffer");
| 1314 | goto free_copy_buff; |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1315 | } |
| 1316 | |
| 1317 | q->cluster_cache = malloc(q->cluster_size); |
| 1318 | if (!q->cluster_cache) { |
		pr_warning("failed to allocate cluster cache");
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1320 | goto free_cluster_data; |
| 1321 | } |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1322 | |
| 1323 | if (qcow_read_l1_table(q) < 0) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1324 | goto free_cluster_cache; |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1325 | |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1326 | if (qcow_read_refcount_table(q) < 0) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1327 | goto free_l1_table; |
Pekka Enberg | 3ecac80 | 2011-07-21 12:04:39 +0300 | [diff] [blame] | 1328 | |
Asias He | 7d22135 | 2011-05-18 16:19:07 +0800 | [diff] [blame] | 1329 | /* |
	 * Do not use mmap; use read/write instead.
| 1331 | */ |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1332 | if (readonly) |
Sasha Levin | 38c396e | 2011-11-02 07:41:05 +0200 | [diff] [blame] | 1333 | disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_REGULAR); |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1334 | else |
Sasha Levin | 38c396e | 2011-11-02 07:41:05 +0200 | [diff] [blame] | 1335 | disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_REGULAR); |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1336 | |
Sasha Levin | 9f9207c | 2011-12-19 11:23:00 +0200 | [diff] [blame] | 1337 | if (IS_ERR_OR_NULL(disk_image)) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1338 | goto free_refcount_table; |
Sasha Levin | f41a132 | 2011-11-02 07:41:14 +0200 | [diff] [blame] | 1339 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1340 | disk_image->priv = q; |
| 1341 | |
| 1342 | return disk_image; |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1343 | |
free_refcount_table:
	free(q->refcount_table.rf_table);
free_l1_table:
	free(q->table.l1_table);
free_cluster_cache:
	free(q->cluster_cache);
free_cluster_data:
	free(q->cluster_data);
free_copy_buff:
	free(q->copy_buff);
free_header:
	free(q->header);
| 1362 | free_qcow: |
Sasha Levin | 379d476 | 2012-12-20 14:11:10 -0500 | [diff] [blame] | 1363 | free(q); |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1364 | |
| 1365 | return NULL; |
| 1366 | } |
| 1367 | |
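/* Probe check: accept only the QCOW magic ("QFI\xfb") with version 2. */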
| 1368 | static bool qcow2_check_image(int fd) |
| 1369 | { |
| 1370 | struct qcow2_header_disk f_header; |
| 1371 | |
| 1372 | if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0) |
| 1373 | return false; |
| 1374 | |
| 1375 | be32_to_cpus(&f_header.magic); |
| 1376 | be32_to_cpus(&f_header.version); |
| 1377 | |
| 1378 | if (f_header.magic != QCOW_MAGIC) |
| 1379 | return false; |
| 1380 | |
| 1381 | if (f_header.version != QCOW2_VERSION) |
| 1382 | return false; |
| 1383 | |
| 1384 | return true; |
| 1385 | } |
| 1386 | |
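/*
 * The QCOW1 header stores l2_bits explicitly but has no L1-size field,
 * so l1_size is derived from the image size: one L1 entry covers
 * (1 << l2_bits) clusters of (1 << cluster_bits) bytes each.
 */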
| 1387 | static void *qcow1_read_header(int fd) |
| 1388 | { |
| 1389 | struct qcow1_header_disk f_header; |
| 1390 | struct qcow_header *header; |
| 1391 | |
| 1392 | header = malloc(sizeof(struct qcow_header)); |
| 1393 | if (!header) |
| 1394 | return NULL; |
| 1395 | |
Sasha Levin | d39cefd | 2011-04-23 17:05:08 +0300 | [diff] [blame] | 1396 | if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0) { |
| 1397 | free(header); |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1398 | return NULL; |
Sasha Levin | d39cefd | 2011-04-23 17:05:08 +0300 | [diff] [blame] | 1399 | } |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1400 | |
| 1401 | be32_to_cpus(&f_header.magic); |
| 1402 | be32_to_cpus(&f_header.version); |
| 1403 | be64_to_cpus(&f_header.backing_file_offset); |
| 1404 | be32_to_cpus(&f_header.backing_file_size); |
| 1405 | be32_to_cpus(&f_header.mtime); |
| 1406 | be64_to_cpus(&f_header.size); |
| 1407 | be32_to_cpus(&f_header.crypt_method); |
| 1408 | be64_to_cpus(&f_header.l1_table_offset); |
| 1409 | |
| 1410 | *header = (struct qcow_header) { |
| 1411 | .size = f_header.size, |
| 1412 | .l1_table_offset = f_header.l1_table_offset, |
| 1413 | .l1_size = f_header.size / ((1 << f_header.l2_bits) * (1 << f_header.cluster_bits)), |
| 1414 | .cluster_bits = f_header.cluster_bits, |
| 1415 | .l2_bits = f_header.l2_bits, |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1416 | }; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1417 | |
| 1418 | return header; |
| 1419 | } |
| 1420 | |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1421 | static struct disk_image *qcow1_probe(int fd, bool readonly) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1422 | { |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1423 | struct disk_image *disk_image; |
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1424 | struct qcow_l1_table *l1t; |
| 1425 | struct qcow_header *h; |
| 1426 | struct qcow *q; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1427 | |
| 1428 | q = calloc(1, sizeof(struct qcow)); |
| 1429 | if (!q) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1430 | return NULL; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1431 | |
Pekka Enberg | c0799eb | 2011-07-09 14:04:12 +0300 | [diff] [blame] | 1432 | mutex_init(&q->mutex); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1433 | q->fd = fd; |
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1434 | |
| 1435 | l1t = &q->table; |
| 1436 | |
	l1t->root = (struct rb_root) RB_ROOT;
Pekka Enberg | 7b4eb53 | 2011-07-24 19:27:20 +0300 | [diff] [blame] | 1438 | INIT_LIST_HEAD(&l1t->lru_list); |
Jean-Philippe Brucker | ca14d9e | 2019-04-04 14:20:42 +0100 | [diff] [blame] | 1439 | INIT_LIST_HEAD(&q->refcount_table.lru_list); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1440 | |
| 1441 | h = q->header = qcow1_read_header(fd); |
| 1442 | if (!h) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1443 | goto free_qcow; |
| 1444 | |
| 1445 | q->version = QCOW1_VERSION; |
| 1446 | q->cluster_size = 1 << q->header->cluster_bits; |
| 1447 | q->cluster_offset_mask = (1LL << (63 - q->header->cluster_bits)) - 1; |
Lan Tianyu | e184700 | 2011-11-29 15:30:26 +0800 | [diff] [blame] | 1448 | q->free_clust_idx = 0; |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1449 | |
| 1450 | q->cluster_data = malloc(q->cluster_size); |
| 1451 | if (!q->cluster_data) { |
		pr_warning("failed to allocate cluster data buffer");
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1453 | goto free_header; |
| 1454 | } |
| 1455 | |
| 1456 | q->cluster_cache = malloc(q->cluster_size); |
| 1457 | if (!q->cluster_cache) { |
		pr_warning("failed to allocate cluster cache");
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1459 | goto free_cluster_data; |
| 1460 | } |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1461 | |
| 1462 | if (qcow_read_l1_table(q) < 0) |
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1463 | goto free_cluster_cache; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1464 | |
Asias He | 7d22135 | 2011-05-18 16:19:07 +0800 | [diff] [blame] | 1465 | /* |
	 * Do not use mmap; use read/write instead.
| 1467 | */ |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1468 | if (readonly) |
Sasha Levin | 38c396e | 2011-11-02 07:41:05 +0200 | [diff] [blame] | 1469 | disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_REGULAR); |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1470 | else |
Sasha Levin | 38c396e | 2011-11-02 07:41:05 +0200 | [diff] [blame] | 1471 | disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_REGULAR); |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1472 | |
	if (IS_ERR_OR_NULL(disk_image))
Lan Tianyu | af68c51 | 2011-10-12 21:00:03 +0800 | [diff] [blame] | 1474 | goto free_l1_table; |
Sasha Levin | f41a132 | 2011-11-02 07:41:14 +0200 | [diff] [blame] | 1475 | |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1476 | disk_image->priv = q; |
| 1477 | |
| 1478 | return disk_image; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1479 | |
free_l1_table:
	free(q->table.l1_table);
free_cluster_cache:
	free(q->cluster_cache);
free_cluster_data:
	free(q->cluster_data);
free_header:
	free(q->header);
| 1492 | free_qcow: |
Sasha Levin | 379d476 | 2012-12-20 14:11:10 -0500 | [diff] [blame] | 1493 | free(q); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1494 | |
| 1495 | return NULL; |
| 1496 | } |
| 1497 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1498 | static bool qcow1_check_image(int fd) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1499 | { |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1500 | struct qcow1_header_disk f_header; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1501 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1502 | if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0) |
| 1503 | return false; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1504 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1505 | be32_to_cpus(&f_header.magic); |
| 1506 | be32_to_cpus(&f_header.version); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1507 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1508 | if (f_header.magic != QCOW_MAGIC) |
| 1509 | return false; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1510 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1511 | if (f_header.version != QCOW1_VERSION) |
| 1512 | return false; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1513 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1514 | return true; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1515 | } |
| 1516 | |
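/*
 * Public probe entry point: try QCOW1 first, then QCOW2. A typical
 * caller looks roughly like this (sketch, error handling elided):
 *
 *	int fd = open(path, readonly ? O_RDONLY : O_RDWR);
 *	struct disk_image *disk = qcow_probe(fd, readonly);
 *	if (!disk)
 *		pr_warning("%s is not a qcow image", path);
 */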
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1517 | struct disk_image *qcow_probe(int fd, bool readonly) |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1518 | { |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1519 | if (qcow1_check_image(fd)) |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1520 | return qcow1_probe(fd, readonly); |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1521 | |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1522 | if (qcow2_check_image(fd)) |
Pekka Enberg | f10860c | 2011-05-11 21:23:03 +0300 | [diff] [blame] | 1523 | return qcow2_probe(fd, readonly); |
Pekka Enberg | ad627d6 | 2011-04-19 22:56:00 +0300 | [diff] [blame] | 1524 | |
| 1525 | return NULL; |
Prasad Joshi | 86835ce | 2011-04-13 20:26:02 +0100 | [diff] [blame] | 1526 | } |