Thomas Gleixner | 68252eb | 2019-05-20 19:08:00 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 2 | /* |
| 3 | * Squashfs - a compressed read only filesystem for Linux |
| 4 | * |
| 5 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
Phillip Lougher | d7f2ff6 | 2011-05-26 10:39:56 +0100 | [diff] [blame] | 6 | * Phillip Lougher <phillip@squashfs.org.uk> |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 7 | * |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 8 | * cache.c |
| 9 | */ |
| 10 | |
| 11 | /* |
| 12 | * Blocks in Squashfs are compressed. To avoid repeatedly decompressing |
| 13 | * recently accessed data Squashfs uses two small metadata and fragment caches. |
| 14 | * |
| 15 | * This file implements a generic cache implementation used for both caches, |
 * plus functions layered on top of the generic cache implementation to
| 17 | * access the metadata and fragment caches. |
| 18 | * |
Justin P. Mattock | 70f23fd | 2011-05-10 10:16:21 +0200 | [diff] [blame] | 19 | * To avoid out of memory and fragmentation issues with vmalloc the cache |
Kirill A. Shutemov | ea1754a | 2016-04-01 15:29:48 +0300 | [diff] [blame] | 20 | * uses sequences of kmalloced PAGE_SIZE buffers. |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 21 | * |
| 22 | * It should be noted that the cache is not used for file datablocks, these |
| 23 | * are decompressed and cached in the page-cache in the normal way. The |
| 24 | * cache is only used to temporarily cache fragment and metadata blocks |
 * which have been read as a result of a metadata (i.e. inode or
| 26 | * directory) or fragment access. Because metadata and fragments are packed |
| 27 | * together into blocks (to gain greater compression) the read of a particular |
| 28 | * piece of metadata or fragment will retrieve other metadata/fragments which |
| 29 | * have been packed with it, these because of locality-of-reference may be read |
| 30 | * in the near future. Temporarily caching them ensures they are available for |
| 31 | * near future access without requiring an additional read and decompress. |
| 32 | */ |
| 33 | |
| 34 | #include <linux/fs.h> |
| 35 | #include <linux/vfs.h> |
| 36 | #include <linux/slab.h> |
| 37 | #include <linux/vmalloc.h> |
| 38 | #include <linux/sched.h> |
| 39 | #include <linux/spinlock.h> |
| 40 | #include <linux/wait.h> |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 41 | #include <linux/pagemap.h> |
| 42 | |
| 43 | #include "squashfs_fs.h" |
| 44 | #include "squashfs_fs_sb.h" |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 45 | #include "squashfs.h" |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 46 | #include "page_actor.h" |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 47 | |
| 48 | /* |
| 49 | * Look-up block in cache, and increment usage count. If not in cache, read |
| 50 | * and decompress it from disk. |
| 51 | */ |
| 52 | struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb, |
| 53 | struct squashfs_cache *cache, u64 block, int length) |
| 54 | { |
| 55 | int i, n; |
| 56 | struct squashfs_cache_entry *entry; |
| 57 | |
| 58 | spin_lock(&cache->lock); |
| 59 | |
| 60 | while (1) { |
Ajeet Yadav | d7fbd89 | 2011-12-27 15:10:04 +0530 | [diff] [blame] | 61 | for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { |
| 62 | if (cache->entry[i].block == block) { |
| 63 | cache->curr_blk = i; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 64 | break; |
Ajeet Yadav | d7fbd89 | 2011-12-27 15:10:04 +0530 | [diff] [blame] | 65 | } |
| 66 | i = (i + 1) % cache->entries; |
| 67 | } |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 68 | |
Ajeet Yadav | d7fbd89 | 2011-12-27 15:10:04 +0530 | [diff] [blame] | 69 | if (n == cache->entries) { |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 70 | /* |
| 71 | * Block not in cache, if all cache entries are used |
| 72 | * go to sleep waiting for one to become available. |
| 73 | */ |
| 74 | if (cache->unused == 0) { |
| 75 | cache->num_waiters++; |
| 76 | spin_unlock(&cache->lock); |
| 77 | wait_event(cache->wait_queue, cache->unused); |
| 78 | spin_lock(&cache->lock); |
| 79 | cache->num_waiters--; |
| 80 | continue; |
| 81 | } |
| 82 | |
| 83 | /* |
| 84 | * At least one unused cache entry. A simple |
| 85 | * round-robin strategy is used to choose the entry to |
| 86 | * be evicted from the cache. |
| 87 | */ |
| 88 | i = cache->next_blk; |
| 89 | for (n = 0; n < cache->entries; n++) { |
| 90 | if (cache->entry[i].refcount == 0) |
| 91 | break; |
| 92 | i = (i + 1) % cache->entries; |
| 93 | } |
| 94 | |
| 95 | cache->next_blk = (i + 1) % cache->entries; |
| 96 | entry = &cache->entry[i]; |
| 97 | |
| 98 | /* |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 99 | * Initialise chosen cache entry, and fill it in from |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 100 | * disk. |
| 101 | */ |
| 102 | cache->unused--; |
| 103 | entry->block = block; |
| 104 | entry->refcount = 1; |
| 105 | entry->pending = 1; |
| 106 | entry->num_waiters = 0; |
| 107 | entry->error = 0; |
| 108 | spin_unlock(&cache->lock); |
| 109 | |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 110 | entry->length = squashfs_read_data(sb, block, length, |
| 111 | &entry->next_index, entry->actor); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 112 | |
| 113 | spin_lock(&cache->lock); |
| 114 | |
| 115 | if (entry->length < 0) |
| 116 | entry->error = entry->length; |
| 117 | |
| 118 | entry->pending = 0; |
| 119 | |
| 120 | /* |
| 121 | * While filling this entry one or more other processes |
| 122 | * have looked it up in the cache, and have slept |
| 123 | * waiting for it to become available. |
| 124 | */ |
| 125 | if (entry->num_waiters) { |
| 126 | spin_unlock(&cache->lock); |
| 127 | wake_up_all(&entry->wait_queue); |
| 128 | } else |
| 129 | spin_unlock(&cache->lock); |
| 130 | |
| 131 | goto out; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | * Block already in cache. Increment refcount so it doesn't |
| 136 | * get reused until we're finished with it, if it was |
| 137 | * previously unused there's one less cache entry available |
| 138 | * for reuse. |
| 139 | */ |
| 140 | entry = &cache->entry[i]; |
| 141 | if (entry->refcount == 0) |
| 142 | cache->unused--; |
| 143 | entry->refcount++; |
| 144 | |
| 145 | /* |
| 146 | * If the entry is currently being filled in by another process |
| 147 | * go to sleep waiting for it to become available. |
| 148 | */ |
| 149 | if (entry->pending) { |
| 150 | entry->num_waiters++; |
| 151 | spin_unlock(&cache->lock); |
| 152 | wait_event(entry->wait_queue, !entry->pending); |
| 153 | } else |
| 154 | spin_unlock(&cache->lock); |
| 155 | |
| 156 | goto out; |
| 157 | } |
| 158 | |
| 159 | out: |
| 160 | TRACE("Got %s %d, start block %lld, refcount %d, error %d\n", |
| 161 | cache->name, i, entry->block, entry->refcount, entry->error); |
| 162 | |
| 163 | if (entry->error) |
| 164 | ERROR("Unable to read %s cache entry [%llx]\n", cache->name, |
| 165 | block); |
| 166 | return entry; |
| 167 | } |
| 168 | |
| 169 | |
| 170 | /* |
| 171 | * Release cache entry, once usage count is zero it can be reused. |
| 172 | */ |
| 173 | void squashfs_cache_put(struct squashfs_cache_entry *entry) |
| 174 | { |
| 175 | struct squashfs_cache *cache = entry->cache; |
| 176 | |
| 177 | spin_lock(&cache->lock); |
| 178 | entry->refcount--; |
| 179 | if (entry->refcount == 0) { |
| 180 | cache->unused++; |
| 181 | /* |
| 182 | * If there's any processes waiting for a block to become |
| 183 | * available, wake one up. |
| 184 | */ |
| 185 | if (cache->num_waiters) { |
| 186 | spin_unlock(&cache->lock); |
| 187 | wake_up(&cache->wait_queue); |
| 188 | return; |
| 189 | } |
| 190 | } |
| 191 | spin_unlock(&cache->lock); |
| 192 | } |
| 193 | |
| 194 | /* |
| 195 | * Delete cache reclaiming all kmalloced buffers. |
| 196 | */ |
| 197 | void squashfs_cache_delete(struct squashfs_cache *cache) |
| 198 | { |
| 199 | int i, j; |
| 200 | |
| 201 | if (cache == NULL) |
| 202 | return; |
| 203 | |
| 204 | for (i = 0; i < cache->entries; i++) { |
| 205 | if (cache->entry[i].data) { |
| 206 | for (j = 0; j < cache->pages; j++) |
| 207 | kfree(cache->entry[i].data[j]); |
| 208 | kfree(cache->entry[i].data); |
| 209 | } |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 210 | kfree(cache->entry[i].actor); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 211 | } |
| 212 | |
| 213 | kfree(cache->entry); |
| 214 | kfree(cache); |
| 215 | } |
| 216 | |
| 217 | |
| 218 | /* |
| 219 | * Initialise cache allocating the specified number of entries, each of |
| 220 | * size block_size. To avoid vmalloc fragmentation issues each entry |
Kirill A. Shutemov | ea1754a | 2016-04-01 15:29:48 +0300 | [diff] [blame] | 221 | * is allocated as a sequence of kmalloced PAGE_SIZE buffers. |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 222 | */ |
| 223 | struct squashfs_cache *squashfs_cache_init(char *name, int entries, |
| 224 | int block_size) |
| 225 | { |
| 226 | int i, j; |
| 227 | struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); |
| 228 | |
| 229 | if (cache == NULL) { |
| 230 | ERROR("Failed to allocate %s cache\n", name); |
| 231 | return NULL; |
| 232 | } |
| 233 | |
| 234 | cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); |
| 235 | if (cache->entry == NULL) { |
| 236 | ERROR("Failed to allocate %s cache\n", name); |
| 237 | goto cleanup; |
| 238 | } |
| 239 | |
Ajeet Yadav | d7fbd89 | 2011-12-27 15:10:04 +0530 | [diff] [blame] | 240 | cache->curr_blk = 0; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 241 | cache->next_blk = 0; |
| 242 | cache->unused = entries; |
| 243 | cache->entries = entries; |
| 244 | cache->block_size = block_size; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 245 | cache->pages = block_size >> PAGE_SHIFT; |
Doug Chapman | a37b06d | 2009-05-13 02:56:39 +0100 | [diff] [blame] | 246 | cache->pages = cache->pages ? cache->pages : 1; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 247 | cache->name = name; |
| 248 | cache->num_waiters = 0; |
| 249 | spin_lock_init(&cache->lock); |
| 250 | init_waitqueue_head(&cache->wait_queue); |
| 251 | |
| 252 | for (i = 0; i < entries; i++) { |
| 253 | struct squashfs_cache_entry *entry = &cache->entry[i]; |
| 254 | |
| 255 | init_waitqueue_head(&cache->entry[i].wait_queue); |
| 256 | entry->cache = cache; |
| 257 | entry->block = SQUASHFS_INVALID_BLK; |
| 258 | entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); |
| 259 | if (entry->data == NULL) { |
| 260 | ERROR("Failed to allocate %s cache entry\n", name); |
| 261 | goto cleanup; |
| 262 | } |
| 263 | |
| 264 | for (j = 0; j < cache->pages; j++) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 265 | entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 266 | if (entry->data[j] == NULL) { |
| 267 | ERROR("Failed to allocate %s buffer\n", name); |
| 268 | goto cleanup; |
| 269 | } |
| 270 | } |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 271 | |
| 272 | entry->actor = squashfs_page_actor_init(entry->data, |
| 273 | cache->pages, 0); |
| 274 | if (entry->actor == NULL) { |
| 275 | ERROR("Failed to allocate %s cache entry\n", name); |
| 276 | goto cleanup; |
| 277 | } |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 278 | } |
| 279 | |
| 280 | return cache; |
| 281 | |
| 282 | cleanup: |
| 283 | squashfs_cache_delete(cache); |
| 284 | return NULL; |
| 285 | } |
| 286 | |
| 287 | |
| 288 | /* |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 289 | * Copy up to length bytes from cache entry to buffer starting at offset bytes |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 290 | * into the cache entry. If there's not length bytes then copy the number of |
| 291 | * bytes available. In all cases return the number of bytes copied. |
| 292 | */ |
| 293 | int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, |
| 294 | int offset, int length) |
| 295 | { |
| 296 | int remaining = length; |
| 297 | |
| 298 | if (length == 0) |
| 299 | return 0; |
| 300 | else if (buffer == NULL) |
| 301 | return min(length, entry->length - offset); |
| 302 | |
| 303 | while (offset < entry->length) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 304 | void *buff = entry->data[offset / PAGE_SIZE] |
| 305 | + (offset % PAGE_SIZE); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 306 | int bytes = min_t(int, entry->length - offset, |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 307 | PAGE_SIZE - (offset % PAGE_SIZE)); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 308 | |
| 309 | if (bytes >= remaining) { |
| 310 | memcpy(buffer, buff, remaining); |
| 311 | remaining = 0; |
| 312 | break; |
| 313 | } |
| 314 | |
| 315 | memcpy(buffer, buff, bytes); |
| 316 | buffer += bytes; |
| 317 | remaining -= bytes; |
| 318 | offset += bytes; |
| 319 | } |
| 320 | |
| 321 | return length - remaining; |
| 322 | } |
| 323 | |
| 324 | |
| 325 | /* |
| 326 | * Read length bytes from metadata position <block, offset> (block is the |
| 327 | * start of the compressed block on disk, and offset is the offset into |
| 328 | * the block once decompressed). Data is packed into consecutive blocks, |
| 329 | * and length bytes may require reading more than one block. |
| 330 | */ |
| 331 | int squashfs_read_metadata(struct super_block *sb, void *buffer, |
| 332 | u64 *block, int *offset, int length) |
| 333 | { |
| 334 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
Phillip Lougher | e552a59 | 2011-12-29 03:50:20 +0000 | [diff] [blame] | 335 | int bytes, res = length; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 336 | struct squashfs_cache_entry *entry; |
| 337 | |
| 338 | TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); |
| 339 | |
Linus Torvalds | 01cfb79 | 2018-07-29 12:44:46 -0700 | [diff] [blame] | 340 | if (unlikely(length < 0)) |
| 341 | return -EIO; |
| 342 | |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 343 | while (length) { |
| 344 | entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); |
Phillip Lougher | e552a59 | 2011-12-29 03:50:20 +0000 | [diff] [blame] | 345 | if (entry->error) { |
| 346 | res = entry->error; |
| 347 | goto error; |
| 348 | } else if (*offset >= entry->length) { |
| 349 | res = -EIO; |
| 350 | goto error; |
| 351 | } |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 352 | |
| 353 | bytes = squashfs_copy_data(buffer, entry, *offset, length); |
| 354 | if (buffer) |
| 355 | buffer += bytes; |
| 356 | length -= bytes; |
| 357 | *offset += bytes; |
| 358 | |
| 359 | if (*offset == entry->length) { |
| 360 | *block = entry->next_index; |
| 361 | *offset = 0; |
| 362 | } |
| 363 | |
| 364 | squashfs_cache_put(entry); |
| 365 | } |
| 366 | |
Phillip Lougher | e552a59 | 2011-12-29 03:50:20 +0000 | [diff] [blame] | 367 | return res; |
| 368 | |
| 369 | error: |
| 370 | squashfs_cache_put(entry); |
| 371 | return res; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 372 | } |
| 373 | |
| 374 | |
| 375 | /* |
| 376 | * Look-up in the fragmment cache the fragment located at <start_block> in the |
| 377 | * filesystem. If necessary read and decompress it from disk. |
| 378 | */ |
| 379 | struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb, |
| 380 | u64 start_block, int length) |
| 381 | { |
| 382 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
| 383 | |
| 384 | return squashfs_cache_get(sb, msblk->fragment_cache, start_block, |
| 385 | length); |
| 386 | } |
| 387 | |
| 388 | |
| 389 | /* |
| 390 | * Read and decompress the datablock located at <start_block> in the |
| 391 | * filesystem. The cache is used here to avoid duplicating locking and |
| 392 | * read/decompress code. |
| 393 | */ |
| 394 | struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, |
| 395 | u64 start_block, int length) |
| 396 | { |
| 397 | struct squashfs_sb_info *msblk = sb->s_fs_info; |
| 398 | |
| 399 | return squashfs_cache_get(sb, msblk->read_page, start_block, length); |
| 400 | } |
| 401 | |
| 402 | |
| 403 | /* |
| 404 | * Read a filesystem table (uncompressed sequence of bytes) from disk |
| 405 | */ |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 406 | void *squashfs_read_table(struct super_block *sb, u64 block, int length) |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 407 | { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 408 | int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 409 | int i, res; |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 410 | void *table, *buffer, **data; |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 411 | struct squashfs_page_actor *actor; |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 412 | |
| 413 | table = buffer = kmalloc(length, GFP_KERNEL); |
| 414 | if (table == NULL) |
| 415 | return ERR_PTR(-ENOMEM); |
| 416 | |
| 417 | data = kcalloc(pages, sizeof(void *), GFP_KERNEL); |
| 418 | if (data == NULL) { |
| 419 | res = -ENOMEM; |
| 420 | goto failed; |
| 421 | } |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 422 | |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 423 | actor = squashfs_page_actor_init(data, pages, length); |
| 424 | if (actor == NULL) { |
| 425 | res = -ENOMEM; |
| 426 | goto failed2; |
| 427 | } |
| 428 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 429 | for (i = 0; i < pages; i++, buffer += PAGE_SIZE) |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 430 | data[i] = buffer; |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 431 | |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 432 | res = squashfs_read_data(sb, block, length | |
| 433 | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor); |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 434 | |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 435 | kfree(data); |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 436 | kfree(actor); |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 437 | |
| 438 | if (res < 0) |
| 439 | goto failed; |
| 440 | |
| 441 | return table; |
| 442 | |
Phillip Lougher | 846b730 | 2013-11-18 02:59:12 +0000 | [diff] [blame] | 443 | failed2: |
| 444 | kfree(data); |
Phillip Lougher | 82de647 | 2011-05-20 02:26:43 +0100 | [diff] [blame] | 445 | failed: |
| 446 | kfree(table); |
| 447 | return ERR_PTR(res); |
Phillip Lougher | f400e12 | 2009-01-05 08:46:26 +0000 | [diff] [blame] | 448 | } |