// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013
 * Phillip Lougher <phillip@squashfs.org.uk>
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	int expected)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

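	/*
	 * Compute the range of page cache indexes covered by the
	 * Squashfs block containing target_page (a block is typically
	 * larger than PAGE_SIZE), clamped to the last page of the file.
	 */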
	loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	loff_t start_index = target_page->index & ~mask;
	loff_t end_index = start_index | mask;
	int i, n, pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

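	/* Array to hold a page pointer for each page in the block */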
	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/* Try to grab all the pages covered by the Squashfs block */
	for (i = 0, n = start_index; n <= end_index; n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL)
			continue;

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			continue;
		}

		i++;
	}

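	/*
	 * Shrink pages to the number actually grabbed: slots for pages
	 * that were missing or already uptodate were never filled.
	 */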
	pages = i;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
	if (actor == NULL)
		goto out;

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

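	/* The page actor is no longer needed once decompression returns */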
	squashfs_page_actor_free(actor);

	if (res < 0)
		goto mark_errored;

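	/*
	 * A size mismatch between the decompressed data and the
	 * expected length indicates a corrupted filesystem.
	 */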
	if (res != expected) {
		res = -EIO;
		goto mark_errored;
	}

	/* Last page (if present) may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (page[pages - 1]->index == end_index && bytes) {
		pageaddr = kmap_local_page(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(page);

	return 0;

108mark_errored:
Matthew Wilcox (Oracle)bbf45b72024-04-20 03:50:18 +0100109 /* Decompression failed. Target_page is
Phillip Lougher0d455c12013-11-13 02:04:19 +0000110 * dealt with by the caller
111 */
112 for (i = 0; i < pages; i++) {
Phillip Lougher6d565402013-11-24 00:40:49 +0000113 if (page[i] == NULL || page[i] == target_page)
Phillip Lougher0d455c12013-11-13 02:04:19 +0000114 continue;
115 flush_dcache_page(page[i]);
Phillip Lougher0d455c12013-11-13 02:04:19 +0000116 unlock_page(page[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300117 put_page(page[i]);
Phillip Lougher0d455c12013-11-13 02:04:19 +0000118 }
119
120out:
Phillip Lougher0d455c12013-11-13 02:04:19 +0000121 kfree(page);
122 return res;
123}