// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

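/*
 * Undo the kmap of a metadata buffer (if one was taken) and mark it
 * unmapped again; the page reference itself is kept.
 */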
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

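/* fully release a metadata buffer: unmap it and drop its page reference */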
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

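/*
 * erofs_bread() reads the metadata page covering @blkaddr through the
 * page cache of buf->inode and returns a pointer to the block's data,
 * kmapping the page first when @type requests it.
 */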
/*
 * Derive the block size from inode->i_blkbits to make it compatible with
 * the anonymous inode in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

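/*
 * Bind the buffer to the metadata source: the anonymous inode in fscache
 * mode, or the inode of the backing block device otherwise.
 */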
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

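/* shortcut: bind the buffer to @sb's metadata inode and read @blkaddr */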
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

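/*
 * Flat-layout inodes store all blocks contiguously starting at
 * vi->raw_blkaddr; EROFS_INODE_FLAT_INLINE additionally keeps the tail
 * block inline, right after the on-disk inode and xattrs.
 */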
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

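/*
 * Translate the logical extent starting at map->m_la into a physical one.
 * Chunk-based inodes are resolved through their on-disk block map or
 * chunk indexes; all other layouts go through erofs_map_blocks_flatmode().
 */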
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

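/*
 * Resolve a physical address to the device that backs it: either the
 * primary device, or one of the extra devices of a multi-device image,
 * looked up by device id or by scanning the mapped block ranges.
 */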
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

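/*
 * iomap_begin handler: tail-packed inline extents are returned as
 * IOMAP_INLINE, with the metadata buffer kept mapped via iomap->private
 * until erofs_iomap_end() releases it.
 */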
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

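/* release the metadata buffer pinned by erofs_iomap_begin() for inline extents */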
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

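/*
 * fiemap is served through iomap; compressed inodes report their extents
 * via z_erofs_iomap_report_ops when CONFIG_EROFS_FS_ZIP is enabled.
 */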
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs
 * to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

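/*
 * Dispatch reads by type: DAX reads go through dax_iomap_rw(), O_DIRECT
 * reads (which must be logical-block aligned) through iomap_dio_rw(),
 * and everything else through the page cache.
 */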
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a read-only filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

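/*
 * DAX mmap support: page faults are handled through dax_iomap_fault();
 * shared writable mappings are rejected since the filesystem is read-only.
 */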
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= filemap_splice_read,
};