// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"

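/*
 * jfs_iget: look up the in-core inode for @ino, reading it from disk
 * with diRead() on a cache miss and wiring up the inode, file, and
 * address-space operations appropriate to the inode type.
 */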
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
	unlock_new_inode(inode);
	return inode;
}

/*
 * Workhorse of both fsync & write_inode
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}

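/*
 * jfs_write_inode: ->write_inode for the VFS. Flushes the journal when
 * the inode is clean but still on the dirty list; otherwise commits the
 * inode, synchronously if the writeback is WB_SYNC_ALL.
 */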
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_DIRTY is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	} else
		return 0;
}

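/*
 * jfs_evict_inode: final disposal of an inode. When the last link is
 * gone, frees the on-disk inode and its quota allocation; in all cases
 * truncates the page cache and drops the active allocation group
 * reference.
 */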
void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
			truncate_inode_pages_final(&inode->i_data);

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			if (ipimap && JFS_IP(ipimap)->i_imap)
				diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}

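/*
 * jfs_dirty_inode: mark the inode as needing a commit, warning (a
 * limited number of times) if it is dirtied on a read-only volume.
 */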
void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}

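/*
 * jfs_get_block: map a file block to a disk block for the generic
 * buffer/mpage helpers. Looks up an existing extent with xtLookup();
 * on a write, records not-yet-recorded extents and allocates new ones
 * via extHint()/extAlloc().
 */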
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}

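/*
 * Thin wrappers that feed jfs_get_block to the generic mpage helpers
 * for writeback, reads, and readahead.
 */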
static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, jfs_get_block);
}

static void jfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, jfs_get_block);
}

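/*
 * jfs_write_failed: undo a failed or short write that may have
 * instantiated blocks beyond i_size, trimming both the page cache and
 * the on-disk allocation back to the current size.
 */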
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

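/*
 * ->write_begin and ->write_end for buffered writes, built on the
 * generic block helpers; both clean up with jfs_write_failed() when a
 * write cannot complete.
 */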
static int jfs_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len,
				struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static int jfs_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		jfs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

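/*
 * jfs_direct_IO: O_DIRECT support via blockdev_direct_IO(). A failed
 * extending write may leave blocks allocated past i_size, so those are
 * trimmed off with jfs_write_failed().
 */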
static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

const struct address_space_operations jfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= jfs_read_folio,
	.readahead	= jfs_readahead,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= jfs_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
	.migrate_folio	= buffer_migrate_folio,
};

/*
 * Guts of jfs_truncate. Called with locks already held. Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = ip->i_ctime = current_time(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

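/*
 * jfs_truncate: zero the partial tail block, then truncate to i_size
 * under the inode write lock via jfs_truncate_nolock().
 */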
void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}