// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
#include "jfs_dmap.h"

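/*
 * jfs_iget - look up or read in an on-disk JFS inode
 *
 * Returns the in-core inode for @ino, reading it from disk via diRead()
 * if it is not already cached, and wires up the inode, file and
 * address_space operations according to the inode type.
 */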
struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ret = diRead(inode);
	if (ret < 0) {
		iget_failed(inode);
		return ERR_PTR(ret);
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &jfs_aops;
		} else {
			inode->i_op = &jfs_fast_symlink_inode_operations;
			inode->i_link = JFS_IP(inode)->i_inline;
			/*
			 * The inline data should be null-terminated, but
			 * don't let on-disk corruption crash the kernel
			 */
			inode->i_link[inode->i_size] = '\0';
		}
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
	unlock_new_inode(inode);
	return inode;
}

/*
 * Workhorse of both fsync & write_inode
 */
int jfs_commit_inode(struct inode *inode, int wait)
{
	int rc = 0;
	tid_t tid;
	static int noisy = 5;

	jfs_info("In jfs_commit_inode, inode = 0x%p", inode);

	/*
	 * Don't commit if inode has been committed since last being
	 * marked dirty, or if it has been deleted.
	 */
	if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode))
		return 0;

	if (isReadOnly(inode)) {
		/* kernel allows writes to devices on read-only
		 * partitions and may think inode is dirty
		 */
		if (!special_file(inode->i_mode) && noisy) {
			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
				inode);
			jfs_err("Is remount racy?");
			noisy--;
		}
		return 0;
	}

	tid = txBegin(inode->i_sb, COMMIT_INODE);
	mutex_lock(&JFS_IP(inode)->commit_mutex);

	/*
	 * Retest inode state after taking commit_mutex
	 */
	if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode))
		rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0);

	txEnd(tid);
	mutex_unlock(&JFS_IP(inode)->commit_mutex);
	return rc;
}

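/*
 * jfs_write_inode - ->write_inode handler
 *
 * Commits a dirty inode to the journal.  If the inode has already been
 * committed since it was last marked dirty, only flush the journal so
 * that the committed changes reach the disk.
 */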
int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	if (inode->i_nlink == 0)
		return 0;
	/*
	 * If COMMIT_Dirty is not set, the inode isn't really dirty.
	 * It has been committed since the last change, but was still
	 * on the dirty inode list.
	 */
	if (!test_cflag(COMMIT_Dirty, inode)) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
		return 0;
	}

	if (jfs_commit_inode(inode, wait)) {
		jfs_err("jfs_write_inode: jfs_commit_inode failed!");
		return -EIO;
	} else
		return 0;
}

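/*
 * jfs_evict_inode - ->evict_inode handler
 *
 * Final teardown of an in-core inode.  For unlinked inodes the on-disk
 * inode and its quota allocation are freed; in all cases the page cache
 * is truncated and any active allocation group reference is dropped.
 */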
void jfs_evict_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	jfs_info("In jfs_evict_inode, inode = 0x%p", inode);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

		if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
			struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap;
			truncate_inode_pages_final(&inode->i_data);

			if (test_cflag(COMMIT_Freewmap, inode))
				jfs_free_zero_link(inode);

			if (ipimap && JFS_IP(ipimap)->i_imap)
				diFree(inode);

			/*
			 * Free the inode from the quota allocation.
			 */
			dquot_free_inode(inode);
		}
	} else {
		truncate_inode_pages_final(&inode->i_data);
	}
	clear_inode(inode);
	dquot_drop(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
}

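/*
 * jfs_dirty_inode - ->dirty_inode handler: flag the inode as needing a
 * commit, unless the volume is mounted read-only.
 */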
void jfs_dirty_inode(struct inode *inode, int flags)
{
	static int noisy = 5;

	if (isReadOnly(inode)) {
		if (!special_file(inode->i_mode) && noisy) {
			/* kernel allows writes to devices on read-only
			 * partitions and may try to mark inode dirty
			 */
			jfs_err("jfs_dirty_inode called on read-only volume");
			jfs_err("Is remount racy?");
			noisy--;
		}
		return;
	}

	set_cflag(COMMIT_Dirty, inode);
}

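/*
 * jfs_get_block - map a file block to a disk block
 *
 * Looks up the extent containing @lblock and maps it into @bh_result.
 * With @create set, unrecorded or missing extents are allocated and
 * recorded; without it, such blocks are treated as holes.
 */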
int jfs_get_block(struct inode *ip, sector_t lblock,
		  struct buffer_head *bh_result, int create)
{
	s64 lblock64 = lblock;
	int rc = 0;
	xad_t xad;
	s64 xaddr;
	int xflag;
	s32 xlen = bh_result->b_size >> ip->i_blkbits;

	/*
	 * Take appropriate lock on inode
	 */
	if (create)
		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	else
		IREAD_LOCK(ip, RDWRLOCK_NORMAL);

	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
	    xaddr) {
		if (xflag & XAD_NOTRECORDED) {
			if (!create)
				/*
				 * Allocated but not recorded, read treats
				 * this as a hole
				 */
				goto unlock;
			XADoffset(&xad, lblock64);
			XADlength(&xad, xlen);
			XADaddress(&xad, xaddr);
			rc = extRecord(ip, &xad);
			if (rc)
				goto unlock;
			set_buffer_new(bh_result);
		}

		map_bh(bh_result, ip->i_sb, xaddr);
		bh_result->b_size = xlen << ip->i_blkbits;
		goto unlock;
	}
	if (!create)
		goto unlock;

	/*
	 * Allocate a new block
	 */
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
	if (rc)
		goto unlock;

	set_buffer_new(bh_result);
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

      unlock:
	/*
	 * Release lock on inode
	 */
	if (create)
		IWRITE_UNLOCK(ip);
	else
		IREAD_UNLOCK(ip);
	return rc;
}

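/*
 * The address_space operations below are thin wrappers around the
 * generic mpage and buffer_head helpers, all keyed off jfs_get_block.
 */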
static int jfs_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, jfs_get_block);
}

static int jfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, jfs_get_block);
}

static void jfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, jfs_get_block);
}

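/*
 * jfs_write_failed - clean up after a short or failed write
 *
 * Drops page cache and on-disk blocks instantiated beyond i_size.
 */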
static void jfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		jfs_truncate(inode);
	}
}

static int jfs_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len,
				struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
	if (unlikely(ret))
		jfs_write_failed(mapping, pos + len);

	return ret;
}

static int jfs_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		jfs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, jfs_get_block);
}

static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			jfs_write_failed(mapping, end);
	}

	return ret;
}

const struct address_space_operations jfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= jfs_read_folio,
	.readahead	= jfs_readahead,
	.writepages	= jfs_writepages,
	.write_begin	= jfs_write_begin,
	.write_end	= jfs_write_end,
	.bmap		= jfs_bmap,
	.direct_IO	= jfs_direct_IO,
	.migrate_folio	= buffer_migrate_folio,
};

/*
 * Guts of jfs_truncate. Called with locks already held. Can be called
 * with directory for truncating directory index table.
 */
void jfs_truncate_nolock(struct inode *ip, loff_t length)
{
	loff_t newsize;
	tid_t tid;

	ASSERT(length >= 0);

	if (test_cflag(COMMIT_Nolink, ip)) {
		xtTruncate(0, ip, length, COMMIT_WMAP);
		return;
	}

	do {
		tid = txBegin(ip->i_sb, 0);

		/*
		 * The commit_mutex cannot be taken before txBegin.
		 * txBegin may block and there is a chance the inode
		 * could be marked dirty and need to be committed
		 * before txBegin unblocks
		 */
		mutex_lock(&JFS_IP(ip)->commit_mutex);

		newsize = xtTruncate(tid, ip, length,
				     COMMIT_TRUNCATE | COMMIT_PWMAP);
		if (newsize < 0) {
			txEnd(tid);
			mutex_unlock(&JFS_IP(ip)->commit_mutex);
			break;
		}

		ip->i_mtime = ip->i_ctime = current_time(ip);
		mark_inode_dirty(ip);

		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		mutex_unlock(&JFS_IP(ip)->commit_mutex);
	} while (newsize > length);	/* Truncate isn't always atomic */
}

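/*
 * jfs_truncate - truncate a file to its current i_size
 *
 * Zeroes the partial tail block, then shrinks the extent tree under the
 * inode's write lock via jfs_truncate_nolock().
 */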
void jfs_truncate(struct inode *ip)
{
	jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);

	block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);

	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
	jfs_truncate_nolock(ip, ip->i_size);
	IWRITE_UNLOCK(ip);
}