mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
Mostly direct substitution, with occasional adjustment or removal of
outdated comments.
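For reference, each removed PAGE_CACHE_* macro was a 1:1 alias of the
corresponding PAGE_* macro, and page_cache_get()/page_cache_release() were
aliases of get_page()/put_page(), so the conversion is mechanical. A minimal
illustrative sketch of the before/after shape (not taken from this patch;
pos, index, offset and page are hypothetical locals):

	/* before: page-cache units spelled via the PAGE_CACHE_* aliases */
	pgoff_t index   = pos >> PAGE_CACHE_SHIFT;  /* byte offset -> page index */
	unsigned offset = pos & ~PAGE_CACHE_MASK;   /* offset within the page */
	page_cache_release(page);                   /* drop the page reference */

	/* after: the plain PAGE_* macros and put_page() */
	pgoff_t index   = pos >> PAGE_SHIFT;
	unsigned offset = pos & ~PAGE_MASK;
	put_page(page);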
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0..4006298 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@
which the timestamp reverts to 1970, i.e. moves backwards in time.
Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
== 4096. At least the latter of these is a bug, but it hasn't been
decided what the best fix is. For the moment if you have larger pages
you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e15..d9c11d2 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@
default is half of your physical RAM without swap. If you
oversize your tmpfs instances the machine will deadlock
since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
nr_inodes: The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d5..4164bd6 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -708,9 +708,9 @@
from the address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
- will be PAGE_CACHE_SIZE). Any private data associated with the page
+ will be PAGE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
- length is PAGE_CACHE_SIZE, then the private data should be released,
+ length is PAGE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b..6b3e7c6 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
-#include <linux/pagemap.h> /* for release_pages and page_cache_release */
+#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>
diff --git a/block/bio.c b/block/bio.c
index 1685315..807d25e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1615,8 +1615,8 @@
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on. It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
*/
static void bio_dirty_fn(struct work_struct *work);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4..7a1cf7e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c..1c679cb 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ * kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 5321dde..e689ca1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,8 +390,8 @@
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b1..242bb1e 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@
{ \
type *value; \
\
- CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof (*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 1e2ebe5..5aae1d0 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,10 +1022,10 @@
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to find where the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index a5e9095..69586a5 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -112,8 +112,8 @@
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f4167db..4264d97 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+ * the extent size. A chunk is max(PAGE_SIZE, OST block size)
*/
int cl_chunkbits;
int cl_chunk;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index a7c02e0..e4c8288 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 4c7250a..edab6c5 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -521,7 +521,7 @@
* striped over, rather than having a constant value for all files here.
*/
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 75d4df7..85a8359 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,7 +512,7 @@
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
/*
- * XXX: explicit PAGE_CACHE_SIZE
+ * XXX: explicit PAGE_SIZE
*/
bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ra_read_in(file, bead);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index ce7e8b7..9abb7c2 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
@@ -2101,7 +2101,7 @@
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
- int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+ int ncfspgs; /* pages read in PAGE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 4a2baaf..b41b65e2 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
#include "../../include/lustre/lustre_idl.h"
#include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
{
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4e0a357..5f25bf8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1456,7 +1456,7 @@
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case OST may use less chunks to serve this partial
* write. OSTs don't actually know the page size on the client side. so
* clients have to calculate lost grant by the blocksize on the OST.
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 866789e..516e19d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -3039,13 +3039,13 @@
if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
- "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+ "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
root->nodesize, PAGE_SIZE);
return -1;
}
if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
printk(KERN_INFO
- "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+ "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
root->sectorsize, PAGE_SIZE);
return -1;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 93d696d..d247fc0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3264,13 +3264,11 @@
goto done;
}
/*
- * delalloc_end is already one less than the total
- * length, so we don't subtract one from
- * PAGE_CACHE_SIZE
+ * delalloc_end is already one less than the total length, so
+ * we don't subtract one from PAGE_SIZE
*/
delalloc_to_write += (delalloc_end - delalloc_start +
- PAGE_SIZE) >>
- PAGE_SHIFT;
+ PAGE_SIZE) >> PAGE_SHIFT;
delalloc_start = delalloc_end + 1;
}
if (wbc->nr_to_write < delalloc_to_write) {
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597..e05619f 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@
\
if (token && token->kaddr && token->offset <= offset && \
token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ (token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
res = get_unaligned_le##bits(p + off); \
@@ -104,7 +104,7 @@
\
if (token && token->kaddr && token->offset <= offset && \
token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ (token->offset + PAGE_SIZE >= offset + size)) { \
kaddr = token->kaddr; \
p = kaddr + part_offset - token->offset; \
put_unaligned_le##bits(val, p + off); \
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index ac3a06d..70948b1 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -239,7 +239,7 @@
end = 0;
/*
* Currently if we fail to find dirty pages in the delalloc range we
- * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
+ * will adjust max_bytes down to PAGE_SIZE and then re-search. If
* this changes at any point in the future we will need to fix this
test's expected behavior.
*/
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f..f2cc0b3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@
*
* Note that this might make for "interesting" allocation problems during
* writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
*
* For reads, there is a similar problem as we need to allocate an array
* of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@
/*
* The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
* a single wsize request with a single call.
*/
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5ce540d..c03d074 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1902,7 +1902,7 @@
* find_get_pages_tag seems to return a max of 256 on each
* iteration, so we must call it several times in order to
* fill the array or the wsize is effectively limited to
- * 256 * PAGE_CACHE_SIZE.
+ * 256 * PAGE_SIZE.
*/
*found_pages = 0;
pages = wdata->pages;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2..9d4e7ea 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@
(Block size in cramfs refers to the size of input data that is
compressed at a time. It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
The superblock ought to indicate the block size that the fs was
written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
correctly).
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
This discrepancy is a bug, though it's not clear which should be
changed.
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
<asm/page.h>. Personally I don't like this option, but it does
require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
is that the generated cramfs cannot always be shared between different
kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
(currently possible with arm and ia64).
The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@
1. Always 4096 bytes.
2. Writer chooses blocksize; kernel adapts but rejects blocksize >
- PAGE_CACHE_SIZE.
+ PAGE_SIZE.
3. Writer chooses blocksize; kernel adapts even to blocksize >
- PAGE_CACHE_SIZE.
+ PAGE_SIZE.
It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
value don't get as good compression as they can.
The cost of option 2 relative to option 1 is that the code uses
variables instead of #define'd constants. The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
Option 3 is easy to implement if we don't mind being CPU-inefficient:
e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 2096654..3a32ddf 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@
* page cache and dentry tree anyway..
*
* This also acts as a way to guarantee contiguous areas of up to
- * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
* worry about end-of-buffer issues even when decompressing a full
* page cache.
*/
diff --git a/fs/dax.c b/fs/dax.c
index 1f95416..75ba46d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1094,7 +1094,7 @@
* you are truncating a file, the helper function dax_truncate_page() may be
* more convenient.
*
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
* block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
* took care of disposing of the unnecessary blocks. Even if the filesystem
* block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1146,7 +1146,7 @@
* Similar to block_truncate_page(), this function can be called by a
* filesystem when it is truncating a DAX file to handle the partial page.
*
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
* block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
* took care of disposing of the unnecessary blocks. Even if the filesystem
* block size is smaller than PAGE_SIZE, we have to zero the rest of the page
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 2a988f3..224b49e 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,8 +763,8 @@
} else { /* ia->ia_size < i_size_read(inode) */
/* We're chopping off all the pages down to the page
* in which ia->ia_size is located. Fill in the end of
- * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
- * PAGE_CACHE_SIZE with zeros. */
+ * that page from (ia->ia_size & ~PAGE_MASK) to
+ * PAGE_SIZE with zeros. */
size_t num_zeros = (PAGE_SIZE
- (ia->ia_size & ~PAGE_MASK));
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 1b45694..7ff6fcf 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == EXT2_MAX_REC_LEN)
return 1 << 16;
#endif
@@ -46,7 +46,7 @@
static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(EXT2_MAX_REC_LEN);
else
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c047435..7ccba1a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1961,7 +1961,7 @@
{
unsigned len = le16_to_cpu(dlen);
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len == EXT4_MAX_REC_LEN || len == 0)
return blocksize;
return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1974,7 @@
{
if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
if (len < 65536)
return cpu_to_le16(len);
if (len == blocksize) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8a43c68..4f7043b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4894,7 +4894,7 @@
offset = inode->i_size & (PAGE_SIZE - 1);
/*
* All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+ * do. We do the check mainly to optimize the common PAGE_SIZE ==
* blocksize case
*/
if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c121747..eeeade7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@
*
*
* one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
* blocksize) blocks. So it can have information regarding groups_per_page
* which is blocks_per_page/2
*
@@ -807,7 +807,7 @@
*
* one block each for bitmap and buddy information.
* So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
* So it can have information regarding groups_per_page which
* is blocks_per_page/2
*
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index ea27aa1..f24e729 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
*
* then this code just gives up and calls the buffer_head-based read function.
* It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
*
*/
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index afb7c7f..4ea71eb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -237,7 +237,7 @@
/*
* Support for read() - Find the page attached to f_mapping and copy out the
* data. It's *very* similar to do_generic_mapping_read(); we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
*/
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
diff --git a/fs/mpage.c b/fs/mpage.c
index e7083bf..eedc644 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -331,7 +331,7 @@
*
* then this code just gives up and calls the buffer_head-based read function.
* It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
*
* BH_Boundary explanation:
*
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index a474e7e..97768a1 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -674,7 +674,7 @@
// in the inode.
// Again, for each page do:
// __set_page_dirty_buffers();
- // page_cache_release()
+ // put_page()
// We don't need to wait on the writes.
// Update iblock.
}
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 37cd7e4..820d6ea 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -49,7 +49,7 @@
* @index: index into the page cache for @mapping of the page to map
*
* Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
*
* If the page is not in memory it is loaded from disk first using the readpage
* method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index b6074a5..f2b5e74 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -105,10 +105,6 @@
ntfs_debug("Zeroing page region outside initialized size.");
if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
- /*
- * FIXME: Using clear_page() will become wrong when we get
- * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
- */
clear_page(kp);
return;
}
@@ -160,7 +156,7 @@
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
unpredictably! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
@@ -462,7 +458,7 @@
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,12 +493,12 @@
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
- /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
+ /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minimum alignment is again
- * PAGE_CACHE_SIZE.
+ * PAGE_SIZE).
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
@@ -753,11 +749,6 @@
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
- /*
- * FIXME: Using clear_page() will become wrong
- * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
- * for now there is no problem.
- */
if (likely(!cur_ofs))
clear_page(page_address(page));
else
@@ -807,7 +798,7 @@
* synchronous io for the majority of pages.
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_page(pages[xpage]) as long
- * as PAGE_CACHE_SIZE <= cb_size.
+ * as PAGE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 3cdce16..a186135 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,7 +315,7 @@
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
@@ -793,11 +793,11 @@
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
- dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+ dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
@@ -809,9 +809,9 @@
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
- dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+ dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
- if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+ if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
@@ -844,7 +844,7 @@
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
- if (index_end > kaddr + PAGE_CACHE_SIZE) {
+ if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT == vcn <<
+ PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
- PAGE_CACHE_SHIFT)
+ PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2dae608..91117ad 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -573,7 +573,7 @@
* only partially being written to.
*
* If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
* the same cluster and that they are the entirety of that cluster, and that
* the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
*
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 02a83a4..0d645f3 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,7 +272,7 @@
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
- * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+ * of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 3eda6d4..f40972d 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -870,7 +870,7 @@
}
if (ni->itype.index.block_size > PAGE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > "
- "PAGE_CACHE_SIZE (%ld) is not "
+ "PAGE_SIZE (%ld) is not "
"supported. Sorry.",
ni->itype.index.block_size,
PAGE_SIZE);
@@ -1586,7 +1586,7 @@
goto unm_err_out;
}
if (ni->itype.index.block_size > PAGE_SIZE) {
- ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+ ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
"(%ld) is not supported. Sorry.",
ni->itype.index.block_size, PAGE_SIZE);
err = -EOPNOTSUPP;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index ab2b093..ecb4987 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,12 +823,12 @@
ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
vol->mft_record_size_bits, vol->mft_record_size_bits);
/*
- * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+ * We cannot support mft record sizes above the PAGE_SIZE since
* we store $MFT/$DATA, the table of mft records in the page cache.
*/
if (vol->mft_record_size > PAGE_SIZE) {
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
- "PAGE_CACHE_SIZE on your system (%lu). "
+ "PAGE_SIZE on your system (%lu). "
"This is not supported. Sorry.",
vol->mft_record_size, PAGE_SIZE);
return false;
@@ -2471,12 +2471,12 @@
down_read(&vol->lcnbmp_lock);
/*
* Convert the number of bits into bytes rounded up, then convert into
- * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+ * multiples of PAGE_SIZE, rounding up so that if we have one
* full and one partial page max_index = 2.
*/
max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
@@ -2547,7 +2547,7 @@
pgoff_t index;
ntfs_debug("Entering.");
- /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+ /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
"0x%lx.", max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
@@ -2639,7 +2639,7 @@
size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
/*
* Convert the maximum number of set bits into bytes rounded up, then
- * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+ * convert into multiples of PAGE_SIZE, rounding up so that if we
* have one full and one partial page max_index = 2.
*/
max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
@@ -2765,7 +2765,7 @@
if (!parse_options(vol, (char*)opt))
goto err_out_now;
- /* We support sector sizes up to the PAGE_CACHE_SIZE. */
+ /* We support sector sizes up to the PAGE_SIZE. */
if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index ce5dc4f..ad15773 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -684,7 +684,7 @@
return ret;
}
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES 1
#else
#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 881242c..744d5d9 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2956,7 +2956,7 @@
}
/*
- * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+ * In case PAGE_SIZE <= CLUSTER_SIZE, this page
* can't be dirtied before we CoW it out.
*/
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 8c64872..2ace90e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,7 +599,7 @@
* This does a check to see if the buffer belongs to one of these
* lost pages before doing the final put_bh. If page->mapping was
* null, it tries to free buffers on the page, which should make the
- * final page_cache_release drop the page from the lru.
+ * final put_page drop the page from the lru.
*/
static void release_buffer_page(struct buffer_head *bh)
{
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 27e501a..23813c0 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
* access the metadata and fragment caches.
*
* To avoid out of memory and fragmentation issues with vmalloc the cache
- * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ * uses sequences of kmalloced PAGE_SIZE buffers.
*
* It should be noted that the cache is not used for file datablocks, these
* are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@
/*
* Initialise cache allocating the specified number of entries, each of
* size block_size. To avoid vmalloc fragmentation issues each entry
- * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
*/
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 437de9e..13d8094 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -382,7 +382,7 @@
/*
* Loop copying datablock into pages. As the datablock likely covers
- * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+ * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
* grab the pages from the page cache, except for the page that we've
* been called to fill.
*/
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 1a9c664..446753d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -554,7 +554,7 @@
* VFS copied less data to the page that it intended and
* declared in its '->write_begin()' call via the @len
* argument. If the page was not up-to-date, and @len was
- * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+ * @PAGE_SIZE, the 'ubifs_write_begin()' function did
* not load it from the media (for optimization reasons). This
* means that part of the page contains garbage. So read the
* page now.
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20daea9..e98c24e 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,7 +2237,7 @@
BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
/*
- * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+ * We require that PAGE_SIZE is greater-than-or-equal-to
* UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
*/
if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 5b8ae03..e49b240 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1563,7 +1563,7 @@
int status;
struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
- ASSERT(len <= PAGE_CACHE_SIZE);
+ ASSERT(len <= PAGE_SIZE);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -1620,7 +1620,7 @@
{
int ret;
- ASSERT(len <= PAGE_CACHE_SIZE);
+ ASSERT(len <= PAGE_SIZE);
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (unlikely(ret < len)) {
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 6082f54..187e14b 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
* __block_write_begin does this in an [unsigned] long...
- * page->index << (PAGE_CACHE_SHIFT - bbits)
+ * page->index << (PAGE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+ * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
* but for smaller blocksizes it is less (bbits = log2 bsize).
* Note1: get_block_t takes a long (implicit cast from above)
* Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f..3f10307 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@
struct backing_dev_info {
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d..ffcff53 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@
*
* A page may belong to an inode's memory mapping. In this case, page->mapping
* is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b3..c2d75b4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
+ units */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index efada23..957049f 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@
struct page *wb_page; /* page to read in/write out */
struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
- pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
- unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
+ pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
+ unsigned int wb_offset, /* Offset & ~PAGE_MASK */
wb_pgbase, /* Start of page data */
wb_bytes; /* Length of request */
struct kref wb_kref; /* reference count */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763..e9fcf90 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@
{
unsigned len = le16_to_cpu(dlen);
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == NILFS_MAX_REC_LEN)
return 1 << 16;
#endif
@@ -340,7 +340,7 @@
static inline __le16 nilfs_rec_len_to_disk(unsigned len)
{
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(NILFS_MAX_REC_LEN);
else if (len > (1 << 16))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index f396ccb..b3fc037 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -535,8 +535,7 @@
/*
* Fault a userspace page into pagetables. Return non-zero on a fault.
*
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc71..7ca44fb 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@
*
* These happen to all be powers of 2, which is not strictly
* necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
*
* For UDP transports, a block plus NFS,RPC, and UDP headers
* has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3d980ea..2b83359 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,7 +433,7 @@
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb..fb87aea 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1107,7 +1107,7 @@
* @addr: user address
*
* Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by page_cache_release() or put_page().
+ * to be freed afterwards by put_page().
*
* Returns NULL on any kind of failure - a hole must then be inserted into
* the corefile, to preserve alignment with its headers; and also returns
diff --git a/mm/memory.c b/mm/memory.c
index 07a4204..93897f2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2400,7 +2400,6 @@
vba = vma->vm_pgoff;
vea = vba + vma_pages(vma) - 1;
- /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
zba = details->first_index;
if (zba < vba)
zba = vba;
diff --git a/mm/mincore.c b/mm/mincore.c
index 0eed23d..c0b5ba9 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -211,7 +211,7 @@
* return values:
* zero - success
* -EFAULT - vec points to an illegal address
- * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
+ * -EINVAL - addr is not a multiple of PAGE_SIZE
* -ENOMEM - Addresses in the range [addr, addr + len] are
* invalid for the address space of this process, or
* specify one or more pages which are not currently
@@ -233,7 +233,7 @@
if (!access_ok(VERIFY_READ, (void __user *) start, len))
return -ENOMEM;
- /* This also avoids any overflows on PAGE_CACHE_ALIGN */
+ /* This also avoids any overflows on PAGE_ALIGN */
pages = len >> PAGE_SHIFT;
pages += (offset_in_page(len)) != 0;
diff --git a/mm/swap.c b/mm/swap.c
index ea641e2..a0bc206 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -698,7 +698,7 @@
}
/**
- * release_pages - batched page_cache_release()
+ * release_pages - batched put_page()
* @pages: array of pages to release
* @nr: number of pages
* @cold: whether the pages are cache cold
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 1e6aba0..6bdb386 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@
* Note: the addresses pgto_base and pgfrom_base are both calculated in
* the same way:
* if a memory area starts at byte 'base' in page 'pages[i]',
- * then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ * then its address is given as (i << PAGE_SHIFT) + base
* Also note: pgfrom_base must be < pgto_base, but the memory areas
* they point to may overlap.
*/