/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

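/*
 * Everything below is built on a single address_space hook:
 *
 *	mapping->a_ops->get_xip_mem(mapping, pgoff, create,
 *				    &xip_mem, &xip_pfn)
 *
 * As the callers in this file use it, the hook returns the kernel
 * virtual address and pfn of the block backing pgoff, returns -ENODATA
 * for a hole (sparse block), and, when create is nonzero, allocates
 * backing store instead of reporting the hole.  The formal contract
 * lives with the filesystems that implement the hook.
 */
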
/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
static struct page *__xip_sparse_page;
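
/*
 * Locking sketch (as read from the code below): paths that allocate
 * blocks or install the sparse page serialize on xip_sparse_mutex, and
 * the sparse-page fault path brackets itself with
 * write_seqcount_begin/end on xip_sparse_seq.  __xip_unmap() first
 * walks the rmap lockless; read_seqcount_retry() tells it whether it
 * raced a writer and must retry while holding the mutex.
 */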

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
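
/*
 * Lifecycle note: the single zeroed page above is shared read-only by
 * every not-yet-written hole of every XIP file mapped without shared
 * write access.  Once a real block is allocated for an offset,
 * __xip_unmap() below removes any mappings of the sparse page at that
 * offset.
 */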

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;
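	/*
	 * Worked example (assuming 4 KiB pages, PAGE_CACHE_SHIFT == 12):
	 * pos == 5000 gives index == 1 and offset == 904, i.e. the read
	 * starts 904 bytes into the second page-sized chunk of the file.
	 */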

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;
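		/*
		 * Note that no flush is actually performed above; the
		 * empty statement reads as a placeholder for
		 * architectures that would need an alias-aware cache
		 * flush at this point.
		 */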

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.  It runs lockless first; if
 * the seqcount shows a race with a writer, it retries once more under
 * xip_sparse_mutex.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute
 * in place.
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
					xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
					page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault		= xip_file_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block and unmap the sparse page */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page, but uses
 * get_xip_mem to get at the data instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
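
/*
 * Example wiring (a sketch only; the example_* names are hypothetical,
 * though the shape follows how a filesystem with XIP support, such as
 * ext2 mounted with -o xip, plugs these helpers in):
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *	};
 *
 * plus address_space_operations that implement .get_xip_mem for the
 * backing store.
 */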