#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
struct mm_struct;
#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Macros to control the cache attributes of a page protection value:
 * uncached and guarded, uncached write-combining, cached and coherent,
 * or cached and write-through.  _PAGE_CACHE_CTL is the mask of all the
 * bits these macros manage.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
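
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * framebuffer-style driver could ask for a write-combining user
 * mapping of its pixel buffer from its mmap() handler:
 *
 *	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 *
 * Mappings of device control registers, where access ordering matters,
 * would use pgprot_noncached() instead so the pages are both uncached
 * and guarded.
 */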

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
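
/*
 * Sketch of the call pattern used by /dev/mem-style drivers (the
 * surrounding mmap() handler is a hypothetical illustration): the
 * platform decides, e.g. based on whether the target pfn is system
 * RAM, which cache attributes the user mapping gets:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 */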

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
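
/*
 * Illustrative use (hypothetical): generic code can hand out this one
 * page for read faults on untouched anonymous memory, e.g.
 *
 *	struct page *page = ZERO_PAGE(address);	/* always reads as zeroes */
 */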

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this),
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
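
/*
 * Hypothetical driver sketch tying the pieces above together: map a
 * device's MMIO region uncached into user space from an mmap() handler
 * (FOO_MMIO_BASE is an assumed, made-up physical address):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  FOO_MMIO_BASE >> PAGE_SHIFT,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */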

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
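
/*
 * Simplified sketch of the generic call site (from the mm fault path;
 * exact details vary by kernel version):
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, entry);
 */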

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */