| /* SPDX-License-Identifier: GPL-2.0 */ |
| #ifndef _LINUX_MM_TYPES_H |
| #define _LINUX_MM_TYPES_H |
| |
| #include <linux/mm_types_task.h> |
| |
| #include <linux/auxvec.h> |
| #include <linux/list.h> |
| #include <linux/spinlock.h> |
| #include <linux/rbtree.h> |
| #include <linux/rwsem.h> |
| #include <linux/completion.h> |
| #include <linux/cpumask.h> |
| #include <linux/uprobes.h> |
| #include <linux/page-flags-layout.h> |
| #include <linux/workqueue.h> |
| |
| #include <asm/mmu.h> |
| |
| #ifndef AT_VECTOR_SIZE_ARCH |
| #define AT_VECTOR_SIZE_ARCH 0 |
| #endif |
| #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) |
| |
| typedef int vm_fault_t; |
| |
| struct address_space; |
| struct mem_cgroup; |
| struct hmm; |
| |
| /* |
| * Each physical page in the system has a struct page associated with |
| * it to keep track of whatever it is we are using the page for at the |
| * moment. Note that we have no way to track which tasks are using |
| * a page, though if it is a pagecache page, rmap structures can tell us |
| * who is mapping it. If you allocate the page using alloc_pages(), you |
| * can use some of the space in struct page for your own purposes. |
| * |
| * Pages that were once in the page cache may be found under the RCU lock |
| * even after they have been recycled to a different purpose. The page |
| * cache reads and writes some of the fields in struct page to pin the |
| * page before checking that it's still in the page cache. It is vital |
| * that all users of struct page: |
| * 1. Use the first word as PageFlags. |
| * 2. Clear or preserve bit 0 of page->compound_head. It is used as |
| * PageTail for compound pages, and the page cache must not see false |
| * positives. Some users put a pointer here (guaranteed to be at least |
| * 4-byte aligned), other users avoid using the field altogether. |
| * 3. page->_refcount must either not be used, or must be used in such a |
| * way that other CPUs temporarily incrementing and then decrementing the |
| * refcount does not cause problems. On receiving the page from |
| * alloc_pages(), the refcount will be positive. |
| * 4. Either preserve page->_mapcount or restore it to -1 before freeing it. |
| * |
| * If you allocate pages of order > 0, you can use the fields in the struct |
| * page associated with each page, but bear in mind that the pages may have |
| * been inserted individually into the page cache, so you must use the above |
| * four fields in a compatible way for each struct page. |
| * |
| * SLUB uses cmpxchg_double() to atomically update its freelist and |
| * counters. That requires that freelist & counters be adjacent and |
| * double-word aligned. We align all struct pages to double-word |
| * boundaries, and ensure that 'freelist' is aligned within the |
| * struct. |
| */ |
| #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE |
| #define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) |
| #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) |
| #define _slub_counter_t unsigned long |
| #else |
| #define _slub_counter_t unsigned int |
| #endif |
| #else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ |
| #define _struct_page_alignment |
| #define _slub_counter_t unsigned int |
| #endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ |
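| |
| /* |
| * Illustrative sketch, not part of this header: with |
| * CONFIG_HAVE_CMPXCHG_DOUBLE, SLUB updates the freelist/counters pair |
| * of a slab page in one atomic operation, roughly: |
| * |
| * cmpxchg_double(&page->freelist, &page->counters, |
| * freelist_old, counters_old, |
| * freelist_new, counters_new); |
| * |
| * which only works because 'freelist' and 'counters' are adjacent and |
| * the pair is double-word aligned, hence _struct_page_alignment. |
| */ |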
| |
| struct page { |
| /* First double word block */ |
| unsigned long flags; /* Atomic flags, some possibly |
| * updated asynchronously */ |
| union { |
| /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */ |
| struct address_space *mapping; |
| |
| void *s_mem; /* slab first object */ |
| atomic_t compound_mapcount; /* first tail page */ |
| /* page_deferred_list().next -- second tail page */ |
| }; |
| |
| /* Second double word */ |
| union { |
| pgoff_t index; /* Our offset within mapping. */ |
| void *freelist; /* sl[aou]b first free object */ |
| /* page_deferred_list().prev -- second tail page */ |
| }; |
| |
| union { |
| _slub_counter_t counters; |
| unsigned int active; /* SLAB */ |
| struct { /* SLUB */ |
| unsigned inuse:16; |
| unsigned objects:15; |
| unsigned frozen:1; |
| }; |
| int units; /* SLOB */ |
| |
| struct { /* Page cache */ |
| /* |
| * Count of ptes mapped in mms, to show when |
| * page is mapped & limit reverse map searches. |
| * |
| * Extra information about page type may be |
| * stored here for pages that are never mapped, |
| * in which case the value MUST BE <= -2. |
| * See page-flags.h for more details. |
| */ |
| atomic_t _mapcount; |
| |
| /* |
| * Usage count. *USE WRAPPER FUNCTIONS* for manual |
| * accounting; see page_ref.h. |
| */ |
| atomic_t _refcount; |
| }; |
| }; |
| |
| /* |
| * WARNING: bit 0 of the first word encodes PageTail(). That means |
| * the other users of this storage space MUST NOT use that bit, to |
| * avoid collisions and false-positive PageTail(). |
| */ |
| union { |
| struct list_head lru; /* Pageout list, e.g. active_list, |
| * protected by zone_lru_lock. |
| * Can be used as a generic list |
| * by the page owner. |
| */ |
| struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an |
| * lru or handled by a slab |
| * allocator, this points to the |
| * hosting device page map. |
| */ |
| struct { /* slub per cpu partial pages */ |
| struct page *next; /* Next partial slab */ |
| #ifdef CONFIG_64BIT |
| int pages; /* Nr of partial slabs left */ |
| int pobjects; /* Approximate # of objects */ |
| #else |
| short int pages; |
| short int pobjects; |
| #endif |
| }; |
| |
| struct rcu_head rcu_head; /* Used by SLAB |
| * when destroying via RCU |
| */ |
| /* Tail pages of compound page */ |
| struct { |
| unsigned long compound_head; /* If bit zero is set */ |
| |
| /* First tail page only */ |
| unsigned char compound_dtor; |
| unsigned char compound_order; |
| /* two/six bytes available here */ |
| }; |
| |
| #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS |
| struct { |
| unsigned long __pad; /* do not overlay pmd_huge_pte |
| * with compound_head to avoid |
| * possible bit 0 collision. |
| */ |
| pgtable_t pmd_huge_pte; /* protected by page->ptl */ |
| }; |
| #endif |
| }; |
| |
| union { |
| /* |
| * Mapping-private opaque data: |
| * Usually used for buffer_heads if PagePrivate |
| * Used for swp_entry_t if PageSwapCache |
| * Indicates order in the buddy system if PageBuddy |
| */ |
| unsigned long private; |
| #if USE_SPLIT_PTE_PTLOCKS |
| #if ALLOC_SPLIT_PTLOCKS |
| spinlock_t *ptl; |
| #else |
| spinlock_t ptl; |
| #endif |
| #endif |
| struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ |
| }; |
| |
| #ifdef CONFIG_MEMCG |
| struct mem_cgroup *mem_cgroup; |
| #endif |
| |
| /* |
| * On machines where all RAM is mapped into kernel address space, |
| * we can simply calculate the virtual address. On machines with |
| * highmem some memory is mapped into kernel virtual memory |
| * dynamically, so we need a place to store that address. |
| * Note that this field could be 16 bits on x86 ... ;) |
| * |
| * Architectures with slow multiplication can define |
| * WANT_PAGE_VIRTUAL in asm/page.h |
| */ |
| #if defined(WANT_PAGE_VIRTUAL) |
| void *virtual; /* Kernel virtual address (NULL if |
| not kmapped, ie. highmem) */ |
| #endif /* WANT_PAGE_VIRTUAL */ |
| |
| #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
| int _last_cpupid; |
| #endif |
| } _struct_page_alignment; |
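| |
| /* |
| * Illustrative example, not part of this header: code that needs a |
| * temporary reference on a page should go through the wrappers built |
| * on page_ref.h (see also mm.h) rather than touching page->_refcount |
| * directly, e.g.: |
| * |
| * if (get_page_unless_zero(page)) { |
| * ... use the page ... |
| * put_page(page); |
| * } |
| * |
| * so that the speculative-reference rules described above hold. |
| */ |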
| |
| #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) |
| #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) |
| |
| struct page_frag_cache { |
| void * va; |
| #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) |
| __u16 offset; |
| __u16 size; |
| #else |
| __u32 offset; |
| #endif |
| /* We maintain a pagecount bias, so that we don't dirty the cache line |
| * containing page->_refcount every time we allocate a fragment. |
| */ |
| unsigned int pagecnt_bias; |
| bool pfmemalloc; |
| }; |
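| |
| /* |
| * Illustrative example, not part of this header: a page_frag_cache is |
| * normally consumed through the helpers declared in gfp.h rather than |
| * by hand, e.g. (where 'cache' is a suitably initialised |
| * struct page_frag_cache): |
| * |
| * void *buf = page_frag_alloc(&cache, 256, GFP_ATOMIC); |
| * |
| * if (buf) { |
| * ... fill the 256-byte fragment ... |
| * page_frag_free(buf); |
| * } |
| * |
| * The pagecnt_bias trick means most such allocations never touch |
| * page->_refcount. |
| */ |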
| |
| typedef unsigned long vm_flags_t; |
| |
| /* |
| * A region containing a mapping of a non-memory backed file under NOMMU |
| * conditions. These are held in a global tree and are pinned by the VMAs that |
| * map parts of them. |
| */ |
| struct vm_region { |
| struct rb_node vm_rb; /* link in global region tree */ |
| vm_flags_t vm_flags; /* VMA vm_flags */ |
| unsigned long vm_start; /* start address of region */ |
| unsigned long vm_end; /* region initialised to here */ |
| unsigned long vm_top; /* region allocated to here */ |
| unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ |
| struct file *vm_file; /* the backing file or NULL */ |
| |
| int vm_usage; /* region usage count (access under nommu_region_sem) */ |
| bool vm_icache_flushed : 1; /* true if the icache has been flushed for |
| * this region */ |
| }; |
| |
| #ifdef CONFIG_USERFAULTFD |
| #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, }) |
| struct vm_userfaultfd_ctx { |
| struct userfaultfd_ctx *ctx; |
| }; |
| #else /* CONFIG_USERFAULTFD */ |
| #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {}) |
| struct vm_userfaultfd_ctx {}; |
| #endif /* CONFIG_USERFAULTFD */ |
| |
| /* |
| * This struct describes a virtual memory area (VMA). There is one of |
| * these per VM area per task. A VM area is any part of the process |
| * virtual memory space that has a special rule for the page-fault |
| * handlers (i.e. a shared library, the executable area etc). |
| */ |
| struct vm_area_struct { |
| /* The first cache line has the info for VMA tree walking. */ |
| |
| unsigned long vm_start; /* Our start address within vm_mm. */ |
| unsigned long vm_end; /* The first byte after our end address |
| within vm_mm. */ |
| |
| /* linked list of VM areas per task, sorted by address */ |
| struct vm_area_struct *vm_next, *vm_prev; |
| |
| struct rb_node vm_rb; |
| |
| /* |
| * Largest free memory gap in bytes to the left of this VMA. |
| * Either between this VMA and vma->vm_prev, or between one of the |
| * VMAs below us in the VMA rbtree and its ->vm_prev. This helps |
| * get_unmapped_area find a free area of the right size. |
| */ |
| unsigned long rb_subtree_gap; |
| |
| /* Second cache line starts here. */ |
| |
| struct mm_struct *vm_mm; /* The address space we belong to. */ |
| pgprot_t vm_page_prot; /* Access permissions of this VMA. */ |
| unsigned long vm_flags; /* Flags, see mm.h. */ |
| |
| /* |
| * For areas with an address space and backing store, |
| * linkage into the address_space->i_mmap interval tree. |
| */ |
| struct { |
| struct rb_node rb; |
| unsigned long rb_subtree_last; |
| } shared; |
| |
| /* |
| * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma |
| * list, after a COW of one of the file pages. A MAP_SHARED vma |
| * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack |
| * or brk vma (with NULL file) can only be in an anon_vma list. |
| */ |
| struct list_head anon_vma_chain; /* Serialized by mmap_sem & |
| * page_table_lock */ |
| struct anon_vma *anon_vma; /* Serialized by page_table_lock */ |
| |
| /* Function pointers to deal with this struct. */ |
| const struct vm_operations_struct *vm_ops; |
| |
| /* Information about our backing store: */ |
| unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE |
| units */ |
| struct file * vm_file; /* File we map to (can be NULL). */ |
| void * vm_private_data; /* was vm_pte (shared mem) */ |
| |
| atomic_long_t swap_readahead_info; |
| #ifndef CONFIG_MMU |
| struct vm_region *vm_region; /* NOMMU mapping region */ |
| #endif |
| #ifdef CONFIG_NUMA |
| struct mempolicy *vm_policy; /* NUMA policy for the VMA */ |
| #endif |
| struct vm_userfaultfd_ctx vm_userfaultfd_ctx; |
| } __randomize_layout; |
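| |
| /* |
| * Illustrative example, not part of this header: the per-mm VMA list |
| * and rbtree are only stable while mmap_sem is held, so a typical |
| * read-side walk looks like: |
| * |
| * down_read(&mm->mmap_sem); |
| * for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| * ... inspect vma->vm_start, vma->vm_end, vma->vm_flags ... |
| * } |
| * up_read(&mm->mmap_sem); |
| */ |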
| |
| struct core_thread { |
| struct task_struct *task; |
| struct core_thread *next; |
| }; |
| |
| struct core_state { |
| atomic_t nr_threads; |
| struct core_thread dumper; |
| struct completion startup; |
| }; |
| |
| struct kioctx_table; |
| struct mm_struct { |
| struct vm_area_struct *mmap; /* list of VMAs */ |
| struct rb_root mm_rb; |
| u32 vmacache_seqnum; /* per-thread vmacache */ |
| #ifdef CONFIG_MMU |
| unsigned long (*get_unmapped_area) (struct file *filp, |
| unsigned long addr, unsigned long len, |
| unsigned long pgoff, unsigned long flags); |
| #endif |
| unsigned long mmap_base; /* base of mmap area */ |
| unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ |
| #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES |
| /* Base addresses for compat mmap() */ |
| unsigned long mmap_compat_base; |
| unsigned long mmap_compat_legacy_base; |
| #endif |
| unsigned long task_size; /* size of task vm space */ |
| unsigned long highest_vm_end; /* highest vma end address */ |
| pgd_t * pgd; |
| |
| /** |
| * @mm_users: The number of users including userspace. |
| * |
| * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops |
| * to 0 (i.e. when the task exits and there are no other temporary |
| * reference holders), we also release a reference on @mm_count |
| * (which may then free the &struct mm_struct if @mm_count also |
| * drops to 0). |
| */ |
| atomic_t mm_users; |
| |
| /** |
| * @mm_count: The number of references to &struct mm_struct |
| * (@mm_users count as 1). |
| * |
| * Use mmgrab()/mmdrop() to modify. When this drops to 0, the |
| * &struct mm_struct is freed. |
| */ |
| atomic_t mm_count; |
| |
| #ifdef CONFIG_MMU |
| atomic_long_t pgtables_bytes; /* size of all page tables in bytes */ |
| #endif |
| int map_count; /* number of VMAs */ |
| |
| spinlock_t page_table_lock; /* Protects page tables and some counters */ |
| struct rw_semaphore mmap_sem; |
| |
| struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung |
| * together off init_mm.mmlist, and are protected |
| * by mmlist_lock |
| */ |
| |
| |
| unsigned long hiwater_rss; /* High-watermark of RSS usage */ |
| unsigned long hiwater_vm; /* High-water virtual memory usage */ |
| |
| unsigned long total_vm; /* Total pages mapped */ |
| unsigned long locked_vm; /* Pages that have PG_mlocked set */ |
| unsigned long pinned_vm; /* Refcount permanently increased */ |
| unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ |
| unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ |
| unsigned long stack_vm; /* VM_STACK */ |
| unsigned long def_flags; |
| unsigned long start_code, end_code, start_data, end_data; |
| unsigned long start_brk, brk, start_stack; |
| unsigned long arg_start, arg_end, env_start, env_end; |
| |
| unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ |
| |
| /* |
| * Special counters, in some configurations protected by the |
| * page_table_lock, in other configurations by being atomic. |
| */ |
| struct mm_rss_stat rss_stat; |
| |
| struct linux_binfmt *binfmt; |
| |
| cpumask_var_t cpu_vm_mask_var; |
| |
| /* Architecture-specific MM context */ |
| mm_context_t context; |
| |
| unsigned long flags; /* Must use atomic bitops to access the bits */ |
| |
| struct core_state *core_state; /* coredumping support */ |
| #ifdef CONFIG_MEMBARRIER |
| atomic_t membarrier_state; |
| #endif |
| #ifdef CONFIG_AIO |
| spinlock_t ioctx_lock; |
| struct kioctx_table __rcu *ioctx_table; |
| #endif |
| #ifdef CONFIG_MEMCG |
| /* |
| * "owner" points to a task that is regarded as the canonical |
| * user/owner of this mm. All of the following must be true in |
| * order for it to be changed: |
| * |
| * current == mm->owner |
| * current->mm != mm |
| * new_owner->mm == mm |
| * new_owner->alloc_lock is held |
| */ |
| struct task_struct __rcu *owner; |
| #endif |
| struct user_namespace *user_ns; |
| |
| /* store ref to file /proc/<pid>/exe symlink points to */ |
| struct file __rcu *exe_file; |
| #ifdef CONFIG_MMU_NOTIFIER |
| struct mmu_notifier_mm *mmu_notifier_mm; |
| #endif |
| #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
| pgtable_t pmd_huge_pte; /* protected by page_table_lock */ |
| #endif |
| #ifdef CONFIG_CPUMASK_OFFSTACK |
| struct cpumask cpumask_allocation; |
| #endif |
| #ifdef CONFIG_NUMA_BALANCING |
| /* |
| * numa_next_scan is the next time that the PTEs will be marked |
| * pte_numa. NUMA hinting faults will gather statistics and migrate |
| * pages to new nodes if necessary. |
| */ |
| unsigned long numa_next_scan; |
| |
| /* Restart point for scanning and setting pte_numa */ |
| unsigned long numa_scan_offset; |
| |
| /* numa_scan_seq prevents two threads setting pte_numa */ |
| int numa_scan_seq; |
| #endif |
| /* |
| * An operation with batched TLB flushing is going on. Anything that |
| * can move process memory needs to flush the TLB when moving a |
| * PROT_NONE or PROT_NUMA mapped page. |
| */ |
| atomic_t tlb_flush_pending; |
| #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
| /* See flush_tlb_batched_pending() */ |
| bool tlb_flush_batched; |
| #endif |
| struct uprobes_state uprobes_state; |
| #ifdef CONFIG_HUGETLB_PAGE |
| atomic_long_t hugetlb_usage; |
| #endif |
| struct work_struct async_put_work; |
| |
| #if IS_ENABLED(CONFIG_HMM) |
| /* HMM needs to track a few things per mm */ |
| struct hmm *hmm; |
| #endif |
| } __randomize_layout; |
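| |
| /* |
| * Illustrative example, not part of this header: the two reference |
| * counts documented above are only manipulated through their wrappers |
| * (see linux/sched/mm.h). To use another task's address space: |
| * |
| * if (mmget_not_zero(mm)) { |
| * ... mm_users is pinned, the mappings stay alive ... |
| * mmput(mm); |
| * } |
| * |
| * whereas mmgrab()/mmdrop() pin only mm_count, i.e. the struct |
| * mm_struct itself, not the address space behind it. |
| */ |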
| |
| extern struct mm_struct init_mm; |
| |
| static inline void mm_init_cpumask(struct mm_struct *mm) |
| { |
| #ifdef CONFIG_CPUMASK_OFFSTACK |
| mm->cpu_vm_mask_var = &mm->cpumask_allocation; |
| #endif |
| cpumask_clear(mm->cpu_vm_mask_var); |
| } |
| |
| /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ |
| static inline cpumask_t *mm_cpumask(struct mm_struct *mm) |
| { |
| return mm->cpu_vm_mask_var; |
| } |
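| |
| /* |
| * Illustrative example, not part of this header: architecture |
| * switch_mm() implementations typically record which CPUs have run an |
| * mm in this mask, e.g. (where 'next' is the mm being switched to): |
| * |
| * cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); |
| * |
| * so that later TLB shootdowns can be limited to those CPUs. |
| */ |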
| |
| struct mmu_gather; |
| extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, |
| unsigned long start, unsigned long end); |
| extern void tlb_finish_mmu(struct mmu_gather *tlb, |
| unsigned long start, unsigned long end); |
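| |
| /* |
| * Illustrative example, not part of this header: callers tearing down |
| * mappings bracket the work with a gather/finish pair so TLB flushes |
| * and page frees can be batched, roughly: |
| * |
| * struct mmu_gather tlb; |
| * |
| * tlb_gather_mmu(&tlb, mm, start, end); |
| * ... unmap the pages, feeding them to the gather ... |
| * tlb_finish_mmu(&tlb, start, end); |
| */ |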
| |
| static inline void init_tlb_flush_pending(struct mm_struct *mm) |
| { |
| atomic_set(&mm->tlb_flush_pending, 0); |
| } |
| |
| static inline void inc_tlb_flush_pending(struct mm_struct *mm) |
| { |
| atomic_inc(&mm->tlb_flush_pending); |
| /* |
| * The only time this value is relevant is when there are indeed pages |
| * to flush. And we'll only flush pages after changing them, which |
| * requires the PTL. |
| * |
| * So the ordering here is: |
| * |
| * atomic_inc(&mm->tlb_flush_pending); |
| * spin_lock(&ptl); |
| * ... |
| * set_pte_at(); |
| * spin_unlock(&ptl); |
| * |
| * spin_lock(&ptl) |
| * mm_tlb_flush_pending(); |
| * .... |
| * spin_unlock(&ptl); |
| * |
| * flush_tlb_range(); |
| * atomic_dec(&mm->tlb_flush_pending); |
| * |
| * Since the increment is constrained by the PTL unlock, this |
| * ensures that the increment is visible if the PTE modification is |
| * visible. After all, if there is no PTE modification, nobody cares |
| * about TLB flushes either. |
| * |
| * This very much relies on users (mm_tlb_flush_pending() and |
| * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and |
| * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc |
| * locks (PPC) the unlock of one doesn't order against the lock of |
| * another PTL. |
| * |
| * The decrement is ordered by the flush_tlb_range(), such that |
| * mm_tlb_flush_pending() will not return false unless all flushes have |
| * completed. |
| */ |
| } |
| |
| static inline void dec_tlb_flush_pending(struct mm_struct *mm) |
| { |
| /* |
| * See inc_tlb_flush_pending(). |
| * |
| * This cannot be smp_mb__before_atomic() because smp_mb() simply does |
| * not order against TLB invalidate completion, which is what we need. |
| * |
| * Therefore we must rely on tlb_flush_*() to guarantee order. |
| */ |
| atomic_dec(&mm->tlb_flush_pending); |
| } |
| |
| static inline bool mm_tlb_flush_pending(struct mm_struct *mm) |
| { |
| /* |
| * Must be called after having acquired the PTL; orders against that |
| * PTL's release and therefore ensures that if we observe the modified |
| * PTE we must also observe the increment from inc_tlb_flush_pending(). |
| * |
| * That is, it only guarantees to return true if there is a flush |
| * pending for _this_ PTL. |
| */ |
| return atomic_read(&mm->tlb_flush_pending); |
| } |
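| |
| /* |
| * Illustrative example, not part of this header: a caller that holds |
| * the PTL and is about to act on a PTE for which a concurrent |
| * operation may still have a flush outstanding can use this as a hint |
| * to flush first, roughly: |
| * |
| * if (mm_tlb_flush_pending(mm)) |
| * flush_tlb_range(vma, start, end); |
| */ |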
| |
| static inline bool mm_tlb_flush_nested(struct mm_struct *mm) |
| { |
| /* |
| * Similar to mm_tlb_flush_pending(), we must have acquired the PTL |
| * for which there is a TLB flush pending in order to guarantee |
| * we've seen both that PTE modification and the increment. |
| * |
| * (There is no requirement to still hold the PTL; that is irrelevant.) |
| */ |
| return atomic_read(&mm->tlb_flush_pending) > 1; |
| } |
| |
| struct vm_fault; |
| |
| struct vm_special_mapping { |
| const char *name; /* The name, e.g. "[vdso]". */ |
| |
| /* |
| * If .fault is not provided, this points to a |
| * NULL-terminated array of pages that back the special mapping. |
| * |
| * This must not be NULL unless .fault is provided. |
| */ |
| struct page **pages; |
| |
| /* |
| * If non-NULL, then this is called to resolve page faults |
| * on the special mapping. If used, .pages is not checked. |
| */ |
| int (*fault)(const struct vm_special_mapping *sm, |
| struct vm_area_struct *vma, |
| struct vm_fault *vmf); |
| |
| int (*mremap)(const struct vm_special_mapping *sm, |
| struct vm_area_struct *new_vma); |
| }; |
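| |
| /* |
| * Illustrative example, not part of this header: an architecture maps |
| * something like the vdso by describing it with a vm_special_mapping |
| * and handing it to _install_special_mapping() (declared in mm.h); |
| * 'vdso_pages' below stands in for whatever page array the caller has |
| * prepared: |
| * |
| * static struct vm_special_mapping vdso_mapping = { |
| * .name = "[vdso]", |
| * .pages = vdso_pages, |
| * }; |
| * |
| * vma = _install_special_mapping(mm, addr, len, |
| * VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, |
| * &vdso_mapping); |
| */ |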
| |
| enum tlb_flush_reason { |
| TLB_FLUSH_ON_TASK_SWITCH, |
| TLB_REMOTE_SHOOTDOWN, |
| TLB_LOCAL_SHOOTDOWN, |
| TLB_LOCAL_MM_SHOOTDOWN, |
| TLB_REMOTE_SEND_IPI, |
| NR_TLB_FLUSH_REASONS, |
| }; |
| |
| /* |
| * A swap entry has to fit into an "unsigned long", as the entry is hidden |
| * in the "index" field of the swapper address space. |
| */ |
| typedef struct { |
| unsigned long val; |
| } swp_entry_t; |
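| |
| /* |
| * Illustrative example, not part of this header: swap entries are |
| * built and unpacked with the helpers in linux/swapops.h rather than |
| * by poking at ->val directly, e.g.: |
| * |
| * swp_entry_t entry = swp_entry(type, offset); |
| * |
| * where swp_type(entry) and swp_offset(entry) later recover the two |
| * halves. |
| */ |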
| |
| #endif /* _LINUX_MM_TYPES_H */ |