powerpc: Trivially merge several headers from asm-ppc64 to asm-powerpc

For these, I have just done the lame-o merge where the file ends up
looking like:

	#ifndef CONFIG_PPC64
	#include <asm-ppc/foo.h>
	#else
	... contents from asm-ppc64/foo.h
	#endif

so nothing has changed, really, except that we reduce include/asm-ppc64
a bit more.

Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
new file mode 100644
index 0000000..ea6798c
--- /dev/null
+++ b/include/asm-powerpc/mmu_context.h
@@ -0,0 +1,89 @@
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu_context.h>
+#else
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * When entering a kernel thread there is no valid user segment, so
+ * mark paca->pgdir NULL so that an SLB miss on a user address will fault.
+ */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+				  struct task_struct *tsk)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+	get_paca()->pgdir = NULL;
+#endif /* CONFIG_PPC_64K_PAGES */
+}
+
+#define NO_CONTEXT	0
+#define MAX_CONTEXT	(0x100000-1)
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+
+/*
+ * switch_mm is the entry point called from the architecture-independent
+ * code in kernel/sched.c
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
+		cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+	/* No need to flush userspace segments if the mm doesn't change */
+#ifdef CONFIG_PPC_64K_PAGES
+	if (prev == next && get_paca()->pgdir == next->pgd)
+		return;
+#else
+	if (prev == next)
+		return;
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile ("dssall");
+#endif /* CONFIG_ALTIVEC */
+
+	if (cpu_has_feature(CPU_FTR_SLB))
+		switch_slb(tsk, next);
+	else
+		switch_stab(tsk, next);
+}
+
+#define deactivate_mm(tsk,mm)	do { } while (0)
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm(prev, next, current);
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_PPC64 */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
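
For reference, the generic code drives these hooks as the comments above
describe: kernel/sched.c calls switch_mm() on a context switch, and once
exec has set current->mm the generic code calls activate_mm().  A rough
user-space sketch of that calling pattern, using toy stand-ins rather
than the real kernel structures, looks like:

	#include <stdio.h>

	/* Toy stand-ins for the kernel's mm_struct / task_struct. */
	struct mm_struct { const char *name; };
	struct task_struct { struct mm_struct *mm; };

	/* Toy stand-in for the kernel's per-cpu "current" task pointer. */
	static struct task_struct *current;

	/* Stand-in for the arch hook provided by asm/mmu_context.h. */
	static void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			      struct task_struct *tsk)
	{
		(void)tsk;	/* unused in this toy version */
		printf("switch_mm: %s -> %s\n", prev->name, next->name);
	}

	/* Roughly the call made from the context-switch path in kernel/sched.c. */
	static void context_switch(struct task_struct *prev, struct task_struct *next)
	{
		switch_mm(prev->mm, next->mm, next);
		current = next;
	}

	/* Roughly the call made from exec once current->mm is the new mm. */
	static void activate_mm(struct mm_struct *prev, struct mm_struct *next)
	{
		switch_mm(prev, next, current);
	}

	int main(void)
	{
		struct mm_struct old_mm = { "old_mm" }, new_mm = { "new_mm" };
		struct task_struct a = { &old_mm }, b = { &old_mm };

		current = &a;
		context_switch(&a, &b);		/* scheduler switches tasks */

		b.mm = &new_mm;			/* exec installs a fresh mm ... */
		activate_mm(&old_mm, &new_mm);	/* ... then activates it */
		return 0;
	}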