Add kvm_el2_gb build target to produce a goto-program for CBMC.

This CL implements the following changes:
 * add a kvm_el2 make target to build the hypervisor binary only (i.e.
   without other kernel components)
 * add a kvm_el2_gb make target to produce a goto-program for the CBMC
   formal verification flow
 * add workarounds for build-related issues specific to the CBMC make
   target

To build the kvm_el2 binary:
ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig kvm_el2

To build a goto-program for CBMC:
ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig kvm_el2_gb
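
The resulting goto-program can then be analyzed with CBMC, e.g. (a
minimal sketch; harness_entry is a hypothetical entry function
supplied by the verification harness, and the unwinding bound depends
on the property being checked):
cbmc arch/arm64/kvm/hyp/nvhe/kvm_nvhe.gb --function harness_entry --unwind 1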

Bug: 216547537
Change-Id: Ic7b77c67e21dfd64ad14801f52b24c414b12d5af
diff --git a/.gitignore b/.gitignore
index 372e57a..ae9e0b1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@
 *.dtb.S
 *.dwo
 *.elf
+*.gb
 *.gcno
 *.gz
 *.i
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 07c5e0f..fe3fd45 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -183,6 +183,17 @@
 	$(Q)$(MAKE) $(clean)=$(boot)
 
 ifeq ($(KBUILD_EXTMOD),)
+
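+# Build only the EL2 hypervisor object (no other kernel components).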
+PHONY += kvm_el2
+kvm_el2: prepare
+	$(Q)$(MAKE) $(build)=arch/arm64/kvm/hyp/nvhe arch/arm64/kvm/hyp/nvhe/kvm_nvhe.o
+
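+# Produce a goto-program from the hypervisor build for the CBMC flow.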
+PHONY += kvm_el2_gb
+kvm_el2_gb: prepare
+	$(Q)$(MAKE) $(build)=arch/arm64/kvm/hyp/nvhe arch/arm64/kvm/hyp/nvhe/kvm_nvhe.gb
+
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
 # In order to do that, we should use the archprepare target, but we can't since
 # asm-offsets.h is included in some files used to generate vdso-offsets.h, and
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b54bbd2..dcb22b6 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -540,6 +540,7 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
 }
 
+#ifndef CBMC_PROVER
 /*
  * Fields that identify the version of the Performance Monitors Extension do
  * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
@@ -567,6 +568,7 @@ static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
 {
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
 }
+#endif
 
 static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
 {
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 592968f..6f4e0ac 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -93,10 +93,12 @@ extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
 				    unsigned long enabled);
 extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
 
+#ifndef CBMC_PROVER
 static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 {
 	return ptrauth_clear_pac(ptr);
 }
+#endif
 
 static __always_inline void ptrauth_enable(void)
 {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 0118a3b..a8b37f6 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -55,10 +55,17 @@ struct host_kvm {
 extern struct host_kvm host_kvm;
 
 typedef u32 pkvm_id;
+#ifndef CBMC_PROVER
 static const pkvm_id pkvm_host_id	= 0;
 static const pkvm_id pkvm_hyp_id	= (1 << 16);
 static const pkvm_id pkvm_ffa_id	= pkvm_hyp_id + 1; /* Secure world */
 static const pkvm_id pkvm_host_poison	= pkvm_ffa_id + 1;
+#else
+#define pkvm_host_id		((pkvm_id)0)
+#define pkvm_hyp_id		((pkvm_id)(1 << 16))
+#define pkvm_ffa_id		((pkvm_id)pkvm_hyp_id + 1)
+#define pkvm_host_poison	((pkvm_id)pkvm_ffa_id + 1)
+#endif
 
 extern unsigned long hyp_nr_cpus;
 
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 56aad42..4280042 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -6,6 +6,13 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
 
+incdir := $(srctree)/$(src)/../include
+asflags-y += 	-I$(incdir)
+ccflags-y += 	-I$(incdir)				\
+		-fno-stack-protector			\
+		-DDISABLE_BRANCH_PROFILING		\
+		$(DISABLE_STACKLEAK_PLUGIN)
+
 hostprogs := gen-hyprel
 HOST_EXTRACFLAGS += -I$(objtree)/include
 
@@ -72,6 +80,17 @@
 $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
 	$(call if_changed,hypcopy)
 
+# A rule to build a goto-program for CBMC formal verification.
+# Formal verification with CBMC does not need relocations, so
+# compiling and linking the object files is sufficient.
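+# goto-cc wraps the native compiler and linker, so the existing kbuild
+# rules transparently produce a goto binary instead of a regular object.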
+$(obj)/kvm_nvhe.gb: CC := goto-cc --native-compiler $(CC) --native-linker $(LD)
+$(obj)/kvm_nvhe.gb: LD := $(CC)
+$(obj)/kvm_nvhe.gb: ccflags-y := $(ccflags-y) -DCBMC_PROVER
+$(obj)/kvm_nvhe.gb: $(obj)/kvm_nvhe.tmp.o
+	cp $< $@
+
 # The HYPREL command calls `gen-hyprel` to generate an assembly file with
 # a list of relocations targeting hyp code/data.
 quiet_cmd_hyprel = HYPREL  $@
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 9b32772..9931e61 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -80,6 +80,7 @@ struct sys_reg_desc {
 #define REG_HIDDEN		(1 << 0) /* hidden from userspace and guest */
 #define REG_RAZ			(1 << 1) /* RAZ from userspace and guest */
 
+#ifndef CBMC_PROVER
 static __printf(2, 3)
 inline void print_sys_reg_msg(const struct sys_reg_params *p,
 				       char *fmt, ...)
@@ -99,6 +100,7 @@ static inline void print_sys_reg_instr(const struct sys_reg_params *p)
 	/* GCC warns on an empty format string */
 	print_sys_reg_msg(p, "%s", "");
 }
+#endif /* CBMC_PROVER */
 
 static inline bool ignore_write(struct kvm_vcpu *vcpu,
 				const struct sys_reg_params *p)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1bd3a03..3c18e10 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -811,6 +811,7 @@ static inline void __ftrace_enabled_restore(int enabled)
 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 
+#ifndef CBMC_PROVER
 static inline unsigned long get_lock_parent_ip(void)
 {
 	unsigned long addr = CALLER_ADDR0;
@@ -822,6 +823,7 @@ static inline unsigned long get_lock_parent_ip(void)
 		return addr;
 	return CALLER_ADDR2;
 }
+#endif
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 67a50c7..21b61af 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1148,10 +1148,12 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
  * things like PERF_SAMPLE_REGS_INTR.
  */
+#ifndef CBMC_PROVER
 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
+#endif
 
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53a..2533e93 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -170,12 +170,14 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_read_lock_bh(rwlock_t *lock)
 {
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
 }
+#endif
 
 static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
 {
@@ -197,12 +199,14 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_write_lock_bh(rwlock_t *lock)
 {
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
 }
+#endif
 
 static inline void __raw_write_lock(rwlock_t *lock)
 {
@@ -244,12 +248,14 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
 	preempt_enable();
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_read_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, _RET_IP_);
 	do_raw_read_unlock(lock);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
+#endif
 
 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
 					     unsigned long flags)
@@ -268,11 +274,13 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
 	preempt_enable();
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_write_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, _RET_IP_);
 	do_raw_write_unlock(lock);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
+#endif
 
 #endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 19a9be9..bc62ac3a 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -129,12 +129,14 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
 {
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
+#endif
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
@@ -169,6 +171,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 	preempt_enable();
 }
 
+#ifndef CBMC_PROVER
 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, _RET_IP_);
@@ -186,6 +189,7 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 	return 0;
 }
+#endif
 
 #include <linux/rwlock_api_smp.h>