KVM: arm64: Handle reclaiming lent memory regions as well as shared ones
Our implementation of FFA_MEM_RECLAIM initially only worked for memory
regions which had previously been shared with the secure world. Extend
it to also handle regions which were lent, distinguishing the two cases
by the state recorded in the host stage-2 page table.
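For each constituent of the region being reclaimed, the update path now
does the equivalent of the following (a simplified sketch; the real code
probes only the first constituent to decide which transition applies):

	if (lent)
		ret = __pkvm_host_reclaim_lent_secure_world(begin, size);
	else
		ret = __pkvm_host_reclaim_shared_secure_world(begin, size);

Reclaiming lent memory is modeled as a donation from the secure world
back to the host, reusing the existing check_donation() and do_donate()
paths.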
Signed-off-by: Andrew Walbran <qwandor@google.com>
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index c79bf4a..94ddad9 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -96,8 +96,10 @@ int __pkvm_host_share_secure_world(hpa_t host_addr, size_t size,
enum kvm_pgtable_prot mode);
int __pkvm_host_check_donate_secure_world(hpa_t host_addr, size_t size);
int __pkvm_host_donate_secure_world(hpa_t host_addr, size_t size);
-int __pkvm_host_check_reclaim_secure_world(hpa_t host_addr, size_t size);
-int __pkvm_host_reclaim_secure_world(hpa_t host_addr, size_t size);
+int __pkvm_host_check_reclaim_shared_secure_world(hpa_t host_addr, size_t size);
+int __pkvm_host_reclaim_shared_secure_world(hpa_t host_addr, size_t size);
+int __pkvm_host_check_reclaim_lent_secure_world(hpa_t host_addr, size_t size);
+int __pkvm_host_reclaim_lent_secure_world(hpa_t host_addr, size_t size);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa_memory.c b/arch/arm64/kvm/hyp/nvhe/ffa_memory.c
index 15ce0be..afeaebb 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa_memory.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa_memory.c
@@ -989,6 +989,7 @@ ffa_tee_reclaim_check_update(ffa_memory_handle_t handle,
struct arm_smccc_1_2_regs ret;
ffa_memory_region_flags_t tee_flags;
uint32_t i;
+ bool lent; /* was the region lent rather than shared? */
/*
* Make sure constituents are properly aligned to a 64-bit boundary. If
@@ -1005,14 +1006,33 @@ ffa_tee_reclaim_check_update(ffa_memory_handle_t handle,
}
/*
+ * Nothing here records whether this region was shared or lent, so probe
+ * the first constituent: if the check for reclaiming shared memory
+ * fails, assume the region was lent and check it as such below.
+ */
+ lent = __pkvm_host_check_reclaim_shared_secure_world(
+ constituents[0].address,
+ constituents[0].pg_cnt * FFA_PAGE_SIZE) != 0;
+
+ /*
* Check that the state transition is allowed for all constituents of
* the memory region being reclaimed, according to the host page table.
*/
for (i = 0; i < constituent_count; ++i) {
hpa_t begin = constituents[i].address;
size_t size = constituents[i].pg_cnt * FFA_PAGE_SIZE;
+ int check_ret;
- if (__pkvm_host_check_reclaim_secure_world(begin, size) != 0) {
+ if (lent) {
+ check_ret = __pkvm_host_check_reclaim_lent_secure_world(
+ begin, size);
+ } else {
+ check_ret =
+ __pkvm_host_check_reclaim_shared_secure_world(
+ begin, size);
+ }
+
+ if (check_ret != 0) {
pr_warn("Host tried to reclaim memory in invalid state.");
return ffa_error(FFA_RET_DENIED);
}
@@ -1041,8 +1061,17 @@ ffa_tee_reclaim_check_update(ffa_memory_handle_t handle,
for (i = 0; i < constituent_count; ++i) {
hpa_t begin = constituents[i].address;
size_t size = constituents[i].pg_cnt * FFA_PAGE_SIZE;
+ int reclaim_ret;
- if (__pkvm_host_reclaim_secure_world(begin, size) != 0) {
+ if (lent) {
+ reclaim_ret = __pkvm_host_reclaim_lent_secure_world(
+ begin, size);
+ } else {
+ reclaim_ret = __pkvm_host_reclaim_shared_secure_world(
+ begin, size);
+ }
+
+ if (reclaim_ret != 0) {
pr_warn("Failed to update host page table for reclaiming memory.");
ret = ffa_error(FFA_RET_NO_MEMORY);
// TODO: Roll back partial update.
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 5e2ee50..84e10ce 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -810,6 +810,14 @@ static int host_request_unshare(u64 *completer_addr,
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
+static int secure_world_request_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ *completer_addr = tx->initiator.host.completer_addr;
+ /* We cannot inspect the secure world's page tables; nothing to check. */
+ return 0;
+}
+
static int host_initiate_share(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
{
@@ -1412,6 +1420,9 @@ static int check_donation(struct pkvm_mem_donation *donation)
case PKVM_ID_HYP:
ret = hyp_request_donation(&completer_addr, tx);
break;
+ case PKVM_ID_SECURE_WORLD:
+ ret = secure_world_request_donation(&completer_addr, tx);
+ break;
default:
ret = -EINVAL;
}
@@ -1453,6 +1464,10 @@ static int __do_donate(struct pkvm_mem_donation *donation)
case PKVM_ID_HYP:
ret = hyp_initiate_donation(&completer_addr, tx);
break;
+ case PKVM_ID_SECURE_WORLD:
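+ /* Nothing to unmap on the secure side; reuse the request helper. */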
+ ret = secure_world_request_donation(&completer_addr, tx);
+ break;
default:
ret = -EINVAL;
}
@@ -1731,19 +1745,18 @@ int __pkvm_guest_unshare_host(struct kvm_vcpu *vcpu, u64 ipa)
}
/**
- * __pkvm_host_check_reclaim_secure_world() - Checks whether the host can
- * reclaim the given memory range
- * from the secure world.
+ * __pkvm_host_check_reclaim_shared_secure_world() - Checks whether the host can
+ * reclaim the given memory
+ * range which was shared with
+ * the secure world.
* @host_addr:
* The IPA of the start of the memory range in the host stage-2 page table.
* @size: The length of the memory range in bytes.
*
* Return: 0 if the transition is valid, or a negative error value if not.
*/
-int __pkvm_host_check_reclaim_secure_world(hpa_t host_addr, size_t size)
+int __pkvm_host_check_reclaim_shared_secure_world(hpa_t host_addr, size_t size)
{
- // TODO: This handles reclaiming memory that has been shared, but what
- // about memory that was lent?
struct pkvm_mem_share share = {
.tx = {
.nr_pages = size / PAGE_SIZE,
@@ -1765,15 +1778,16 @@ int __pkvm_host_check_reclaim_secure_world(hpa_t host_addr, size_t size)
}
/**
- * __pkvm_host_reclaim_secure_world() - Reclaims memory which the host
- * previously shared with the secure world.
+ * __pkvm_host_reclaim_shared_secure_world() - Reclaims memory which the host
+ * previously shared with the secure
+ * world.
* @host_addr:
* The IPA of the start of the memory range in the host stage-2 page table.
* @size: The length of the memory range in bytes.
*
* Return: 0 on success, or a negative error value on failure.
*/
-int __pkvm_host_reclaim_secure_world(hpa_t host_addr, size_t size)
+int __pkvm_host_reclaim_shared_secure_world(hpa_t host_addr, size_t size)
{
struct pkvm_mem_share share = {
.tx = {
@@ -1870,6 +1884,75 @@ int __pkvm_host_donate_secure_world(hpa_t host_addr, size_t size)
return do_donate(&donation);
}
+/**
+ * __pkvm_host_check_reclaim_lent_secure_world() - Checks whether the host can
+ * reclaim the given memory
+ * range which was lent to the
+ * secure world.
+ * @host_addr:
+ * The IPA of the start of the memory range in the host stage-2 page table.
+ * @size: The length of the memory range in bytes.
+ *
+ * Return: 0 if the reclaim transition is valid, or a negative error value if
+ * not.
+ */
+int __pkvm_host_check_reclaim_lent_secure_world(hpa_t host_addr, size_t size)
+{
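+ /*
+  * Reclaiming lent memory is modeled as the secure world donating the
+  * pages back to the host, hence the initiator and completer below.
+  */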
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = size / PAGE_SIZE,
+ .initiator = {
+ .id = PKVM_ID_SECURE_WORLD,
+ .addr = 0,
+ .host = {
+ .completer_addr = host_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HOST,
+ },
+ },
+ };
+
+ return check_donation(&donation);
+}
+
+/**
+ * __pkvm_host_reclaim_lent_secure_world() - Reclaims the given memory range
+ * which the host previously lent to
+ * the secure world.
+ * @host_addr:
+ * The IPA of the start of the memory range in the host stage-2 page table.
+ * @size: The length of the memory range in bytes.
+ *
+ * Return: 0 on success, or a negative error value on failure.
+ */
+int __pkvm_host_reclaim_lent_secure_world(hpa_t host_addr, size_t size)
+{
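+ /* As in the check above: model the reclaim as a donation to the host. */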
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = size / PAGE_SIZE,
+ .initiator = {
+ .id = PKVM_ID_SECURE_WORLD,
+ .addr = 0,
+ .host = {
+ .completer_addr = host_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HOST,
+ },
+ },
+ };
+
+ return do_donate(&donation);
+}
+
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
int ret;