/*
* Copyright 2022 The Hafnium Authors.
*
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/BSD-3-Clause.
*/
#include "hf/api.h"
#include "hf/arch/cpu.h"
#include "hf/arch/ffa.h"
#include "hf/arch/mm.h"
#include "hf/arch/other_world.h"
#include "hf/arch/plat/ffa.h"
#include "hf/arch/timer.h"
#include "hf/arch/vm.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/ffa_internal.h"
#include "hf/ffa_memory.h"
#include "hf/ffa_v1_0.h"
#include "hf/mm.h"
#include "hf/plat/console.h"
#include "hf/plat/interrupts.h"
#include "hf/spinlock.h"
#include "hf/static_assert.h"
#include "hf/std.h"
#include "hf/vm.h"
#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa.h"
#include "vmapi/hf/ffa_v1_0.h"
static_assert(sizeof(struct ffa_partition_info_v1_0) == 8,
"Partition information descriptor size doesn't match the one in "
"the FF-A 1.0 EAC specification, Table 82.");
static_assert(sizeof(struct ffa_partition_info) == 24,
"Partition information descriptor size doesn't match the one in "
"the FF-A 1.1 BETA0 EAC specification, Table 13.34.");
static_assert((sizeof(struct ffa_partition_info) & 7) == 0,
"Partition information descriptor must be a multiple of 8 bytes"
" for ffa_partition_info_get_regs to work correctly. Information"
" from this structure are returned in 8 byte registers and the"
" count of 8 byte registers is returned by the ABI.");
/*
* To eliminate the risk of deadlocks, we define a partial order for the
* acquisition of locks held concurrently by the same physical CPU. Our current
* ordering requirements are as follows:
*
* vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
*
* Locks of the same kind require the lock of lowest address to be locked first,
* see `sl_lock_both()`.
*/
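/*
* Illustrative sketch only, not part of the build: assuming a path needs both
* a VM lock and one of that VM's vCPU locks, the ordering above means they
* would be taken and released as follows:
*
*	sl_lock(&vm->lock);     (vm::lock before vcpu::lock)
*	sl_lock(&vcpu->lock);
*	...
*	sl_unlock(&vcpu->lock);
*	sl_unlock(&vm->lock);
*
* Two locks of the same kind (e.g. two vcpu::lock instances) would instead go
* through `sl_lock_both()`, which takes the lower-addressed lock first.
*/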
static_assert(HF_MAILBOX_SIZE == PAGE_SIZE,
"Currently, a page is mapped for the send and receive buffers so "
"the maximum request is the size of a page.");
static_assert(MM_PPOOL_ENTRY_SIZE >= HF_MAILBOX_SIZE,
"The page pool entry size must be at least as big as the mailbox "
"size, so that memory region descriptors can be copied from the "
"mailbox for memory sharing.");
/*
* The maximum number of ffa_partition_info entries that can be returned by an
* invocation of FFA_PARTITION_INFO_GET_REGS_64 is the size, in bytes, of the
* available registers/args in struct ffa_value divided by the size of struct
* ffa_partition_info. For this ABI, arg3-arg17 of ffa_value can be used, i.e.
* 15 uint64_t fields. For FF-A v1.1, this value should be 5.
*/
#define MAX_INFO_REGS_ENTRIES_PER_CALL \
((15 * sizeof(uint64_t)) / sizeof(struct ffa_partition_info))
static_assert(MAX_INFO_REGS_ENTRIES_PER_CALL == 5,
"FF-A v1.1 supports no more than 5 entries"
" per FFA_PARTITION_INFO_GET_REGS64 calls");
static struct mpool api_page_pool;
/**
* Initialises the API page pool by taking ownership of the contents of the
* given page pool.
*/
void api_init(struct mpool *ppool)
{
mpool_init_from(&api_page_pool, ppool);
}
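/*
* Illustrative sketch of the expected hand-over (hypothetical caller, not in
* this file): boot code builds a page pool and passes it in once, e.g.
*
*	struct mpool ppool;
*
*	mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
*	... add boot-time memory to the pool ...
*	api_init(&ppool);
*
* After this, the API allocates from `api_page_pool`, which has taken
* ownership of the given pool's contents.
*/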
/**
* Get the target VM's vCPU:
* if the VM is UP then return its first (and only) vCPU;
* if the VM is MP then return the vCPU whose index matches the current CPU
* index.
*/
struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
{
ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
struct vcpu *vcpu = NULL;
CHECK((vm != NULL) && (current != NULL));
if (vm->vcpu_count == 1) {
vcpu = vm_get_vcpu(vm, 0);
} else if (current_cpu_index < vm->vcpu_count) {
vcpu = vm_get_vcpu(vm, current_cpu_index);
}
return vcpu;
}
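/*
* Worked example of the mapping above (illustrative numbers): for a UP VM
* (vcpu_count == 1) every physical CPU gets vCPU 0; for an MP VM with
* vcpu_count == 4, physical CPU index 2 gets vCPU 2, while CPU index 5 is out
* of range and NULL is returned.
*/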
/**
* Switches the physical CPU back to the corresponding vCPU of the VM whose ID
* is given as an argument to the function.
*
* Called to change the context between SPs for direct messaging (when Hafnium
* is the SPMC), and in the context of the remaining 'api_switch_to_*'
* functions.
*
* This function works for partitions that are:
* - UP migratable.
* - MP with pinned Execution Contexts.
*/
static struct vcpu *api_switch_to_vm(struct vcpu *current,
struct ffa_value to_ret,
enum vcpu_state vcpu_state,
ffa_vm_id_t to_id)
{
struct vm *to_vm = vm_find(to_id);
struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current);
CHECK(next != NULL);
/* Set the return value for the target VM. */
arch_regs_set_retval(&next->regs, to_ret);
/* Set the current vCPU state. */
sl_lock(&current->lock);
current->state = vcpu_state;
sl_unlock(&current->lock);
return next;
}
/**
* Switches the physical CPU back to the corresponding vCPU of the primary VM.
*
* This triggers the scheduling logic to run. Run in the context of a secondary
* VM to cause FFA_RUN to return and the primary VM to regain control of the
* CPU.
*/
struct vcpu *api_switch_to_primary(struct vcpu *current,
struct ffa_value primary_ret,
enum vcpu_state secondary_state)
{
/*
* If the secondary is blocked but has a timer running, sleep until the
* timer fires rather than indefinitely.
*/
switch (primary_ret.func) {
case HF_FFA_RUN_WAIT_FOR_INTERRUPT:
case FFA_MSG_WAIT_32: {
if (arch_timer_enabled_current()) {
uint64_t remaining_ns =
arch_timer_remaining_ns_current();
if (remaining_ns == 0) {
/*
* Timer is pending, so the current vCPU should
* be run again right away.
*/
primary_ret = (struct ffa_value){
.func = FFA_INTERRUPT_32};
} else {
primary_ret.arg2 = remaining_ns;
}
} else {
primary_ret.arg2 = FFA_SLEEP_INDEFINITE;
}
break;
}
default:
/* Do nothing. */
break;
}
return api_switch_to_vm(current, primary_ret, secondary_state,
HF_PRIMARY_VM_ID);
}
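/*
* Worked example of the timer handling above (illustrative numbers): if a
* secondary returns FFA_MSG_WAIT_32 with its arch timer enabled and 5 ms
* remaining, the primary sees the call with arg2 = 5000000 ns and may sleep
* the vCPU for that long; if the timer has already expired (0 ns remaining),
* the primary instead sees FFA_INTERRUPT_32 and runs the vCPU again right
* away; with no timer enabled, arg2 = FFA_SLEEP_INDEFINITE.
*/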
/**
* Choose next vCPU to run to be the counterpart vCPU in the other
* world (run the normal world if currently running in the secure
* world). Set current vCPU state to the given vcpu_state parameter.
* Set FF-A return values to the target vCPU in the other world.
*
* Called in context of a direct message response from a secure
* partition to a VM.
*/
struct vcpu *api_switch_to_other_world(struct vcpu *current,
struct ffa_value other_world_ret,
enum vcpu_state vcpu_state)
{
return api_switch_to_vm(current, other_world_ret, vcpu_state,
HF_OTHER_WORLD_ID);
}
/**
* Returns true if the given vCPU is executing in the context of an
* FFA_MSG_SEND_DIRECT_REQ invocation.
*/
bool is_ffa_direct_msg_request_ongoing(struct vcpu_locked locked)
{
return locked.vcpu->direct_request_origin_vm_id != HF_INVALID_VM_ID;
}
/**
* Returns true if the VM owning the given vCPU supports managed exit and the
* vCPU is currently processing a managed exit.
*/
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
vcpu_locked.vcpu->processing_managed_exit);
}
/**
* Returns to the primary VM and signals that the vCPU still has work to do.
*/
struct vcpu *api_preempt(struct vcpu *current)
{
struct ffa_value ret = {
.func = FFA_INTERRUPT_32,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
return api_switch_to_primary(current, ret, VCPU_STATE_PREEMPTED);
}
/**
* Puts the current vCPU in wait for interrupt mode, and returns to the primary
* VM.
*/
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
struct ffa_value ret = {
.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
return api_switch_to_primary(current, ret,
VCPU_STATE_BLOCKED_INTERRUPT);
}
/**
* Puts the current vCPU in off mode, and returns to the primary VM.
*/
struct vcpu *api_vcpu_off(struct vcpu *current)
{
struct ffa_value ret = {
.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
/*
* Disable the timer, so the scheduler doesn't get told to call back
* based on it.
*/
arch_timer_disable_current();