diff --git a/scripts/config_defs_gen.c b/scripts/config_defs_gen.c index c70dabc5..0cc2ebd5 100644 --- a/scripts/config_defs_gen.c +++ b/scripts/config_defs_gen.c @@ -6,6 +6,21 @@ #include #include +static size_t remio_dev_num(void) +{ + size_t dev_num = 0; + for (size_t vm_id = 0; vm_id < config.vmlist_size; vm_id++) { + struct vm_config* vm_config = &config.vmlist[vm_id]; + for (size_t i = 0; i < vm_config->platform.remio_dev_num; i++) { + struct remio_dev* dev = &vm_config->platform.remio_devs[i]; + if (dev->type == REMIO_DEV_BACKEND) { + dev_num++; + } + } + } + return dev_num; +} + int main() { size_t vcpu_num = 0; for (size_t i = 0; i < config.vmlist_size; i++) { @@ -21,5 +36,7 @@ int main() { printf("#define CONFIG_HYP_BASE_ADDR PLAT_BASE_ADDR\n"); } + printf("#define CONFIG_REMIO_DEV_NUM %ld\n", remio_dev_num()); + return 0; } diff --git a/src/arch/armv8/aborts.c b/src/arch/armv8/aborts.c index 2bf440c2..c488212b 100644 --- a/src/arch/armv8/aborts.c +++ b/src/arch/armv8/aborts.c @@ -195,6 +195,9 @@ void aborts_sync_handler(void) abort_handler_t handler = abort_handlers[ec]; if (handler) { handler(iss, ipa_fault_addr, il, ec); + if (vcpu_arch_is_on(cpu()->vcpu) && !cpu()->vcpu->active) { + cpu_standby(); + } } else { ERROR("no handler for abort ec = 0x%x", ec); // unknown guest exception } diff --git a/src/arch/armv8/inc/arch/hypercall.h b/src/arch/armv8/inc/arch/hypercall.h index e39857a7..a8cdd1aa 100644 --- a/src/arch/armv8/inc/arch/hypercall.h +++ b/src/arch/armv8/inc/arch/hypercall.h @@ -6,6 +6,7 @@ #ifndef ARCH_HYPERCALL_H #define ARCH_HYPERCALL_H -#define HYPCALL_ARG_REG(ARG) ((ARG) + 1) +#define HYPCALL_IN_ARG_REG(ARG) ((ARG) + 1) +#define HYPCALL_OUT_ARG_REG(ARG) (HYPCALL_IN_ARG_REG(ARG)) #endif /* ARCH_HYPERCALL_H */ diff --git a/src/arch/riscv/inc/arch/hypercall.h b/src/arch/riscv/inc/arch/hypercall.h index 314de976..2b5d3445 100644 --- a/src/arch/riscv/inc/arch/hypercall.h +++ b/src/arch/riscv/inc/arch/hypercall.h @@ -6,6 +6,7 @@ #ifndef ARCH_HYPERCALL_H #define ARCH_HYPERCALL_H -#define HYPCALL_ARG_REG(ARG) ((ARG) + REG_A0) +#define HYPCALL_IN_ARG_REG(ARG) ((ARG) + REG_A0) +#define HYPCALL_OUT_ARG_REG(ARG) (HYPCALL_IN_ARG_REG((ARG) + 2)) #endif /* ARCH_HYPERCALL_H */ diff --git a/src/arch/riscv/sync_exceptions.c b/src/arch/riscv/sync_exceptions.c index e44c5414..ee61dfaa 100644 --- a/src/arch/riscv/sync_exceptions.c +++ b/src/arch/riscv/sync_exceptions.c @@ -152,5 +152,8 @@ void sync_exception_handler(void) ERROR("unkown synchronous exception (%d)", _scause); } - cpu()->vcpu->regs.sepc += pc_step; + vcpu_writepc(cpu()->vcpu, vcpu_readpc(cpu()->vcpu) + pc_step); + if (vcpu_arch_is_on(cpu()->vcpu) && !cpu()->vcpu->active) { + cpu_standby(); + } } diff --git a/src/core/hypercall.c b/src/core/hypercall.c index a4ca0372..40e44a44 100644 --- a/src/core/hypercall.c +++ b/src/core/hypercall.c @@ -4,21 +4,17 @@ */ #include -#include -#include -#include long int hypercall(unsigned long id) { long int ret = -HC_E_INVAL_ID; - unsigned long ipc_id = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(0)); - unsigned long arg1 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(1)); - unsigned long arg2 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(2)); - switch (id) { case HC_IPC: - ret = ipc_hypercall(ipc_id, arg1, arg2); + ret = ipc_hypercall(); + break; + case HC_REMIO: + ret = remio_hypercall(); break; default: WARNING("Unknown hypercall id %d", id); diff --git a/src/core/inc/config_defs.h b/src/core/inc/config_defs.h index 207a65d0..aa53e6a1 100644 --- a/src/core/inc/config_defs.h +++ 
b/src/core/inc/config_defs.h @@ -11,6 +11,7 @@ #define CONFIG_VCPU_NUM 1 #define CONFIG_VM_NUM 1 #define CONFIG_HYP_BASE_ADDR 0 +#define CONFIG_REMIO_DEV_NUM 0 #else /* GENERATING_DEFS */ diff --git a/src/core/inc/hypercall.h b/src/core/inc/hypercall.h index cb510c47..a8379e28 100644 --- a/src/core/inc/hypercall.h +++ b/src/core/inc/hypercall.h @@ -8,14 +8,24 @@ #include #include +#include -enum { HC_INVAL = 0, HC_IPC = 1 }; +enum { HC_INVAL = 0, HC_IPC = 1, HC_REMIO = 2 }; enum { HC_E_SUCCESS = 0, HC_E_FAILURE = 1, HC_E_INVAL_ID = 2, HC_E_INVAL_ARGS = 3 }; -typedef unsigned long (*hypercall_handler)(unsigned long arg0, unsigned long arg1, - unsigned long arg2); +typedef unsigned long (*hypercall_handler)(void); long int hypercall(unsigned long id); +static inline unsigned long hypercall_get_arg(struct vcpu* vcpu, size_t arg_index) +{ + return vcpu_readreg(vcpu, HYPCALL_IN_ARG_REG(arg_index)); +} + +static inline void hypercall_set_ret(struct vcpu* vcpu, size_t arg_index, unsigned long arg_val) +{ + vcpu_writereg(vcpu, HYPCALL_OUT_ARG_REG(arg_index), arg_val); +} + #endif /* HYPERCALL_H */ diff --git a/src/core/inc/ipc.h b/src/core/inc/ipc.h index 8350af49..3343c648 100644 --- a/src/core/inc/ipc.h +++ b/src/core/inc/ipc.h @@ -19,7 +19,7 @@ struct ipc { struct vm_config; -long int ipc_hypercall(unsigned long arg0, unsigned long arg1, unsigned long arg2); +long int ipc_hypercall(void); void ipc_init(void); #endif /* IPC_H */ diff --git a/src/core/inc/objpool.h b/src/core/inc/objpool.h index 33957d76..5cceeb1f 100644 --- a/src/core/inc/objpool.h +++ b/src/core/inc/objpool.h @@ -32,6 +32,15 @@ struct objpool { void objpool_init(struct objpool* objpool); void* objpool_alloc(struct objpool* objpool); +void* objpool_alloc_with_id(struct objpool* objpool, objpool_id_t* id); void objpool_free(struct objpool* objpool, void* obj); +inline void* objpool_get_by_id(struct objpool* objpool, objpool_id_t id) +{ + if (id < objpool->num) { + return (void*)((uintptr_t)objpool->pool + (objpool->objsize * id)); + } + return NULL; +} + #endif /* OBJPOOL_H */ diff --git a/src/core/inc/remio.h b/src/core/inc/remio.h new file mode 100644 index 00000000..6079036b --- /dev/null +++ b/src/core/inc/remio.h @@ -0,0 +1,84 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +/** + * @file remio.h + * @brief This header file contains the Remote I/O device interface + */ + +#ifndef REMIO_H +#define REMIO_H + +#include +#include +#include +#include + +/** + * @struct remio_shmem + * @brief This structure represents a shared memory region used by a Remote I/O device + */ +struct remio_shmem { + paddr_t base; /**< Shared memory base address */ + size_t size; /**< Shared memory size */ + size_t shmem_id; /**< Shared memory ID */ +}; + +/** + * @enum REMIO_DEV_TYPE + * @brief This enum represents the Remote I/O device type + */ +enum REMIO_DEV_TYPE { + REMIO_DEV_FRONTEND = 0, /**< Remote I/O frontend device */ + REMIO_DEV_BACKEND /**< Remote I/O backend device */ +}; + +/** + * @struct remio_dev + * @brief This structure represents a Remote I/O device + * @note The device can be either a frontend (driver) or a backend (device) + */ +struct remio_dev { + vaddr_t va; /**< Frontend MMIO base virtual address */ + size_t size; /**< Frontend MMIO size */ + irqid_t interrupt; /**< Frontend/backend interrupt number */ + remio_bind_key_t bind_key; /**< Remote I/O bind key */ + enum REMIO_DEV_TYPE type; /**< Type of the Remote I/O device */ + struct remio_shmem shmem; /**< Shared memory region */ + struct emul_mem emul; /**< Frontend MMIO emulation memory */ +}; + +/** + * @brief Remote I/O device initialization routine + * @note Executed only once by the master CPU + */ +void remio_init(void); + +/** + * @brief Remote I/O device VM CPU assignment routine + * @note Executed by each VM that holds a Remote I/O device; it is responsible for + * assigning the frontend or backend CPU ID for the respective Remote I/O device. + * If the VM was allocated with more than one CPU, the assigned CPU will be the + * one with the lowest ID, since only one CPU is required to inject VM interrupts + * @param vm Pointer to the VM structure + */ +void remio_assign_vm_cpus(struct vm* vm); + +/** + * @brief Remote I/O hypercall callback + * @note Used to exchange information between the Remote I/O system and the backend VM + * @return Returns the number of pending I/O requests + */ +long int remio_hypercall(void); + +/** + * @brief Remote I/O MMIO emulation handler + * @note Executed by the frontend VM when an MMIO access is performed + * @param emul_access Holds the information about the MMIO access + * @return Returns true if handled successfully, false otherwise + */ +bool remio_mmio_emul_handler(struct emul_access* emul_access); + +#endif /* REMIO_H */ diff --git a/src/core/inc/types.h b/src/core/inc/types.h index e9cf250a..234f884a 100644 --- a/src/core/inc/types.h +++ b/src/core/inc/types.h @@ -48,6 +48,10 @@ typedef unsigned irqid_t; typedef unsigned deviceid_t; +typedef size_t objpool_id_t; + +typedef size_t remio_bind_key_t; + typedef enum AS_SEC { /*--- HYP AS SECTIONS -----*/ SEC_HYP_GLOBAL = 0, diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h index dd8dcaa2..2d0686ed 100644 --- a/src/core/inc/vm.h +++ b/src/core/inc/vm.h @@ -17,6 +17,7 @@ #include #include #include +#include struct vm_mem_region { paddr_t base; @@ -47,6 +48,9 @@ struct vm_platform { size_t dev_num; struct vm_dev_region* devs; + size_t remio_dev_num; + struct remio_dev* remio_devs; + // /** // * In MPU-based platforms which might also support virtual memory // * (i.e.
aarch64 cortex-r) the hypervisor sets up the VM using an MPU by @@ -84,6 +88,9 @@ struct vm { size_t ipc_num; struct ipc* ipcs; + + size_t remio_dev_num; + struct remio_dev* remio_devs; }; struct vcpu { diff --git a/src/core/ipc.c b/src/core/ipc.c index f5dc7433..df24cbd3 100644 --- a/src/core/ipc.c +++ b/src/core/ipc.c @@ -58,9 +58,10 @@ static void ipc_handler(uint32_t event, uint64_t data) } CPU_MSG_HANDLER(ipc_handler, IPC_CPUMSG_ID) -long int ipc_hypercall(unsigned long ipc_id, unsigned long ipc_event, unsigned long arg2) +long int ipc_hypercall(void) { - UNUSED_ARG(arg2); + unsigned long ipc_id = hypercall_get_arg(cpu()->vcpu, 0); + unsigned long ipc_event = hypercall_get_arg(cpu()->vcpu, 1); long int ret = -HC_E_SUCCESS; diff --git a/src/core/objects.mk b/src/core/objects.mk index 89cbdaa4..755b7c88 100644 --- a/src/core/objects.mk +++ b/src/core/objects.mk @@ -14,3 +14,4 @@ core-objs-y+=ipc.o core-objs-y+=objpool.o core-objs-y+=hypercall.o core-objs-y+=shmem.o +core-objs-y+=remio.o diff --git a/src/core/objpool.c b/src/core/objpool.c index 11fa8234..9665dad6 100644 --- a/src/core/objpool.c +++ b/src/core/objpool.c @@ -12,7 +12,7 @@ void objpool_init(struct objpool* objpool) memset(objpool->bitmap, 0, BITMAP_SIZE(objpool->num)); } -void* objpool_alloc(struct objpool* objpool) +void* objpool_alloc_with_id(struct objpool* objpool, objpool_id_t* id) { void* obj = NULL; spin_lock(&objpool->lock); @@ -21,10 +21,18 @@ void* objpool_alloc(struct objpool* objpool) bitmap_set(objpool->bitmap, (size_t)n); obj = (void*)((uintptr_t)objpool->pool + (objpool->objsize * (size_t)n)); } + if (id != NULL) { + *id = (objpool_id_t)n; + } spin_unlock(&objpool->lock); return obj; } +void* objpool_alloc(struct objpool* objpool) +{ + return objpool_alloc_with_id(objpool, NULL); +} + void objpool_free(struct objpool* objpool, void* obj) { vaddr_t obj_addr = (vaddr_t)obj; diff --git a/src/core/remio.c b/src/core/remio.c new file mode 100644 index 00000000..9aa2b9b8 --- /dev/null +++ b/src/core/remio.c @@ -0,0 +1,626 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +/** + * @file remio.c + * @brief This source file contains the Remote I/O implementation + */ + +#include +#include +#include +#include +#include +#include +#include + +#define REMIO_VCPU_NUM PLAT_CPU_NUM +#define REMIO_NUM_DEV_TYPES (REMIO_DEV_BACKEND - REMIO_DEV_FRONTEND + 1) +#define REMIO_NEXT_PENDING_REQUEST -1 + +/** + * @enum REMIO_HYP_EVENT + * @brief This enum represents the Remote I/O hypercall events + * @note Used by the backend VM to specify the operation to be performed + */ +enum REMIO_HYP_EVENT { + REMIO_HYP_WRITE, /**< Write operation */ + REMIO_HYP_READ, /**< Read operation */ + REMIO_HYP_ASK, /**< Ask operation (used to request a new pending I/O request) */ + REMIO_HYP_NOTIFY, /**< Notify operation (used to signal a buffer or configuration change) */ +}; + +/** + * @enum REMIO_CPU_MSG_EVENT + * @brief This enum represents the Remote I/O CPU message events + */ +enum REMIO_CPU_MSG_EVENT { + REMIO_CPU_MSG_WRITE, /**< Write notification */ + REMIO_CPU_MSG_READ, /**< Read notification */ + REMIO_CPU_MSG_NOTIFY, /**< Notify notification (used to inject an interrupt into the + frontend or backend VM) */ +}; + +/** + * @enum REMIO_STATE + * @brief This enum represents the I/O request states + */ +enum REMIO_STATE { + REMIO_STATE_FREE, /**< The I/O request slot is free */ + REMIO_STATE_PENDING, /**< The I/O request is pending to be processed by the backend VM */ + REMIO_STATE_PROCESSING, /**< The I/O request is being processed by the backend VM */ + REMIO_STATE_COMPLETE, /**< The I/O request was completed by the backend VM but not yet + observed as complete by the frontend VM */ +}; + +/** + * @union remio_cpu_msg_data + * @brief This union represents the Remote I/O CPU message data + */ +union remio_cpu_msg_data { + struct { + uint8_t remio_bind_key; /**< Remote I/O bind key */ + uint8_t request_id; /**< Remote I/O request ID */ + uint8_t interrupt; /**< Interrupt ID */ + }; + uint64_t raw; /**< Raw data */ +}; + +/** + * @struct remio_request + * @brief This structure contains the information of a Remote I/O request + */ +struct remio_request { + node_t node; /**< Node */ + objpool_id_t id; /**< Request ID */ + remio_bind_key_t bind_key; /**< Remote I/O bind key associated with the request */ + vaddr_t addr; /**< Address of the accessed MMIO register */ + unsigned long access_width; /**< Access width */ + unsigned long op; /**< MMIO operation type (read or write) */ + unsigned long value; /**< Value to be written or read */ + unsigned long reg; /**< vCPU register used during the MMIO access */ + cpuid_t cpu_id; /**< CPU ID of the frontend VM that performed the MMIO access */ + enum REMIO_STATE state; /**< I/O request state */ +}; + +/** + * @struct remio_device_config + * @brief This structure holds the configuration of a Remote I/O device + */ +struct remio_device_config { + struct { + remio_bind_key_t bind_key; /**< Backend bind key */ + cpuid_t cpu_id; /**< Backend VM CPU ID */ + vmid_t vm_id; /**< Backend VM ID */ + irqid_t interrupt; /**< Backend interrupt ID */ + struct remio_shmem shmem; /**< Backend shared memory region */ + bool ready; /**< Backend ready flag */ + } backend; + struct { + remio_bind_key_t bind_key; /**< Frontend bind key */ + cpuid_t cpu_id; /**< Frontend VM CPU ID */ + vmid_t vm_id; /**< Frontend VM ID */ + irqid_t interrupt; /**< Frontend interrupt ID */ + struct remio_shmem shmem; /**< Frontend shared memory region */ + bool ready; /**< Frontend ready flag */ + } frontend; +}; + +/** + * @struct remio_device + * @brief This structure comprises all the
information needed about a Remote I/O device + */ +struct remio_device { + node_t node; /**< Node */ + volatile bool ready; /**< Remote I/O device ready flag */ + remio_bind_key_t bind_key; /**< Remote I/O bind key */ + struct remio_device_config config; /**< Remote I/O device configuration */ + struct list pending_requests_list; /**< List of pending I/O requests */ +}; + +/** List of Remote I/O devices */ +struct list remio_device_list; + +/** + * @brief Remote I/O CPU message handler + * @param event Message event (REMIO_CPU_MSG_*) + * @param data Remote I/O CPU message data (remio_cpu_msg_data) + */ +static void remio_cpu_msg_handler(uint32_t event, uint64_t data); + +/** Associate the Remote I/O CPU message handler with a new Remote I/O CPU message ID */ +CPU_MSG_HANDLER(remio_cpu_msg_handler, REMIO_CPUMSG_ID) + +/** Object pool to allocate Remote I/O devices */ +OBJPOOL_ALLOC(remio_device_pool, struct remio_device, + CONFIG_REMIO_DEV_NUM ? CONFIG_REMIO_DEV_NUM : 1); + +/** Object pool to allocate Remote I/O requests */ +OBJPOOL_ALLOC(remio_request_pool, struct remio_request, REMIO_VCPU_NUM); + +/** + * @brief Creates a new Remote I/O request based on the MMIO access information + * @param device Pointer to the Remote I/O device + * @param acc Pointer to the emul_access structure containing the MMIO access information + * @return Returns true if the operation was successful, false otherwise + */ +static bool remio_create_request(struct remio_device* device, struct emul_access* acc) +{ + objpool_id_t id; + struct remio_request* request = objpool_alloc_with_id(&remio_request_pool, &id); + if (request == NULL) { + return false; + } + + request->id = id; + request->addr = acc->addr; + request->reg = acc->reg; + request->access_width = acc->width; + request->state = REMIO_STATE_PENDING; + request->cpu_id = cpu()->id; + request->bind_key = device->bind_key; + + if (acc->write) { + long unsigned int value = vcpu_readreg(cpu()->vcpu, acc->reg); + request->op = REMIO_HYP_WRITE; + request->value = value; + } else { + request->op = REMIO_HYP_READ; + request->value = 0; + } + + list_push(&device->pending_requests_list, (node_t*)request); + + return true; +} + +/** + * @brief Gets the Remote I/O request + * @param device Pointer to the Remote I/O device + * @param id Remote I/O request ID + * @return Returns the Remote I/O request or NULL if the request was not found + */ +static inline struct remio_request* remio_get_request(struct remio_device* device, long int id) +{ + struct remio_request* request = NULL; + + if (id == REMIO_NEXT_PENDING_REQUEST) { + request = (struct remio_request*)list_pop(&device->pending_requests_list); + } else if (id >= 0) { + request = objpool_get_by_id(&remio_request_pool, (objpool_id_t)id); + } + + if (request == NULL || request->bind_key != device->bind_key) { + return NULL; + } + + return request; +} + +/** + * @brief Checks if the Remote I/O device has pending I/O requests + * @param device Pointer to the Remote I/O device + * @return Returns true if the Remote I/O device has pending I/O requests, false otherwise + */ +static inline bool remio_has_pending_request(struct remio_device* device) +{ + return !list_empty(&device->pending_requests_list); +} + +/** + * @brief Finds the Remote I/O device based on the Remote I/O bind key + * @param bind_key Remote I/O bind key + * @return Returns the Remote I/O device or NULL if the device was not found + */ +static struct remio_device* remio_find_dev_by_bind_key(remio_bind_key_t bind_key) +{ + struct remio_device* device =
NULL; + list_foreach (remio_device_list, struct remio_device, dev) { + if (bind_key == dev->bind_key) { + device = dev; + break; + } + } + return device; +} + +/** + * @brief Finds the Remote I/O device associated with a VM based on the Remote I/O bind key + * @param vm Pointer to the VM structure + * @param bind_key Remote I/O bind key + * @return Returns the Remote I/O device or NULL if the device was not found + */ +static struct remio_device* remio_find_vm_dev_by_bind_key(struct vm* vm, unsigned long bind_key) +{ + struct remio_dev* dev = NULL; + struct remio_device* device = NULL; + + /** Find the Remote I/O device VM configuration based on the Remote I/O bind key */ + for (size_t i = 0; i < vm->remio_dev_num; i++) { + dev = &vm->remio_devs[i]; + if (dev->bind_key == bind_key) { + break; + } + } + + if (dev != NULL) { + /** Find the Remote I/O device based on the Remote I/O bind key */ + device = remio_find_dev_by_bind_key(bind_key); + if (device != NULL) { + /** Check if the Remote I/O device is associated with the VM */ + if ((dev->type == REMIO_DEV_BACKEND && vm->id == device->config.backend.vm_id) || + (dev->type == REMIO_DEV_FRONTEND && vm->id == device->config.frontend.vm_id)) { + return device; + } + } + } + + return NULL; +} + +/** + * @brief Finds the Remote I/O device associated with a VM based on the MMIO access address + * @param vm Pointer to the VM structure + * @param addr MMIO access address + * @return Returns the Remote I/O device or NULL if the device was not found + */ +static struct remio_device* remio_find_vm_dev_by_addr(struct vm* vm, unsigned long addr) +{ + struct remio_dev* dev = NULL; + + for (size_t i = 0; i < vm->remio_dev_num; i++) { + dev = &vm->remio_devs[i]; + if (in_range(addr, dev->va, dev->size)) { + break; + } + } + + if (dev == NULL) { + return NULL; + } + + return remio_find_vm_dev_by_bind_key(vm, dev->bind_key); +} + +/** + * @brief Sends a Remote I/O CPU message to the target CPU + * @param event Message event (REMIO_CPU_MSG_*) + * @param target_cpu Target CPU ID + * @param remio_bind_key Remote I/O bind key + * @param request_id Remote I/O request ID + * @param interrupt Interrupt ID + */ +static void remio_cpu_send_msg(enum REMIO_CPU_MSG_EVENT event, unsigned long target_cpu, + unsigned long remio_bind_key, unsigned long request_id, unsigned long interrupt) +{ + union remio_cpu_msg_data data = { + .remio_bind_key = (uint8_t)remio_bind_key, + .request_id = (uint8_t)request_id, + .interrupt = (uint8_t)interrupt, + }; + struct cpu_msg msg = { (uint32_t)REMIO_CPUMSG_ID, event, data.raw }; + cpu_send_msg(target_cpu, &msg); +} + +void remio_init(void) +{ + size_t counter[REMIO_NUM_DEV_TYPES] = { 0 }; + + /** Only execute the Remote I/O initialization routine on the master CPU */ + if (!cpu_is_master()) { + return; + } + + objpool_init(&remio_device_pool); + objpool_init(&remio_request_pool); + list_init(&remio_device_list); + + /** Create the Remote I/O devices based on the VM configuration */ + for (size_t vm_id = 0; vm_id < config.vmlist_size; vm_id++) { + struct vm_config* vm_config = &config.vmlist[vm_id]; + for (size_t i = 0; i < vm_config->platform.remio_dev_num; i++) { + struct remio_dev* dev = &vm_config->platform.remio_devs[i]; + struct remio_device* device = NULL; + list_foreach (remio_device_list, struct remio_device, remio_device) { + if ((dev->bind_key == remio_device->config.backend.bind_key && + dev->type == REMIO_DEV_BACKEND) || + (dev->bind_key == remio_device->config.frontend.bind_key && + dev->type == REMIO_DEV_FRONTEND)) { + 
ERROR("Failed to link backend to the frontend, more than one %s was " + "atributed to the Remote I/O device %d", + dev->type == REMIO_DEV_BACKEND ? "backend" : "frontend", dev->bind_key); + } else if ((dev->type == REMIO_DEV_BACKEND && + dev->bind_key == remio_device->config.frontend.bind_key) || + (dev->type == REMIO_DEV_FRONTEND && + dev->bind_key == remio_device->config.backend.bind_key)) { + device = remio_device; + break; + } + } + if (device == NULL) { + device = objpool_alloc(&remio_device_pool); + if (device == NULL) { + ERROR("Failed creating Remote I/O device %d", dev->bind_key); + } + device->ready = false; + device->bind_key = dev->bind_key; + device->config.backend.bind_key = (remio_bind_key_t)-1; + device->config.frontend.bind_key = (remio_bind_key_t)-1; + list_init(&device->pending_requests_list); + list_push(&remio_device_list, (node_t*)device); + } + if (dev->type == REMIO_DEV_BACKEND) { + device->config.backend.bind_key = dev->bind_key; + device->config.backend.shmem = dev->shmem; + device->config.backend.ready = false; + } else if (dev->type == REMIO_DEV_FRONTEND) { + device->config.frontend.bind_key = dev->bind_key; + device->config.frontend.shmem = dev->shmem; + device->config.frontend.ready = false; + } else { + ERROR("Unknown Remote I/O device type"); + } + counter[dev->type]++; + } + } + + /** Check if there is a 1-to-1 mapping between a Remote I/O backend and Remote I/O frontend */ + if (counter[REMIO_DEV_FRONTEND] != counter[REMIO_DEV_BACKEND]) { + ERROR("There is no 1-to-1 mapping between a Remote I/O backend and Remote I/O frontend"); + } + + /** Check if the shared memory regions are correctly configured */ + list_foreach (remio_device_list, struct remio_device, dev) { + if (dev->config.backend.shmem.base != dev->config.frontend.shmem.base || + dev->config.backend.shmem.size != dev->config.frontend.shmem.size || + dev->config.backend.shmem.shmem_id != dev->config.frontend.shmem.shmem_id) { + ERROR("Invalid shared memory region configuration for Remote I/O device %d.\n" + "The frontend and backend shared memory regions must be the aligned.", + dev->bind_key); + } + } + + /** Update the Remote I/O device configuration */ + for (size_t vm_id = 0; vm_id < config.vmlist_size; vm_id++) { + struct vm_config* vm_config = &config.vmlist[vm_id]; + for (size_t i = 0; i < vm_config->platform.remio_dev_num; i++) { + struct remio_dev* dev = &vm_config->platform.remio_devs[i]; + struct remio_device* device = remio_find_dev_by_bind_key(dev->bind_key); + if (device == NULL) { + ERROR("Failed to find Remote I/O device %d", dev->bind_key); + } + if (dev->type == REMIO_DEV_BACKEND) { + device->config.backend.vm_id = vm_id; + device->config.backend.interrupt = dev->interrupt; + device->config.backend.cpu_id = (cpuid_t)-1; + } else if (dev->type == REMIO_DEV_FRONTEND) { + device->config.frontend.vm_id = vm_id; + device->config.frontend.interrupt = dev->interrupt; + device->config.frontend.cpu_id = (cpuid_t)-1; + } else { + ERROR("Unknown Remote I/O device type"); + } + } + } +} + +void remio_assign_vm_cpus(struct vm* vm) +{ + list_foreach (remio_device_list, struct remio_device, dev) { + if (vm->id == dev->config.backend.vm_id) { + dev->config.backend.cpu_id = min(dev->config.backend.cpu_id, cpu()->id); + dev->config.backend.ready = true; + } else if (vm->id == dev->config.frontend.vm_id) { + dev->config.frontend.cpu_id = min(dev->config.frontend.cpu_id, cpu()->id); + dev->config.frontend.ready = true; + } + dev->ready = dev->config.backend.ready && dev->config.frontend.ready; + } 
+} + +/** + * @brief Handles the Remote I/O ask operation + * @param addr Should always be zero (convention) + * @param value Should always be zero (convention) + * @param device Pointer to the Remote I/O device + * @return Returns the number of pending I/O requests + */ +static long int remio_handle_ask(unsigned long addr, unsigned long value, + struct remio_device* device) +{ + long int ret = -HC_E_SUCCESS; + bool has_pending_requests = false; + struct remio_request* request = NULL; + + /** By convention, the addr and value fields must be zero */ + if (addr != 0 || value != 0) { + return HC_E_FAILURE; + } + + /** Get the next pending I/O request */ + request = remio_get_request(device, REMIO_NEXT_PENDING_REQUEST); + + if (request == NULL || request->state != REMIO_STATE_PENDING) { + return HC_E_FAILURE; + } + + /** Check if the Remote I/O device has other pending I/O requests */ + has_pending_requests = remio_has_pending_request(device); + + /** Update the I/O request state as processing */ + request->state = REMIO_STATE_PROCESSING; + + /** Write the I/O request information to the backend VM's vCPU registers */ + hypercall_set_ret(cpu()->vcpu, 0, request->addr); + hypercall_set_ret(cpu()->vcpu, 1, request->op); + hypercall_set_ret(cpu()->vcpu, 2, request->value); + hypercall_set_ret(cpu()->vcpu, 3, request->access_width); + hypercall_set_ret(cpu()->vcpu, 4, request->id); + hypercall_set_ret(cpu()->vcpu, 5, (unsigned long)has_pending_requests); + + return ret; +} + +/** + * @brief Handles the Remote I/O read and write operations + * @param value Value to be written or read + * @param request_id Remote I/O request ID + * @param device Pointer to the Remote I/O device + * @return Returns true if the operation was successful, false otherwise + */ +static bool remio_handle_rw(unsigned long value, unsigned long request_id, + struct remio_device* device) +{ + struct remio_request* request = remio_get_request(device, (long int)request_id); + if (request == NULL) { + return false; + } + if (request->state != REMIO_STATE_PROCESSING) { + return false; + } + request->value = value; + request->state = REMIO_STATE_COMPLETE; + return true; +} + +/** + * @brief Performs the post work after the completion of the I/O request + * @note This function is executed by the frontend VM and is responsible for updating the + * vCPU register in case of a read operation and activating the frontend vCPU + * @param event Message event (REMIO_CPU_MSG_*) + * @param remio_bind_key Remote I/O bind key + * @param request_id Remote I/O request ID + * @return Returns true if the operation was successful, false otherwise + */ +static bool remio_cpu_post_work(uint32_t event, uint8_t remio_bind_key, uint8_t request_id) +{ + struct remio_device* device = remio_find_dev_by_bind_key(remio_bind_key); + if (device == NULL) { + return false; + } + + struct remio_request* request = remio_get_request(device, (long int)request_id); + if (request == NULL) { + return false; + } + + switch (event) { + case REMIO_CPU_MSG_READ: + vcpu_writereg(cpu()->vcpu, request->reg, request->value); + break; + default: + break; + } + + request->state = REMIO_STATE_FREE; + objpool_free(&remio_request_pool, request); + cpu()->vcpu->active = true; + + return true; +} + +long int remio_hypercall(void) +{ + long int ret = -HC_E_SUCCESS; + unsigned long dm_id = hypercall_get_arg(cpu()->vcpu, 0); + unsigned long addr = hypercall_get_arg(cpu()->vcpu, 1); + unsigned long op = hypercall_get_arg(cpu()->vcpu, 2); + unsigned long value = 
hypercall_get_arg(cpu()->vcpu, 3); + unsigned long request_id = hypercall_get_arg(cpu()->vcpu, 4); + struct remio_device* device = NULL; + struct vm* vm = cpu()->vcpu->vm; + + /** Check if the device model ID is within the valid range */ + if (dm_id >= vm->remio_dev_num) { + return -HC_E_INVAL_ARGS; + } + + /** Get the Remote I/O bind key based on the device model ID */ + unsigned long remio_dev_bind_key = vm->remio_devs[dm_id].bind_key; + + /** Find the Remote I/O device associated with the current backend VM */ + device = remio_find_vm_dev_by_bind_key(vm, remio_dev_bind_key); + if (device == NULL) { + return -HC_E_FAILURE; + } + + switch (op) { + case REMIO_HYP_WRITE: + case REMIO_HYP_READ: + if (remio_handle_rw(value, request_id, device)) { + struct remio_request* request = remio_get_request(device, (long int)request_id); + if (request == NULL) { + return -HC_E_FAILURE; + } + /** Send a CPU message to the backend VM to execute the post work */ + remio_cpu_send_msg(op == REMIO_HYP_WRITE ? REMIO_CPU_MSG_WRITE : REMIO_CPU_MSG_READ, + request->cpu_id, remio_dev_bind_key, request_id, 0); + } else { + ret = -HC_E_FAILURE; + } + break; + case REMIO_HYP_ASK: + ret = remio_handle_ask(addr, value, device); + break; + case REMIO_HYP_NOTIFY: + /** Send a CPU message to the frontend VM to inject an interrupt */ + remio_cpu_send_msg(REMIO_CPU_MSG_NOTIFY, device->config.frontend.cpu_id, 0, 0, + device->config.frontend.interrupt); + break; + default: + ret = -HC_E_INVAL_ARGS; + break; + } + + return ret; +} + +bool remio_mmio_emul_handler(struct emul_access* acc) +{ + struct remio_device* device = NULL; + + /** Find the Remote I/O device based on the MMIO access address */ + device = remio_find_vm_dev_by_addr(cpu()->vcpu->vm, acc->addr); + if (device == NULL) { + return false; + } + + /** Wait until the Remote I/O device is ready */ + while (!device->ready) + ; + + /** Create a new Remote I/O request based on the MMIO access information */ + if (!remio_create_request(device, acc)) { + return false; + } + + /** Send a CPU message to the backend VM to then inject an interrupt */ + remio_cpu_send_msg(REMIO_CPU_MSG_NOTIFY, device->config.backend.cpu_id, 0, 0, + device->config.backend.interrupt); + + /** Pause the current vCPU to wait for the MMIO emulation to be completed */ + cpu()->vcpu->active = false; + + return true; +} + +static void remio_cpu_msg_handler(uint32_t event, uint64_t data) +{ + union remio_cpu_msg_data msg = { .raw = data }; + switch (event) { + case REMIO_CPU_MSG_WRITE: + case REMIO_CPU_MSG_READ: + if (!remio_cpu_post_work(event, msg.remio_bind_key, msg.request_id)) { + ERROR("Failed to perform the post work after the completion of the I/O request"); + } + break; + case REMIO_CPU_MSG_NOTIFY: + vcpu_inject_irq(cpu()->vcpu, msg.interrupt); + break; + default: + WARNING("Unknown Remote I/O CPU message event"); + break; + } +} diff --git a/src/core/vm.c b/src/core/vm.c index 553aa211..7d4b7d0c 100644 --- a/src/core/vm.c +++ b/src/core/vm.c @@ -9,6 +9,7 @@ #include #include #include +#include static void vm_master_init(struct vm* vm, const struct vm_config* vm_config, vmid_t vm_id) { @@ -225,6 +226,57 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* vm_config) } } +static void vm_init_remio_dev(struct vm* vm, struct remio_dev* remio_dev) +{ + struct shmem* shmem = shmem_get(remio_dev->shmem.shmem_id); + if (shmem == NULL) { + ERROR("Invalid shmem id (%d) in the Remote I/O device (%d) configuration", + remio_dev->shmem.shmem_id, remio_dev->bind_key); + } + size_t shmem_size = 
remio_dev->shmem.size; + if (shmem_size > shmem->size) { + shmem_size = shmem->size; + WARNING("Trying to map region to smaller shared memory. Truncated"); + } + spin_lock(&shmem->lock); + shmem->cpu_masters |= (1UL << cpu()->id); + spin_unlock(&shmem->lock); + + struct vm_mem_region reg = { + .base = remio_dev->shmem.base, + .size = shmem_size, + .place_phys = true, + .phys = shmem->phys, + .colors = shmem->colors, + }; + + vm_map_mem_region(vm, ®); + + if (remio_dev->type == REMIO_DEV_FRONTEND) { + struct emul_mem* emu = &remio_dev->emul; + emu->va_base = remio_dev->va; + emu->size = remio_dev->size; + emu->handler = remio_mmio_emul_handler; + vm_emul_add_mem(vm, emu); + } +} + +static void vm_init_remio(struct vm* vm, const struct vm_config* vm_config) +{ + if (vm_config->platform.remio_dev_num == 0) { + return; + } + + vm->remio_dev_num = vm_config->platform.remio_dev_num; + vm->remio_devs = vm_config->platform.remio_devs; + + for (size_t i = 0; i < vm_config->platform.remio_dev_num; i++) { + struct remio_dev* remio_dev = &vm_config->platform.remio_devs[i]; + vm_init_remio_dev(vm, remio_dev); + } + remio_assign_vm_cpus(vm); +} + static struct vm* vm_allocation_init(struct vm_allocation* vm_alloc) { struct vm* vm = vm_alloc->vm; @@ -271,6 +323,7 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* vm_co vm_init_mem_regions(vm, vm_config); vm_init_dev(vm, vm_config); vm_init_ipc(vm, vm_config); + vm_init_remio(vm, vm_config); } cpu_sync_and_clear_msgs(&vm->sync); diff --git a/src/core/vmm.c b/src/core/vmm.c index 32737889..67e5eeb6 100644 --- a/src/core/vmm.c +++ b/src/core/vmm.c @@ -128,6 +128,7 @@ void vmm_init() vmm_arch_init(); vmm_io_init(); shmem_init(); + remio_init(); cpu_sync_barrier(&cpu_glb_sync);
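
For reference, the sketch below illustrates how a VM configuration could declare a Remote I/O backend/frontend pair using the struct remio_dev fields added by this patch (type, bind_key, interrupt, shmem, and the frontend's va/size). It is a minimal, hypothetical example: the surrounding config layout, the shared-memory list fields, and every address, size, and interrupt number are assumptions made for illustration, not values taken from this change. The key point is that the two entries carry the same bind_key and reference the same shmem entry, which is what remio_init() uses to pair the backend with the frontend and to validate the shared memory configuration.

/* Hypothetical Bao VM configuration file (C), sketching a Remote I/O pair. */
struct config config = {
    /* Assumed shared-memory list: one region used by both VMs. */
    .shmemlist_size = 1,
    .shmemlist = (struct shmem[]) {
        [0] = { .size = 0x10000 },
    },
    .vmlist_size = 2,
    .vmlist = (struct vm_config[]) {
        {
            /* Backend VM: runs the device model and issues HC_REMIO hypercalls. */
            /* ... image, entry, CPU and memory configuration omitted ... */
            .platform = {
                .remio_dev_num = 1,
                .remio_devs = (struct remio_dev[]) {
                    {
                        .type = REMIO_DEV_BACKEND,
                        .bind_key = 0,   /* pairs this backend with the frontend below */
                        .interrupt = 72, /* injected on REMIO_CPU_MSG_NOTIFY (illustrative number) */
                        .shmem = { .base = 0x50000000, .size = 0x10000, .shmem_id = 0 },
                    },
                },
            },
        },
        {
            /* Frontend VM: accesses to [va, va + size) are trapped by remio_mmio_emul_handler(). */
            .platform = {
                .remio_dev_num = 1,
                .remio_devs = (struct remio_dev[]) {
                    {
                        .type = REMIO_DEV_FRONTEND,
                        .bind_key = 0,   /* must match the backend bind_key */
                        .va = 0xa003e00, /* illustrative MMIO window */
                        .size = 0x200,
                        .interrupt = 74,
                        /* must match the backend shmem (checked by remio_init()) */
                        .shmem = { .base = 0x50000000, .size = 0x10000, .shmem_id = 0 },
                    },
                },
            },
        },
    },
};

With a configuration along these lines, the generator in scripts/config_defs_gen.c counts one backend entry, so CONFIG_REMIO_DEV_NUM would be 1 and remio_device_pool would be sized accordingly.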