Put the GPR, CSR and FP registers to KVM via the KVM_SET_ONE_REG ioctl.
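
The registers are written back in three groups: the core state (pc and the
x1-x31 GPRs), the supervisor CSRs, and the floating point registers. QEMU
keeps the S-mode view of sstatus/sie/sip in env->mstatus/mie/mip, so those
fields are the source when the CSRs are synced. Every write goes through
kvm_set_one_reg(); as a rough sketch (illustrative only, based on the
generic accel/kvm helper, not code added by this patch), each call amounts
to:

    /*
     * Illustrative sketch, not part of this patch: kvm_set_one_reg()
     * wraps the KVM_SET_ONE_REG vcpu ioctl, which copies one register
     * value from QEMU's address space into the in-kernel vCPU state.
     */
    static int set_one_reg_sketch(CPUState *cs, uint64_t id, void *source)
    {
        struct kvm_one_reg one_reg = {
            .id   = id,                  /* e.g. RISCV_CORE_REG(env, regs.pc) */
            .addr = (uintptr_t)source,   /* host pointer to the value to write */
        };

        return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &one_reg);
    }

kvm_set_one_reg() returns a negative errno on failure, which is why each
helper below bails out on the first register that fails to write.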
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reviewed-by: Alistair Francis <alistair.francis(a)wdc.com>
---
target/riscv/kvm.c | 104 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 103 insertions(+), 1 deletion(-)
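
Reviewer note (not part of the commit message): the RISCV_CORE_REG(),
RISCV_CSR_REG() and RISCV_FP_D_REG()/RISCV_FP_F_REG() macros used below
were introduced by the earlier get-registers patch in this series. They
build a one-reg ID by OR-ing the arch, register-class and index fields with
a size field; a rough sketch of that helper, assuming the size is picked
from the guest XLEN (the riscv_cpu_is_32bit() check here is an assumption,
not taken from this patch):

    /* Assumed shape of the ID helper from the earlier patch in this series. */
    static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                     uint64_t idx)
    {
        uint64_t id = KVM_REG_RISCV | type | idx;

        /* Register size follows the guest XLEN: U32 for RV32, U64 for RV64. */
        id |= riscv_cpu_is_32bit(env) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;

        return id;
    }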
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
index 6d4df0ef6d..e695b91dc7 100644
--- a/target/riscv/kvm.c
+++ b/target/riscv/kvm.c
@@ -73,6 +73,14 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, uint64_t idx)
         } \
     } while (0)

+#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
+    do { \
+        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
+        if (ret) { \
+            return ret; \
+        } \
+    } while (0)
+
static int kvm_riscv_get_regs_core(CPUState *cs)
{
int ret = 0;
@@ -98,6 +106,31 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
return ret;
}
+static int kvm_riscv_put_regs_core(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    target_ulong reg;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    reg = env->pc;
+    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+    if (ret) {
+        return ret;
+    }
+
+    for (i = 1; i < 32; i++) {
+        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
+        reg = env->gpr[i];
+        ret = kvm_set_one_reg(cs, id, &reg);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    return ret;
+}
+
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
int ret = 0;
@@ -115,6 +148,24 @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
return ret;
}
+static int kvm_riscv_put_regs_csr(CPUState *cs)
+{
+    int ret = 0;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
+    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
+    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
+    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
+    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
+    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
+    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
+    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
+    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
+
+    return ret;
+}
+
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
int ret = 0;
@@ -148,6 +199,40 @@ static int kvm_riscv_get_regs_fp(CPUState *cs)
return ret;
}
+static int kvm_riscv_put_regs_fp(CPUState *cs)
+{
+    int ret = 0;
+    int i;
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+    if (riscv_has_ext(env, RVD)) {
+        uint64_t reg;
+        for (i = 0; i < 32; i++) {
+            reg = env->fpr[i];
+            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+        }
+        return ret;
+    }
+
+    if (riscv_has_ext(env, RVF)) {
+        uint32_t reg;
+        for (i = 0; i < 32; i++) {
+            reg = env->fpr[i];
+            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
+            if (ret) {
+                return ret;
+            }
+        }
+        return ret;
+    }
+
+    return ret;
+}
+
+
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
@@ -176,7 +261,24 @@ int kvm_arch_get_registers(CPUState *cs)
int kvm_arch_put_registers(CPUState *cs, int level)
{
-    return 0;
+    int ret = 0;
+
+    ret = kvm_riscv_put_regs_core(cs);
+    if (ret) {
+        return ret;
+    }
+
+    ret = kvm_riscv_put_regs_csr(cs);
+    if (ret) {
+        return ret;
+    }
+
+    ret = kvm_riscv_put_regs_fp(cs);
+    if (ret) {
+        return ret;
+    }
+
+    return ret;
}
int kvm_arch_release_virq_post(int virq)
--
2.19.1