Get GPR, CSR and FP registers from KVM with the KVM_GET_ONE_REG ioctl.
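
Read the core registers (pc and x1..x31), the supervisor CSRs and the
floating-point registers from KVM with a series of KVM_GET_ONE_REG calls
and store them in CPURISCVState. KVM exposes the S-mode view of the CSRs,
so the values read for sstatus, sie and sip are stored in QEMU's mstatus,
mie and mip fields. When RVD is present only the 64-bit D registers are
read, since they hold the complete FP state; otherwise the 32-bit F
registers are used.
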
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Yipeng Yin <yinyipeng1@huawei.com>
---
target/riscv/kvm.c | 150 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 149 insertions(+), 1 deletion(-)
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
index 86660ba81b..e679619e79 100644
--- a/target/riscv/kvm.c
+++ b/target/riscv/kvm.c
@@ -50,13 +50,161 @@ static __u64 kvm_riscv_reg_id(__u64 type, __u64 idx)
return id;
}
+#define RISCV_CORE_REG(name) kvm_riscv_reg_id(KVM_REG_RISCV_CORE, \
+ KVM_REG_RISCV_CORE_REG(name))
+
+#define RISCV_CSR_REG(name) kvm_riscv_reg_id(KVM_REG_RISCV_CSR, \
+ KVM_REG_RISCV_CSR_REG(name))
+
+#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id(KVM_REG_RISCV_FP_F, idx)
+
+#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id(KVM_REG_RISCV_FP_D, idx)
+
+static int kvm_riscv_get_regs_core(CPUState *cs)
+{
+ int ret = 0;
+ int i;
+ target_ulong reg;
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+
+ ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->pc = reg;
+
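+ /* x0 is hardwired to zero, so only x1..x31 need to be read from KVM. */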
+ for (i = 1; i < 32; i++) {
+ __u64 id = kvm_riscv_reg_id(KVM_REG_RISCV_CORE, i);
+ ret = kvm_get_one_reg(cs, id, &reg);
+ if (ret) {
+ return ret;
+ }
+ env->gpr[i] = reg;
+ }
+
+ return ret;
+}
+
+static int kvm_riscv_get_regs_csr(CPUState *cs)
+{
+ int ret = 0;
+ target_ulong reg;
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+
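+ /*
+ * KVM exposes the S-mode CSRs; QEMU keeps the sstatus, sie and sip
+ * values in its mstatus, mie and mip fields.
+ */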
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(sstatus), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->mstatus = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(sie), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->mie = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(stvec), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->stvec = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(sscratch), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->sscratch = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(sepc), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->sepc = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(scause), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->scause = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(stval), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->sbadaddr = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(sip), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->mip = reg;
+
+ ret = kvm_get_one_reg(cs, RISCV_CSR_REG(satp), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->satp = reg;
+
+ return ret;
+}
+
+static int kvm_riscv_get_regs_fp(CPUState *cs)
+{
+ int ret = 0;
+ int i;
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
+
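+ /*
+ * Prefer the 64-bit D registers when RVD is present; otherwise fall
+ * back to the 32-bit F registers.
+ */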
+ if (riscv_has_ext(env, RVD)) {
+ uint64_t reg;
+ for (i = 0; i < 32; i++) {
+ ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->fpr[i] = reg;
+ }
+ return ret;
+ }
+
+ if (riscv_has_ext(env, RVF)) {
+ uint32_t reg;
+ for (i = 0; i < 32; i++) {
+ ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
+ if (ret) {
+ return ret;
+ }
+ env->fpr[i] = reg;
+ }
+ return ret;
+ }
+
+ return ret;
+}
+
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
int kvm_arch_get_registers(CPUState *cs)
{
- return 0;
+ int ret = 0;
+
+ ret = kvm_riscv_get_regs_core(cs);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_riscv_get_regs_csr(cs);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_riscv_get_regs_fp(cs);
+ if (ret) {
+ return ret;
+ }
+
+ return ret;
}
int kvm_arch_put_registers(CPUState *cs, int level)
--
2.19.1