When QEMU is passed the -enable-kvm option, the command-line parser in vl.c sets kvm_allowed:

5438 #endif
5439 #ifdef CONFIG_KVM
5440         case QEMU_OPTION_enable_kvm:
5441             kvm_allowed = 1;

(kvm_enabled(), used everywhere below, simply tests kvm_allowed.) Later, still in vl.c, QEMU initializes KVM itself:

5887
5888     if (kvm_enabled()) {
5889         int ret;
5890
5891         ret = kvm_init(smp_cpus);
5892         if (ret < 0) {
5893             fprintf(stderr, "failed to initialize KVM\n");
5894             exit(1);
5895         }
5896     }

kvm_init() is defined in kvm-all.c. It essentially opens /dev/kvm (the device node set up by kvm.ko and kvm-intel.ko) and then issues ioctl()s against the resulting file descriptors:

418
419     s->vmfd = -1;
420     s->fd = open("/dev/kvm", O_RDWR);
421     if (s->fd == -1) {
422         fprintf(stderr, "Could not access KVM kernel module: %m\n");
423         ret = -errno;
424         goto err;
425     }
426
427     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
428     if (ret < KVM_API_VERSION) {
429         if (ret > 0)
430             ret = -EINVAL;
431         fprintf(stderr, "kvm version too old\n");
432         goto err;
433     }
434
435     if (ret > KVM_API_VERSION) {
436         ret = -EINVAL;
437         fprintf(stderr, "kvm version not supported\n");
438         goto err;
439     }
440
441     s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
442     if (s->vmfd < 0)
443         goto err;
444
445     /* initially, KVM allocated its own memory and we had to jump through
446      * hooks to make phys_ram_base point to this. Modern versions of KVM
447      * just use a user allocated buffer so we can use regular pages
448      * unmodified. Make sure we have a sufficiently modern version of KVM.
449      */
450     if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
451         ret = -EINVAL;
452         fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
453                 upgrade_note);
454         goto err;
455     }
Beyond initialization, the rest of QEMU interacts with KVM in various ways. Grepping the tree for kvm shows the touch points, file by file:

./vl.c:
    (!kvm_enabled() || kvm_has_sync_mmu())) {
    if (kvm_enabled())
    kvm_init_vcpu(env);
    static void *kvm_cpu_thread_fn(void *arg)
    if (kvm_enabled())
    if (kvm_enabled()) {
    static void kvm_start_vcpu(CPUState *env)
    kvm_init_vcpu(env);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    if (kvm_enabled())
    kvm_start_vcpu(env);
    #ifdef CONFIG_KVM
    case QEMU_OPTION_enable_kvm:
    kvm_allowed = 1;
    #if defined(CONFIG_KVM) && defined(CONFIG_KQEMU)
    if (kvm_allowed && kqemu_allowed) {
    "You can not enable both KVM and kqemu at the same time\n");
    if (kvm_enabled()) {
    ret = kvm_init(smp_cpus);
    fprintf(stderr, "failed to initialize KVM\n");

./monitor.c:
    static void do_info_kvm(Monitor *mon)
    #ifdef CONFIG_KVM
    monitor_printf(mon, "kvm support: ");
    if (kvm_enabled())
    monitor_printf(mon, "kvm support: not compiled\n");
    if (kvm_enabled() && !kvm_has_sync_mmu())
    monitor_printf(mon, "Using KVM without synchronous MMU, "
    { "kvm", "", do_info_kvm, "", "show KVM information", },

./target-ppc/helper.c:
    if (!kvm_enabled())

./target-ppc/machine.c:

./target-ppc/kvm.c:
    * PowerPC implementation of KVM hooks
    //#define DEBUG_KVM
    #ifdef DEBUG_KVM
    int kvm_arch_init(KVMState *s, int smp_cpus)
    int kvm_arch_init_vcpu(CPUState *cenv)
    int kvm_arch_put_registers(CPUState *env)
    struct kvm_regs regs;
    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    int kvm_arch_get_registers(CPUState *env)
    struct kvm_regs regs;
    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
    /* For now KVM disregards the 'irq' argument. However, in the
     * future KVM could cache it in-kernel to avoid a heavyweight exit
    r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
    int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
    static int kvmppc_handle_halt(CPUState *env)
    static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
    static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
    int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
    case KVM_EXIT_DCR:
    ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
    ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
    case KVM_EXIT_HLT:
    ret = kvmppc_handle_halt(env);

./target-ppc/kvm_ppc.c:
    * PowerPC KVM support
    static QEMUTimer *kvmppc_timer;
    static unsigned int kvmppc_timer_rate;
    int kvmppc_read_host_property(const char *node_path, const char *prop,
    static int kvmppc_copy_host_cell(void *fdt, const char *node, const char *prop)
    ret = kvmppc_read_host_property(node, prop, &cell, sizeof(cell));
    void kvmppc_fdt_update(void *fdt)
    kvmppc_copy_host_cell(fdt, "/cpus/c...@0", "clock-frequency");
    kvmppc_copy_host_cell(fdt, "/cpus/c...@0", "timebase-frequency");
    static void kvmppc_timer_hack(void *opaque)
    qemu_mod_timer(kvmppc_timer, qemu_get_clock(vm_clock) + kvmppc_timer_rate);
    void kvmppc_init(void)
    /* XXX The only reason KVM yields control back to qemu is device IO. Since
    kvmppc_timer_rate = ticks_per_sec / 10;
    kvmppc_timer = qemu_new_timer(vm_clock, &kvmppc_timer_hack, NULL);
    qemu_mod_timer(kvmppc_timer, qemu_get_clock(vm_clock) + kvmppc_timer_rate);
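Note the shape of kvm_arch_pre_run() above: before re-entering the guest, a pending interrupt is pushed into the kernel with the KVM_INTERRUPT vcpu ioctl. A sketch of that injection (the helper name inject_irq is my own; `vcpu` is a file descriptor from KVM_CREATE_VCPU, as in the later examples):

/* Sketch of the KVM_INTERRUPT injection done in kvm_arch_pre_run(). */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_irq(int vcpu, unsigned int vector)
{
    struct kvm_interrupt intr = { .irq = vector };
    /* Only valid when no in-kernel irqchip is in use, which is the
     * situation in this version of QEMU. Returns 0 on success. */
    return ioctl(vcpu, KVM_INTERRUPT, &intr);
}

The x86 side does roughly the same thing, additionally consulting the shared kvm_run page to decide whether the guest can accept an interrupt right now.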
./target-i386/helper.c:
    if (kvm_enabled() && strcmp(name, "host") == 0) {
    if (kvm_enabled())
    kvm_arch_get_registers(env);
    #if defined(CONFIG_KVM)
    if (kvm_enabled() && !env->cpuid_vendor_override)
    if (kvm_enabled()) {
    /* Nested SVM not yet supported in KVM */

./target-i386/machine.c:
    /* KVM-related states */

./target-i386/kvm.c:
    * QEMU KVM support
    //#define DEBUG_KVM
    #ifdef DEBUG_KVM
    #ifdef KVM_CAP_EXT_CPUID
    static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
    struct kvm_cpuid2 *cpuid;
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
    uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
    struct kvm_cpuid2 *cpuid;
    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
    /* On Intel, kvm returns cpuid according to the Intel spec,
    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
    uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
    static void kvm_trim_features(uint32_t *features, uint32_t supported)
    int kvm_arch_init_vcpu(CPUState *env)
    struct kvm_cpuid2 cpuid;
    struct kvm_cpuid_entry2 entries[100];
    env->mp_state = KVM_MP_STATE_RUNNABLE;
    kvm_trim_features(&env->cpuid_features, kvm_arch_get_supported_cpuid(env, 1, R_EDX));
    kvm_trim_features(&env->cpuid_ext_features, kvm_arch_get_supported_cpuid(env, 1, R_ECX));
    kvm_trim_features(&env->cpuid_ext2_features, kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX));
    kvm_trim_features(&env->cpuid_ext3_features, kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX));
    struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
    c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | KVM_CPUID_FLAG_STATE_READ_NEXT;
    c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
    c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
    struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];
    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
    static int kvm_has_msr_star(CPUState *env)
    struct kvm_msr_list msr_list, *kvm_msr_list;
    /* Obtain MSR list from KVM. These are the MSRs that we must
    ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
    kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
    kvm_msr_list->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
    for (i = 0; i < kvm_msr_list->nmsrs; i++) {
    if (kvm_msr_list->indices[i] == MSR_STAR) {
    free(kvm_msr_list);
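try_get_cpuid() above hides a small protocol: KVM_GET_SUPPORTED_CPUID fails with E2BIG when the caller's table is too small, so userspace retries with a larger one. A standalone sketch of that loop (my illustration, not the QEMU code; an x86 host with KVM_CAP_EXT_CPUID support is assumed):

/* cpuid_probe.c -- sketch of the try_get_cpuid() pattern: ask KVM which
 * CPUID leaves it can virtualize, growing the table until it fits. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_supported_cpuid(int kvm)
{
    int max = 1;
    for (;;) {
        struct kvm_cpuid2 *cpuid =
            calloc(1, sizeof(*cpuid) + max * sizeof(struct kvm_cpuid_entry2));
        cpuid->nent = max;
        /* system ioctl, issued on the /dev/kvm fd, like kvm_ioctl(s, ...) */
        if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
            return cpuid;               /* kernel wrote back cpuid->nent */
        free(cpuid);
        if (errno != E2BIG)             /* real failure, not "too small" */
            return NULL;
        max *= 2;                       /* retry with a larger table */
    }
}

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    struct kvm_cpuid2 *cpuid = get_supported_cpuid(kvm);
    if (!cpuid) {
        fprintf(stderr, "KVM_GET_SUPPORTED_CPUID: %s\n", strerror(errno));
        return 1;
    }
    for (unsigned i = 0; i < cpuid->nent; i++)
        printf("leaf %#x index %#x: eax=%#x edx=%#x\n",
               cpuid->entries[i].function, cpuid->entries[i].index,
               cpuid->entries[i].eax, cpuid->entries[i].edx);
    free(cpuid);
    return 0;
}

kvm_arch_init_vcpu() then intersects these supported bits with what the user asked for (kvm_trim_features) and installs the result with KVM_SET_CPUID2.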
    int kvm_arch_init(KVMState *s, int smp_cpus)
    /* create vm86 tss. KVM uses vm86 mode to emulate 16-bit code
    * versions of KVM just assumed that it would be at the end of physical
    * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
    static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
    static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
    static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
    *kvm_reg = *qemu_reg;
    *qemu_reg = *kvm_reg;
    static int kvm_getput_regs(CPUState *env, int set)
    struct kvm_regs regs;
    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);
    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    static int kvm_put_fpu(CPUState *env)
    struct kvm_fpu fpu;
    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
    static int kvm_put_sregs(CPUState *env)
    struct kvm_sregs sregs;
    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
    static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
    static int kvm_put_msrs(CPUState *env)
    struct kvm_msrs info;
    struct kvm_msr_entry entries[100];
    struct kvm_msr_entry *msrs = msr_data.entries;
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
    kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
    static int kvm_get_fpu(CPUState *env)
    struct kvm_fpu fpu;
    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    static int kvm_get_sregs(CPUState *env)
    struct kvm_sregs sregs;
    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    static int kvm_get_msrs(CPUState *env)
    struct kvm_msrs info;
    struct kvm_msr_entry entries[100];
    struct kvm_msr_entry *msrs = msr_data.entries;
    if (kvm_has_msr_star(env))
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    int kvm_arch_put_registers(CPUState *env)
    ret = kvm_getput_regs(env, 1);
    ret = kvm_put_fpu(env);
    ret = kvm_put_sregs(env);
    ret = kvm_put_msrs(env);
    ret = kvm_put_mp_state(env);
    ret = kvm_get_mp_state(env);
    int kvm_arch_get_registers(CPUState *env)
    ret = kvm_getput_regs(env, 0);
    ret = kvm_get_fpu(env);
    ret = kvm_get_sregs(env);
    ret = kvm_get_msrs(env);
    int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
    struct kvm_interrupt intr;
    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
    int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
    static int kvm_handle_halt(CPUState *env)
    int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
    case KVM_EXIT_HLT:
    ret = kvm_handle_halt(env);
    #ifdef KVM_CAP_SET_GUEST_DEBUG
    int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
    int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
    int kvm_arch_insert_hw_breakpoint(target_ulong addr,
    int kvm_arch_remove_hw_breakpoint(target_ulong addr,
    void kvm_arch_remove_all_hw_breakpoints(void)
    int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
    kvm_update_guest_debug(cpu_single_env,
    KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
    void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
    if (kvm_sw_breakpoints_active(env))
    dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    #endif /* KVM_CAP_SET_GUEST_DEBUG */

./cpu-exec.c:
    if (kvm_enabled()) {
    kvm_cpu_exec(env);

./kvm-all.c:
    * QEMU KVM support
    /* KVM uses PAGE_SIZE in it's definition of COALESCED_MMIO_MAX */
    //#define DEBUG_KVM
    #ifdef DEBUG_KVM
    typedef struct KVMSlot
    } KVMSlot;
    typedef struct kvm_dirty_log KVMDirtyLog;
    int kvm_allowed = 0;
    struct KVMState
    KVMSlot slots[32];
    #ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
    static KVMState *kvm_state;
    static KVMSlot *kvm_alloc_slot(KVMState *s)
    /* KVM private memory slots */
    static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
    KVMSlot *mem = &s->slots[i];
    static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
    KVMSlot *found = NULL;
    KVMSlot *mem = &s->slots[i];
    static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
    struct kvm_userspace_memory_region mem;
    mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    static void kvm_reset_vcpu(void *opaque)
    if (kvm_arch_put_registers(env)) {
    fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
    int kvm_init_vcpu(CPUState *env)
    KVMState *s = kvm_state;
    dprintf("kvm_init_vcpu\n");
    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    dprintf("kvm_create_vcpu failed\n");
    env->kvm_fd = ret;
    env->kvm_state = s;
    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
    ret = kvm_arch_init_vcpu(env);
    qemu_register_reset(kvm_reset_vcpu, env);
    ret = kvm_arch_put_registers(env);
    int kvm_put_mp_state(CPUState *env)
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
    int kvm_get_mp_state(CPUState *env)
    struct kvm_mp_state mp_state;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
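kvm_getput_regs() is a single round trip over one struct: KVM_GET_REGS fills struct kvm_regs, the copy lines move fields between it and CPUState (the `set` flag picks the direction), and KVM_SET_REGS writes it back. Stripped of QEMU's plumbing, the round trip looks like this (a sketch under the same assumptions as the earlier examples, with vcpu creation as in kvm_init_vcpu() above; error handling abbreviated):

/* regs_roundtrip.c -- sketch of the kvm_getput_regs() idea on x86. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm  = open("/dev/kvm", O_RDWR);
    int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
    int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* env->kvm_fd, vcpu id 0 */
    if (vcpu < 0) {
        perror("KVM_CREATE_VCPU");
        return 1;
    }

    struct kvm_regs regs;
    if (ioctl(vcpu, KVM_GET_REGS, &regs) < 0) { /* kernel -> userspace */
        perror("KVM_GET_REGS");
        return 1;
    }
    printf("reset rip=%#llx rflags=%#llx\n",
           (unsigned long long)regs.rip, (unsigned long long)regs.rflags);

    regs.rip = 0x1000;                          /* edit in place ...        */
    regs.rax = 42;
    if (ioctl(vcpu, KVM_SET_REGS, &regs) < 0) { /* ... userspace -> kernel */
        perror("KVM_SET_REGS");
        return 1;
    }
    return 0;
}

FPU state, segment/control registers, and MSRs follow the same get/put shape with their own structs and ioctls, which is exactly what kvm_arch_put_registers() and kvm_arch_get_registers() sequence.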
    static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    flags |= KVM_MEM_LOG_DIRTY_PAGES;
    return kvm_set_user_memory_region(s, mem);
    int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
    return kvm_dirty_pages_log_change(phys_addr, size,
    KVM_MEM_LOG_DIRTY_PAGES,
    KVM_MEM_LOG_DIRTY_PAGES);
    int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
    return kvm_dirty_pages_log_change(phys_addr, size,
    KVM_MEM_LOG_DIRTY_PAGES);
    int kvm_set_migration_log(int enable)
    KVMState *s = kvm_state;
    KVMSlot *mem;
    if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
    err = kvm_set_user_memory_region(s, mem);
    * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
    int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
    KVMState *s = kvm_state;
    KVMDirtyLog d;
    KVMSlot *mem;
    mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
    int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
    #ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    struct kvm_coalesced_mmio_zone zone;
    ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
    #ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    struct kvm_coalesced_mmio_zone zone;
    ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    int kvm_check_extension(KVMState *s, unsigned int extension)
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
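kvm_physical_sync_dirty_bitmap(), a few lines up, wraps KVM_GET_DIRTY_LOG: the kernel copies out (and clears) one dirty bit per guest page for a slot that was registered with KVM_MEM_LOG_DIRTY_PAGES. A sketch of the raw call (fetch_dirty_bitmap is my own hypothetical helper; the slot must already have dirty logging enabled, and a 4K host page size is assumed):

/* Sketch of the ioctl underneath kvm_physical_sync_dirty_bitmap(). */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm is the KVM_CREATE_VM fd; slot_bytes is the slot's memory_size. */
static void *fetch_dirty_bitmap(int vm, int slot, size_t slot_bytes)
{
    size_t pages = slot_bytes / 4096;
    size_t bitmap_bytes = ((pages + 63) / 64) * 8;  /* round to 64-bit words */
    void *bitmap = calloc(1, bitmap_bytes);         /* one bit per page */

    struct kvm_dirty_log d = { .slot = slot };
    d.dirty_bitmap = bitmap;
    if (ioctl(vm, KVM_GET_DIRTY_LOG, &d) < 0) {     /* also resets the log */
        free(bitmap);
        return NULL;
    }
    return bitmap;  /* bit N set => page N of this slot was written */
}

QEMU then walks this bitmap and marks the corresponding pages dirty in its own phys_ram_dirty tracking, which is what migration and the VGA framebuffer code consume.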
    int kvm_init(int smp_cpus)
    "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
    "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
    s = qemu_mallocz(sizeof(KVMState));
    #ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&s->kvm_sw_breakpoints);
    s->fd = open("/dev/kvm", O_RDWR);
    fprintf(stderr, "Could not access KVM kernel module: %m\n");
    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
    fprintf(stderr, "kvm version too old\n");
    if (ret > KVM_API_VERSION) {
    fprintf(stderr, "kvm version not supported\n");
    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    /* initially, KVM allocated its own memory and we had to jump through
    * hooks to make phys_ram_base point to this. Modern versions of KVM
    * unmodified. Make sure we have a sufficiently modern version of KVM.
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
    fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
    "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
    #ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    #ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    ret = kvm_arch_init(s, smp_cpus);
    kvm_state = s;
    static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
    if (direction == KVM_EXIT_IO_IN) {
    static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
    #ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    struct kvm_coalesced_mmio_ring *ring;
    struct kvm_coalesced_mmio *ent;
    ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
    int kvm_cpu_exec(CPUState *env)
    struct kvm_run *run = env->kvm_run;
    dprintf("kvm_cpu_exec()\n");
    kvm_arch_pre_run(env, run);
    ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
    kvm_arch_post_run(env, run);
    dprintf("kvm run failed %s\n", strerror(-ret));
    kvm_run_coalesced_mmio(env, run);
    case KVM_EXIT_IO:
    ret = kvm_handle_io(env, run->io.port,
    case KVM_EXIT_MMIO:
    case KVM_EXIT_IRQ_WINDOW_OPEN:
    case KVM_EXIT_SHUTDOWN:
    case KVM_EXIT_UNKNOWN:
    dprintf("kvm_exit_unknown\n");
    case KVM_EXIT_FAIL_ENTRY:
    dprintf("kvm_exit_fail_entry\n");
    case KVM_EXIT_EXCEPTION:
    dprintf("kvm_exit_exception\n");
    case KVM_EXIT_DEBUG:
    dprintf("kvm_exit_debug\n");
    #ifdef KVM_CAP_SET_GUEST_DEBUG
    if (kvm_arch_debug(&run->debug.arch)) {
    #endif /* KVM_CAP_SET_GUEST_DEBUG */
    dprintf("kvm_arch_handle_exit\n");
    ret = kvm_arch_handle_exit(env, run);
    void kvm_set_phys_mem(target_phys_addr_t start_addr,
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    if (!kvm_lookup_overlapping_slot(s, start_addr,
    fprintf(stderr, "Unaligned split of a KVM memory slot\n");
    /* KVM does not support read-only slots */
    mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
    err = kvm_set_user_memory_region(s, mem);
    /* Workaround for older KVM versions: we can't join slots, even not by
    * - and actually require a recent KVM version. */
    mem = kvm_alloc_slot(s);
    err = kvm_set_user_memory_region(s, mem);
    mem = kvm_alloc_slot(s);
    err = kvm_set_user_memory_region(s, mem);
    mem = kvm_alloc_slot(s);
    err = kvm_set_user_memory_region(s, mem);
    /* in case the KVM bug workaround already "consumed" the new slot */
    /* KVM does not need to know about this memory */
    mem = kvm_alloc_slot(s);
    err = kvm_set_user_memory_region(s, mem);
    int kvm_ioctl(KVMState *s, int type, ...)
    int kvm_vm_ioctl(KVMState *s, int type, ...)
    int kvm_vcpu_ioctl(CPUState *env, int type, ...)
    ret = ioctl(env->kvm_fd, type, arg);
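kvm_cpu_exec() is the pattern every KVM user ends up with: enter the guest with KVM_RUN, wake up in userspace with run->exit_reason set, service it, loop. The following self-contained sketch (my own toy modeled on that loop, not QEMU code) boots a 5-byte real-mode guest that writes one character to an arbitrary port and halts; error handling is elided for brevity:

/* toy_vm.c -- sketch of the KVM_RUN loop behind kvm_cpu_exec().
 * Build: gcc toy_vm.c -o toy_vm */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
    const unsigned char guest[] = {
        0xb0, 'A',            /* mov  al, 'A' */
        0xe6, 0xf8,           /* out  0xf8, al */
        0xf4,                 /* hlt */
    };

    int kvm = open("/dev/kvm", O_RDWR);
    int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

    /* back 4K of guest RAM at gpa 0x1000 with an anonymous mapping */
    void *ram = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    memcpy(ram, guest, sizeof(guest));
    struct kvm_userspace_memory_region mem = {
        .slot = 0,
        .guest_phys_addr = 0x1000,
        .memory_size = 0x1000,
        .userspace_addr = (unsigned long)ram,
    };
    ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mem);

    int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
    struct kvm_run *run =                      /* env->kvm_run in QEMU */
        mmap(NULL, ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0),
             PROT_READ | PROT_WRITE, MAP_SHARED, vcpu, 0);

    /* flat real mode, start executing at 0x1000 */
    struct kvm_sregs sregs;
    ioctl(vcpu, KVM_GET_SREGS, &sregs);
    sregs.cs.base = 0;
    sregs.cs.selector = 0;
    ioctl(vcpu, KVM_SET_SREGS, &sregs);
    struct kvm_regs regs = { .rip = 0x1000, .rflags = 0x2 };
    ioctl(vcpu, KVM_SET_REGS, &regs);

    for (;;) {                                 /* the kvm_cpu_exec() loop */
        ioctl(vcpu, KVM_RUN, 0);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:                      /* our 'out' instruction */
            putchar(*((char *)run + run->io.data_offset));
            break;
        case KVM_EXIT_HLT:                     /* our 'hlt': done */
            putchar('\n');
            return 0;
        default:
            fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
            return 1;
        }
    }
}

Everything QEMU layers on top of this, coalesced-MMIO draining, the kvm_arch_pre_run()/kvm_arch_post_run() hooks, and the larger exit dispatch table, is elaboration of this one loop.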
    int kvm_has_sync_mmu(void)
    #ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;
    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
    void kvm_setup_guest_memory(void *start, size_t size)
    if (!kvm_has_sync_mmu()) {
    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
    #ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
    struct kvm_sw_breakpoint *bp;
    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
    int kvm_sw_breakpoints_active(CPUState *env)
    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
    int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
    struct kvm_guest_debug dbg;
    dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    kvm_arch_update_guest_debug(env, &dbg);
    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
    int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
    struct kvm_sw_breakpoint *bp;
    bp = kvm_find_sw_breakpoint(current_env, addr);
    bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
    err = kvm_arch_insert_sw_breakpoint(current_env, bp);
    TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
    err = kvm_arch_insert_hw_breakpoint(addr, len, type);
    err = kvm_update_guest_debug(env, 0);
    int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
    struct kvm_sw_breakpoint *bp;
    bp = kvm_find_sw_breakpoint(current_env, addr);
    err = kvm_arch_remove_sw_breakpoint(current_env, bp);
    TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
    err = kvm_arch_remove_hw_breakpoint(addr, len, type);
    err = kvm_update_guest_debug(env, 0);
    void kvm_remove_all_breakpoints(CPUState *current_env)
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
    if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
    if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
    kvm_arch_remove_all_hw_breakpoints();
    kvm_update_guest_debug(env, 0);
    #else /* !KVM_CAP_SET_GUEST_DEBUG */
    int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
    int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
    int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
    void kvm_remove_all_breakpoints(CPUState *current_env)
    #endif /* !KVM_CAP_SET_GUEST_DEBUG */
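kvm_update_guest_debug() builds a struct kvm_guest_debug and hands it to the KVM_SET_GUEST_DEBUG vcpu ioctl (guarded by KVM_CAP_SET_GUEST_DEBUG). A minimal sketch of arming single-step, assuming a vcpu fd as in the earlier examples (enable_singlestep is my own helper name); the next KVM_RUN then returns with exit_reason == KVM_EXIT_DEBUG after one guest instruction:

/* Sketch of the ioctl behind kvm_update_guest_debug(). */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_singlestep(int vcpu)
{
    struct kvm_guest_debug dbg;
    memset(&dbg, 0, sizeof(dbg));
    dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    return ioctl(vcpu, KVM_SET_GUEST_DEBUG, &dbg);  /* 0 on success */
}

Software breakpoints work the same way, except QEMU first patches the breakpoint instruction into guest memory (kvm_arch_insert_sw_breakpoint) and sets KVM_GUESTDBG_USE_SW_BP in dbg.control so the kernel reflects the resulting traps back to userspace.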
./hw/virtio-pci.c:
    * KVM or if kqemu gets SMP support.
    proxy->class_code != PCI_CLASS_OTHERS) /* qemu-kvm */

./hw/acpi.c:
    if (kvm_enabled()) {
    /* Mark SMM as already inited (until KVM supports SMM). */
    if (kvm_enabled()) {
    /* Mark SMM as already inited to prevent SMM from running. KVM does not

./hw/ppc440_bamboo.c:
    if (kvm_enabled())
    kvmppc_fdt_update(fdt);
    /* XXX we currently depend on KVM to create some initial TLB entries. */
    if (kvm_enabled())
    kvmppc_init();

./hw/vga.c:
    if (kvm_enabled() && s->map_addr)
    kvm_log_start(s->map_addr, s->map_end - s->map_addr);
    if (kvm_enabled() && s->lfb_vram_mapped) {
    kvm_log_start(isa_mem_base + 0xa0000, 0x8000);
    kvm_log_start(isa_mem_base + 0xa8000, 0x8000);

./hw/ppc440.c:

./hw/virtio-balloon.c:
    if (!kvm_enabled() || kvm_has_sync_mmu())

./hw/apic.c:

./hw/virtio.c:
    * KVM or if kqemu gets SMP support.

./hw/cirrus_vga.c:

./hw/ppce500_mpc8544ds.c:
    ret = kvmppc_read_host_property(node, prop, &cell, sizeof(cell));
    if (kvm_enabled()) {
    /* XXX we currently depend on KVM to create some initial TLB entries. */
    if (kvm_enabled())
    kvmppc_init();

./gdbstub.c:
    if (kvm_enabled())
    return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
    if (kvm_enabled())
    return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
    if (kvm_enabled()) {
    kvm_remove_all_breakpoints(gdbserver_state->c_cpu);

./exec.c:
    if (kvm_enabled())
    kvm_update_guest_debug(env, 0);
    if (kvm_enabled()) {
    return kvm_set_migration_log(enable);
    if (kvm_enabled())
    ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
    if (kvm_enabled())
    kvm_set_phys_mem(start_addr, size, phys_offset);
    if (kvm_enabled())
    kvm_coalesce_mmio_region(addr, size);
    if (kvm_enabled())
    kvm_uncoalesce_mmio_region(addr, size);
    if (kvm_enabled())
    kvm_setup_guest_memory(new_block->host, size);
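The exec.c hooks are where guest RAM and dirty tracking meet KVM: kvm_set_phys_mem() programs memory slots, and kvm_log_start()/kvm_log_stop() (called from hw/vga.c above) just flip KVM_MEM_LOG_DIRTY_PAGES on an existing slot. A closing sketch of what that amounts to at the ioctl level (names and sizes are arbitrary, error handling elided):

/* slot_flags.c -- registering a user buffer as guest RAM, then toggling
 * dirty logging by re-issuing KVM_SET_USER_MEMORY_REGION for the slot. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
    void *ram = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);

    struct kvm_userspace_memory_region mem = {
        .slot            = 0,
        .flags           = 0,
        .guest_phys_addr = 0,
        .memory_size     = 0x10000,
        .userspace_addr  = (uintptr_t)ram,
    };
    ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mem);  /* register the slot */

    mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;         /* what kvm_log_start() */
    ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mem);  /* boils down to */

    mem.flags &= ~KVM_MEM_LOG_DIRTY_PAGES;        /* and kvm_log_stop() */
    ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mem);

    printf("slot 0: 64K at gpa 0, dirty logging toggled\n");
    return 0;
}

That closes the circle: the buffer handed to KVM here is ordinary QEMU-allocated memory, which is exactly the KVM_CAP_USER_MEMORY behavior that kvm_init() insisted on at startup.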
