Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"ARM:
- Move the arch-specific code into arch/arm64/kvm
- Start the post-32bit cleanup
- Cherry-pick a few non-invasive pre-NV patches
x86:
- Rework of TLB flushing
- Rework of event injection, especially with respect to nested
virtualization
- Nested AMD event injection facelift, building on the rework of
generic code and fixing a lot of corner cases
- Nested AMD live migration support
- Optimization for TSC deadline MSR writes and IPIs
- Various cleanups
- Asynchronous page fault cleanups (from tglx, common topic branch
with tip tree)
- Interrupt-based delivery of asynchronous "page ready" events (host
side)
- Hyper-V MSRs and hypercalls for guest debugging
- VMX preemption timer fixes
s390:
- Cleanups
Generic:
- switch vCPU thread wakeup from swait to rcuwait (a usage sketch
  follows this message)
The other architectures, and the guest side of the asynchronous page
fault work, will come next week"
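For the swait-to-rcuwait item above, a minimal sketch of the rcuwait pattern, assuming the 5.8-era <linux/rcuwait.h> API in which rcuwait_wait_event() takes a task state argument; struct demo_vcpu and its helpers are hypothetical stand-ins, not the actual KVM code:

#include <linux/rcuwait.h>
#include <linux/sched.h>

/* Hypothetical stand-in for a per-vCPU wait object. */
struct demo_vcpu {
	struct rcuwait wait;
	bool has_work;
};

static void demo_vcpu_init(struct demo_vcpu *v)
{
	rcuwait_init(&v->wait);
	v->has_work = false;
}

/* Blocking side: sleep until has_work becomes true. */
static void demo_vcpu_block(struct demo_vcpu *v)
{
	rcuwait_wait_event(&v->wait, READ_ONCE(v->has_work),
			   TASK_INTERRUPTIBLE);
}

/*
 * Waker side: rcuwait_wake_up() only enters an RCU read-side
 * critical section; no wait-queue spinlock is taken.
 */
static void demo_vcpu_kick(struct demo_vcpu *v)
{
	WRITE_ONCE(v->has_work, true);
	rcuwait_wake_up(&v->wait);
}

The design point: rcuwait supports at most one waiter, which is exactly the vCPU-thread case, so the swait queue and the spinlock it takes on the wakeup path can be dropped.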
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
KVM: check userspace_addr for all memslots
KVM: selftests: update hyperv_cpuid with SynDBG tests
x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
x86/kvm/hyper-v: Add support for synthetic debugger interface
x86/hyper-v: Add synthetic debugger definitions
KVM: selftests: VMX preemption timer migration test
KVM: nVMX: Fix VMX preemption timer migration
x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
KVM: x86/pmu: Support full width counting
KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
KVM: x86: acknowledgment mechanism for async pf page ready notifications
KVM: x86: interrupt based APF 'page ready' event delivery
KVM: introduce kvm_read_guest_offset_cached()
KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
KVM: VMX: Replace zero-length array with flexible-array
...
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,6 +46,9 @@
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 
+#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+				     KVM_DIRTY_LOG_INITIALLY_SET)
+
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
 extern unsigned int kvm_sve_max_vl;
@@ -112,12 +115,8 @@ struct kvm_vcpu_fault_info {
 	u64 disr_el1;		/* Deferred [SError] Status Register */
 };
 
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
 enum vcpu_sysreg {
-	__INVALID_SYSREG__,
+	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
 	MPIDR_EL1,	/* MultiProcessor Affinity Register */
 	CSSELR_EL1,	/* Cache Size Selection Register */
 	SCTLR_EL1,	/* System Control Register */
@@ -415,6 +414,8 @@ struct kvm_vm_stat {
 struct kvm_vcpu_stat {
 	u64 halt_successful_poll;
 	u64 halt_attempted_poll;
+	u64 halt_poll_success_ns;
+	u64 halt_poll_fail_ns;
 	u64 halt_poll_invalid;
 	u64 halt_wakeup;
 	u64 hvc_exit_stat;
@@ -530,39 +531,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
 }
 
-void __kvm_enable_ssbs(void);
-
-static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
-				       unsigned long hyp_stack_ptr,
-				       unsigned long vector_ptr)
-{
-	/*
-	 * Calculate the raw per-cpu offset without a translation from the
-	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
-	 * so that we can use adr_l to access per-cpu variables in EL2.
-	 */
-	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
-			 (u64)kvm_ksym_ref(kvm_host_data));
-
-	/*
-	 * Call initialization code, and switch to the full blown HYP code.
-	 * If the cpucaps haven't been finalized yet, something has gone very
-	 * wrong, and hyp will crash and burn when it uses any
-	 * cpus_have_const_cap() wrapper.
-	 */
-	BUG_ON(!system_capabilities_finalized());
-	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
-
-	/*
-	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
-	 * at EL2.
-	 */
-	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
-	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		kvm_call_hyp(__kvm_enable_ssbs);
-	}
-}
-
 static inline bool kvm_arch_requires_vhe(void)
 {
 	/*
@@ -594,8 +562,6 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 
-static inline void __cpu_init_stage2(void) {}
-
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
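The KVM_DIRTY_LOG_MANUAL_CAPS hunk above defines the mask of manual-dirty-log capabilities that KVM reports for KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. A minimal userspace sketch of enabling both bits, assuming a VM fd already obtained via KVM_CREATE_VM (error handling elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* vm_fd: file descriptor returned by the KVM_CREATE_VM ioctl. */
static int enable_manual_dirty_log(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		/* Must be a subset of what KVM_CHECK_EXTENSION reports. */
		.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			   KVM_DIRTY_LOG_INITIALLY_SET,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

With KVM_DIRTY_LOG_INITIALLY_SET, a new memslot's dirty bitmap starts out all ones and pages are write-protected lazily as userspace clears bits via KVM_CLEAR_DIRTY_LOG, spreading the write-protection cost across the start of live migration rather than paying it up front.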