x86: kvm: introduce periodic global clock updates
author Andrew Jones <drjones@redhat.com>
Fri, 28 Feb 2014 11:52:55 +0000 (12:52 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 4 Mar 2014 10:50:54 +0000 (11:50 +0100)
commit 0061d53daf26f introduced a mechanism to execute a global clock
update for a VM. We can apply this periodically in order to propagate
host NTP corrections to the guest. Also, if all vcpus of a VM are
pinned, then without an additional trigger no NTP corrections can
propagate to the guest at all, as the current trigger is only vcpu
migration between host CPUs.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
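
The change relies on a delayed_work item that re-arms itself, turning a
one-shot work item into a periodic one. The minimal standalone sketch
below is illustrative only (not part of the patch) and uses hypothetical
names (my_sync_work, my_sync_fn, MY_SYNC_PERIOD); it mirrors what
kvmclock_sync_fn() does with KVMCLOCK_SYNC_PERIOD (300 * HZ, i.e. every
five minutes).

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical period, mirroring KVMCLOCK_SYNC_PERIOD: 300 seconds in jiffies. */
#define MY_SYNC_PERIOD (300 * HZ)

static struct delayed_work my_sync_work;

static void my_sync_fn(struct work_struct *work)
{
	/* ... perform the periodic update here ... */

	/* Re-arm the work so it fires again one period from now. */
	schedule_delayed_work(&my_sync_work, MY_SYNC_PERIOD);
}

static void my_sync_start(void)
{
	INIT_DELAYED_WORK(&my_sync_work, my_sync_fn);
	schedule_delayed_work(&my_sync_work, MY_SYNC_PERIOD);
}

static void my_sync_stop(void)
{
	/* cancel_delayed_work_sync() also waits for a running callback to finish. */
	cancel_delayed_work_sync(&my_sync_work);
}
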
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9aa09d330a4b57c1ee2dcf19461cf0e5c69e35a8..85be627ef5de2be9e494881009692a92a14c4e52 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -599,6 +599,7 @@ struct kvm_arch {
        u64 master_kernel_ns;
        cycle_t master_cycle_now;
        struct delayed_work kvmclock_update_work;
+       struct delayed_work kvmclock_sync_work;
 
        struct kvm_xen_hvm_config xen_hvm_config;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ed9293a696d26b20559a601731ee1e093748980..1e91a246e99621830bb2978d43a252a64937a10a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1660,6 +1660,20 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
                                        KVMCLOCK_UPDATE_DELAY);
 }
 
+#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
+
+static void kvmclock_sync_fn(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
+                                          kvmclock_sync_work);
+       struct kvm *kvm = container_of(ka, struct kvm, arch);
+
+       schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
+       schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+                                       KVMCLOCK_SYNC_PERIOD);
+}
+
 static bool msr_mtrr_valid(unsigned msr)
 {
        switch (msr) {
@@ -6736,6 +6750,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
        int r;
        struct msr_data msr;
+       struct kvm *kvm = vcpu->kvm;
 
        r = vcpu_load(vcpu);
        if (r)
@@ -6746,6 +6761,9 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        kvm_write_tsc(vcpu, &msr);
        vcpu_put(vcpu);
 
+       schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
+                                       KVMCLOCK_SYNC_PERIOD);
+
        return r;
 }
 
@@ -7039,6 +7057,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        pvclock_update_vm_gtod_copy(kvm);
 
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
+       INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
        return 0;
 }
@@ -7077,6 +7096,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_sync_events(struct kvm *kvm)
 {
+       cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
        cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
        kvm_free_all_assigned_devices(kvm);
        kvm_free_pit(kvm);
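
One detail worth noting in kvm_arch_sync_events() above: kvmclock_sync_fn()
re-queues kvmclock_update_work, so the new sync work is cancelled first;
otherwise a sync callback racing with teardown could re-arm the update work
after it had already been cancelled. In terms of the earlier sketch, if
my_sync_fn() also re-queued a second (equally hypothetical) item
my_update_work, teardown would have to cancel in the same order:

static struct delayed_work my_update_work;	/* hypothetical, re-queued by my_sync_fn() */

static void my_teardown(void)
{
	/* Cancel the re-arming work first, so nothing re-queues the other one ... */
	cancel_delayed_work_sync(&my_sync_work);
	/* ... then the dependent work can be cancelled without coming back. */
	cancel_delayed_work_sync(&my_update_work);
}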