From: Takuya Yoshikawa <yoshikawa.tak...@oss.ntt.co.jp>

Rename vm_list to kvm_list to make it clear that this list is not related
to virtual memory.

Also remove the vm_ prefix from the corresponding member of struct kvm,
renaming it to "list", to avoid the redundant kvm->vm_ naming.

Signed-off-by: Takuya Yoshikawa <yoshikawa.tak...@oss.ntt.co.jp>
---
 Documentation/virtual/kvm/locking.txt |    2 +-
 arch/x86/include/asm/kvm_host.h       |    2 +-
 arch/x86/kvm/mmu.c                    |    4 ++--
 arch/x86/kvm/x86.c                    |    4 ++--
 include/linux/kvm_host.h              |    2 +-
 virt/kvm/kvm_main.c                   |   12 ++++++------
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/Documentation/virtual/kvm/locking.txt 
b/Documentation/virtual/kvm/locking.txt
index 3b4cd3b..1a851be 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -12,7 +12,7 @@ KVM Lock Overview
 Name:          kvm_lock
 Type:          raw_spinlock
 Arch:          any
-Protects:      - vm_list
+Protects:      - kvm_list
                - hardware virtualization enable/disable
 Comment:       'raw' because hardware enabling/disabling must be atomic /wrt
                migration.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 020413a..186b2b0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -105,7 +105,7 @@
 #define ASYNC_PF_PER_VCPU 64
 
 extern raw_spinlock_t kvm_lock;
-extern struct list_head vm_list;
+extern struct list_head kvm_list;
 
 struct kvm_vcpu;
 struct kvm;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a2a9b4..590f76b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3911,7 +3911,7 @@ static int mmu_shrink(struct shrinker *shrink, struct 
shrink_control *sc)
 
        raw_spin_lock(&kvm_lock);
 
-       list_for_each_entry(kvm, &vm_list, vm_list) {
+       list_for_each_entry(kvm, &kvm_list, list) {
                int idx;
                LIST_HEAD(invalid_list);
 
@@ -3930,7 +3930,7 @@ static int mmu_shrink(struct shrinker *shrink, struct 
shrink_control *sc)
                srcu_read_unlock(&kvm->srcu, idx);
        }
        if (kvm_freed)
-               list_move_tail(&kvm_freed->vm_list, &vm_list);
+               list_move_tail(&kvm_freed->list, &kvm_list);
 
        raw_spin_unlock(&kvm_lock);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eeeaf2e..96f118b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4566,7 +4566,7 @@ static int kvmclock_cpufreq_notifier(struct 
notifier_block *nb, unsigned long va
        smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
        raw_spin_lock(&kvm_lock);
-       list_for_each_entry(kvm, &vm_list, vm_list) {
+       list_for_each_entry(kvm, &kvm_list, list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
                                continue;
@@ -5857,7 +5857,7 @@ int kvm_arch_hardware_enable(void *garbage)
        int i;
 
        kvm_shared_msr_cpu_online();
-       list_for_each_entry(kvm, &vm_list, vm_list)
+       list_for_each_entry(kvm, &kvm_list, list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        if (vcpu->cpu == smp_processor_id())
                                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8c5c303..054b52e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -256,7 +256,7 @@ struct kvm {
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
-       struct list_head vm_list;
+       struct list_head list; /* the list of kvm instances */
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
 #ifdef CONFIG_HAVE_KVM_EVENTFD
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d8bac07..03ae960 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,8 +70,8 @@ MODULE_LICENSE("GPL");
  *             kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
-LIST_HEAD(vm_list);
+DEFINE_RAW_SPINLOCK(kvm_lock); /* protect kvm_list */
+LIST_HEAD(kvm_list);           /* the list of kvm instances */
 
 static cpumask_var_t cpus_hardware_enabled;
 static int kvm_usage_count = 0;
@@ -498,7 +498,7 @@ static struct kvm *kvm_create_vm(void)
                goto out_err;
 
        raw_spin_lock(&kvm_lock);
-       list_add(&kvm->vm_list, &vm_list);
+       list_add(&kvm->list, &kvm_list);
        raw_spin_unlock(&kvm_lock);
 
        return kvm;
@@ -573,7 +573,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 
        kvm_arch_sync_events(kvm);
        raw_spin_lock(&kvm_lock);
-       list_del(&kvm->vm_list);
+       list_del(&kvm->list);
        raw_spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
@@ -2626,7 +2626,7 @@ static int vm_stat_get(void *_offset, u64 *val)
 
        *val = 0;
        raw_spin_lock(&kvm_lock);
-       list_for_each_entry(kvm, &vm_list, vm_list)
+       list_for_each_entry(kvm, &kvm_list, list)
                *val += *(u32 *)((void *)kvm + offset);
        raw_spin_unlock(&kvm_lock);
        return 0;
@@ -2643,7 +2643,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 
        *val = 0;
        raw_spin_lock(&kvm_lock);
-       list_for_each_entry(kvm, &vm_list, vm_list)
+       list_for_each_entry(kvm, &kvm_list, list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        *val += *(u32 *)((void *)vcpu + offset);
 
-- 
1.7.5.4

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to