Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Index: kvm/virt/kvm/coalesced_mmio.c
===================================================================
--- kvm.orig/virt/kvm/coalesced_mmio.c
+++ kvm/virt/kvm/coalesced_mmio.c
@@ -26,9 +26,10 @@ static int coalesced_mmio_in_range(struc
        if (!is_write)
                return 0;
 
-       /* kvm->lock is taken by the caller and must be not released before
-         * dev.read/write
-         */
+       /*
+        * dev->lock must not be released before coalesced_mmio_write.
+        */
+       mutex_lock(&dev->lock);
 
        /* Are we able to batch it ? */
 
@@ -41,6 +42,7 @@ static int coalesced_mmio_in_range(struc
                                                        KVM_COALESCED_MMIO_MAX;
        if (next == dev->kvm->coalesced_mmio_ring->first) {
                /* full */
+               mutex_unlock(&dev->lock);
                return 0;
        }
 
@@ -57,6 +59,7 @@ static int coalesced_mmio_in_range(struc
                    addr + len <= zone->addr + zone->size)
                        return 1;
        }
+       mutex_unlock(&dev->lock);
        return 0;
 }
 
@@ -67,8 +70,6 @@ static void coalesced_mmio_write(struct 
                                (struct kvm_coalesced_mmio_dev*)this->private;
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
 
-       /* kvm->lock must be taken by caller before call to in_range()*/
-
        /* copy data in first free entry of the ring */
 
        ring->coalesced_mmio[ring->last].phys_addr = addr;
@@ -76,6 +77,7 @@ static void coalesced_mmio_write(struct 
        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
        smp_wmb();
        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+       mutex_unlock(&dev->lock);
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -90,6 +92,8 @@ int kvm_coalesced_mmio_init(struct kvm *
        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
+       mutex_init(&dev->lock);
+
        dev->dev.write  = coalesced_mmio_write;
        dev->dev.in_range  = coalesced_mmio_in_range;
        dev->dev.destructor  = coalesced_mmio_destructor;
@@ -109,16 +113,16 @@ int kvm_vm_ioctl_register_coalesced_mmio
        if (dev == NULL)
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&dev->lock);
        if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&dev->lock);
                return -ENOBUFS;
        }
 
        dev->zone[dev->nb_zones] = *zone;
        dev->nb_zones++;
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&dev->lock);
        return 0;
 }
 
@@ -132,7 +136,7 @@ int kvm_vm_ioctl_unregister_coalesced_mm
        if (dev == NULL)
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&dev->lock);
 
        i = dev->nb_zones;
        while(i) {
@@ -150,7 +154,7 @@ int kvm_vm_ioctl_unregister_coalesced_mm
                i--;
        }
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&dev->lock);
 
        return 0;
 }
Index: kvm/virt/kvm/coalesced_mmio.h
===================================================================
--- kvm.orig/virt/kvm/coalesced_mmio.h
+++ kvm/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
        struct kvm_io_device dev;
        struct kvm *kvm;
+       struct mutex lock;
        int nb_zones;
        struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };

-- 

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to