In code paths where interrupts are known to be enabled,
spin_lock_irq/spin_unlock_irq should be used instead of
spin_lock_irqsave/spin_unlock_irqrestore.

spin_lock_irq and spin_unlock_irq are faster because they don't need to
save and restore the interrupt flags.
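
For illustration, this is the transformation applied throughout the
patch, shown as a minimal sketch (struct counter and the counter_inc_*
helpers are hypothetical, not taken from dm-cache):

  #include <linux/spinlock.h>

  struct counter {
          spinlock_t lock;
          unsigned int value;
  };

  /* Before: saves and restores the caller's interrupt state. */
  static void counter_inc_irqsave(struct counter *c)
  {
          unsigned long flags;

          spin_lock_irqsave(&c->lock, flags);
          c->value++;
          spin_unlock_irqrestore(&c->lock, flags);
  }

  /* After: correct only if the caller runs with interrupts enabled. */
  static void counter_inc_irq(struct counter *c)
  {
          spin_lock_irq(&c->lock);   /* unconditionally disables interrupts */
          c->value++;
          spin_unlock_irq(&c->lock); /* unconditionally re-enables them */
  }

The irqsave variant remains necessary on any path that may be entered
with interrupts already disabled, because spin_unlock_irq re-enables
interrupts unconditionally.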

Signed-off-by: Mikulas Patocka <[email protected]>

---
 drivers/md/dm-cache-target.c |   99 ++++++++++++-------------------------------
 1 file changed, 28 insertions(+), 71 deletions(-)

Index: linux-2.6/drivers/md/dm-cache-target.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-cache-target.c 2019-10-16 17:05:31.000000000 +0200
+++ linux-2.6/drivers/md/dm-cache-target.c      2019-10-16 17:09:35.000000000 +0200
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tra
 static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
 {
        bool r;
-       unsigned long flags;
 
-       spin_lock_irqsave(&iot->lock, flags);
+       spin_lock_irq(&iot->lock);
        r = __iot_idle_for(iot, jifs);
-       spin_unlock_irqrestore(&iot->lock, flags);
+       spin_unlock_irq(&iot->lock);
 
        return r;
 }
 
 static void iot_io_begin(struct io_tracker *iot, sector_t len)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&iot->lock, flags);
+       spin_lock_irq(&iot->lock);
        iot->in_flight += len;
-       spin_unlock_irqrestore(&iot->lock, flags);
+       spin_unlock_irq(&iot->lock);
 }
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct
 {
        struct batcher *b = container_of(_ws, struct batcher, commit_work);
        blk_status_t r;
-       unsigned long flags;
        struct list_head work_items;
        struct work_struct *ws, *tmp;
        struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct
         * We have to grab these before the commit_op to avoid a race
         * condition.
         */
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        list_splice_init(&b->work_items, &work_items);
        bio_list_merge(&bios, &b->bios);
        bio_list_init(&b->bios);
        b->commit_scheduled = false;
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        r = b->commit_op(b->commit_context);
 
@@ -238,13 +234,12 @@ static void async_commit(struct batcher
 
 static void continue_after_commit(struct batcher *b, struct continuation *k)
 {
-       unsigned long flags;
        bool commit_scheduled;
 
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        commit_scheduled = b->commit_scheduled;
        list_add_tail(&k->ws.entry, &b->work_items);
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        if (commit_scheduled)
                async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct
  */
 static void issue_after_commit(struct batcher *b, struct bio *bio)
 {
-       unsigned long flags;
        bool commit_scheduled;
 
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        commit_scheduled = b->commit_scheduled;
        bio_list_add(&b->bios, bio);
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        if (commit_scheduled)
               async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct ba
 static void schedule_commit(struct batcher *b)
 {
        bool immediate;
-       unsigned long flags;
 
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
        b->commit_scheduled = true;
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        if (immediate)
                async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio
 
 static void defer_bio(struct cache *cache, struct bio *bio)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        bio_list_add(&cache->deferred_bios, bio);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 
        wake_deferred_bio_worker(cache);
 }
 
 static void defer_bios(struct cache *cache, struct bio_list *bios)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        bio_list_merge(&cache->deferred_bios, bios);
        bio_list_init(bios);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 
        wake_deferred_bio_worker(cache);
 }
@@ -662,10 +651,6 @@ static bool bio_detain_shared(struct cac
        struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
        cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-       if (!cell_prealloc) {
-               defer_bio(cache, bio);
-               return false;
-       }
 
        build_key(oblock, end, &key);
        r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -760,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(stru
 
 static void set_discard(struct cache *cache, dm_dblock_t b)
 {
-       unsigned long flags;
-
        BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
        atomic_inc(&cache->stats.discard_count);
 
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        set_bit(from_dblock(b), cache->discard_bitset);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 }
 
 static void clear_discard(struct cache *cache, dm_dblock_t b)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        clear_bit(from_dblock(b), cache->discard_bitset);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 }
 
 static bool is_discarded(struct cache *cache, dm_dblock_t b)
 {
        int r;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        r = test_bit(from_dblock(b), cache->discard_bitset);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 
        return r;
 }
@@ -794,12 +773,10 @@ static bool is_discarded(struct cache *c
 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 {
        int r;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
                     cache->discard_bitset);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 
        return r;
 }
@@ -831,17 +808,16 @@ static void remap_to_cache(struct cache
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
-       unsigned long flags;
        struct per_bio_data *pb;
 
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
            bio_op(bio) != REQ_OP_DISCARD) {
                pb = get_per_bio_data(bio);
                pb->tick = true;
                cache->need_tick_bio = false;
        }
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 }
 
 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1491,11 +1467,6 @@ static int mg_lock_writes(struct dm_cach
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-               mg_complete(mg, false);
-               return -ENOMEM;
-       }
 
        /*
         * Prevent writes to the block, but allow reads to continue.
@@ -1533,11 +1504,6 @@ static int mg_start(struct cache *cache,
        }
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               policy_complete_background_work(cache->policy, op, false);
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->op = op;
        mg->overwrite_bio = bio;
@@ -1626,10 +1592,6 @@ static int invalidate_lock(struct dm_cac
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               invalidate_complete(mg, false);
-               return -ENOMEM;
-       }
 
        build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
        r = dm_cell_lock_v2(cache->prison, &key,
@@ -1667,10 +1629,6 @@ static int invalidate_start(struct cache
                return -EPERM;
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->overwrite_bio = bio;
        mg->invalidate_cblock = cblock;
@@ -1911,17 +1869,16 @@ static void process_deferred_bios(struct
 {
        struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
 
-       unsigned long flags;
        bool commit_needed = false;
        struct bio_list bios;
        struct bio *bio;
 
        bio_list_init(&bios);
 
-       spin_lock_irqsave(&cache->lock, flags);
+       spin_lock_irq(&cache->lock);
        bio_list_merge(&bios, &cache->deferred_bios);
        bio_list_init(&cache->deferred_bios);
-       spin_unlock_irqrestore(&cache->lock, flags);
+       spin_unlock_irq(&cache->lock);
 
        while ((bio = bio_list_pop(&bios))) {
                if (bio->bi_opf & REQ_PREFLUSH)
