On 27.01.23 13:34, Nikolay Borisov wrote:
Using the macro makes the code more explicit and somewhat easier to
comprehend.
Signed-off-by: Nikolay Borisov <nikolay.bori...@virtuozzo.com>
---
block/blk-cbt.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/block/blk-cbt.c b/block/blk-cbt.c
index 68c7f71ec3a0..967533a2a7a3 100644
--- a/block/blk-cbt.c
+++ b/block/blk-cbt.c
@@ -18,11 +18,12 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
+#include <linux/math.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#define CBT_MAX_EXTENTS 512
-#define NR_PAGES(bits) (((bits) + PAGE_SIZE*8 - 1) / (PAGE_SIZE*8))
+#define NR_PAGES(bits) DIV_ROUND_UP((bits), PAGE_SIZE*8)
#define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3))
#define CBT_PAGE_MISSED (struct page *)(0x1)
@@ -230,7 +231,7 @@ static void blk_cbt_add(struct request_queue *q, blkcnt_t start, blkcnt_t len)
if (unlikely(test_bit(CBT_ERROR, &cbt->flags)))
goto out_rcu;
- end = (start + len + (1 << cbt->block_bits) -1) >> cbt->block_bits;
+ end = DIV_ROUND_UP(start + len, 1 << cbt->block_bits);
start >>= cbt->block_bits;
len = end - start;
if (unlikely(test_bit(CBT_NOCACHE, &cbt->flags))) {
@@ -272,7 +273,7 @@ static struct cbt_info* do_cbt_alloc(struct request_queue *q, __u8 *uuid,
return ERR_PTR(-ENOMEM);
cbt->block_bits = ilog2(blocksize);
- cbt->block_max = (size + blocksize - 1) >> cbt->block_bits;
+ cbt->block_max = DIV_ROUND_UP(size, blocksize);
spin_lock_init(&cbt->lock);
memcpy(cbt->uuid, uuid, sizeof(cbt->uuid));
cbt->cache = alloc_percpu(struct cbt_extent);
@@ -587,7 +588,7 @@ void blk_cbt_update_size(struct block_device *bdev)
return;
}
bsz = 1 << cbt->block_bits;
- if ((new_sz + bsz - 1) >> cbt->block_bits <= cbt->block_max)
+ if (DIV_ROUND_UP(new_sz, bsz) <= cbt->block_max)
goto err_mtx;
new = do_cbt_alloc(q, cbt->uuid, new_sz, bsz);
@@ -945,7 +946,7 @@ static int cbt_ioc_set(struct block_device *bdev, struct blk_user_cbt_info __use
struct cbt_extent ex;
ex.start = cur_ex->ce_physical >> cbt->block_bits;
- ex.len = (cur_ex->ce_length + (1 << cbt->block_bits) -1) >> cbt->block_bits;
+ ex.len = DIV_ROUND_UP(cur_ex->ce_length, 1 << cbt->block_bits);
if (ex.start > q->cbt->block_max ||
ex.start + ex.len > q->cbt->block_max ||
ex.len == 0) {
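Double-checked the conversion: assuming DIV_ROUND_UP keeps its usual
(((n) + (d) - 1) / (d)) expansion from include/linux/math.h, it gives the
same result as the open-coded add-and-shift form for any power-of-two
divisor, which all of the divisors here are. A minimal userspace sketch of
that check (not kernel code):

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int bits;
        unsigned long long n;

        for (bits = 0; bits < 16; bits++) {
                unsigned long long d = 1ULL << bits;

                for (n = 0; n < 4096; n++)
                        assert(((n + d - 1) >> bits) == DIV_ROUND_UP(n, d));
        }
        printf("shift form and DIV_ROUND_UP agree\n");
        return 0;
}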
LGTM - the macro is definitely clearer. The only difference the macro
introduces is a division instead of a shift, but none of the changed call
sites are on fast paths.
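
To spell out that last point, a sketch with hypothetical helpers (not taken
from the patch): because 1 << cbt->block_bits is a runtime value, the
compiler cannot prove the divisor is a power of two, so the DIV_ROUND_UP
form ends up as a real integer division, while the old form was a plain
shift:

/* hypothetical helpers, for illustration only */
unsigned long long end_old(unsigned long long start, unsigned long long len,
                           unsigned int block_bits)
{
        /* old form: round up with add + shift, no divide instruction */
        return (start + len + (1ULL << block_bits) - 1) >> block_bits;
}

unsigned long long end_new(unsigned long long start, unsigned long long len,
                           unsigned int block_bits)
{
        /* DIV_ROUND_UP form: runtime divisor, so a real divide */
        return (start + len + (1ULL << block_bits) - 1) /
               (1ULL << block_bits);
}

The NR_PAGES() case is different: PAGE_SIZE*8 is a compile-time constant,
so that division should still be folded back into a shift.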
--
Regards,
Alexander Atanasov
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel