> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index bcfc288dba3f..b6a7457d8581 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -21,15 +21,20 @@
>  
>  #include "../internal.h"
>  
> +#define DIRTY_BITS(x)        ((x) + PAGE_SIZE / SECTOR_SIZE)
>  /*

Nit: please keep an empty line between a definition and a comment.

> +      * The first half bits are used to track sub-page uptodate status,
> +      * the second half bits are for dirty status.
> +      */
> +     DECLARE_BITMAP(state, PAGE_SIZE * 2 / SECTOR_SIZE);
>  };
>  
>  static inline struct iomap_page *to_iomap_page(struct page *page)
> @@ -52,8 +57,8 @@ iomap_page_create(struct inode *inode, struct page *page)
>       iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
>       atomic_set(&iop->read_count, 0);
>       atomic_set(&iop->write_count, 0);
> -     spin_lock_init(&iop->uptodate_lock);
> -     bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
> +     spin_lock_init(&iop->state_lock);
> +     bitmap_zero(iop->state, PAGE_SIZE * 2 / SECTOR_SIZE);

Maybe add a

#define IOMAP_STATE_ARRAY_SIZE  (PAGE_SIZE * 2 / SECTOR_SIZE)

and use it?  That isn't much shorter, but it is a little easier to read
at least.
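E.g. a rough, untested sketch of how that could look, using the field
names from the quoted hunks:

#define IOMAP_STATE_ARRAY_SIZE	(PAGE_SIZE * 2 / SECTOR_SIZE)

struct iomap_page {
	atomic_t		read_count;
	atomic_t		write_count;
	spinlock_t		state_lock;

	/*
	 * The first half of the bits track sub-page uptodate status,
	 * the second half track dirty status.
	 */
	DECLARE_BITMAP(state, IOMAP_STATE_ARRAY_SIZE);
};

and then in iomap_page_create():

	bitmap_zero(iop->state, IOMAP_STATE_ARRAY_SIZE);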

> +     spin_lock_irqsave(&iop->state_lock, flags);
> +     for (i = first; i <= last; i++)
> +             set_bit(i, iop->state);

I think Matthew had some patches to move these (by now pointlessly
atomic) bitops over to the bitmap_* routines.  It might make sense to
start out that way for new code as well.
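That would turn the per-bit loop into a single call, roughly:

	bitmap_set(iop->state, first, last - first + 1);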

> +
> +     if (last >= first)
> +             iomap_set_page_dirty(page);
> +
> +     spin_unlock_irqrestore(&iop->state_lock, flags);

As already pointed out, this probably needs to move out of the lock.
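I.e. something like the following, so that iomap_set_page_dirty(),
which can take other locks internally, is not called with state_lock
held and interrupts disabled (untested):

	spin_unlock_irqrestore(&iop->state_lock, flags);

	if (last >= first)
		iomap_set_page_dirty(page);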

> +static void
> +iomap_set_range_dirty(struct page *page, unsigned int off,
> +             unsigned int len)
> +{
> +     if (PageError(page))
> +             return;
> +
> +     if (page_has_private(page))
> +             iomap_iop_set_range_dirty(page, off, len);

I'd be tempted to merge this function and iomap_iop_set_range_dirty,
and just return early if there is an error or no iomap_page structure,
relying on the fact that to_iomap_page returns NULL for that case.
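Something like this, keeping the iomap_set_page_dirty() call out of the
locked section as noted above (untested sketch):

static void
iomap_set_range_dirty(struct page *page, unsigned int off,
		unsigned int len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned int first, last;
	unsigned long flags;

	if (PageError(page) || !iop)
		return;

	first = DIRTY_BITS(off >> inode->i_blkbits);
	last = DIRTY_BITS((off + len - 1) >> inode->i_blkbits);

	spin_lock_irqsave(&iop->state_lock, flags);
	bitmap_set(iop->state, first, last - first + 1);
	spin_unlock_irqrestore(&iop->state_lock, flags);

	iomap_set_page_dirty(page);
}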

> +static void
> +iomap_iop_clear_range_dirty(struct page *page, unsigned int off,
> +             unsigned int len)
> +{
> +     struct iomap_page *iop = to_iomap_page(page);
> +     struct inode *inode = page->mapping->host;
> +     unsigned int first = DIRTY_BITS(off >> inode->i_blkbits);
> +     unsigned int last = DIRTY_BITS((off + len - 1) >> inode->i_blkbits);
> +     unsigned long flags;
> +     unsigned int i;
> +
> +     spin_lock_irqsave(&iop->state_lock, flags);
> +     for (i = first; i <= last; i++)
> +             clear_bit(i, iop->state);

should probably use bitmap_clear().
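I.e. roughly:

	bitmap_clear(iop->state, first, last - first + 1);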

> +static void
> +iomap_clear_range_dirty(struct page *page, unsigned int off,
> +             unsigned int len)
> +{
> +     if (PageError(page))
> +             return;
> +
> +     if (page_has_private(page))
> +             iomap_iop_clear_range_dirty(page, off, len);
> +}

Same comment about merging the two functions as above.
