On Fri, Dec 26 2014, "Stefan I. Strogin" <s.stro...@partner.samsung.com> wrote:
> From: Dmitry Safonov <d.safo...@partner.samsung.com>
>
> Add two functions that provide an interface for computing the used size
> and the size of the biggest free chunk in a CMA region, and report that
> information in cmainfo.
>
> Signed-off-by: Dmitry Safonov <d.safo...@partner.samsung.com>

Acked-by: Michal Nazarewicz <min...@mina86.com>

> ---
>  include/linux/cma.h |  2 ++
>  mm/cma.c            | 34 ++++++++++++++++++++++++++++++++++
>  2 files changed, 36 insertions(+)
>
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index 9384ba6..855e6f2 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -18,6 +18,8 @@ struct cma;
>  extern unsigned long totalcma_pages;
>  extern phys_addr_t cma_get_base(struct cma *cma);
>  extern unsigned long cma_get_size(struct cma *cma);
> +extern unsigned long cma_get_used(struct cma *cma);
> +extern unsigned long cma_get_maxchunk(struct cma *cma);
>  
>  extern int __init cma_declare_contiguous(phys_addr_t base,
>                       phys_addr_t size, phys_addr_t limit,
> diff --git a/mm/cma.c b/mm/cma.c
> index ffaea26..5e560ed 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -78,6 +78,36 @@ unsigned long cma_get_size(struct cma *cma)
>       return cma->count << PAGE_SHIFT;
>  }
>  
> +unsigned long cma_get_used(struct cma *cma)
> +{
> +     unsigned long ret = 0;
> +
> +     mutex_lock(&cma->lock);
> +     /* cma->count fits in an int, so the cast for bitmap_weight() is safe */
> +     ret = bitmap_weight(cma->bitmap, (int)cma->count);
> +     mutex_unlock(&cma->lock);
> +
> +     return ret << (PAGE_SHIFT + cma->order_per_bit);
> +}
> +
> +unsigned long cma_get_maxchunk(struct cma *cma)
> +{
> +     unsigned long maxchunk = 0;
> +     unsigned long start, end = 0;
> +
> +     mutex_lock(&cma->lock);
> +     for (;;) {
> +             start = find_next_zero_bit(cma->bitmap, cma->count, end);
> +             if (start >= cma->count)
> +                     break;
> +             end = find_next_bit(cma->bitmap, cma->count, start);
> +             maxchunk = max(end - start, maxchunk);
> +     }
> +     mutex_unlock(&cma->lock);
> +
> +     return maxchunk << (PAGE_SHIFT + cma->order_per_bit);
> +}
> +
>  static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
>  {
>       if (align_order <= cma->order_per_bit)
> @@ -591,6 +621,10 @@ static int s_show(struct seq_file *m, void *p)
>       struct cma_buffer *cmabuf;
>       struct stack_trace trace;
>  
> +     seq_printf(m, "CMARegion stat: %8lu kB total, %8lu kB used, %8lu kB max contiguous chunk\n\n",
> +                cma_get_size(cma) >> 10,
> +                cma_get_used(cma) >> 10,
> +                cma_get_maxchunk(cma) >> 10);
>       mutex_lock(&cma->list_lock);
>  
>       list_for_each_entry(cmabuf, &cma->buffers_list, list) {
> -- 
> 2.1.0
>
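
For anyone wanting these numbers outside of cmainfo, here is a minimal,
untested sketch of how a driver might report the new statistics.
dev_get_cma_area() is just one way to obtain the struct cma pointer, and
report_cma_usage() is a made-up name for illustration:

	#include <linux/cma.h>
	#include <linux/device.h>
	#include <linux/dma-contiguous.h>

	/* Illustrative only: dump a device's CMA usage via the new helpers. */
	static void report_cma_usage(struct device *dev)
	{
		struct cma *cma = dev_get_cma_area(dev);

		if (!cma)
			return;

		dev_info(dev, "CMA: %lu kB total, %lu kB used, %lu kB max chunk\n",
			 cma_get_size(cma) >> 10,
			 cma_get_used(cma) >> 10,
			 cma_get_maxchunk(cma) >> 10);
	}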

-- 
Best regards,                                         _     _
.o. | Liege of Serenely Enlightened Majesty of      o' \,=./ `o
..o | Computer Science,  Michał “mina86” Nazarewicz    (o o)
ooo +--<m...@google.com>--<xmpp:min...@jabber.org>--ooO--(_)--Ooo--