Hi Huacai,

[auto build test WARNING on mmotm/master]
[also build test WARNING on v4.13 next-20170908]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Huacai-Chen/mm-dmapool-Align-to-ARCH_DMA_MINALIGN-in-non-coherent-DMA-mode/20170909-230504
base:   git://git.cmpxchg.org/linux-mmotm.git master
config: i386-randconfig-x000-201736 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   In file included from include/linux/ioport.h:12:0,
                    from include/linux/device.h:16,
                    from mm/dmapool.c:25:
   mm/dmapool.c: In function 'dma_pool_create':
   mm/dmapool.c:143:7: error: implicit declaration of function 'plat_device_is_coherent' [-Werror=implicit-function-declaration]
     if (!plat_device_is_coherent(dev))
          ^
   include/linux/compiler.h:156:30: note: in definition of macro '__trace_if'
     if (__builtin_constant_p(!!(cond)) ? !!(cond) :   \
                                 ^~~~
>> mm/dmapool.c:143:2: note: in expansion of macro 'if'
     if (!plat_device_is_coherent(dev))
     ^~
   mm/dmapool.c: At top level:
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:451:2: note: in expansion of macro 'if'
     if (dest_len > count) {
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:449:2: note: in expansion of macro 'if'
     if (dest_size < dest_len)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:446:8: note: in expansion of macro 'if'
      else if (src_size < dest_len && src_size < count)
           ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:444:3: note: in expansion of macro 'if'
      if (dest_size < dest_len && dest_size < count)
      ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memcpy_and_pad' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:443:2: note: in expansion of macro 'if'
     if (__builtin_constant_p(dest_len) && __builtin_constant_p(count)) {
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'strcpy' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:421:2: note: in expansion of macro 'if'
     if (p_size == (size_t)-1 && q_size == (size_t)-1)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'kmemdup' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:411:2: note: in expansion of macro 'if'
     if (p_size < size)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'kmemdup' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:409:2: note: in expansion of macro 'if'
     if (__builtin_constant_p(size) && p_size < size)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memchr_inv' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:400:2: note: in expansion of macro 'if'
     if (p_size < size)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memchr_inv' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:398:2: note: in expansion of macro 'if'
     if (__builtin_constant_p(size) && p_size < size)
     ^~
   include/linux/compiler.h:162:4: warning: '______f' is static but declared in inline function 'memchr' which is not static
       ______f = {     \
       ^
   include/linux/compiler.h:154:23: note: in expansion of macro '__trace_if'
    #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
                          ^~~~~~~~~~
   include/linux/string.h:389:2: note: in expansion of macro 'if'

vim +/if +143 mm/dmapool.c

  > 25  #include <linux/device.h>
    26  #include <linux/dma-mapping.h>
    27  #include <linux/dmapool.h>
    28  #include <linux/kernel.h>
    29  #include <linux/list.h>
    30  #include <linux/export.h>
    31  #include <linux/mutex.h>
    32  #include <linux/poison.h>
    33  #include <linux/sched.h>
    34  #include <linux/slab.h>
    35  #include <linux/stat.h>
    36  #include <linux/spinlock.h>
    37  #include <linux/string.h>
    38  #include <linux/types.h>
    39  #include <linux/wait.h>
    40  
    41  #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
    42  #define DMAPOOL_DEBUG 1
    43  #endif
    44  
    45  struct dma_pool {               /* the pool */
    46          struct list_head page_list;
    47          spinlock_t lock;
    48          size_t size;
    49          struct device *dev;
    50          size_t allocation;
    51          size_t boundary;
    52          char name[32];
    53          struct list_head pools;
    54  };
    55  
    56  struct dma_page {               /* cacheable header for 'allocation' bytes */
    57          struct list_head page_list;
    58          void *vaddr;
    59          dma_addr_t dma;
    60          unsigned int in_use;
    61          unsigned int offset;
    62  };
    63  
    64  static DEFINE_MUTEX(pools_lock);
    65  static DEFINE_MUTEX(pools_reg_lock);
    66  
    67  static ssize_t
    68  show_pools(struct device *dev, struct device_attribute *attr, char *buf)
    69  {
    70          unsigned temp;
    71          unsigned size;
    72          char *next;
    73          struct dma_page *page;
    74          struct dma_pool *pool;
    75  
    76          next = buf;
    77          size = PAGE_SIZE;
    78  
    79          temp = scnprintf(next, size, "poolinfo - 0.1\n");
    80          size -= temp;
    81          next += temp;
    82  
    83          mutex_lock(&pools_lock);
    84          list_for_each_entry(pool, &dev->dma_pools, pools) {
    85                  unsigned pages = 0;
    86                  unsigned blocks = 0;
    87  
    88                  spin_lock_irq(&pool->lock);
    89                  list_for_each_entry(page, &pool->page_list, page_list) {
    90                          pages++;
    91                          blocks += page->in_use;
    92                  }
    93                  spin_unlock_irq(&pool->lock);
    94  
    95                  /* per-pool info, no real statistics yet */
    96                  temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
    97                                   pool->name, blocks,
    98                                   pages * (pool->allocation / pool->size),
    99                                   pool->size, pages);
   100                  size -= temp;
   101                  next += temp;
   102          }
   103          mutex_unlock(&pools_lock);
   104  
   105          return PAGE_SIZE - size;
   106  }
   107  
   108  static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
   109  
   110  /**
   111   * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
   112   * @name: name of pool, for diagnostics
   113   * @dev: device that will be doing the DMA
   114   * @size: size of the blocks in this pool.
   115   * @align: alignment requirement for blocks; must be a power of two
   116   * @boundary: returned blocks won't cross this power of two boundary
   117   * Context: !in_interrupt()
   118   *
   119   * Returns a dma allocation pool with the requested characteristics, or
   120   * null if one can't be created.  Given one of these pools, dma_pool_alloc()
   121   * may be used to allocate memory.  Such memory will all have "consistent"
   122   * DMA mappings, accessible by the device and its driver without using
   123   * cache flushing primitives.  The actual size of blocks allocated may be
   124   * larger than requested because of alignment.
   125   *
   126   * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
   127   * cross that size boundary.  This is useful for devices which have
   128   * addressing restrictions on individual DMA transfers, such as not crossing
   129   * boundaries of 4KBytes.
   130   */
   131  struct dma_pool *dma_pool_create(const char *name, struct device *dev,
   132                                   size_t size, size_t align, size_t boundary)
   133  {
   134          struct dma_pool *retval;
   135          size_t allocation;
   136          bool empty = false;
   137  
   138          if (align == 0)
   139                  align = 1;
   140          else if (align & (align - 1))
   141                  return NULL;
   142  
 > 143          if (!plat_device_is_coherent(dev))
   144                  align = max_t(size_t, align, dma_get_cache_alignment());
   145  
   146          if (size == 0)
   147                  return NULL;
   148          else if (size < 4)
   149                  size = 4;
   150  
   151          if ((size % align) != 0)
   152                  size = ALIGN(size, align);
   153  
   154          allocation = max_t(size_t, size, PAGE_SIZE);
   155  
   156          if (!boundary)
   157                  boundary = allocation;
   158          else if ((boundary < size) || (boundary & (boundary - 1)))
   159                  return NULL;
   160  
   161          retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
   162          if (!retval)
   163                  return retval;
   164  
   165          strlcpy(retval->name, name, sizeof(retval->name));
   166  
   167          retval->dev = dev;
   168  
   169          INIT_LIST_HEAD(&retval->page_list);
   170          spin_lock_init(&retval->lock);
   171          retval->size = size;
   172          retval->boundary = boundary;
   173          retval->allocation = allocation;
   174  
   175          INIT_LIST_HEAD(&retval->pools);
   176  
   177          /*
   178           * pools_lock ensures that the ->dma_pools list does not get corrupted.
   179           * pools_reg_lock ensures that there is not a race between
   180           * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
   181           * when the first invocation of dma_pool_create() failed on
   182           * device_create_file() and the second assumes that it has been done (I
   183           * know it is a short window).
   184           */
   185          mutex_lock(&pools_reg_lock);
   186          mutex_lock(&pools_lock);
   187          if (list_empty(&dev->dma_pools))
   188                  empty = true;
   189          list_add(&retval->pools, &dev->dma_pools);
   190          mutex_unlock(&pools_lock);
   191          if (empty) {
   192                  int err;
   193  
   194                  err = device_create_file(dev, &dev_attr_pools);
   195                  if (err) {
   196                          mutex_lock(&pools_lock);
   197                          list_del(&retval->pools);
   198                          mutex_unlock(&pools_lock);
   199                          mutex_unlock(&pools_reg_lock);
   200                          kfree(retval);
   201                          return NULL;
   202                  }
   203          }
   204          mutex_unlock(&pools_reg_lock);
   205          return retval;
   206  }
   207  EXPORT_SYMBOL(dma_pool_create);
   208  
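The call flagged at line 143 above is what trips the i386 build: plat_device_is_coherent() is an arch-specific helper (MIPS has one; i386 does not, hence the implicit-declaration error), so generic code such as mm/dmapool.c has no declaration for it. Purely as an illustrative sketch, and not the patch under test, the alignment bump could lean on the generic dma_get_cache_alignment() and max_t() helpers alone; dmapool_min_align() below is a hypothetical name used only for this example:

        #include <linux/dma-mapping.h>  /* dma_get_cache_alignment() */
        #include <linux/kernel.h>       /* max_t() */

        /*
         * Sketch only: raise the requested alignment to the DMA cache
         * alignment without calling the arch-specific
         * plat_device_is_coherent().  dma_get_cache_alignment() is declared
         * for every architecture and returns ARCH_DMA_MINALIGN where the
         * architecture defines it, or 1 otherwise.
         */
        static size_t dmapool_min_align(size_t align)
        {
                return max_t(size_t, align, dma_get_cache_alignment());
        }

Whether devices with coherent DMA should also pay the larger alignment is a question for the patch review itself; the sketch only avoids the undeclared symbol.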

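For reference, the kernel-doc block quoted above describes how such a pool is meant to be used; a minimal, hypothetical driver-side fragment (the device pointer, pool name and sizes are invented for illustration) could look like:

        struct dma_pool *pool;
        dma_addr_t dma_handle;
        void *vaddr;

        /* 64-byte blocks, 64-byte aligned, never crossing a 4 KiB boundary */
        pool = dma_pool_create("example-pool", dev, 64, 64, 4096);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
        if (!vaddr) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... program dma_handle into the device, touch vaddr from the CPU ... */

        dma_pool_free(pool, vaddr, dma_handle);
        dma_pool_destroy(pool);

All four calls are the existing <linux/dmapool.h> API; only the values are made up.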
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip
