Hi Amit,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on clk/clk-next pza/reset/next linus/master v5.7 next-20200605]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. We also suggest using the '--base' option to specify the
base tree in git format-patch; please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Amit-Singh-Tomar/Add-MMC-and-DMA-support-for-Actions-S700/20200603-013935
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: arm64-randconfig-r024-20200605 (attached as .config)
compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project 6dd738e2f0609f7d3313b574a1d471263d2d3ba1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All warnings (new ones prefixed by >>, old ones prefixed by <<):

>> drivers/dma/owl-dma.c:1102:14: warning: cast to smaller integer type 'enum owl_dma_id' from 'const void *' [-Wvoid-pointer-to-enum-cast]
od->devid = (enum owl_dma_id)of_id->data;
            ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 warning generated.
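
The warning fires because on arm64 a 'const void *' is 64 bits wide while the
enum is only int-sized, so clang flags the direct pointer-to-enum cast. When
the match data is just a small integer stashed in the of_device_id .data
pointer, one common way to keep the conversion explicit and silence
-Wvoid-pointer-to-enum-cast is to cast through uintptr_t first. A minimal,
untested sketch against the quoted line:

        /* go via uintptr_t so the pointer-to-integer narrowing is explicit */
        od->devid = (enum owl_dma_id)(uintptr_t)of_id->data;

Some drivers spell this with unsigned long or kernel_ulong_t instead; any
integer type wide enough to hold a pointer avoids the warning.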

vim +1102 drivers/dma/owl-dma.c

  1070  
  1071  static int owl_dma_probe(struct platform_device *pdev)
  1072  {
  1073          struct device_node *np = pdev->dev.of_node;
  1074          struct owl_dma *od;
  1075          int ret, i, nr_channels, nr_requests;
  1076          const struct of_device_id *of_id =
  1077                                  of_match_device(owl_dma_match, &pdev->dev);
  1078  
  1079          od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
  1080          if (!od)
  1081                  return -ENOMEM;
  1082  
  1083          od->base = devm_platform_ioremap_resource(pdev, 0);
  1084          if (IS_ERR(od->base))
  1085                  return PTR_ERR(od->base);
  1086  
  1087          ret = of_property_read_u32(np, "dma-channels", &nr_channels);
  1088          if (ret) {
  1089                  dev_err(&pdev->dev, "can't get dma-channels\n");
  1090                  return ret;
  1091          }
  1092  
  1093          ret = of_property_read_u32(np, "dma-requests", &nr_requests);
  1094          if (ret) {
  1095                  dev_err(&pdev->dev, "can't get dma-requests\n");
  1096                  return ret;
  1097          }
  1098  
  1099          dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
  1100                   nr_channels, nr_requests);
  1101  
> 1102          od->devid = (enum owl_dma_id)of_id->data;
  1103  
  1104          od->nr_pchans = nr_channels;
  1105          od->nr_vchans = nr_requests;
  1106  
  1107          pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  1108  
  1109          platform_set_drvdata(pdev, od);
  1110          spin_lock_init(&od->lock);
  1111  
  1112          dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
  1113          dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
  1114          dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
  1115  
  1116          od->dma.dev = &pdev->dev;
  1117          od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
  1118          od->dma.device_tx_status = owl_dma_tx_status;
  1119          od->dma.device_issue_pending = owl_dma_issue_pending;
  1120          od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
  1121          od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
  1122          od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
  1123          od->dma.device_config = owl_dma_config;
  1124          od->dma.device_pause = owl_dma_pause;
  1125          od->dma.device_resume = owl_dma_resume;
  1126          od->dma.device_terminate_all = owl_dma_terminate_all;
  1127          od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1128          od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  1129          od->dma.directions = BIT(DMA_MEM_TO_MEM);
  1130          od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  1131  
  1132          INIT_LIST_HEAD(&od->dma.channels);
  1133  
  1134          od->clk = devm_clk_get(&pdev->dev, NULL);
  1135          if (IS_ERR(od->clk)) {
  1136                  dev_err(&pdev->dev, "unable to get clock\n");
  1137                  return PTR_ERR(od->clk);
  1138          }
  1139  
  1140          /*
  1141           * Eventhough the DMA controller is capable of generating 4
  1142           * IRQ's for DMA priority feature, we only use 1 IRQ for
  1143           * simplification.
  1144           */
  1145          od->irq = platform_get_irq(pdev, 0);
  1146          ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
  1147                                 dev_name(&pdev->dev), od);
  1148          if (ret) {
  1149                  dev_err(&pdev->dev, "unable to request IRQ\n");
  1150                  return ret;
  1151          }
  1152  
  1153          /* Init physical channel */
  1154          od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
  1155                                    sizeof(struct owl_dma_pchan), GFP_KERNEL);
  1156          if (!od->pchans)
  1157                  return -ENOMEM;
  1158  
  1159          for (i = 0; i < od->nr_pchans; i++) {
  1160                  struct owl_dma_pchan *pchan = &od->pchans[i];
  1161  
  1162                  pchan->id = i;
  1163                  pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
  1164          }
  1165  
  1166          /* Init virtual channel */
  1167          od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
  1168                                    sizeof(struct owl_dma_vchan), GFP_KERNEL);
  1169          if (!od->vchans)
  1170                  return -ENOMEM;
  1171  
  1172          for (i = 0; i < od->nr_vchans; i++) {
  1173                  struct owl_dma_vchan *vchan = &od->vchans[i];
  1174  
  1175                  vchan->vc.desc_free = owl_dma_desc_free;
  1176                  vchan_init(&vchan->vc, &od->dma);
  1177          }
  1178  
  1179          /* Create a pool of consistent memory blocks for hardware descriptors */
  1180          od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
  1181                                         sizeof(struct owl_dma_lli),
  1182                                         __alignof__(struct owl_dma_lli),
  1183                                         0);
  1184          if (!od->lli_pool) {
  1185                  dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
  1186                  return -ENOMEM;
  1187          }
  1188  
  1189          clk_prepare_enable(od->clk);
  1190  
  1191          ret = dma_async_device_register(&od->dma);
  1192          if (ret) {
  1193                  dev_err(&pdev->dev, "failed to register DMA engine device\n");
  1194                  goto err_pool_free;
  1195          }
  1196  
  1197          /* Device-tree DMA controller registration */
  1198          ret = of_dma_controller_register(pdev->dev.of_node,
  1199                                           owl_dma_of_xlate, od);
  1200          if (ret) {
  1201                  dev_err(&pdev->dev, "of_dma_controller_register failed\n");
  1202                  goto err_dma_unregister;
  1203          }
  1204  
  1205          return 0;
  1206  
  1207  err_dma_unregister:
  1208          dma_async_device_unregister(&od->dma);
  1209  err_pool_free:
  1210          clk_disable_unprepare(od->clk);
  1211          dma_pool_destroy(od->lli_pool);
  1212  
  1213          return ret;
  1214  }
  1215  
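
A (likewise untested) alternative would be to drop the of_match_device() call
and fetch the match data directly with of_device_get_match_data(); it also
returns a 'const void *', so the cast still goes through uintptr_t:

        od->devid = (enum owl_dma_id)(uintptr_t)of_device_get_match_data(&pdev->dev);

Either way the enum value is recovered without clang warning about the
pointer-to-enum narrowing.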

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz
Description: application/gzip
