Hi Eric,

I love your patch! Perhaps something to improve:

[auto build test WARNING on iommu/next]
[also build test WARNING on linus/master v5.10-rc4 next-20201118]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Eric-Auger/SMMUv3-Nested-Stage-Setup-IOMMU-part/20201118-192520
base:   https://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git next
config: arm64-randconfig-s031-20201118 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.3-123-g626c4742-dirty
        # https://github.com/0day-ci/linux/commit/7308cdb07384d807c5ef43e6bfe0cd61c35a121e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Eric-Auger/SMMUv3-Nested-Stage-Setup-IOMMU-part/20201118-192520
        git checkout 7308cdb07384d807c5ef43e6bfe0cd61c35a121e
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <l...@intel.com>


"sparse warnings: (new ones prefixed by >>)"
>> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c:1326:37: sparse: sparse: 
>> restricted __le64 degrades to integer
   drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c:1326:37: sparse: sparse: cast to 
restricted __le64
   drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c: note: in included file (through 
arch/arm64/include/asm/atomic.h, include/linux/atomic.h, 
include/asm-generic/bitops/atomic.h, ...):
   arch/arm64/include/asm/cmpxchg.h:172:1: sparse: sparse: cast truncates bits 
from constant value (ffffffff80000000 becomes 0)
   arch/arm64/include/asm/cmpxchg.h:172:1: sparse: sparse: cast truncates bits 
from constant value (ffffffff80000000 becomes 0)
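
For context, here is a minimal standalone sketch (an illustration, not code from
the report; the function names are made up, and GENMASK_ULL(51, 4) stands in for
STRTAB_STE_3_S2TTB_MASK) of why sparse flags this pattern when __CHECK_ENDIAN__
is defined: __le64 is a bitwise-restricted type, so using it in plain integer
arithmetic "degrades" it to a bare integer before the byte swap. A suggested
one-line change follows the listing below.

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <asm/byteorder.h>

	static u64 s2ttb_degrades(__le64 ste3)
	{
		/*
		 * The '&' mixes __le64 with a native u64 mask, triggering
		 * "restricted __le64 degrades to integer"; le64_to_cpu() then
		 * receives a plain integer: "cast to restricted __le64".
		 */
		return le64_to_cpu(ste3 & GENMASK_ULL(51, 4));
	}

	static u64 s2ttb_clean(__le64 ste3)
	{
		/* Byte-swap first, then mask in CPU byte order: no warning. */
		return le64_to_cpu(ste3) & GENMASK_ULL(51, 4);
	}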

vim +1326 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

  1175  
  1176  static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
  1177                                        __le64 *dst)
  1178  {
  1179          /*
  1180           * This is hideously complicated, but we only really care about
  1181           * three cases at the moment:
  1182           *
  1183           * 1. Invalid (all zero) -> bypass/fault (init)
  1184           * 2. Bypass/fault -> single stage translation/bypass (attach)
  1185           * 3. Single or nested stage translation/bypass -> bypass/fault (detach)
  1186           * 4. S2 -> S1 + S2 (attach_pasid_table)
  1187           * 5. S1 + S2 -> S2 (detach_pasid_table)
  1188           *
  1189           * Given that we can't update the STE atomically and the SMMU
  1190           * doesn't read the thing in a defined order, that leaves us
  1191           * with the following maintenance requirements:
  1192           *
  1193           * 1. Update Config, return (init time STEs aren't live)
  1194           * 2. Write everything apart from dword 0, sync, write dword 0, sync
  1195           * 3. Update Config, sync
  1196           */
  1197          u64 val = le64_to_cpu(dst[0]);
  1198          bool s1_live = false, s2_live = false, ste_live;
  1199          bool abort, nested = false, translate = false;
  1200          struct arm_smmu_device *smmu = NULL;
  1201          struct arm_smmu_s1_cfg *s1_cfg;
  1202          struct arm_smmu_s2_cfg *s2_cfg;
  1203          struct arm_smmu_domain *smmu_domain = NULL;
  1204          struct arm_smmu_cmdq_ent prefetch_cmd = {
  1205                  .opcode         = CMDQ_OP_PREFETCH_CFG,
  1206                  .prefetch       = {
  1207                          .sid    = sid,
  1208                  },
  1209          };
  1210  
  1211          if (master) {
  1212                  smmu_domain = master->domain;
  1213                  smmu = master->smmu;
  1214          }
  1215  
  1216          if (smmu_domain) {
  1217                  s1_cfg = &smmu_domain->s1_cfg;
  1218                  s2_cfg = &smmu_domain->s2_cfg;
  1219  
  1220                  switch (smmu_domain->stage) {
  1221                  case ARM_SMMU_DOMAIN_S1:
  1222                          s1_cfg->set = true;
  1223                          s2_cfg->set = false;
  1224                          break;
  1225                  case ARM_SMMU_DOMAIN_S2:
  1226                          s1_cfg->set = false;
  1227                          s2_cfg->set = true;
  1228                          break;
  1229                  case ARM_SMMU_DOMAIN_NESTED:
  1230                          /*
  1231                           * Actual usage of stage 1 depends on nested mode:
  1232                           * legacy (2nd stage only) or true nested mode
  1233                           */
  1234                          s2_cfg->set = true;
  1235                          break;
  1236                  default:
  1237                          break;
  1238                  }
  1239                  nested = s1_cfg->set && s2_cfg->set;
  1240                  translate = s1_cfg->set || s2_cfg->set;
  1241          }
  1242  
  1243          if (val & STRTAB_STE_0_V) {
  1244                  switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
  1245                  case STRTAB_STE_0_CFG_BYPASS:
  1246                          break;
  1247                  case STRTAB_STE_0_CFG_S1_TRANS:
  1248                          s1_live = true;
  1249                          break;
  1250                  case STRTAB_STE_0_CFG_S2_TRANS:
  1251                          s2_live = true;
  1252                          break;
  1253                  case STRTAB_STE_0_CFG_NESTED:
  1254                          s1_live = true;
  1255                          s2_live = true;
  1256                          break;
  1257                  case STRTAB_STE_0_CFG_ABORT:
  1258                          break;
  1259                  default:
  1260                          BUG(); /* STE corruption */
  1261                  }
  1262          }
  1263  
  1264          ste_live = s1_live || s2_live;
  1265  
  1266          /* Nuke the existing STE_0 value, as we're going to rewrite it */
  1267          val = STRTAB_STE_0_V;
  1268  
  1269          /* Bypass/fault */
  1270  
  1271          if (!smmu_domain)
  1272                  abort = disable_bypass;
  1273          else
  1274                  abort = smmu_domain->abort;
  1275  
  1276          if (abort || !translate) {
  1277                  if (abort)
  1278                          val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
  1279                  else
  1280                          val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
  1281  
  1282                  dst[0] = cpu_to_le64(val);
  1283                  dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
  1284                                                  STRTAB_STE_1_SHCFG_INCOMING));
  1285                  dst[2] = 0; /* Nuke the VMID */
  1286                  /*
  1287                   * The SMMU can perform negative caching, so we must sync
  1288                   * the STE regardless of whether the old value was live.
  1289                   */
  1290                  if (smmu)
  1291                          arm_smmu_sync_ste_for_sid(smmu, sid);
  1292                  return;
  1293          }
  1294  
  1295          BUG_ON(ste_live && !nested);
  1296  
  1297          if (ste_live) {
  1298                  /* First invalidate the live STE */
  1299                  dst[0] = cpu_to_le64(STRTAB_STE_0_CFG_ABORT);
  1300                  arm_smmu_sync_ste_for_sid(smmu, sid);
  1301          }
  1302  
  1303          if (s1_cfg->set) {
  1304                  BUG_ON(s1_live);
  1305                  dst[1] = cpu_to_le64(
  1306                           FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
  1307                           FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
  1308                           FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
  1309                           FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
  1310                           FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
  1311  
  1312                  if (smmu->features & ARM_SMMU_FEAT_STALLS &&
  1313                     !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
  1314                          dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
  1315  
  1316                  val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
  1317                          FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
  1318                          FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
  1319                          FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
  1320          }
  1321  
  1322          if (s2_cfg->set) {
  1323                  u64 vttbr = s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK;
  1324  
  1325                  if (s2_live) {
> 1326                          u64 s2ttb = le64_to_cpu(dst[3] & STRTAB_STE_3_S2TTB_MASK);
  1327  
  1328                          BUG_ON(s2ttb != vttbr);
  1329                  }
  1330  
  1331                  dst[2] = cpu_to_le64(
  1332                           FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
  1333                           FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
  1334  #ifdef __BIG_ENDIAN
  1335                           STRTAB_STE_2_S2ENDI |
  1336  #endif
  1337                           STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
  1338                           STRTAB_STE_2_S2R);
  1339  
  1340                  dst[3] = cpu_to_le64(vttbr);
  1341  
  1342                  val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
  1343          } else {
  1344                  dst[2] = 0;
  1345                  dst[3] = 0;
  1346          }
  1347  
  1348          if (master->ats_enabled)
  1349                  dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
  1350                                                   STRTAB_STE_1_EATS_TRANS));
  1351  
  1352          arm_smmu_sync_ste_for_sid(smmu, sid);
  1353          /* See comment in arm_smmu_write_ctx_desc() */
  1354          WRITE_ONCE(dst[0], cpu_to_le64(val));
  1355          arm_smmu_sync_ste_for_sid(smmu, sid);
  1356  
  1357          /* It's likely that we'll want to use the new STE soon */
  1358          if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
  1359                  arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
  1360  }
  1361  
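
If the diagnosis above is right, a possible fix for line 1326 (assuming the
intent is to extract the S2TTB field of the live STE for comparison against
vttbr, which is masked with the same constant) is to byte-swap before masking:

-			u64 s2ttb = le64_to_cpu(dst[3] & STRTAB_STE_3_S2TTB_MASK);
+			u64 s2ttb = le64_to_cpu(dst[3]) & STRTAB_STE_3_S2TTB_MASK;

This should also silence the companion "cast to restricted __le64" warning on
the same line, and it is what the code needs on a big-endian kernel anyway,
since the CPU-order mask must apply to the converted value. The cmpxchg.h
truncation warnings are not prefixed by ">>", i.e. they are pre-existing and
come from the generic atomics headers, not from this patch.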

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

