1996fanrui commented on code in PR #23219:
URL: https://github.com/apache/flink/pull/23219#discussion_r1308166223

########## flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java: ##########

@@ -543,22 +546,62 @@ void testExplicitFileShipping() throws Exception {
                             Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString())
                                     .toFile();
 
-            assertThat(descriptor.getShipFiles()).doesNotContain(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .doesNotContain(getPathFromLocalFile(libFile), getPathFromLocalFile(libFolder));
 
-            List<File> shipFiles = new ArrayList<>();
-            shipFiles.add(libFile);
-            shipFiles.add(libFolder);
+            List<Path> shipFiles = new ArrayList<>();
+            shipFiles.add(getPathFromLocalFile(libFile));
+            shipFiles.add(getPathFromLocalFile(libFolder));
 
             descriptor.addShipFiles(shipFiles);
 
-            assertThat(descriptor.getShipFiles()).contains(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .contains(getPathFromLocalFile(libFile), getPathFromLocalFile(libFolder));
 
             // only execute part of the deployment to test for shipped files
-            Set<File> effectiveShipFiles = new HashSet<>();
+            Set<Path> effectiveShipFiles = new HashSet<>();
 
             descriptor.addLibFoldersToShipFiles(effectiveShipFiles);
 
             assertThat(effectiveShipFiles).isEmpty();
-            assertThat(descriptor.getShipFiles()).hasSize(2).contains(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .hasSize(2)
+                    .contains(getPathFromLocalFile(libFile), getPathFromLocalFile(libFolder));
+        }
+
+        String hdfsDir = "hdfs:///flink/hdfs_dir";
+        String hdfsFile = "hdfs:///flink/hdfs_file";
+        File libFile = Files.createTempFile(temporaryFolder, "libFile", ".jar").toFile();
+        File libFolder =
+                Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString()).toFile();
+        final org.apache.hadoop.conf.Configuration hdConf =
+                new org.apache.hadoop.conf.Configuration();
+        hdConf.set(
+                MiniDFSCluster.HDFS_MINIDFS_BASEDIR, temporaryFolder.toAbsolutePath().toString());
+        try (final MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(hdConf).build()) {
+            final org.apache.hadoop.fs.Path hdfsRootPath =
+                    new org.apache.hadoop.fs.Path(hdfsCluster.getURI());
+            hdfsCluster.getFileSystem().mkdirs(new org.apache.hadoop.fs.Path(hdfsDir));
+            hdfsCluster.getFileSystem().createNewFile(new org.apache.hadoop.fs.Path(hdfsFile));
+
+            Configuration flinkConfiguration = new Configuration();
+            flinkConfiguration.set(
+                    YarnConfigOptions.SHIP_FILES,
+                    Arrays.asList(
+                            libFile.getAbsolutePath(),
+                            libFolder.getAbsolutePath(),
+                            hdfsDir,
+                            hdfsFile));
+            final YarnConfiguration yarnConfig = new YarnConfiguration();
+            yarnConfig.set(
+                    CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
+            YarnClusterDescriptor descriptor =
+                    createYarnClusterDescriptor(flinkConfiguration, yarnConfig);
+            assertThat(descriptor.getShipFiles())
+                    .containsExactly(
+                            getPathFromLocalFile(libFile),
+                            getPathFromLocalFile(libFolder),
+                            new Path(hdfsDir),
+                            new Path(hdfsFile));

Review Comment:
   These new tests can be moved to a separate test method. The old test exercises `YarnClusterDescriptor.addShipFiles`, as its name indicates, while your new test exercises `YarnConfigOptions.SHIP_FILES`, so it does not belong in the old test method.
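   A rough sketch of what that separate test method could look like, assuming the helpers that already appear in the diff (`temporaryFolder`, `getPathFromLocalFile`, `createYarnClusterDescriptor`) plus the same MiniDFSCluster setup; the method name is hypothetical and this is only an illustration of the suggested split, not the PR author's code:

       // Hypothetical test method name; the body is lifted from the new block in the diff above.
       @Test
       void testShipFilesSetViaYarnConfigOption() throws Exception {
           String hdfsDir = "hdfs:///flink/hdfs_dir";
           String hdfsFile = "hdfs:///flink/hdfs_file";
           File libFile = Files.createTempFile(temporaryFolder, "libFile", ".jar").toFile();
           File libFolder =
                   Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString()).toFile();

           final org.apache.hadoop.conf.Configuration hdConf =
                   new org.apache.hadoop.conf.Configuration();
           hdConf.set(
                   MiniDFSCluster.HDFS_MINIDFS_BASEDIR, temporaryFolder.toAbsolutePath().toString());

           try (final MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(hdConf).build()) {
               final org.apache.hadoop.fs.Path hdfsRootPath =
                       new org.apache.hadoop.fs.Path(hdfsCluster.getURI());
               hdfsCluster.getFileSystem().mkdirs(new org.apache.hadoop.fs.Path(hdfsDir));
               hdfsCluster.getFileSystem().createNewFile(new org.apache.hadoop.fs.Path(hdfsFile));

               // Ship files are declared through YarnConfigOptions.SHIP_FILES here,
               // not through YarnClusterDescriptor#addShipFiles as in the old test.
               Configuration flinkConfiguration = new Configuration();
               flinkConfiguration.set(
                       YarnConfigOptions.SHIP_FILES,
                       Arrays.asList(
                               libFile.getAbsolutePath(),
                               libFolder.getAbsolutePath(),
                               hdfsDir,
                               hdfsFile));

               final YarnConfiguration yarnConfig = new YarnConfiguration();
               yarnConfig.set(
                       CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());

               YarnClusterDescriptor descriptor =
                       createYarnClusterDescriptor(flinkConfiguration, yarnConfig);
               assertThat(descriptor.getShipFiles())
                       .containsExactly(
                               getPathFromLocalFile(libFile),
                               getPathFromLocalFile(libFolder),
                               new Path(hdfsDir),
                               new Path(hdfsFile));
           }
       }

   Splitting it this way keeps `testExplicitFileShipping` focused on the `addShipFiles` API, while the config-option path gets its own MiniDFSCluster-backed test.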