[ https://issues.apache.org/jira/browse/HDFS-17680?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=18015495#comment-18015495 ]

ASF GitHub Bot commented on HDFS-17680:
---------------------------------------

MikaelSmith commented on code in PR #7884:
URL: https://github.com/apache/hadoop/pull/7884#discussion_r2291754729


##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpServer.java:
##########
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.web;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpConfig.Policy;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(value = Parameterized.class)
+public class TestDatanodeHttpServer {
+  private static final String BASEDIR = GenericTestUtils
+      .getTempPath(TestDatanodeHttpServer.class.getSimpleName());
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private static URLConnectionFactory connectionFactory;
+
+  @Parameters
+  public static Collection<Object[]> policy() {
+    Object[][] params = new Object[][] { { HttpConfig.Policy.HTTP_ONLY },
+        { HttpConfig.Policy.HTTPS_ONLY }, { HttpConfig.Policy.HTTP_AND_HTTPS } };
+    return Arrays.asList(params);
+  }
+
+  private final HttpConfig.Policy policy;
+
+  public TestDatanodeHttpServer(Policy policy) {
+    super();
+    this.policy = policy;
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+    conf = new Configuration();
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(TestDatanodeHttpServer.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    connectionFactory = URLConnectionFactory
+        .newDefaultURLConnectionFactory(conf);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+
+  @Test
+  public void testHttpPolicy() throws Exception {
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    DatanodeHttpServer server = null;
+    try {
+      server = new DatanodeHttpServer(conf, null, null);
+      server.start();
+
+      Assert.assertTrue(implies(policy.isHttpEnabled(),
+          canAccess("http", server.getHttpAddress())));
+      Assert.assertTrue(implies(!policy.isHttpEnabled(),
+          server.getHttpAddress() == null));
+
+      Assert.assertTrue(implies(policy.isHttpsEnabled(),
+          canAccess("https", server.getHttpsAddress())));
+      Assert.assertTrue(implies(!policy.isHttpsEnabled(),
+          server.getHttpsAddress() == null));
+
+    } finally {
+      if (server != null) {
+        server.close();
+      }
+    }
+  }
+
+  private static boolean canAccess(String scheme, InetSocketAddress addr) {
+    if (addr == null)
+      return false;
+    try {
+      URL url = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
+      URLConnection conn = connectionFactory.openConnection(url);
+      conn.connect();
+      Assert.assertTrue(conn instanceof java.net.HttpURLConnection);
+      java.net.HttpURLConnection httpConn = (java.net.HttpURLConnection) conn;
+      if (httpConn.getResponseCode() != 200) {
+        return false;
+      }
+
+      BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));

Review Comment:
   Actually TestNameNodeHttpServer.java doesn't try to do anything with the input stream (it calls conn.getContent()). So if anything, that's a different problem.
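
   For reference, a minimal sketch of the pattern that comment describes (reconstructed from the comment's description, not copied verbatim from TestNameNodeHttpServer.java):

{code:java}
// Sketch: connect and fetch the content object instead of reading the
// stream line-by-line; getContent() throws if the endpoint is unreachable,
// so any exception is treated as "not accessible".
private static boolean canAccess(String scheme, InetSocketAddress addr) {
  if (addr == null) {
    return false;
  }
  try {
    URL url = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
    URLConnection conn = connectionFactory.openConnection(url);
    conn.connect();
    conn.getContent();
  } catch (Exception e) {
    return false;
  }
  return true;
}
{code}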





> HDFS ui in the datanodes doesn't redirect to https when dfs.http.policy is 
> HTTPS_ONLY
> -------------------------------------------------------------------------------------
>
>                 Key: HDFS-17680
>                 URL: https://issues.apache.org/jira/browse/HDFS-17680
>             Project: Hadoop HDFS
>          Issue Type: Bug
>          Components: datanode, ui
>    Affects Versions: 3.4.1
>            Reporter: Luis Pigueiras
>            Priority: Minor
>              Labels: pull-request-available
>
> _(I'm not sure whether this belongs in HDFS or in HADOOP; feel free to move it 
> if this is not the correct place)_
> We have noticed that, with an HTTPS_ONLY configuration, clicking a datanode 
> link in the namenode UI triggers a wrong redirect.
> If you visit the HDFS UI of a namenode at https://<node>:50070/ -> Datanodes 
> -> click on a datanode, you get redirected from https://<node>:9865 to 
> http://<node>:9865. The 302 should redirect to https, not to http. Doing a 
> curl against the link exposed on the page shows the bad redirect (a 
> hypothetical check is sketched after the curl output below):
> {code}
> curl -k https://testing2475891.example.org:9865 -vvv
> ...
> < HTTP/1.1 302 Found
> < Location: http://testing2475891.example.org:9865/index.html 
> {code}
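> A hypothetical regression check for this (illustrative names, not from the 
> patch; TLS trust setup for the self-signed cert is noted in the comments) 
> would refuse to follow the redirect and assert the scheme of the Location 
> header:
> {code:java}
> import java.net.HttpURLConnection;
> import java.net.URL;
>
> public class CheckRedirect {
>   public static void main(String[] args) throws Exception {
>     // Hit the datanode's HTTPS port; for the dummy cert below, run with
>     // -Djavax.net.ssl.trustStore=etc/hadoop/hadoop.truststore
>     URL url = new URL("https://testing2475891.example.org:9865/");
>     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
>     conn.setInstanceFollowRedirects(false); // inspect the 302 instead of following it
>     int code = conn.getResponseCode();
>     String location = conn.getHeaderField("Location");
>     System.out.println(code + " -> " + location);
>     if (code == 302 && !location.startsWith("https://")) {
>       throw new AssertionError("redirect left https: " + location);
>     }
>   }
> }
> {code}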
> This issue is present in our 3.3.6 deployment, and it is also present in 
> 3.4.1, where I managed to reproduce it with the following steps:
>  - Download the latest version (binary from 
> [https://hadoop.apache.org/releases.html] -> 3.4.1)
>  - Uncompress the binaries:
> {code:java}
> tar -xvf hadoop-3.4.1.tar.gz
> cd hadoop-3.4.1
> {code}
>  - Generate dummy certs for TLS and move them to {{etc/hadoop}}
> {code:java}
> keytool -genkeypair -alias hadoop -keyalg RSA -keystore hadoop.keystore -storepass changeit -validity 365
> keytool -export -alias hadoop -keystore hadoop.keystore -file hadoop.cer -storepass changeit
> keytool -import -alias hadoop -file hadoop.cer -keystore hadoop.truststore -storepass changeit -noprompt
> cp hadoop.* etc/hadoop
> {code}
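> (Optional sanity check, a sketch assuming the alias, path, and password from 
> the keytool commands above: load the keystore programmatically and confirm 
> the alias exists.)
> {code:java}
> import java.io.FileInputStream;
> import java.security.KeyStore;
>
> public class CheckKeystore {
>   public static void main(String[] args) throws Exception {
>     KeyStore ks = KeyStore.getInstance("JKS");
>     try (FileInputStream in = new FileInputStream("etc/hadoop/hadoop.keystore")) {
>       ks.load(in, "changeit".toCharArray()); // -storepass from keytool
>     }
>     System.out.println("hadoop alias present: " + ks.containsAlias("hadoop"));
>   }
> }
> {code}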
>  - Add this to {{etc/hadoop/hadoop-env.sh}}
> {code:java}
> export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
> export HDFS_NAMENODE_USER=root
> export HDFS_DATANODE_USER=root
> export HDFS_SECONDARYNAMENODE_USER=root
> {code}
>  - Create an {{etc/hadoop/ssl-server.xml}} with:
> {code:java}
> <configuration>
> <property>
>   <name>ssl.server.truststore.location</name>
>   <value>/root/hadoop/hadoop-3.4.1/etc/hadoop/hadoop.truststore</value>
>   <description>Truststore to be used by NN and DN. Must be specified.
>   </description>
> </property>
> <property>
>   <name>ssl.server.truststore.password</name>
>   <value>changeit</value>
>   <description>Optional. Default value is "".
>   </description>
> </property>
> <property>
>   <name>ssl.server.truststore.type</name>
>   <value>jks</value>
>   <description>Optional. The keystore file format, default value is "jks".
>   </description>
> </property>
> <property>
>   <name>ssl.server.truststore.reload.interval</name>
>   <value>10000</value>
>   <description>Truststore reload check interval, in milliseconds.
>   Default value is 10000 (10 seconds).
>   </description>
> </property>
> <property>
>   <name>ssl.server.keystore.location</name>
>   <value>/root/hadoop/hadoop-3.4.1/etc/hadoop/hadoop.keystore</value>
>   <description>Keystore to be used by NN and DN. Must be specified.
>   </description>
> </property>
> <property>
>   <name>ssl.server.keystore.password</name>
>   <value>changeit</value>
>   <description>Must be specified.
>   </description>
> </property>
> <property>
>   <name>ssl.server.keystore.keypassword</name>
>   <value>changeit</value>
>   <description>Must be specified.
>   </description>
> </property>
> <property>
>   <name>ssl.server.keystore.type</name>
>   <value>jks</value>
>   <description>Optional. The keystore file format, default value is "jks".
>   </description>
> </property>
> <property>
>   <name>ssl.server.exclude.cipher.list</name>
>   <value>TLS_ECDHE_RSA_WITH_RC4_128_SHA,SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
>   SSL_RSA_WITH_DES_CBC_SHA,SSL_DHE_RSA_WITH_DES_CBC_SHA,
>   SSL_RSA_EXPORT_WITH_RC4_40_MD5,SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,
>   SSL_RSA_WITH_RC4_128_MD5</value>
>   <description>Optional. The weak security cipher suites that you want 
> excluded
>   from SSL communication.</description>
> </property>
> </configuration>
> {code}
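> (For context: Hadoop picks this file up as a plain Configuration resource 
> from the classpath. A minimal sketch of reading a value back, assuming 
> {{etc/hadoop}} is on the classpath:)
> {code:java}
> import org.apache.hadoop.conf.Configuration;
>
> public class CheckSslServerConf {
>   public static void main(String[] args) {
>     Configuration sslConf = new Configuration(false); // skip core-default.xml etc.
>     sslConf.addResource("ssl-server.xml");
>     System.out.println(sslConf.get("ssl.server.keystore.location"));
>   }
> }
> {code}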
>  - hdfs-site.xml:
> {code:java}
> <?xml version="1.0" encoding="UTF-8"?>
> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
> <!--
>   Licensed under the Apache License, Version 2.0 (the "License");
>   you may not use this file except in compliance with the License.
>   You may obtain a copy of the License at
>     http://www.apache.org/licenses/LICENSE-2.0
>   Unless required by applicable law or agreed to in writing, software
>   distributed under the License is distributed on an "AS IS" BASIS,
>   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
>   See the License for the specific language governing permissions and
>   limitations under the License. See accompanying LICENSE file.
> -->
> <!-- Put site-specific property overrides in this file. -->
> <configuration>
>     <property>
>         <name>dfs.replication</name>
>         <value>1</value>
>     </property>
> <property>
>   <name>dfs.http.policy</name>
>   <value>HTTPS_ONLY</value>
> </property>
> <property>
>     <name>dfs.https.enable</name>
>     <value>true</value>
> </property>
> <property>
>     <name>dfs.namenode.https-address</name>
>     <value>0.0.0.0:50070</value>
> </property>
> <property>
>   <name>dfs.https.server.keystore.resource</name>
>   <value>ssl-server.xml</value>
> </property>
> </configuration>
> {code}
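> (How the policy takes effect, as a sketch: HDFS resolves {{dfs.http.policy}} 
> via DFSUtil.getHttpPolicy into the same HttpConfig.Policy enum the test above 
> parameterizes over.)
> {code:java}
> import org.apache.hadoop.conf.Configuration;
> import org.apache.hadoop.hdfs.DFSUtil;
> import org.apache.hadoop.http.HttpConfig;
>
> public class CheckPolicy {
>   public static void main(String[] args) {
>     Configuration conf = new Configuration();
>     conf.addResource("hdfs-site.xml"); // assumes it is on the classpath
>     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
>     // For HTTPS_ONLY: isHttpEnabled() is false, isHttpsEnabled() is true
>     System.out.println(policy + " http=" + policy.isHttpEnabled()
>         + " https=" + policy.isHttpsEnabled());
>   }
> }
> {code}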
>  - core-site.xml:
> {code:java}
> <configuration>
>     <property>
>         <name>fs.defaultFS</name>
>         <value>hdfs://localhost:9000</value>
>     </property>
> <property>
>   <name>hadoop.ssl.enabled</name>
>   <value>true</value>
> </property>
> <property>
>   <name>hadoop.ssl.keystores.factory.class</name>
>   <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
> </property>
> <property>
>   <name>hadoop.ssl.server.keystore.resource</name>
>   <value>hadoop.keystore</value>
> </property>
> <property>
>   <name>hadoop.ssl.server.keystore.password</name>
>   <value>changeit</value>
> </property>
> <property>
>   <name>hadoop.ssl.server.truststore.resource</name>
>   <value>hadoop.truststore</value>
> </property>
> <property>
>   <name>hadoop.ssl.server.truststore.password</name>
>   <value>changeit</value>
> </property>
> </configuration>
> {code}
>  - Now you can initialize:
> {code:java}
> bin/hdfs namenode -format
> sbin/start-dfs.sh
> {code}
>  - If you visit https://<node>:50070/ -> Datanodes -> click on a datanode, 
> you get redirected from https://<node>:9865 to http://<node>:9865:
> {code}
> curl -k https://testing2475891.example.org:9865 -vvv
> ...
> < HTTP/1.1 302 Found
> < Location: http://testing2475891.example.org:9865/index.html
> {code}


