[ https://issues.apache.org/jira/browse/HDFS-16997?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Xiaoqiao He resolved HDFS-16997.
--------------------------------
    Fix Version/s: 3.4.0
     Hadoop Flags: Reviewed
       Resolution: Fixed

> Set the locale to avoid printing useless logs in BlockSender
> ------------------------------------------------------------
>
>                 Key: HDFS-16997
>                 URL: https://issues.apache.org/jira/browse/HDFS-16997
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>            Reporter: Shuyan Zhang
>            Assignee: Shuyan Zhang
>            Priority: Major
>              Labels: pull-request-available
>             Fix For: 3.4.0
>
>
> In our production environment, if the Hadoop process is started under a
> non-English locale, many unexpected error logs are printed. The following
> are the error messages printed by a DataNode; 断开的管道 is the Chinese
> localization of "Broken pipe".
> ```
> 2023-05-01 09:10:50,299 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,299 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: BlockSender.sendChunks() exception:
> java.io.IOException: 断开的管道
>         at sun.nio.ch.FileChannelImpl.transferTo0(Native Method)
>         at sun.nio.ch.FileChannelImpl.transferToDirectlyInternal(FileChannelImpl.java:428)
>         at sun.nio.ch.FileChannelImpl.transferToDirectly(FileChannelImpl.java:493)
>         at sun.nio.ch.FileChannelImpl.transferTo(FileChannelImpl.java:608)
>         at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:242)
>         at org.apache.hadoop.hdfs.server.datanode.FileIoProvider.transferToSocketFully(FileIoProvider.java:260)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:559)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:801)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:755)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.readBlock(DataXceiver.java:580)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opReadBlock(Receiver.java:116)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:71)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:258)
>         at java.lang.Thread.run(Thread.java:745)
> 2023-05-01 09:10:50,298 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,298 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,298 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,298 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,302 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: BlockSender.sendChunks() exception:
> java.io.IOException: 断开的管道
>         at sun.nio.ch.FileChannelImpl.transferTo0(Native Method)
>         at sun.nio.ch.FileChannelImpl.transferToDirectlyInternal(FileChannelImpl.java:428)
>         at sun.nio.ch.FileChannelImpl.transferToDirectly(FileChannelImpl.java:493)
>         at sun.nio.ch.FileChannelImpl.transferTo(FileChannelImpl.java:608)
>         at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:242)
>         at org.apache.hadoop.hdfs.server.datanode.FileIoProvider.transferToSocketFully(FileIoProvider.java:260)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:559)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:801)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:755)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.readBlock(DataXceiver.java:580)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opReadBlock(Receiver.java:116)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:71)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:258)
>         at java.lang.Thread.run(Thread.java:745)
> 2023-05-01 09:10:50,303 ERROR org.apache.hadoop.hdfs.server.datanode.FileIoProvider: error in op transferToSocketFully : 断开的管道
> 2023-05-01 09:10:50,303 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: BlockSender.sendChunks() exception:
> java.io.IOException: 断开的管道
>         at sun.nio.ch.FileChannelImpl.transferTo0(Native Method)
>         at sun.nio.ch.FileChannelImpl.transferToDirectlyInternal(FileChannelImpl.java:428)
>         at sun.nio.ch.FileChannelImpl.transferToDirectly(FileChannelImpl.java:493)
>         at sun.nio.ch.FileChannelImpl.transferTo(FileChannelImpl.java:608)
>         at org.apache.hadoop.net.SocketOutputStream.transferToFully(SocketOutputStream.java:242)
>         at org.apache.hadoop.hdfs.server.datanode.FileIoProvider.transferToSocketFully(FileIoProvider.java:260)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:568)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:801)
>         at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:755)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.readBlock(DataXceiver.java:580)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opReadBlock(Receiver.java:116)
>         at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:71)
>         at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:258)
>         at java.lang.Thread.run(Thread.java:745)
> ```
> The reason for this situation is that the code decides whether to print an
> exception log based on the message of the IOException, and the locale
> changes the content of that message: the check expects the English text
> (e.g. "Broken pipe"), so under a Chinese locale, where the JVM reports
> 断开的管道 instead, it never matches and every routine client disconnect is
> logged as an ERROR.
> This large number of error logs is very misleading, so this patch sets the
> environment variable LANG in hadoop-env.sh.
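> To make the failure mode concrete, below is a minimal, self-contained
> sketch of a message-based log filter like the one described above. It is
> illustrative only, not the actual BlockSender source; the class and method
> names here are invented for the demonstration.
> ```
> import java.io.IOException;
>
> /** Illustrative only: shows why a locale-dependent exception message
>  *  defeats a string-based log filter. */
> public class LocaleSensitiveLogFilter {
>
>   /** Returns true when the error looks like a routine client disconnect
>    *  that should be suppressed rather than logged at ERROR level. */
>   static boolean isHarmlessDisconnect(IOException e) {
>     String msg = e.getMessage();
>     // This match only works when the JVM renders OS errors in English.
>     return msg != null
>         && (msg.startsWith("Broken pipe") || msg.startsWith("Connection reset"));
>   }
>
>   public static void main(String[] args) {
>     // English locale: the filter matches and the noisy log is suppressed.
>     System.out.println(isHarmlessDisconnect(new IOException("Broken pipe")));  // true
>     // Chinese locale: the same OS error carries a translated message, the
>     // filter misses, and every client disconnect becomes an ERROR log.
>     System.out.println(isHarmlessDisconnect(new IOException("断开的管道")));     // false
>   }
> }
> ```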
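> The fix pins the process locale so OS error messages keep their English
> text. A minimal sketch of the kind of line added to hadoop-env.sh follows;
> the exact variable and value in the committed patch may differ.
> ```
> # Force an English locale so JVM/OS error messages keep their English text
> # and message-based log filters keep working. Sketch only; see the
> # committed HDFS-16997 change for the exact setting.
> export LANG=en_US.UTF-8
> ```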