diff --git a/LICENSE.txt b/LICENSE.txt
index f8de86a1053..393ed0e7f90 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
--------------------------------------------------------------------------------
Copyright (C) 2008-2016, SpryMedia Ltd.
diff --git a/README.txt b/README.txt
index 148cd31c86b..559099b73a1 100644
--- a/README.txt
+++ b/README.txt
@@ -1,6 +1,6 @@
For the latest information about Hadoop, please visit our website at:
- http://hadoop.apache.org/core/
+ http://hadoop.apache.org/
and our wiki, at:
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index d3d96d902f3..6ec3503fbfb 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -617,8 +617,8 @@ function makearelease
#shellcheck disable=SC2038
find . -name rat.txt | xargs -I% cat % > "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-rat.txt"
- # Stage CHANGES and RELEASENOTES files
- for i in CHANGES RELEASENOTES; do
+ # Stage CHANGELOG and RELEASENOTES files
+ for i in CHANGELOG RELEASENOTES; do
run cp -p \
"${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}"/${i}*.md \
"${ARTIFACTS_DIR}/${i}.md"
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index c30a37d2e5f..00854b45f2d 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -117,18 +117,15 @@ ROOT=$(cd "${BASEDIR}"/../..;pwd)
echo
echo "Current directory $(pwd)"
echo
-run rm -rf "ozone"
-run mkdir "ozone"
-run cd "ozone"
+run rm -rf "ozone-${HDDS_VERSION}"
+run mkdir "ozone-${HDDS_VERSION}"
+run cd "ozone-${HDDS_VERSION}"
run cp -p "${ROOT}/LICENSE.txt" .
run cp -p "${ROOT}/NOTICE.txt" .
-run cp -p "${ROOT}/README.txt" .
# Copy hadoop-common first so that it always has all dependencies.
# Remaining projects will copy only libraries which are not present already in 'share' directory.
run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VERSION}" .
-run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
-run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
# HDDS
@@ -145,15 +142,40 @@ run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-$
run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+
+#shaded ozonefs
mkdir -p "./share/hadoop/ozonefs"
-cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
+cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem-${HDDS_VERSION}.jar"
+
+#shaded datanode service
+mkdir -p "./share/hadoop/ozoneplugin"
+cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar"
+
+
# Optional documentation, could be missing
cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./
+
+
+rm sbin/*all.sh
+rm sbin/*all.cmd
+
+#remove test and java sources
+find . -name "*tests.jar" | xargs rm
+find . -name "*sources.jar" | xargs rm
+find . -name jdiff -type d | xargs rm -rf
+
+#add ozone specific readme
+run cp "${ROOT}/hadoop-dist/src/main/ozone/README.txt" README.txt
+#Copy docker compose files and robot tests
+run cp -p -r "${ROOT}/hadoop-dist/src/main/compose" .
+run cp -p -r "${ROOT}/hadoop-dist/src/main/smoketest" .
mkdir -p ./share/hadoop/mapreduce
mkdir -p ./share/hadoop/yarn
+mkdir -p ./share/hadoop/hdfs
echo
-echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone"
+echo "Hadoop Ozone dist layout available at: ${BASEDIR}/ozone-${HDDS_VERSION}"
echo
diff --git a/dev-support/bin/ozone-dist-tar-stitching b/dev-support/bin/ozone-dist-tar-stitching
index d1116e4fe18..93d0525e7ec 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -36,13 +36,8 @@ function run()
fi
}
-#To make the final dist directory easily mountable from docker we don't use
-#version name in the directory name.
-#To include the version name in the root directory of the tar file
-# we create a symbolic link and dereference it during the tar creation
-ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
+run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
run gzip -f "ozone-${VERSION}.tar"
echo
echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
-echo
\ No newline at end of file
+echo
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index 9f6bb331c34..ae05d426b2c 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -73,7 +73,7 @@ WANTED="$1"
shift
ARGV=("$@")
-HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.4.0}
+HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.8.0}
BIN=$(yetus_abs "${BASH_SOURCE-$0}")
BINDIR=$(dirname "${BIN}")
diff --git a/dev-support/byteman/README.md b/dev-support/byteman/README.md
new file mode 100644
index 00000000000..9a17fc55be0
--- /dev/null
+++ b/dev-support/byteman/README.md
@@ -0,0 +1,31 @@
+
+
+This folder contains example byteman scripts (http://byteman.jboss.org/) to help
+with Hadoop debugging.
+
+Because the startup script of the hadoop-runner docker image supports byteman
+instrumentation, it is enough to set the URL of a script in a specific environment
+variable to activate it for docker runs:
+
+
+```
+BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+```
+
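+For example, a container based on the hadoop-runner image could be started with
+the variable already set (the image name and command below are only
+illustrative, not part of this patch):
+
+```
+docker run -e BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm \
+  apache/hadoop-runner
+```
+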
+For more information, see HADOOP-15656 and HDDS-342.
+
diff --git a/dev-support/byteman/hadooprpc.btm b/dev-support/byteman/hadooprpc.btm
new file mode 100644
index 00000000000..13894fe4ab0
--- /dev/null
+++ b/dev-support/byteman/hadooprpc.btm
@@ -0,0 +1,44 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script instruments the Hadoop RPC layer to print all request/response messages to standard output.
+#
+
+RULE Hadoop RPC request
+INTERFACE ^com.google.protobuf.BlockingService
+METHOD callBlockingMethod
+IF true
+DO traceln("--> RPC message request: " + $3.getClass().getSimpleName() + " from " + linked(Thread.currentThread(), "source"));
+ traceln($3.toString())
+ENDRULE
+
+
+RULE Hadoop RPC response
+INTERFACE ^com.google.protobuf.BlockingService
+METHOD callBlockingMethod
+AT EXIT
+IF true
+DO traceln("--> RPC message response: " + $3.getClass().getSimpleName() + " to " + unlink(Thread.currentThread(), "source"));
+ traceln($!.toString())
+ENDRULE
+
+
+RULE Hadoop RPC source IP
+CLASS org.apache.hadoop.ipc.Server$RpcCall
+METHOD run
+IF true
+DO link(Thread.currentThread(), "source", $0.connection.toString())
+ENDRULE
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 369c6060233..7679500f094 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -22,6 +22,8 @@ FROM ubuntu:xenial
WORKDIR /root
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
#####
# Disable suggests/recommends
#####
@@ -38,140 +40,159 @@ ENV DEBCONF_TERSE true
# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
# Ubuntu Java. See Java section below!
######
-RUN apt-get -q update && apt-get -q install -y \
- apt-utils \
- build-essential \
- bzip2 \
- clang \
- curl \
- doxygen \
- fuse \
- g++ \
- gcc \
- git \
- gnupg-agent \
- libbz2-dev \
- libcurl4-openssl-dev \
- libfuse-dev \
- libprotobuf-dev \
- libprotoc-dev \
- libsasl2-dev \
- libsnappy-dev \
- libssl-dev \
- libtool \
- locales \
- make \
- pinentry-curses \
- pkg-config \
- python \
- python2.7 \
- python-pip \
- python-pkg-resources \
- python-setuptools \
- python-wheel \
- rsync \
- software-properties-common \
- snappy \
- sudo \
- valgrind \
- zlib1g-dev
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends \
+ apt-utils \
+ build-essential \
+ bzip2 \
+ clang \
+ curl \
+ doxygen \
+ fuse \
+ g++ \
+ gcc \
+ git \
+ gnupg-agent \
+ libbz2-dev \
+ libcurl4-openssl-dev \
+ libfuse-dev \
+ libprotobuf-dev \
+ libprotoc-dev \
+ libsasl2-dev \
+ libsnappy-dev \
+ libssl-dev \
+ libtool \
+ locales \
+ make \
+ pinentry-curses \
+ pkg-config \
+ python \
+ python2.7 \
+ python-pip \
+ python-pkg-resources \
+ python-setuptools \
+ python-wheel \
+ rsync \
+ software-properties-common \
+ snappy \
+ sudo \
+ valgrind \
+ zlib1g-dev \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
-#######
-# OpenJDK 8
-#######
-RUN apt-get -q install -y openjdk-8-jdk
#######
-# OpenJDK 9
-# w/workaround for
-# https://bugs.launchpad.net/ubuntu/+source/openjdk-9/+bug/1593191
+# OpenJDK 8
#######
-RUN apt-get -o Dpkg::Options::="--force-overwrite" \
- -q install -y \
- openjdk-9-jdk-headless
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends openjdk-8-jdk libbcprov-java \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
-#######
-# Set default Java
-#######
-#
-# By default, OpenJDK sets the default Java to the highest version.
-# We want the opposite, soooooo....
-#
-RUN update-java-alternatives --set java-1.8.0-openjdk-amd64
-RUN update-alternatives --get-selections | grep -i jdk | \
- while read line; do \
- alternative=$(echo $line | awk '{print $1}'); \
- path=$(echo $line | awk '{print $3}'); \
- newpath=$(echo $path | sed -e 's/java-9/java-8/'); \
- update-alternatives --set $alternative $newpath; \
- done
######
# Install cmake 3.1.0 (3.5.1 ships with Xenial)
######
-RUN mkdir -p /opt/cmake && \
- curl -L -s -S \
+RUN mkdir -p /opt/cmake \
+ && curl -L -s -S \
https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \
- -o /opt/cmake.tar.gz && \
- tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake
+ -o /opt/cmake.tar.gz \
+ && tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake
ENV CMAKE_HOME /opt/cmake
ENV PATH "${PATH}:/opt/cmake/bin"
######
# Install Google Protobuf 2.5.0 (2.6.0 ships with Xenial)
######
-RUN mkdir -p /opt/protobuf-src && \
- curl -L -s -S \
+# hadolint ignore=DL3003
+RUN mkdir -p /opt/protobuf-src \
+ && curl -L -s -S \
https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz \
- -o /opt/protobuf.tar.gz && \
- tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src
-RUN cd /opt/protobuf-src && ./configure --prefix=/opt/protobuf && make install
+ -o /opt/protobuf.tar.gz \
+ && tar xzf /opt/protobuf.tar.gz --strip-components 1 -C /opt/protobuf-src \
+ && cd /opt/protobuf-src \
+ && ./configure --prefix=/opt/protobuf \
+ && make install \
+ && cd /root \
+ && rm -rf /opt/protobuf-src
ENV PROTOBUF_HOME /opt/protobuf
ENV PATH "${PATH}:/opt/protobuf/bin"
######
# Install Apache Maven 3.3.9 (3.3.9 ships with Xenial)
######
-RUN apt-get -q update && apt-get -q install -y maven
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends maven \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
ENV MAVEN_HOME /usr
######
# Install findbugs 3.0.1 (3.0.1 ships with Xenial)
# Ant is needed for findbugs
######
-RUN apt-get -q update && apt-get -q install -y findbugs ant
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends findbugs ant \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
ENV FINDBUGS_HOME /usr
####
# Install shellcheck (0.4.6, the latest as of 2017-09-26)
####
-RUN add-apt-repository -y ppa:jonathonf/ghc-8.0.2
-RUN apt-get -q update && apt-get -q install -y shellcheck
+# hadolint ignore=DL3008
+RUN add-apt-repository -y ppa:jonathonf/ghc-8.0.2 \
+ && apt-get -q update \
+ && apt-get -q install -y --no-install-recommends shellcheck \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
####
# Install bats (0.4.0, the latest as of 2017-09-26, ships with Xenial)
####
-RUN apt-get -q update && apt-get -q install -y bats
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends bats \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
####
-# Install pylint (always want latest)
+# Install pylint at fixed version (2.0.0 removed python2 support)
+# https://github.com/PyCQA/pylint/issues/2294
####
-RUN pip2 install pylint
+RUN pip2 install pylint==1.9.2
####
# Install dateutil.parser
####
-RUN pip2 install python-dateutil
+RUN pip2 install python-dateutil==2.7.3
###
# Install node.js for web UI framework (4.2.6 ships with Xenial)
###
-RUN apt-get -y install nodejs && \
- ln -s /usr/bin/nodejs /usr/bin/node && \
- apt-get -y install npm && \
- npm install npm@latest -g && \
- npm install -g bower && \
- npm install -g ember-cli
+# hadolint ignore=DL3008, DL3016
+RUN apt-get -q update \
+ && apt-get install -y --no-install-recommends nodejs npm \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/* \
+ && ln -s /usr/bin/nodejs /usr/bin/node \
+ && npm install npm@latest -g \
+ && npm install -g jshint
+
+###
+# Install hadolint
+####
+RUN curl -L -s -S \
+ https://github.com/hadolint/hadolint/releases/download/v1.11.1/hadolint-Linux-x86_64 \
+ -o /bin/hadolint \
+ && chmod a+rx /bin/hadolint \
+ && shasum -a 512 /bin/hadolint | \
+ awk '$1!="734e37c1f6619cbbd86b9b249e69c9af8ee1ea87a2b1ff71dccda412e9dac35e63425225a95d71572091a3f0a11e9a04c2fc25d9e91b840530c26af32b9891ca" {exit(1)}'
###
# Avoid out of memory errors in builds
@@ -188,21 +209,27 @@ ENV MAVEN_OPTS -Xms256m -Xmx1536m
####
# Install svn & Forrest (for Apache Hadoop website)
###
-RUN apt-get -q update && apt-get -q install -y subversion
-
-RUN mkdir -p /opt/apache-forrest && \
- curl -L -s -S \
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+ && apt-get -q install -y --no-install-recommends subversion \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /opt/apache-forrest \
+ && curl -L -s -S \
https://archive.apache.org/dist/forrest/0.8/apache-forrest-0.8.tar.gz \
- -o /opt/forrest.tar.gz && \
- tar xzf /opt/forrest.tar.gz --strip-components 1 -C /opt/apache-forrest
+ -o /opt/forrest.tar.gz \
+ && tar xzf /opt/forrest.tar.gz --strip-components 1 -C /opt/apache-forrest
RUN echo 'forrest.home=/opt/apache-forrest' > build.properties
ENV FORREST_HOME=/opt/apache-forrest
# Hugo static website generator (for new hadoop site and Ozone docs)
-RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.30.2/hugo_0.30.2_Linux-64bit.deb && dpkg --install hugo.deb && rm hugo.deb
+RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.30.2/hugo_0.30.2_Linux-64bit.deb \
+ && dpkg --install hugo.deb \
+ && rm hugo.deb
# Add a welcome message and environment checks.
-ADD hadoop_env_checks.sh /root/hadoop_env_checks.sh
+COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh
RUN chmod 755 /root/hadoop_env_checks.sh
-RUN echo '~/hadoop_env_checks.sh' >> /root/.bashrc
-
+# hadolint ignore=SC2016
+RUN echo '${HOME}/hadoop_env_checks.sh' >> /root/.bashrc
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
similarity index 100%
rename from hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
rename to hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml
diff --git a/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml b/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml
index 084384d689c..ccc89c8bf0f 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml
@@ -18,5 +18,4 @@
-
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 6fa24b49e54..ea8d6800d0d 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -166,6 +166,14 @@
           <groupId>commons-io</groupId>
           <artifactId>commons-io</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-lang3</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-text</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
@@ -491,6 +499,10 @@
           <artifactId>commons-codec</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.commons</groupId>
+          <artifactId>commons-lang3</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>commons-logging</groupId>
           <artifactId>commons-logging</artifactId>
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 64dde7187b4..fd329e29482 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -53,6 +53,30 @@
+    <profile>
+      <id>jdk10</id>
+      <activation>
+        <jdk>[10,)</jdk>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-compiler-plugin</artifactId>
+            <configuration>
+              <excludes>
+                <exclude>org/apache/hadoop/classification/tools/</exclude>
+              </excludes>
+            </configuration>
+          </plugin>
+          <plugin>
+            <artifactId>maven-javadoc-plugin</artifactId>
+            <configuration>
+              <excludePackageNames>org.apache.hadoop.classification.tools</excludePackageNames>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
diff --git a/hadoop-common-project/hadoop-common/HadoopJNI.cmake b/hadoop-common-project/hadoop-common/HadoopJNI.cmake
index 78d7ffda6fb..bf0d73eb56a 100644
--- a/hadoop-common-project/hadoop-common/HadoopJNI.cmake
+++ b/hadoop-common-project/hadoop-common/HadoopJNI.cmake
@@ -93,5 +93,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
# Otherwise, use the standard FindJNI module to locate the JNI components.
#
else()
+ find_package(Java REQUIRED)
+ include(UseJava)
find_package(JNI REQUIRED)
endif()
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 67a5a54839f..695dcdee58a 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -171,6 +171,11 @@
       <artifactId>commons-lang3</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-text</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
@@ -1074,6 +1079,8 @@
                    <argument>--projecttitle</argument>
                    <argument>Apache Hadoop</argument>
                    <argument>--usetoday</argument>
+                   <argument>--fileversions</argument>
+                   <argument>--dirversions</argument>
                    <argument>--version</argument>
                    <argument>${project.version}</argument>
                  </arguments>
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index cbedd972188..71ba7fffc10 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -600,6 +600,7 @@ function hadoop_bootstrap
HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
+ OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 3826f67a5ea..6db085a3261 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -88,7 +88,7 @@
# Extra Java runtime options for all Hadoop commands. We don't support
# IPv6 yet/still, so by default the preference is set to IPv4.
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-# For Kerberos debugging, an extended option set logs more invormation
+# For Kerberos debugging, an extended option set logs more information
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
# Some parts of the shell code may do special things dependent upon
@@ -120,9 +120,9 @@ esac
#
# By default, Apache Hadoop overrides Java's CLASSPATH
# environment variable. It is configured such
-# that it sarts out blank with new entries added after passing
+# that it starts out blank with new entries added after passing
# a series of checks (file/dir exists, not already listed aka
-# de-deduplication). During de-depulication, wildcards and/or
+# de-deduplication). During de-deduplication, wildcards and/or
# directories are *NOT* expanded to keep it simple. Therefore,
# if the computed classpath has two specific mentions of
# awesome-methods-1.0.jar, only the first one added will be seen.
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
index 16fdcf05629..f061313cb4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
@@ -47,7 +47,7 @@
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
# Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifiying multiple tags separate them with
+# If '*' all tags are used. If specifying multiple tags separate them with
# commas. Note that the last segment of the property name is the context name.
#
# A typical use of tags is separating the metrics by the HDFS rpc port
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index cf3dd1f4ec3..bd7c11124f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -242,4 +242,24 @@
group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
   </property>
+
+  <property>
+    <name>security.applicationmaster-nodemanager.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the Nodemanager
+    and ApplicationMasters to communicate.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.distributedscheduling.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DistributedSchedulingAMProtocol, used by the Nodemanager
+    and Resourcemanager to communicate.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
 </configuration>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 5783013040d..aeae2b81d1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -304,29 +304,6 @@ log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
-#
-# Add a logger for ozone that is separate from the Datanode.
-#
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into datanode logs. Remove this line to have single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
- %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
#
# Fair scheduler state dump
#
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index c5bdf4e021b..ef4eac69145 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
-import org.apache.commons.lang3.StringEscapeUtils;
+import org.apache.commons.text.StringEscapeUtils;
import java.util.Collection;
import java.util.Enumeration;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
index be85497209b..b55f84226d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
@@ -27,22 +27,31 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.util.CleanerUtil;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
public class CryptoStreamUtils {
private static final int MIN_BUFFER_SIZE = 512;
-
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CryptoStreamUtils.class);
+
/** Forcibly free the direct buffer. */
public static void freeDB(ByteBuffer buffer) {
- if (buffer instanceof sun.nio.ch.DirectBuffer) {
- final sun.misc.Cleaner bufferCleaner =
- ((sun.nio.ch.DirectBuffer) buffer).cleaner();
- bufferCleaner.clean();
+ if (CleanerUtil.UNMAP_SUPPORTED) {
+ try {
+ CleanerUtil.getCleaner().freeBuffer(buffer);
+ } catch (IOException e) {
+ LOG.info("Failed to free the buffer", e);
+ }
+ } else {
+ LOG.trace(CleanerUtil.UNMAP_NOT_SUPPORTED_REASON);
}
}
-
+
/** Read crypto buffer size */
public static int getBufferSize(Configuration conf) {
return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
index fa84c47d26c..3f8b337f357 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
@@ -265,8 +265,7 @@ public void execute() throws IOException {
}
}
} catch (IOException e) {
- getOut().println("Cannot list keys for KeyProvider: " + provider
- + ": " + e.toString());
+ getOut().println("Cannot list keys for KeyProvider: " + provider);
throw e;
}
}
@@ -318,12 +317,12 @@ public void execute() throws NoSuchAlgorithmException, IOException {
printProviderWritten();
} catch (NoSuchAlgorithmException e) {
getOut().println("Cannot roll key: " + keyName +
- " within KeyProvider: " + provider + ". " + e.toString());
+ " within KeyProvider: " + provider + ".");
throw e;
}
} catch (IOException e1) {
getOut().println("Cannot roll key: " + keyName + " within KeyProvider: "
- + provider + ". " + e1.toString());
+ + provider + ".");
throw e1;
}
}
@@ -374,8 +373,8 @@ public boolean validate() {
}
return cont;
} catch (IOException e) {
- getOut().println(keyName + " will not be deleted.");
- e.printStackTrace(getErr());
+ getOut().println(keyName + " will not be deleted. "
+ + prettifyException(e));
}
}
return true;
@@ -392,7 +391,7 @@ public void execute() throws IOException {
getOut().println(keyName + " has been successfully deleted.");
printProviderWritten();
} catch (IOException e) {
- getOut().println(keyName + " has not been deleted. " + e.toString());
+ getOut().println(keyName + " has not been deleted.");
throw e;
}
}
@@ -463,13 +462,13 @@ public void execute() throws IOException, NoSuchAlgorithmException {
"with options " + options.toString() + ".");
printProviderWritten();
} catch (InvalidParameterException e) {
- getOut().println(keyName + " has not been created. " + e.toString());
+ getOut().println(keyName + " has not been created.");
throw e;
} catch (IOException e) {
- getOut().println(keyName + " has not been created. " + e.toString());
+ getOut().println(keyName + " has not been created.");
throw e;
} catch (NoSuchAlgorithmException e) {
- getOut().println(keyName + " has not been created. " + e.toString());
+ getOut().println(keyName + " has not been created.");
throw e;
}
}
@@ -520,7 +519,7 @@ public void execute() throws NoSuchAlgorithmException, IOException {
printProviderWritten();
} catch (IOException e) {
getOut().println("Cannot invalidate cache for key: " + keyName +
- " within KeyProvider: " + provider + ". " + e.toString());
+ " within KeyProvider: " + provider + ".");
throw e;
}
}
@@ -531,6 +530,17 @@ public String getUsage() {
}
}
+ @Override
+ protected void printException(Exception e){
+ getErr().println("Executing command failed with " +
+ "the following exception: " + prettifyException(e));
+ }
+
+ private String prettifyException(Exception e) {
+ return e.getClass().getSimpleName() + ": " +
+ e.getLocalizedMessage().split("\n")[0];
+ }
+
/**
* main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 7b4607507b9..8125510a72e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -56,6 +56,7 @@
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.UndeclaredThrowableException;
+import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
@@ -478,10 +479,14 @@ public HttpURLConnection run() throws Exception {
return authUrl.openConnection(url, authToken, doAsUser);
}
});
+ } catch (ConnectException ex) {
+ String msg = "Failed to connect to: " + url.toString();
+ LOG.warn(msg);
+ throw new IOException(msg, ex);
+ } catch (SocketTimeoutException ex) {
+ LOG.warn("Failed to connect to {}:{}", url.getHost(), url.getPort());
+ throw ex;
} catch (IOException ex) {
- if (ex instanceof SocketTimeoutException) {
- LOG.warn("Failed to connect to {}:{}", url.getHost(), url.getPort());
- }
throw ex;
} catch (UndeclaredThrowableException ex) {
throw new IOException(ex.getUndeclaredThrowable());
@@ -1036,13 +1041,13 @@ private String getDoAsUser() throws IOException {
        public Token<?> run() throws Exception {
// Not using the cached token here.. Creating a new token here
// everytime.
- LOG.debug("Getting new token from {}, renewer:{}", url, renewer);
+ LOG.info("Getting new token from {}, renewer:{}", url, renewer);
return authUrl.getDelegationToken(url,
new DelegationTokenAuthenticatedURL.Token(), renewer, doAsUser);
}
});
if (token != null) {
- LOG.debug("New token received: ({})", token);
+ LOG.info("New token received: ({})", token);
credentials.addToken(token.getService(), token);
          tokens = new Token<?>[] { token };
} else {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 42cd47dd7a5..e68e8448aa3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.io.InterruptedIOException;
+import java.net.ConnectException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
@@ -27,6 +28,8 @@
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import javax.net.ssl.SSLHandshakeException;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
@@ -110,12 +113,11 @@ public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
return providers;
}
-  private <T> T doOp(ProviderCallable<T> op, int currPos)
-      throws IOException {
+  private <T> T doOp(ProviderCallable<T> op, int currPos,
+      boolean isIdempotent) throws IOException {
if (providers.length == 0) {
throw new IOException("No providers configured !");
}
- IOException ex = null;
int numFailovers = 0;
for (int i = 0;; i++, numFailovers++) {
KMSClientProvider provider = providers[(currPos + i) % providers.length];
@@ -130,11 +132,18 @@ public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
} catch (IOException ioe) {
LOG.warn("KMS provider at [{}] threw an IOException: ",
provider.getKMSUrl(), ioe);
- ex = ioe;
-
+        // An SSLHandshakeException can occur here because of a lost
+        // connection to the KMS server. Wrap it in a ConnectException so
+        // that the FailoverOnNetworkExceptionRetry policy will retry.
+ if (ioe instanceof SSLHandshakeException) {
+ Exception cause = ioe;
+ ioe = new ConnectException("SSLHandshakeException: "
+ + cause.getMessage());
+ ioe.initCause(cause);
+ }
RetryAction action = null;
try {
- action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
+ action = retryPolicy.shouldRetry(ioe, 0, numFailovers, isIdempotent);
} catch (Exception e) {
if (e instanceof IOException) {
throw (IOException)e;
@@ -145,7 +154,7 @@ public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
// compatible with earlier versions of LBKMSCP
if (action.action == RetryAction.RetryDecision.FAIL
&& numFailovers >= providers.length - 1) {
- LOG.warn("Aborting since the Request has failed with all KMS"
+ LOG.error("Aborting since the Request has failed with all KMS"
+ " providers(depending on {}={} setting and numProviders={})"
+ " in the group OR the exception is not recoverable",
CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY,
@@ -153,7 +162,7 @@ public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
CommonConfigurationKeysPublic.
KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length),
providers.length);
- throw ex;
+ throw ioe;
}
if (((numFailovers + 1) % providers.length) == 0) {
// Sleep only after we try all the providers for every cycle.
@@ -192,7 +201,7 @@ private int nextIdx() {
      public Token<?>[] call(KMSClientProvider provider) throws IOException {
return provider.addDelegationTokens(renewer, credentials);
}
- }, nextIdx());
+ }, nextIdx(), false);
}
@Override
@@ -202,7 +211,7 @@ public long renewDelegationToken(final Token<?> token) throws IOException {
public Long call(KMSClientProvider provider) throws IOException {
return provider.renewDelegationToken(token);
}
- }, nextIdx());
+ }, nextIdx(), false);
}
@Override
@@ -213,7 +222,7 @@ public Void call(KMSClientProvider provider) throws IOException {
provider.cancelDelegationToken(token);
return null;
}
- }, nextIdx());
+ }, nextIdx(), false);
}
// This request is sent to all providers in the load-balancing group
@@ -266,7 +275,7 @@ public EncryptedKeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.generateEncryptedKey(encryptionKeyName);
}
- }, nextIdx());
+ }, nextIdx(), true);
} catch (WrapperException we) {
if (we.getCause() instanceof GeneralSecurityException) {
throw (GeneralSecurityException) we.getCause();
@@ -286,7 +295,7 @@ public KeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.decryptEncryptedKey(encryptedKeyVersion);
}
- }, nextIdx());
+ }, nextIdx(), true);
} catch (WrapperException we) {
if (we.getCause() instanceof GeneralSecurityException) {
throw (GeneralSecurityException) we.getCause();
@@ -306,7 +315,7 @@ public EncryptedKeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.reencryptEncryptedKey(ekv);
}
- }, nextIdx());
+ }, nextIdx(), true);
} catch (WrapperException we) {
if (we.getCause() instanceof GeneralSecurityException) {
throw (GeneralSecurityException) we.getCause();
@@ -326,7 +335,7 @@ public Void call(KMSClientProvider provider)
provider.reencryptEncryptedKeys(ekvs);
return null;
}
- }, nextIdx());
+ }, nextIdx(), true);
} catch (WrapperException we) {
if (we.getCause() instanceof GeneralSecurityException) {
throw (GeneralSecurityException) we.getCause();
@@ -342,7 +351,7 @@ public KeyVersion getKeyVersion(final String versionName) throws IOException {
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.getKeyVersion(versionName);
}
- }, nextIdx());
+ }, nextIdx(), true);
}
@Override
@@ -352,7 +361,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException {
      public List<String> call(KMSClientProvider provider) throws IOException {
return provider.getKeys();
}
- }, nextIdx());
+ }, nextIdx(), true);
}
@Override
@@ -362,7 +371,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException {
public Metadata[] call(KMSClientProvider provider) throws IOException {
return provider.getKeysMetadata(names);
}
- }, nextIdx());
+ }, nextIdx(), true);
}
@Override
@@ -373,7 +382,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException {
throws IOException {
return provider.getKeyVersions(name);
}
- }, nextIdx());
+ }, nextIdx(), true);
}
@Override
@@ -383,8 +392,9 @@ public KeyVersion getCurrentKey(final String name) throws IOException {
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.getCurrentKey(name);
}
- }, nextIdx());
+ }, nextIdx(), true);
}
+
@Override
public Metadata getMetadata(final String name) throws IOException {
    return doOp(new ProviderCallable<Metadata>() {
@@ -392,7 +402,7 @@ public Metadata getMetadata(final String name) throws IOException {
public Metadata call(KMSClientProvider provider) throws IOException {
return provider.getMetadata(name);
}
- }, nextIdx());
+ }, nextIdx(), true);
}
@Override
@@ -403,7 +413,7 @@ public KeyVersion createKey(final String name, final byte[] material,
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.createKey(name, material, options);
}
- }, nextIdx());
+ }, nextIdx(), false);
}
@Override
@@ -416,7 +426,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException,
NoSuchAlgorithmException {
return provider.createKey(name, options);
}
- }, nextIdx());
+ }, nextIdx(), false);
} catch (WrapperException e) {
if (e.getCause() instanceof GeneralSecurityException) {
throw (NoSuchAlgorithmException) e.getCause();
@@ -433,7 +443,7 @@ public Void call(KMSClientProvider provider) throws IOException {
provider.deleteKey(name);
return null;
}
- }, nextIdx());
+ }, nextIdx(), false);
}
@Override
@@ -444,7 +454,7 @@ public KeyVersion rollNewVersion(final String name, final byte[] material)
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.rollNewVersion(name, material);
}
- }, nextIdx());
+ }, nextIdx(), false);
invalidateCache(name);
return newVersion;
}
@@ -459,7 +469,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException,
NoSuchAlgorithmException {
return provider.rollNewVersion(name);
}
- }, nextIdx());
+ }, nextIdx(), false);
invalidateCache(name);
return newVersion;
} catch (WrapperException e) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 75622ad3742..c56f6e09e2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -27,10 +27,12 @@
import java.util.Arrays;
import java.util.EnumSet;
+import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
@@ -527,4 +529,39 @@ public boolean reportChecksumFailure(Path f, FSDataInputStream in,
}
return results.toArray(new FileStatus[results.size()]);
}
+
+ @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
+ throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException {
+    final RemoteIterator<LocatedFileStatus> iter =
+        getMyFs().listLocatedStatus(f);
+    return new RemoteIterator<LocatedFileStatus>() {
+
+ private LocatedFileStatus next = null;
+
+ @Override
+ public boolean hasNext() throws IOException {
+ while (next == null && iter.hasNext()) {
+ LocatedFileStatus unfilteredNext = iter.next();
+ if (!isChecksumFile(unfilteredNext.getPath())) {
+ next = unfilteredNext;
+ }
+ }
+ return next != null;
+ }
+
+ @Override
+ public LocatedFileStatus next() throws IOException {
+ if (!hasNext()) {
+ throw new NoSuchElementException();
+ }
+ LocatedFileStatus tmp = next;
+ next = null;
+ return tmp;
+ }
+
+ };
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index c7f32f92a69..b101b3b3096 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -905,5 +905,14 @@
public static final String HADOOP_TAGS_SYSTEM = "hadoop.tags.system";
public static final String HADOOP_TAGS_CUSTOM = "hadoop.tags.custom";
+
+ /** Configuration option for the shutdown hook manager shutdown time:
+ * {@value}. */
+ public static final String SERVICE_SHUTDOWN_TIMEOUT =
+ "hadoop.service.shutdown.timeout";
+
+ /** Default shutdown hook timeout: {@value} seconds. */
+ public static final long SERVICE_SHUTDOWN_TIMEOUT_DEFAULT = 30;
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a06a3..c3e088b66d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
- SHOULD_REPLICATE((short) 0x80);
+ SHOULD_REPLICATE((short) 0x80),
+
+ /**
+ * Advise that the first block replica NOT take into account DataNode
+ * locality. The first block replica should be placed randomly within the
+ * cluster. Subsequent block replicas should follow DataNode locality rules.
+ */
+ IGNORE_CLIENT_LOCALITY((short) 0x100);
private final short mode;
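
A hedged usage sketch of the new flag (not part of this patch): it relies on the EnumSet-based FileSystem#create overload; the path and sizing values are placeholders, and whether the hint is honored depends on the underlying file system (HDFS).

```java
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class IgnoreClientLocalitySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Ask for the first block replica to be placed without regard to the
    // client's DataNode; later replicas follow the normal placement rules.
    try (FSDataOutputStream out = fs.create(
        new Path("/tmp/random-first-replica.dat"),   // illustrative path
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE,
            CreateFlag.IGNORE_CLIENT_LOCALITY),
        4096,                      // buffer size
        (short) 3,                 // replication
        128L * 1024 * 1024,        // block size
        null)) {                   // no Progressable
      out.writeBytes("payload");
    }
  }
}
```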
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index b57ff3dc3a6..f13b50bd20c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -16,12 +16,6 @@
*/
package org.apache.hadoop.fs;
-import com.google.common.base.Charsets;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsPermission;
-
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
@@ -29,13 +23,26 @@
import java.util.List;
import java.util.stream.Collectors;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import static org.apache.hadoop.fs.Path.mergePaths;
+
/**
* A MultipartUploader that uses the basic FileSystem commands.
* This is done in three stages:
- * Init - create a temp _multipart directory.
- * PutPart - copying the individual parts of the file to the temp directory.
- * Complete - use {@link FileSystem#concat} to merge the files; and then delete
- * the temp directory.
+ * <ol>
+ *   <li>Init - create a temp {@code _multipart} directory.</li>
+ *   <li>PutPart - copying the individual parts of the file to the temp
+ *   directory.</li>
+ *   <li>Complete - use {@link FileSystem#concat} to merge the files;
+ *   and then delete the temp directory.</li>
+ * </ol>
*/
public class FileSystemMultipartUploader extends MultipartUploader {
@@ -61,31 +68,50 @@ public PartHandle putPart(Path filePath, InputStream inputStream,
throws IOException {
byte[] uploadIdByteArray = uploadId.toByteArray();
+ checkUploadId(uploadIdByteArray);
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
Path partPath =
- Path.mergePaths(collectorPath, Path.mergePaths(new Path(Path.SEPARATOR),
+ mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR),
new Path(Integer.toString(partNumber) + ".part")));
- FSDataOutputStreamBuilder outputStream = fs.createFile(partPath);
- FSDataOutputStream fsDataOutputStream = outputStream.build();
- IOUtils.copy(inputStream, fsDataOutputStream, 4096);
- fsDataOutputStream.close();
+ try(FSDataOutputStream fsDataOutputStream =
+ fs.createFile(partPath).build()) {
+ IOUtils.copy(inputStream, fsDataOutputStream, 4096);
+ } finally {
+ org.apache.hadoop.io.IOUtils.cleanupWithLogger(LOG, inputStream);
+ }
return BBPartHandle.from(ByteBuffer.wrap(
partPath.toString().getBytes(Charsets.UTF_8)));
}
private Path createCollectorPath(Path filePath) {
- return Path.mergePaths(filePath.getParent(),
- Path.mergePaths(new Path(filePath.getName().split("\\.")[0]),
- Path.mergePaths(new Path("_multipart"),
+ return mergePaths(filePath.getParent(),
+ mergePaths(new Path(filePath.getName().split("\\.")[0]),
+ mergePaths(new Path("_multipart"),
new Path(Path.SEPARATOR))));
}
+ private PathHandle getPathHandle(Path filePath) throws IOException {
+ FileStatus status = fs.getFileStatus(filePath);
+ return fs.getPathHandle(status);
+ }
+
@Override
@SuppressWarnings("deprecation") // rename w/ OVERWRITE
public PathHandle complete(Path filePath,
      List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
throws IOException {
+
+ checkUploadId(multipartUploadId.toByteArray());
+
+ if (handles.isEmpty()) {
+ throw new IOException("Empty upload");
+ }
+ // If destination already exists, we believe we already completed it.
+ if (fs.exists(filePath)) {
+ return getPathHandle(filePath);
+ }
+
handles.sort(Comparator.comparing(Pair::getKey));
    List<Path> partHandles = handles
.stream()
@@ -97,22 +123,25 @@ public PathHandle complete(Path filePath,
.collect(Collectors.toList());
Path collectorPath = createCollectorPath(filePath);
- Path filePathInsideCollector = Path.mergePaths(collectorPath,
+ Path filePathInsideCollector = mergePaths(collectorPath,
new Path(Path.SEPARATOR + filePath.getName()));
fs.create(filePathInsideCollector).close();
fs.concat(filePathInsideCollector,
partHandles.toArray(new Path[handles.size()]));
fs.rename(filePathInsideCollector, filePath, Options.Rename.OVERWRITE);
fs.delete(collectorPath, true);
- FileStatus status = fs.getFileStatus(filePath);
- return fs.getPathHandle(status);
+ return getPathHandle(filePath);
}
@Override
public void abort(Path filePath, UploadHandle uploadId) throws IOException {
byte[] uploadIdByteArray = uploadId.toByteArray();
+ checkUploadId(uploadIdByteArray);
Path collectorPath = new Path(new String(uploadIdByteArray, 0,
uploadIdByteArray.length, Charsets.UTF_8));
+
+ // force a check for a file existing; raises FNFE if not found
+ fs.getFileStatus(collectorPath);
fs.delete(collectorPath, true);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 1c216f430af..a4b158a85ab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -24,8 +24,6 @@
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.util.*;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -78,11 +76,25 @@
/** Used when size of file to be allocated is unknown. */
public static final int SIZE_UNKNOWN = -1;
+ private final DiskValidator diskValidator;
+
/**Create an allocator object
* @param contextCfgItemName
*/
public LocalDirAllocator(String contextCfgItemName) {
this.contextCfgItemName = contextCfgItemName;
+ try {
+ this.diskValidator = DiskValidatorFactory.getInstance(
+ BasicDiskValidator.NAME);
+ } catch (DiskErrorException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public LocalDirAllocator(String contextCfgItemName,
+ DiskValidator diskValidator) {
+ this.contextCfgItemName = contextCfgItemName;
+ this.diskValidator = diskValidator;
}
/** This method must be used to obtain the dir allocation context for a
@@ -96,7 +108,8 @@ private AllocatorPerContext obtainContext(String contextCfgItemName) {
AllocatorPerContext l = contexts.get(contextCfgItemName);
if (l == null) {
contexts.put(contextCfgItemName,
- (l = new AllocatorPerContext(contextCfgItemName)));
+ (l = new AllocatorPerContext(contextCfgItemName,
+ diskValidator)));
}
return l;
}
@@ -255,6 +268,7 @@ int getCurrentDirectoryIndex() {
// NOTE: the context must be accessed via a local reference as it
// may be updated at any time to reference a different context
    private AtomicReference<Context> currentContext;
+ private final DiskValidator diskValidator;
private static class Context {
private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
@@ -280,9 +294,11 @@ public int getAndIncrDirNumLastAccessed(int delta) {
}
}
- public AllocatorPerContext(String contextCfgItemName) {
+ public AllocatorPerContext(String contextCfgItemName,
+ DiskValidator diskValidator) {
this.contextCfgItemName = contextCfgItemName;
      this.currentContext = new AtomicReference<Context>(new Context());
+ this.diskValidator = diskValidator;
}
/** This method gets called everytime before any read/write to make sure
@@ -312,7 +328,7 @@ private Context confChanged(Configuration conf)
? new File(ctx.localFS.makeQualified(tmpDir).toUri())
: new File(dirStrings[i]);
- DiskChecker.checkDir(tmpFile);
+ diskValidator.checkStatus(tmpFile);
dirs.add(new Path(tmpFile.getPath()));
dfList.add(new DF(tmpFile, 30000));
} catch (DiskErrorException de) {
@@ -348,7 +364,7 @@ private Path createPath(Path dir, String path,
//check whether we are able to create a directory here. If the disk
//happens to be RDONLY we will fail
try {
- DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
+ diskValidator.checkStatus(new File(file.getParent().toUri().getPath()));
return file;
} catch (DiskErrorException d) {
LOG.warn("Disk Error Exception: ", d);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
index 24a92169a2b..76f58d35978 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
@@ -21,17 +21,21 @@
import java.io.InputStream;
import java.util.List;
-import org.apache.commons.lang3.tuple.Pair;
-
+import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.commons.lang3.tuple.Pair;
+
/**
* MultipartUploader is an interface for copying files multipart and across
* multiple nodes. Users should:
- * 1. Initialize an upload
- * 2. Upload parts in any order
- * 3. Complete the upload in order to have it materialize in the destination FS.
+ * <ol>
+ *   <li>Initialize an upload</li>
+ *   <li>Upload parts in any order</li>
+ *   <li>Complete the upload in order to have it materialize in the destination
+ *   FS</li>
+ * </ol>
*
* Implementers should make sure that the complete function should make sure
* that 'complete' will reorder parts if the destination FS doesn't already
@@ -45,7 +49,7 @@
* Initialize a multipart upload.
* @param filePath Target path for upload.
* @return unique identifier associating part uploads.
- * @throws IOException
+ * @throws IOException IO failure
*/
public abstract UploadHandle initialize(Path filePath) throws IOException;
@@ -53,12 +57,13 @@
* Put part as part of a multipart upload. It should be possible to have
* parts uploaded in any order (or in parallel).
* @param filePath Target path for upload (same as {@link #initialize(Path)}).
- * @param inputStream Data for this part.
+ * @param inputStream Data for this part. Implementations MUST close this
+ * stream after reading in the data.
* @param partNumber Index of the part relative to others.
* @param uploadId Identifier from {@link #initialize(Path)}.
* @param lengthInBytes Target length to read from the stream.
* @return unique PartHandle identifier for the uploaded part.
- * @throws IOException
+ * @throws IOException IO failure
*/
public abstract PartHandle putPart(Path filePath, InputStream inputStream,
int partNumber, UploadHandle uploadId, long lengthInBytes)
@@ -67,12 +72,12 @@ public abstract PartHandle putPart(Path filePath, InputStream inputStream,
/**
* Complete a multipart upload.
* @param filePath Target path for upload (same as {@link #initialize(Path)}.
- * @param handles Identifiers with associated part numbers from
- * {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
+ * @param handles non-empty list of identifiers with associated part numbers
+ * from {@link #putPart(Path, InputStream, int, UploadHandle, long)}.
* Depending on the backend, the list order may be significant.
* @param multipartUploadId Identifier from {@link #initialize(Path)}.
* @return unique PathHandle identifier for the uploaded file.
- * @throws IOException
+ * @throws IOException IO failure or the handle list is empty.
*/
public abstract PathHandle complete(Path filePath,
      List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
@@ -81,10 +86,20 @@ public abstract PathHandle complete(Path filePath,
/**
* Aborts a multipart upload.
* @param filePath Target path for upload (same as {@link #initialize(Path)}.
- * @param multipartuploadId Identifier from {@link #initialize(Path)}.
- * @throws IOException
+ * @param multipartUploadId Identifier from {@link #initialize(Path)}.
+ * @throws IOException IO failure
*/
- public abstract void abort(Path filePath, UploadHandle multipartuploadId)
+ public abstract void abort(Path filePath, UploadHandle multipartUploadId)
throws IOException;
+ /**
+ * Utility method to validate uploadIDs.
+ * @param uploadId upload ID to validate
+ * @throws IllegalArgumentException if the upload ID is empty
+ */
+ protected void checkUploadId(byte[] uploadId)
+ throws IllegalArgumentException {
+ Preconditions.checkArgument(uploadId.length > 0,
+ "Empty UploadId is not valid");
+ }
}
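
To make the contract above concrete, here is a minimal caller-side sketch, assuming an already constructed uploader, a destination path, and in-memory part payloads; only the call order and the method signatures are taken from this class:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.MultipartUploader;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;

public class MultipartUploadSketch {
  /** Upload the given parts in order and materialize the file at dest. */
  static PathHandle upload(MultipartUploader uploader, Path dest,
      byte[][] parts) throws IOException {
    UploadHandle id = uploader.initialize(dest);
    try {
      List<Pair<Integer, PartHandle>> handles = new ArrayList<>();
      for (int i = 0; i < parts.length; i++) {
        // Assumption: parts are numbered from 1. Per the javadoc change
        // above, the uploader closes the supplied stream after reading.
        PartHandle part = uploader.putPart(dest,
            new ByteArrayInputStream(parts[i]), i + 1, id, parts[i].length);
        handles.add(Pair.of(i + 1, part));
      }
      return uploader.complete(dest, handles, id);
    } catch (IOException e) {
      uploader.abort(dest, id); // best effort: give up the upload on failure
      throw e;
    }
  }
}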
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java
index df70b746cce..47ce3ab1894 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartHandle.java
@@ -16,14 +16,14 @@
*/
package org.apache.hadoop.fs;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
import java.io.Serializable;
import java.nio.ByteBuffer;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
/**
- * Opaque, serializable reference to an part id for multipart uploads.
+ * Opaque, serializable reference to a part id for multipart uploads.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathHandle.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathHandle.java
index 60aa6a53bfb..d5304ba5493 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathHandle.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathHandle.java
@@ -25,15 +25,16 @@
/**
* Opaque, serializable reference to an entity in the FileSystem. May contain
- * metadata sufficient to resolve or verify subsequent accesses indepedent of
+ * metadata sufficient to resolve or verify subsequent accesses independent of
* other modifications to the FileSystem.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
+@FunctionalInterface
public interface PathHandle extends Serializable {
/**
- * @return Serialized from in bytes.
+ * @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
@@ -42,6 +43,10 @@
return ret;
}
+ /**
+ * Get the bytes of this path handle.
+ * @return the bytes to get to the process completing the upload.
+ */
ByteBuffer bytes();
@Override
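
Since @FunctionalInterface marks bytes() as the only abstract method, a handle can be written as a lambda; a small sketch with an assumed opaque payload:

import java.nio.ByteBuffer;

import org.apache.hadoop.fs.PathHandle;

public class PathHandleSketch {
  public static void main(String[] args) {
    byte[] raw = {0x1, 0x2, 0x3};                   // assumed opaque payload
    PathHandle handle = () -> ByteBuffer.wrap(raw); // lambda supplies bytes()
    byte[] serialized = handle.toByteArray();       // default method shown above
    System.out.println(serialized.length);          // prints 3
  }
}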
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 265e967b01e..6e101a26e24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -148,6 +148,20 @@ public boolean moveToTrash(Path path) throws IOException {
LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
return false;
}
+ } catch (FileAlreadyExistsException e) {
+ // find the path which is not a directory, and modify baseTrashPath
+ // & trashPath, then mkdirs
+ Path existsFilePath = baseTrashPath;
+ while (!fs.exists(existsFilePath)) {
+ existsFilePath = existsFilePath.getParent();
+ }
+ baseTrashPath = new Path(baseTrashPath.toString().replace(
+ existsFilePath.toString(), existsFilePath.toString() + Time.now())
+ );
+ trashPath = new Path(baseTrashPath, trashPath.getName());
+ // retry, ignore current failure
+ --i;
+ continue;
} catch (IOException e) {
LOG.warn("Can't create trash directory: " + baseTrashPath, e);
cause = e;
@@ -235,7 +249,7 @@ public Runnable getEmptier() throws IOException {
LOG.info("Namenode trash configuration: Deletion interval = "
+ (deletionInterval / MSECS_PER_MINUTE)
+ " minutes, Emptier interval = "
- + (emptierInterval / MSECS_PER_MINUTE) + " minutes.");
+ + (this.emptierInterval / MSECS_PER_MINUTE) + " minutes.");
}
@Override
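
The FileAlreadyExistsException branch above amounts to a small path rewrite: walk up from the intended trash directory until the existing entry that blocks mkdirs is found, then suffix that component with the current time so the retry can succeed. A hedged sketch of just that rewrite (class and method names are illustrative):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;

class TrashPathRewriteSketch {
  /** Rebase baseTrashPath around the existing entry that blocks mkdirs. */
  static Path rebase(FileSystem fs, Path baseTrashPath) throws IOException {
    Path existing = baseTrashPath;
    while (!fs.exists(existing)) {   // find the nearest existing ancestor
      existing = existing.getParent();
    }
    // append a timestamp to the blocking component, keep the rest of the path
    return new Path(baseTrashPath.toString().replace(
        existing.toString(), existing.toString() + Time.now()));
  }
}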
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index 4a134148a09..784bbf33f78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -66,7 +66,7 @@ public static void registerCommands(CommandFactory factory) {
factory.registerCommands(Tail.class);
factory.registerCommands(Head.class);
factory.registerCommands(Test.class);
- factory.registerCommands(Touch.class);
+ factory.registerCommands(TouchCommands.class);
factory.registerCommands(Truncate.class);
factory.registerCommands(SnapshotCommands.class);
factory.registerCommands(XAttrCommands.class);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 9f39da29ce2..5828b0bbf4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -68,11 +68,14 @@ protected void processPath(PathData item) throws IOException {
@Override
protected void processNonexistentPath(PathData item) throws IOException {
- // check if parent exists. this is complicated because getParent(a/b/c/) returns a/b/c, but
- // we want a/b
- if (!createParents &&
- !item.fs.exists(new Path(item.path.toString()).getParent())) {
- throw new PathNotFoundException(item.toString());
+ if (!createParents) {
+ // check if parent exists. this is complicated because getParent(a/b/c/) returns a/b/c, but
+ // we want a/b
+ final Path itemPath = new Path(item.path.toString());
+ final Path itemParentPath = itemPath.getParent();
+ if (!item.fs.exists(itemParentPath)) {
+ throw new PathNotFoundException(itemParentPath.toString());
+ }
}
if (!item.fs.mkdirs(item.path)) {
throw new PathIOException(item.toString());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
deleted file mode 100644
index a6c751ea6f0..00000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.shell;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.PathIOException;
-import org.apache.hadoop.fs.PathIsDirectoryException;
-import org.apache.hadoop.fs.PathNotFoundException;
-
-/**
- * Unix touch like commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-
-class Touch extends FsCommand {
- public static void registerCommands(CommandFactory factory) {
- factory.addClass(Touchz.class, "-touchz");
- }
-
- /**
- * (Re)create zero-length file at the specified path.
- * This will be replaced by a more UNIX-like touch when files may be
- * modified.
- */
- public static class Touchz extends Touch {
- public static final String NAME = "touchz";
- public static final String USAGE = "<path> ...";
- public static final String DESCRIPTION =
- "Creates a file of zero length " +
- "at with current time as the timestamp of that . " +
- "An error is returned if the file exists with non-zero length\n";
-
- @Override
- protected void processOptions(LinkedList<String> args) {
- CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
- cf.parse(args);
- }
-
- @Override
- protected void processPath(PathData item) throws IOException {
- if (item.stat.isDirectory()) {
- // TODO: handle this
- throw new PathIsDirectoryException(item.toString());
- }
- if (item.stat.getLen() != 0) {
- throw new PathIOException(item.toString(), "Not a zero-length file");
- }
- touchz(item);
- }
-
- @Override
- protected void processNonexistentPath(PathData item) throws IOException {
- if (!item.parentExists()) {
- throw new PathNotFoundException(item.toString())
- .withFullyQualifiedPath(item.path.toUri().toString());
- }
- touchz(item);
- }
-
- private void touchz(PathData item) throws IOException {
- item.fs.create(item.path).close();
- }
- }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java
new file mode 100644
index 00000000000..be174b5e9cf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/TouchCommands.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Unix touch like commands
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+
+public class TouchCommands extends FsCommand {
+ public static void registerCommands(CommandFactory factory) {
+ factory.addClass(Touchz.class, "-touchz");
+ factory.addClass(Touch.class, "-touch");
+ }
+
+ /**
+ * (Re)create zero-length file at the specified path.
+ * This will be replaced by a more UNIX-like touch when files may be
+ * modified.
+ */
+ public static class Touchz extends TouchCommands {
+ public static final String NAME = "touchz";
+ public static final String USAGE = "<path> ...";
+ public static final String DESCRIPTION =
+ "Creates a file of zero length " +
+ "at with current time as the timestamp of that . " +
+ "An error is returned if the file exists with non-zero length\n";
+
+ @Override
+ protected void processOptions(LinkedList<String> args) {
+ CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE);
+ cf.parse(args);
+ }
+
+ @Override
+ protected void processPath(PathData item) throws IOException {
+ if (item.stat.isDirectory()) {
+ // TODO: handle this
+ throw new PathIsDirectoryException(item.toString());
+ }
+ if (item.stat.getLen() != 0) {
+ throw new PathIOException(item.toString(), "Not a zero-length file");
+ }
+ touchz(item);
+ }
+
+ @Override
+ protected void processNonexistentPath(PathData item) throws IOException {
+ if (!item.parentExists()) {
+ throw new PathNotFoundException(item.toString())
+ .withFullyQualifiedPath(item.path.toUri().toString());
+ }
+ touchz(item);
+ }
+
+ private void touchz(PathData item) throws IOException {
+ item.fs.create(item.path).close();
+ }
+ }
+
+ /**
+ * A UNIX like touch command.
+ */
+ public static class Touch extends TouchCommands {
+ private static final String OPTION_CHANGE_ONLY_MODIFICATION_TIME = "m";
+ private static final String OPTION_CHANGE_ONLY_ACCESS_TIME = "a";
+ private static final String OPTION_USE_TIMESTAMP = "t";
+ private static final String OPTION_DO_NOT_CREATE_FILE = "c";
+
+ public static final String NAME = "touch";
+ public static final String USAGE = "[-" + OPTION_CHANGE_ONLY_ACCESS_TIME
+ + "] [-" + OPTION_CHANGE_ONLY_MODIFICATION_TIME + "] [-"
+ + OPTION_USE_TIMESTAMP + " TIMESTAMP ] [-" + OPTION_DO_NOT_CREATE_FILE
+ + "] ...";
+ public static final String DESCRIPTION =
+ "Updates the access and modification times of the file specified by the"
+ + " to the current time. If the file does not exist, then a zero"
+ + " length file is created at with current time as the timestamp"
+ + " of that .\n"
+ + "-" + OPTION_CHANGE_ONLY_ACCESS_TIME
+ + " Change only the access time \n" + "-"
+ + OPTION_CHANGE_ONLY_MODIFICATION_TIME
+ + " Change only the modification time \n" + "-"
+ + OPTION_USE_TIMESTAMP + " TIMESTAMP"
+ + " Use specified timestamp (in format yyyyMMddHHmmss) instead of current time \n"
+ + "-" + OPTION_DO_NOT_CREATE_FILE + " Do not create any files";
+
+ private boolean changeModTime = false;
+ private boolean changeAccessTime = false;
+ private boolean doNotCreate = false;
+ private String timestamp;
+ private final SimpleDateFormat dateFormat =
+ new SimpleDateFormat("yyyyMMdd:HHmmss");
+
+ @InterfaceAudience.Private
+ @VisibleForTesting
+ public DateFormat getDateFormat() {
+ return dateFormat;
+ }
+
+ @Override
+ protected void processOptions(LinkedList<String> args) {
+ this.timestamp =
+ StringUtils.popOptionWithArgument("-" + OPTION_USE_TIMESTAMP, args);
+
+ CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
+ OPTION_USE_TIMESTAMP, OPTION_CHANGE_ONLY_ACCESS_TIME,
+ OPTION_CHANGE_ONLY_MODIFICATION_TIME);
+ cf.parse(args);
+ this.changeModTime = cf.getOpt(OPTION_CHANGE_ONLY_MODIFICATION_TIME);
+ this.changeAccessTime = cf.getOpt(OPTION_CHANGE_ONLY_ACCESS_TIME);
+ this.doNotCreate = cf.getOpt(OPTION_DO_NOT_CREATE_FILE);
+ }
+
+ @Override
+ protected void processPath(PathData item) throws IOException {
+ if (item.stat.isDirectory()) {
+ throw new PathIsDirectoryException(item.toString());
+ }
+ touch(item);
+ }
+
+ @Override
+ protected void processNonexistentPath(PathData item) throws IOException {
+ if (!item.parentExists()) {
+ throw new PathNotFoundException(item.toString())
+ .withFullyQualifiedPath(item.path.toUri().toString());
+ }
+ touch(item);
+ }
+
+ private void touch(PathData item) throws IOException {
+ if (!item.fs.exists(item.path)) {
+ if (doNotCreate) {
+ return;
+ }
+ item.fs.create(item.path).close();
+ if (timestamp != null) {
+ // update the time only if user specified a timestamp using -t option.
+ updateTime(item);
+ }
+ } else {
+ updateTime(item);
+ }
+ }
+
+ private void updateTime(PathData item) throws IOException {
+ long time = System.currentTimeMillis();
+ if (timestamp != null) {
+ try {
+ time = dateFormat.parse(timestamp).getTime();
+ } catch (ParseException e) {
+ throw new IllegalArgumentException(
+ "Unable to parse the specified timestamp " + timestamp, e);
+ }
+ }
+ if (changeModTime ^ changeAccessTime) {
+ long atime = changeModTime ? -1 : time;
+ long mtime = changeAccessTime ? -1 : time;
+ item.fs.setTimes(item.path, mtime, atime);
+ } else {
+ item.fs.setTimes(item.path, time, time);
+ }
+ }
+ }
+}
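
The -a/-m handling above leans on the FileSystem.setTimes() convention that -1 leaves a time unchanged; a short sketch of that convention in isolation (the helper name is illustrative):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class TouchTimesSketch {
  /** Apply the same -a/-m semantics as the command above. */
  static void applyTouch(FileSystem fs, Path p, long time,
      boolean changeModTime, boolean changeAccessTime) throws IOException {
    if (changeModTime ^ changeAccessTime) {
      long atime = changeModTime ? -1 : time;  // -1 means "do not change"
      long mtime = changeAccessTime ? -1 : time;
      fs.setTimes(p, mtime, atime);
    } else {
      fs.setTimes(p, time, time);              // both flags, or neither
    }
  }
}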
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 9b7d7ba5d1a..e955979acde 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -574,8 +574,11 @@ private int help(String[] argv) {
}
return 0;
}
-
- protected static class UsageInfo {
+
+ /**
+ * UsageInfo class holds args and help details.
+ */
+ public static class UsageInfo {
public final String args;
public final String help;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 2435671a31a..d2ba469ab43 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -141,6 +141,10 @@
"hadoop.http.selector.count";
// -1 to use default behavior of setting count based on CPU core count
public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
+ // idle timeout in milliseconds
+ public static final String HTTP_IDLE_TIMEOUT_MS_KEY =
+ "hadoop.http.idle_timeout.ms";
+ public static final int HTTP_IDLE_TIMEOUT_MS_DEFAULT = 10000;
public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
public static final String FILTER_INITIALIZER_PROPERTY
@@ -445,6 +449,8 @@ public HttpServer2 build() throws IOException {
int responseHeaderSize = conf.getInt(
HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT);
+ int idleTimeout = conf.getInt(HTTP_IDLE_TIMEOUT_MS_KEY,
+ HTTP_IDLE_TIMEOUT_MS_DEFAULT);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setRequestHeaderSize(requestHeaderSize);
@@ -470,6 +476,7 @@ public HttpServer2 build() throws IOException {
connector.setHost(ep.getHost());
connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
connector.setAcceptQueueSize(backlogSize);
+ connector.setIdleTimeout(idleTimeout);
server.addListener(connector);
}
server.loadListeners();
@@ -483,7 +490,13 @@ private ServerConnector createHttpChannelConnector(
conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
conn.addConnectionFactory(connFactory);
- configureChannelConnector(conn);
+ if(Shell.WINDOWS) {
+ // result of setting the SO_REUSEADDR flag is different on Windows
+ // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
+ // without this 2 NN's can start on the same machine and listen on
+ // the same port with indeterminate routing of incoming requests to them
+ conn.setReuseAddress(false);
+ }
return conn;
}
@@ -659,17 +672,6 @@ private static void addNoCacheFilter(ServletContextHandler ctxt) {
Collections.<String, String> emptyMap(), new String[] { "/*" });
}
- private static void configureChannelConnector(ServerConnector c) {
- c.setIdleTimeout(10000);
- if(Shell.WINDOWS) {
- // result of setting the SO_REUSEADDR flag is different on Windows
- // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
- // without this 2 NN's can start on the same machine and listen on
- // the same port with indeterminate routing of incoming requests to them
- c.setReuseAddress(false);
- }
- }
-
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
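
The new key can be tuned per deployment before the server is built; a brief sketch with an illustrative value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

class IdleTimeoutConfigSketch {
  static Configuration withLongerIdleTimeout() {
    Configuration conf = new Configuration();
    // raise the connector idle timeout from the 10000 ms default to 60 s
    conf.setInt(HttpServer2.HTTP_IDLE_TIMEOUT_MS_KEY, 60000);
    return conf;
  }
}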
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/IsActiveServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/IsActiveServlet.java
new file mode 100644
index 00000000000..3838beb1990
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/IsActiveServlet.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+/**
+ * Used by Load Balancers to detect the active NameNode/ResourceManager/Router.
+ */
+public abstract class IsActiveServlet extends HttpServlet {
+
+ /** Default serial identifier. */
+ private static final long serialVersionUID = 1L;
+
+ public static final String SERVLET_NAME = "isActive";
+ public static final String PATH_SPEC = "/isActive";
+
+ public static final String RESPONSE_ACTIVE =
+ "I am Active!";
+
+ public static final String RESPONSE_NOT_ACTIVE =
+ "I am not Active!";
+
+ /**
+ * Check whether this instance is the Active one.
+ * @param req HTTP request
+ * @param resp HTTP response to write to
+ */
+ @Override
+ public void doGet(
+ final HttpServletRequest req, final HttpServletResponse resp)
+ throws IOException {
+
+ // By default requests are persistent. We don't want long-lived connections
+ // on server side.
+ resp.addHeader("Connection", "close");
+
+ if (!isActive()) {
+ // Report not SC_OK
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED,
+ RESPONSE_NOT_ACTIVE);
+ return;
+ }
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.getWriter().write(RESPONSE_ACTIVE);
+ resp.getWriter().flush();
+ }
+
+ /**
+ * @return true if this instance is in Active HA state.
+ */
+ protected abstract boolean isActive();
+}
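
A minimal sketch of a concrete subclass; the BooleanSupplier stands in for whatever tracks HA state in the actual NameNode/ResourceManager/Router wiring:

import java.util.function.BooleanSupplier;

import org.apache.hadoop.http.IsActiveServlet;

public class ExampleIsActiveServlet extends IsActiveServlet {
  private static final long serialVersionUID = 1L;

  // Illustrative: supplied by the daemon's HA state tracking.
  private final transient BooleanSupplier activeCheck;

  public ExampleIsActiveServlet(BooleanSupplier activeCheck) {
    this.activeCheck = activeCheck;
  }

  @Override
  protected boolean isActive() {
    return activeCheck.getAsBoolean();
  }
}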
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index fa85ed77a1f..c4347e0c1af 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -5,9 +5,9 @@
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -24,6 +24,7 @@
import java.io.OutputStream;
import java.util.ArrayList;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -78,25 +79,33 @@ public void flush() throws IOException {
public enum Algorithm {
LZO(TFile.COMPRESSION_LZO) {
private transient boolean checked = false;
+ private transient ClassNotFoundException cnf;
+ private transient boolean reinitCodecInTests;
private static final String defaultClazz =
"org.apache.hadoop.io.compress.LzoCodec";
+ private transient String clazz;
private transient CompressionCodec codec = null;
+ private String getLzoCodecClass() {
+ String extClazzConf = conf.get(CONF_LZO_CLASS);
+ String extClazz = (extClazzConf != null) ?
+ extClazzConf : System.getProperty(CONF_LZO_CLASS);
+ return (extClazz != null) ? extClazz : defaultClazz;
+ }
+
@Override
public synchronized boolean isSupported() {
- if (!checked) {
+ if (!checked || reinitCodecInTests) {
checked = true;
- String extClazzConf = conf.get(CONF_LZO_CLASS);
- String extClazz = (extClazzConf != null) ?
- extClazzConf : System.getProperty(CONF_LZO_CLASS);
- String clazz = (extClazz != null) ? extClazz : defaultClazz;
+ reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false);
+ clazz = getLzoCodecClass();
try {
LOG.info("Trying to load Lzo codec class: " + clazz);
codec =
(CompressionCodec) ReflectionUtils.newInstance(Class
.forName(clazz), conf);
} catch (ClassNotFoundException e) {
- // that is okay
+ cnf = e;
}
}
return codec != null;
@@ -105,9 +114,9 @@ public synchronized boolean isSupported() {
@Override
CompressionCodec getCodec() throws IOException {
if (!isSupported()) {
- throw new IOException(
- "LZO codec class not specified. Did you forget to set property "
- + CONF_LZO_CLASS + "?");
+ throw new IOException(String.format(
+ "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz),
+ cnf);
}
return codec;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index adc74bd52b8..4e0cd8fdd86 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
+import org.apache.hadoop.util.CleanerUtil;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
@@ -315,7 +316,7 @@ static void mlock(ByteBuffer buffer, long len)
}
mlock_native(buffer, len);
}
-
+
/**
* Unmaps the block from memory. See munmap(2).
*
@@ -329,10 +330,14 @@ static void mlock(ByteBuffer buffer, long len)
* @param buffer The buffer to unmap.
*/
public static void munmap(MappedByteBuffer buffer) {
- if (buffer instanceof sun.nio.ch.DirectBuffer) {
- sun.misc.Cleaner cleaner =
- ((sun.nio.ch.DirectBuffer)buffer).cleaner();
- cleaner.clean();
+ if (CleanerUtil.UNMAP_SUPPORTED) {
+ try {
+ CleanerUtil.getCleaner().freeBuffer(buffer);
+ } catch (IOException e) {
+ LOG.info("Failed to unmap the buffer", e);
+ }
+ } else {
+ LOG.trace(CleanerUtil.UNMAP_NOT_SUPPORTED_REASON);
}
}
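
The same unmap path can be exercised directly; a hedged sketch that maps a file read-only and frees the mapping eagerly when the platform supports it (the file name is illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

import org.apache.hadoop.util.CleanerUtil;

class UnmapSketch {
  static void mapAndUnmap(String file) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(file, "r");
         FileChannel ch = raf.getChannel()) {
      MappedByteBuffer buf =
          ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
      if (CleanerUtil.UNMAP_SUPPORTED) {
        CleanerUtil.getCleaner().freeBuffer(buf);  // eager unmap
      } else {
        // otherwise the mapping is only reclaimed by GC
        System.err.println(CleanerUtil.UNMAP_NOT_SUPPORTED_REASON);
      }
    }
  }
}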
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
index c73e0837721..f2fa3af7d59 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
@@ -30,27 +30,30 @@
*/
@InterfaceStability.Evolving
public interface FailoverProxyProvider<T> extends Closeable {
- public static final class ProxyInfo<T> {
- public final T proxy;
+ static class ProxyInfo<T> {
+ public T proxy;
/*
* The information (e.g., the IP address) of the current proxy object. It
* provides information for debugging purposes.
*/
- public final String proxyInfo;
+ public String proxyInfo;
public ProxyInfo(T proxy, String proxyInfo) {
this.proxy = proxy;
this.proxyInfo = proxyInfo;
}
+ private String proxyName() {
+ return proxy != null ? proxy.getClass().getSimpleName() : "UnknownProxy";
+ }
+
public String getString(String methodName) {
- return proxy.getClass().getSimpleName() + "." + methodName
- + " over " + proxyInfo;
+ return proxyName() + "." + methodName + " over " + proxyInfo;
}
@Override
public String toString() {
- return proxy.getClass().getSimpleName() + " over " + proxyInfo;
+ return proxyName() + " over " + proxyInfo;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
index d1bd1807b03..29649a6b6ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
@@ -81,8 +81,9 @@ public CallQueueManager(Class<? extends BlockingQueue<E>> backingClass,
this.clientBackOffEnabled = clientBackOffEnabled;
this.putRef = new AtomicReference<BlockingQueue<E>>(bq);
this.takeRef = new AtomicReference<BlockingQueue<E>>(bq);
- LOG.info("Using callQueue: " + backingClass + " queueCapacity: " +
- maxQueueSize + " scheduler: " + schedulerClass);
+ LOG.info("Using callQueue: {}, queueCapacity: {}, " +
+ "scheduler: {}, ipcBackoff: {}.",
+ backingClass, maxQueueSize, schedulerClass, clientBackOffEnabled);
}
@VisibleForTesting // only!
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 163e80dfc40..07a2f13a442 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -439,6 +440,8 @@ public synchronized Writable getRpcResponse() {
private final Object sendRpcRequestLock = new Object();
+ private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
public Connection(ConnectionId remoteId, int serviceClass) throws IOException {
this.remoteId = remoteId;
this.server = remoteId.getAddress();
@@ -677,7 +680,8 @@ private synchronized void setupConnection(
this.socket.setReuseAddress(true);
localAddr = NetUtils.bindToLocalAddress(localAddr,
bindToWildCardAddress);
- LOG.debug("Binding {} to {}", principal, localAddr);
+ LOG.debug("Binding {} to {}", principal,
+ (bindToWildCardAddress) ? "0.0.0.0" : localAddr);
this.socket.bind(new InetSocketAddress(localAddr, 0));
}
}
@@ -776,6 +780,7 @@ private synchronized void setupIOstreams(
}
}
try {
+ connectingThread.set(Thread.currentThread());
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to "+server);
}
@@ -861,6 +866,8 @@ public AuthMethod run()
markClosed(new IOException("Couldn't set up IO streams: " + t, t));
}
close();
+ } finally {
+ connectingThread.set(null);
}
}
@@ -1214,6 +1221,13 @@ private synchronized void markClosed(IOException e) {
notifyAll();
}
}
+
+ private void interruptConnectingThread() {
+ Thread connThread = connectingThread.get();
+ if (connThread != null) {
+ connThread.interrupt();
+ }
+ }
/** Close the connection. */
private synchronized void close() {
@@ -1281,9 +1295,6 @@ public Client(Class<? extends Writable> valueClass, Configuration conf,
this.bindToWildCardAddress = conf
.getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
- LOG.debug("{} set to true. Will bind client sockets to wildcard "
- + "address.",
- CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
this.clientId = ClientId.getClientId();
this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
@@ -1323,6 +1334,7 @@ public void stop() {
// wake up all connections
for (Connection conn : connections.values()) {
conn.interrupt();
+ conn.interruptConnectingThread();
}
// wait until all connections are closed
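
The connectingThread bookkeeping above is a small, reusable pattern: publish the thread that is about to block, clear it when done, and interrupt it from stop(). A standalone sketch of that pattern (the sleep stands in for the blocking connection setup):

import java.util.concurrent.atomic.AtomicReference;

class ConnectInterruptSketch {
  private final AtomicReference<Thread> connectingThread =
      new AtomicReference<>();

  void connect() throws InterruptedException {
    connectingThread.set(Thread.currentThread());
    try {
      Thread.sleep(5000);          // stands in for blocking connection setup
    } finally {
      connectingThread.set(null);  // mirror the finally block above
    }
  }

  void stop() {
    Thread t = connectingThread.get();
    if (t != null) {
      t.interrupt();               // unblock a thread stuck connecting
    }
  }
}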
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index f12ecb6462a..8bb0ce4c9d7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -391,6 +391,7 @@ private static Boolean parseBackOffByResponseTimeEnabled(String ns,
* counts current.
*/
private void decayCurrentCounts() {
+ LOG.debug("Start to decay current counts.");
try {
long totalDecayedCount = 0;
long totalRawCount = 0;
@@ -410,7 +411,12 @@ private void decayCurrentCounts() {
totalDecayedCount += nextValue;
decayedCount.set(nextValue);
+ LOG.debug("Decaying counts for the user: {}, " +
+ "its decayedCount: {}, rawCount: {}", entry.getKey(),
+ nextValue, rawCount.get());
if (nextValue == 0) {
+ LOG.debug("The decayed count for the user {} is zero " +
+ "and being cleaned.", entry.getKey());
// We will clean up unused keys here. An interesting optimization
// might be to have an upper bound on keyspace in callCounts and only
// clean once we pass it.
@@ -422,6 +428,8 @@ private void decayCurrentCounts() {
totalDecayedCallCount.set(totalDecayedCount);
totalRawCallCount.set(totalRawCount);
+ LOG.debug("After decaying the stored counts, totalDecayedCount: {}, " +
+ "totalRawCallCount: {}.", totalDecayedCount, totalRawCount);
// Now refresh the cache of scheduling decisions
recomputeScheduleCache();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
index d308725c053..096cc1ad43f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
@@ -109,6 +109,9 @@ private void moveToNextQueue() {
// Finally, reset requestsLeft. This will enable moveToNextQueue to be
// called again, for the new currentQueueIndex
this.requestsLeft.set(this.queueWeights[nextIdx]);
+ LOG.debug("Moving to next queue from queue index {} to index {}, " +
+ "number of requests left for current queue: {}.",
+ thisIdx, nextIdx, requestsLeft);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
index b8e6a8ace16..37fa760ee75 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/annotation/Metric.java
@@ -68,4 +68,9 @@
* @return optional type (counter|gauge) of the metric
*/
Type type() default Type.DEFAULT;
+
+ /**
+ * @return optional roll over interval in secs for MutableQuantiles
+ */
+ int interval() default 10;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index a3ca98d0407..c7adaa5d991 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -83,6 +83,10 @@ MutableMetric newForField(Field field, Metric annotation,
return registry.newMutableRollingAverages(info.name(),
annotation.valueName());
}
+ if (cls == MutableQuantiles.class) {
+ return registry.newQuantiles(info.name(), annotation.about(),
+ annotation.sampleName(), annotation.valueName(), annotation.interval());
+ }
throw new MetricsException("Unsupported metric field "+ field.getName() +
" of type "+ field.getType().getName());
}
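
With interval() wired through the factory, a quantiles metric can be declared directly on a metrics source; a sketch assuming the existing about/sampleName/valueName attributes of @Metric:

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

@Metrics(about = "Example metrics source", context = "example")
class ExampleQuantilesSource {
  // Roll the quantile snapshot every 60 seconds instead of the default 10.
  @Metric(about = "Request latency", sampleName = "Ops",
      valueName = "Latency", interval = 60)
  MutableQuantiles requestLatency;
}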
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index ad0986511d8..63ec9a5d29e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -73,7 +73,8 @@
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Groups {
- private static final Logger LOG = LoggerFactory.getLogger(Groups.class);
+ @VisibleForTesting
+ static final Logger LOG = LoggerFactory.getLogger(Groups.class);
private final GroupMappingServiceProvider impl;
@@ -308,6 +309,7 @@ public long read() {
*/
@Override
public List<String> load(String user) throws Exception {
+ LOG.debug("GroupCacheLoader - load.");
TraceScope scope = null;
Tracer tracer = Tracer.curThreadTracer();
if (tracer != null) {
@@ -346,6 +348,7 @@ public long read() {
public ListenableFuture<List<String>> reload(final String key,
List<String> oldValue)
throws Exception {
+ LOG.debug("GroupCacheLoader - reload (async).");
if (!reloadGroupsInBackground) {
return super.reload(key, oldValue);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 29b9fea424c..6ce72edb8e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -40,6 +40,7 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.Iterator;
@@ -851,81 +852,121 @@ void spawnAutoRenewalThreadForUserCreds(boolean force) {
}
//spawn thread only if we have kerb credentials
- Thread t = new Thread(new Runnable() {
+ KerberosTicket tgt = getTGT();
+ if (tgt == null) {
+ return;
+ }
+ String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+ long nextRefresh = getRefreshTime(tgt);
+ Thread t =
+ new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
+ t.setDaemon(true);
+ t.setName("TGT Renewer for " + getUserName());
+ t.start();
+ }
+
+ @VisibleForTesting
+ class AutoRenewalForUserCredsRunnable implements Runnable {
+ private KerberosTicket tgt;
+ private RetryPolicy rp;
+ private String kinitCmd;
+ private long nextRefresh;
+ private boolean runRenewalLoop = true;
+
+ AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
+ long nextRefresh){
+ this.tgt = tgt;
+ this.kinitCmd = kinitCmd;
+ this.nextRefresh = nextRefresh;
+ this.rp = null;
+ }
+
+ public void setRunRenewalLoop(boolean runRenewalLoop) {
+ this.runRenewalLoop = runRenewalLoop;
+ }
- @Override
- public void run() {
- String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
- KerberosTicket tgt = getTGT();
- if (tgt == null) {
+ @Override
+ public void run() {
+ do {
+ try {
+ long now = Time.now();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Current time is " + now);
+ LOG.debug("Next refresh is " + nextRefresh);
+ }
+ if (now < nextRefresh) {
+ Thread.sleep(nextRefresh - now);
+ }
+ String output = Shell.execCommand(kinitCmd, "-R");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Renewed ticket. kinit output: {}", output);
+ }
+ reloginFromTicketCache();
+ tgt = getTGT();
+ if (tgt == null) {
+ LOG.warn("No TGT after renewal. Aborting renew thread for " +
+ getUserName());
+ return;
+ }
+ nextRefresh = Math.max(getRefreshTime(tgt),
+ now + kerberosMinSecondsBeforeRelogin);
+ metrics.renewalFailures.set(0);
+ rp = null;
+ } catch (InterruptedException ie) {
+ LOG.warn("Terminating renewal thread");
return;
- }
- long nextRefresh = getRefreshTime(tgt);
- RetryPolicy rp = null;
- while (true) {
+ } catch (IOException ie) {
+ metrics.renewalFailuresTotal.incr();
+ final long now = Time.now();
+
+ if (tgt.isDestroyed()) {
+ LOG.error("TGT is destroyed. Aborting renew thread for {}.",
+ getUserName());
+ return;
+ }
+
+ long tgtEndTime;
+ // As described in HADOOP-15593 we need to handle the case when
+ // tgt.getEndTime() throws NPE because of JDK issue JDK-8147772
+ // NPE is only possible if this issue is not fixed in the JDK
+ // currently used
try {
- long now = Time.now();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Current time is " + now);
- LOG.debug("Next refresh is " + nextRefresh);
- }
- if (now < nextRefresh) {
- Thread.sleep(nextRefresh - now);
- }
- String output = Shell.execCommand(cmd, "-R");
- if (LOG.isDebugEnabled()) {
- LOG.debug("Renewed ticket. kinit output: {}", output);
- }
- reloginFromTicketCache();
- tgt = getTGT();
- if (tgt == null) {
- LOG.warn("No TGT after renewal. Aborting renew thread for " +
- getUserName());
- return;
- }
- nextRefresh = Math.max(getRefreshTime(tgt),
- now + kerberosMinSecondsBeforeRelogin);
- metrics.renewalFailures.set(0);
- rp = null;
- } catch (InterruptedException ie) {
- LOG.warn("Terminating renewal thread");
+ tgtEndTime = tgt.getEndTime().getTime();
+ } catch (NullPointerException npe) {
+ LOG.error("NPE thrown while getting KerberosTicket endTime. "
+ + "Aborting renew thread for {}.", getUserName());
+ return;
+ }
+
+ LOG.warn("Exception encountered while running the renewal "
+ + "command for {}. (TGT end time:{}, renewalFailures: {},"
+ + "renewalFailuresTotal: {})", getUserName(), tgtEndTime,
+ metrics.renewalFailures.value(),
+ metrics.renewalFailuresTotal.value(), ie);
+ if (rp == null) {
+ // Use a dummy maxRetries to create the policy. The policy will
+ // only be used to get next retry time with exponential back-off.
+ // The final retry time will be later limited within the
+ // tgt endTime in getNextTgtRenewalTime.
+ rp = RetryPolicies.exponentialBackoffRetry(Long.SIZE - 2,
+ kerberosMinSecondsBeforeRelogin, TimeUnit.MILLISECONDS);
+ }
+ try {
+ nextRefresh = getNextTgtRenewalTime(tgtEndTime, now, rp);
+ } catch (Exception e) {
+ LOG.error("Exception when calculating next tgt renewal time", e);
+ return;
+ }
+ metrics.renewalFailures.incr();
+ // retry until close enough to tgt endTime.
+ if (now > nextRefresh) {
+ LOG.error("TGT is expired. Aborting renew thread for {}.",
+ getUserName());
return;
- } catch (IOException ie) {
- metrics.renewalFailuresTotal.incr();
- final long tgtEndTime = tgt.getEndTime().getTime();
- LOG.warn("Exception encountered while running the renewal "
- + "command for {}. (TGT end time:{}, renewalFailures: {},"
- + "renewalFailuresTotal: {})", getUserName(), tgtEndTime,
- metrics.renewalFailures, metrics.renewalFailuresTotal, ie);
- final long now = Time.now();
- if (rp == null) {
- // Use a dummy maxRetries to create the policy. The policy will
- // only be used to get next retry time with exponential back-off.
- // The final retry time will be later limited within the
- // tgt endTime in getNextTgtRenewalTime.
- rp = RetryPolicies.exponentialBackoffRetry(Long.SIZE - 2,
- kerberosMinSecondsBeforeRelogin, TimeUnit.MILLISECONDS);
- }
- try {
- nextRefresh = getNextTgtRenewalTime(tgtEndTime, now, rp);
- } catch (Exception e) {
- LOG.error("Exception when calculating next tgt renewal time", e);
- return;
- }
- metrics.renewalFailures.incr();
- // retry until close enough to tgt endTime.
- if (now > nextRefresh) {
- LOG.error("TGT is expired. Aborting renew thread for {}.",
- getUserName());
- return;
- }
}
}
- }
- });
- t.setDaemon(true);
- t.setName("TGT Renewer for " + getUserName());
- t.start();
+ } while (runRenewalLoop);
+ }
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
index 26cd7ab2614..b766d5c37fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
@@ -75,9 +75,9 @@ public void init(String configurationPrefix) {
// $configPrefix.[ANY].hosts
//
String prefixRegEx = configPrefix.replace(".", "\\.");
- String usersGroupsRegEx = prefixRegEx + "[^.]*(" +
+ String usersGroupsRegEx = prefixRegEx + "[\\S]*(" +
Pattern.quote(CONF_USERS) + "|" + Pattern.quote(CONF_GROUPS) + ")";
- String hostsRegEx = prefixRegEx + "[^.]*" + Pattern.quote(CONF_HOSTS);
+ String hostsRegEx = prefixRegEx + "[\\S]*" + Pattern.quote(CONF_HOSTS);
// get list of users and groups per proxyuser
Map<String, String> allMatchKeys =
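
The move from [^.]* to [\S]* matters for proxyuser names that contain dots; a small sketch of the difference (the keys and the simplified patterns are illustrative):

import java.util.regex.Pattern;

class ProxyUserRegexSketch {
  public static void main(String[] args) {
    String prefix = "hadoop\\.proxyuser\\.";
    Pattern before = Pattern.compile(prefix + "[^.]*\\.users");
    Pattern after = Pattern.compile(prefix + "[\\S]*\\.users");
    String key = "hadoop.proxyuser.first.last.users"; // user "first.last"
    System.out.println(before.matcher(key).matches()); // false
    System.out.println(after.matcher(key).matches());  // true
  }
}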
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index 33cb9ec98f0..25aac8853ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -23,6 +23,7 @@
import com.google.common.primitives.Bytes;
import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -358,6 +359,10 @@ private static String encodeWritable(Writable obj) throws IOException {
*/
private static void decodeWritable(Writable obj,
String newValue) throws IOException {
+ if (newValue == null) {
+ throw new HadoopIllegalArgumentException(
+ "Invalid argument, newValue is null");
+ }
Base64 decoder = new Base64(0, null, true);
DataInputBuffer buf = new DataInputBuffer();
byte[] decoded = decoder.decode(newValue);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
index 70de647ab9d..5b96fbf4725 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
@@ -254,7 +254,7 @@ public final void close() throws IOException {
* @param exception the exception
*/
protected final void noteFailure(Exception exception) {
- LOG.debug("noteFailure {}" + exception);
+ LOG.debug("noteFailure", exception);
if (exception == null) {
//make sure failure logic doesn't itself cause problems
return;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
index 70c8eaf936f..a53e2259e0e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java
@@ -76,7 +76,7 @@ public int run(String[] args) throws Exception {
}
} catch (Exception e) {
printShellUsage();
- e.printStackTrace(err);
+ printException(e);
return 1;
}
return exitCode;
@@ -98,6 +98,10 @@ protected final void printShellUsage() {
out.flush();
}
+ protected void printException(Exception ex){
+ ex.printStackTrace(err);
+ }
+
/**
* Base class for any subcommands of this shell command.
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CleanerUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CleanerUtil.java
new file mode 100644
index 00000000000..a56602eafab
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CleanerUtil.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Objects;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import static java.lang.invoke.MethodHandles.constant;
+import static java.lang.invoke.MethodHandles.dropArguments;
+import static java.lang.invoke.MethodHandles.filterReturnValue;
+import static java.lang.invoke.MethodHandles.guardWithTest;
+import static java.lang.invoke.MethodType.methodType;
+
+/**
+ * sun.misc.Cleaner has moved in OpenJDK 9 and
+ * sun.misc.Unsafe#invokeCleaner(ByteBuffer) is the replacement.
+ * This class is a hack to use sun.misc.Cleaner in Java 8 and
+ * use the replacement in Java 9+.
+ * This implementation is inspired by LUCENE-6989.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class CleanerUtil {
+
+ // Prevent instantiation
+ private CleanerUtil(){}
+
+ /**
+ * true, if this platform supports unmapping mmapped files.
+ */
+ public static final boolean UNMAP_SUPPORTED;
+
+ /**
+ * if {@link #UNMAP_SUPPORTED} is {@code false}, this contains the reason
+ * why unmapping is not supported.
+ */
+ public static final String UNMAP_NOT_SUPPORTED_REASON;
+
+
+ private static final BufferCleaner CLEANER;
+
+ /**
+ * Reference to a BufferCleaner that does unmapping.
+ * @return {@code null} if not supported.
+ */
+ public static BufferCleaner getCleaner() {
+ return CLEANER;
+ }
+
+ static {
+ final Object hack = AccessController.doPrivileged(
+ (PrivilegedAction