Index: ant/ivy.xml
===================================================================
--- ant/ivy.xml (revision 0)
+++ ant/ivy.xml (revision 0)
@@ -0,0 +1,3 @@
+
+
+
Index: ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java
===================================================================
--- ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (revision 0)
+++ ant/src/org/apache/hadoop/hive/ant/GetVersionPref.java (revision 0)
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ant;
+
+import org.apache.tools.ant.AntClassLoader;
+import org.apache.tools.ant.BuildException;
+import org.apache.tools.ant.Task;
+import org.apache.tools.ant.Project;
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.io.*;
+
+/**
+ * Implementation of the ant task <getversionpref>.
+ *
+ * This ant task takes an input version string (e.g. 0.17.2) and sets an ant property (whose name
+ * is specified in the property attribute) to the version prefix. For 0.17.2, the version prefix
+ * is 0.17. Similarly, for 0.18.0, the version prefix is 0.18. The version prefix is the first two
+ * components of the version string.
+ */
+public class GetVersionPref extends Task {
+
+  /**
+   * The name of the property that gets the version prefix.
+   */
+  protected String property;
+
+  /**
+   * The input string that contains the version string.
+   */
+  protected String input;
+
+  public void setProperty(String property) {
+    this.property = property;
+  }
+
+  public String getProperty() {
+    return property;
+  }
+
+  public void setInput(String input) {
+    this.input = input;
+  }
+
+  public String getInput() {
+    return input;
+  }
+
+  /**
+   * Executes the ant task.
+   *
+   * It extracts the version prefix using regular expressions on the version string. It then sets
+   * the property in the project with the extracted prefix. The property is set to an empty string
+   * in case no match is found for the prefix regular expression (which will happen in case the
+   * version string does not conform to the version format).
+   */
+  @Override
+  public void execute() throws BuildException {
+
+    if (property == null) {
+      throw new BuildException("No property specified");
+    }
+
+    if (input == null) {
+      throw new BuildException("No input string specified");
+    }
+
+    try {
+      Pattern p = Pattern.compile("^(\\d+\\.\\d+).*");
+      Matcher m = p.matcher(input);
+      getProject().setProperty(property, m.matches() ?
m.group(1) : ""); + } + catch (Exception e) { + throw new BuildException("Failed with: " + e.getMessage()); + } + } +} Index: ant/src/org/apache/hadoop/hive/ant/antlib.xml =================================================================== --- ant/src/org/apache/hadoop/hive/ant/antlib.xml (revision 723901) +++ ant/src/org/apache/hadoop/hive/ant/antlib.xml (working copy) @@ -21,4 +21,6 @@ + Index: hadoopcore/conf/capacity-scheduler.xml.template =================================================================== --- hadoopcore/conf/capacity-scheduler.xml.template (revision 723901) +++ hadoopcore/conf/capacity-scheduler.xml.template (working copy) @@ -1,77 +0,0 @@ - - - - - - - - - - - mapred.capacity-scheduler.queue.default.guaranteed-capacity - 100 - Percentage of the number of slots in the cluster that are - guaranteed to be available for jobs in this queue. - - - - - mapred.capacity-scheduler.queue.default.reclaim-time-limit - 300 - The amount of time, in seconds, before which - resources distributed to other queues will be reclaimed. - - - - - mapred.capacity-scheduler.queue.default.supports-priority - false - If true, priorities of jobs will be taken into - account in scheduling decisions. - - - - - mapred.capacity-scheduler.queue.default.minimum-user-limit-percent - 100 - Each queue enforces a limit on the percentage of resources - allocated to a user at any given time, if there is competition for them. - This user limit can vary between a minimum and maximum value. The former - depends on the number of users who have submitted jobs, and the latter is - set to this property value. For example, suppose the value of this - property is 25. If two users have submitted jobs to a queue, no single - user can use more than 50% of the queue resources. If a third user submits - a job, no single user can use more than 33% of the queue resources. With 4 - or more users, no user can use more than 25% of the queue's resources. A - value of 100 implies no user limits are imposed. - - - - - - - - mapred.capacity-scheduler.default-reclaim-time-limit - 300 - The amount of time, in seconds, before which - resources distributed to other queues will be reclaimed by default - in a job queue. - - - - - mapred.capacity-scheduler.default-supports-priority - false - If true, priorities of jobs will be taken into - account in scheduling decisions by default in a job queue. - - - - - mapred.capacity-scheduler.default-minimum-user-limit-percent - 100 - The percentage of the resources limited to a particular user - for the job queue at any given point of time by default. - - - Index: hadoopcore/conf/hadoop-env.sh.template =================================================================== --- hadoopcore/conf/hadoop-env.sh.template (revision 723901) +++ hadoopcore/conf/hadoop-env.sh.template (working copy) @@ -1,54 +0,0 @@ -# Set Hadoop-specific environment variables here. - -# The only required environment variable is JAVA_HOME. All others are -# optional. When running a distributed configuration it is best to -# set JAVA_HOME in this file, so that it is correctly defined on -# remote nodes. - -# The java implementation to use. Required. -# export JAVA_HOME=/usr/lib/j2sdk1.5-sun - -# Extra Java CLASSPATH elements. Optional. -# export HADOOP_CLASSPATH= - -# The maximum amount of heap to use, in MB. Default is 1000. -# export HADOOP_HEAPSIZE=2000 - -# Extra Java runtime options. Empty by default. 
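
As a quick illustration of the prefix extraction implemented in GetVersionPref.execute() above, the following standalone sketch (not part of the patch; the class name VersionPrefDemo and the sample inputs are made up for illustration) applies the same regular expression outside of Ant:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionPrefDemo {
  // Same pattern as GetVersionPref: capture the first two numeric components.
  private static final Pattern PREFIX = Pattern.compile("^(\\d+\\.\\d+).*");

  static String versionPrefix(String version) {
    Matcher m = PREFIX.matcher(version);
    // Mirrors the task: empty string when the input is not a recognizable version.
    return m.matches() ? m.group(1) : "";
  }

  public static void main(String[] args) {
    System.out.println(versionPrefix("0.17.2"));   // prints 0.17
    System.out.println(versionPrefix("0.18.0"));   // prints 0.18
    System.out.println(versionPrefix("garbage"));  // prints an empty line
  }
}

In a build file, the task itself would be invoked through the taskdef registered in antlib.xml, along the lines of <getversionpref property="version.prefix" input="${version}"/> (attribute values hypothetical).
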
-# export HADOOP_OPTS=-server - -# Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS" -export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS" -export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS" -export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS" -export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS" -# export HADOOP_TASKTRACKER_OPTS= -# The following applies to multiple commands (fs, dfs, fsck, distcp etc) -# export HADOOP_CLIENT_OPTS - -# Extra ssh options. Empty by default. -# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR" - -# Where log files are stored. $HADOOP_HOME/logs by default. -# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs - -# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default. -# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves - -# host:path where hadoop code should be rsync'd from. Unset by default. -# export HADOOP_MASTER=master:/home/$USER/src/hadoop - -# Seconds to sleep between slave commands. Unset by default. This -# can be useful in large clusters, where, e.g., slave rsyncs can -# otherwise arrive faster than the master can service them. -# export HADOOP_SLAVE_SLEEP=0.1 - -# The directory where pid files are stored. /tmp by default. -# export HADOOP_PID_DIR=/var/hadoop/pids - -# A string representing this instance of hadoop. $USER by default. -# export HADOOP_IDENT_STRING=$USER - -# The scheduling priority for daemon processes. See 'man nice'. -# export HADOOP_NICENESS=10 Index: hadoopcore/conf/hadoop-site.xml.template =================================================================== --- hadoopcore/conf/hadoop-site.xml.template (revision 723901) +++ hadoopcore/conf/hadoop-site.xml.template (working copy) @@ -1,8 +0,0 @@ - - - - - - - - Index: hadoopcore/conf/masters.template =================================================================== --- hadoopcore/conf/masters.template (revision 723901) +++ hadoopcore/conf/masters.template (working copy) @@ -1 +0,0 @@ -localhost Index: hadoopcore/conf/hadoop-default.xml =================================================================== --- hadoopcore/conf/hadoop-default.xml (revision 723901) +++ hadoopcore/conf/hadoop-default.xml (working copy) @@ -1,1549 +0,0 @@ - - - - - - - - - - - - - hadoop.tmp.dir - /tmp/hadoop-${user.name} - A base for other temporary directories. - - - - hadoop.native.lib - true - Should native hadoop libraries, if present, be used. - - - - hadoop.http.filter.initializers - - A comma separated list of class names. Each class in the list - must extend org.apache.hadoop.http.FilterInitializer. The corresponding - Filter will be initialized. Then, the Filter will be applied to all user - facing jsp and servlet web pages. The ordering of the list defines the - ordering of the filters. - - - - - - hadoop.logfile.size - 10000000 - The max size of each log file - - - - hadoop.logfile.count - 10 - The max number of log files - - - - hadoop.job.history.location - - If job tracker is static the history files are stored - in this single well known place. If No value is set here, by default, - it is in the local file system at ${hadoop.log.dir}/history. - - - - - hadoop.job.history.user.location - - User can specify a location to store the history files of - a particular job. 
If nothing is specified, the logs are stored in - output directory. The files are stored in "_logs/history/" in the directory. - User can stop logging by giving the value "none". - - - - - dfs.namenode.logging.level - info - The logging level for dfs namenode. Other values are "dir"(trac -e namespace mutations), "block"(trace block under/over replications and block -creations/deletions), or "all". - - - - - - io.sort.factor - 10 - The number of streams to merge at once while sorting - files. This determines the number of open file handles. - - - - io.sort.mb - 100 - The total amount of buffer memory to use while sorting - files, in megabytes. By default, gives each merge stream 1MB, which - should minimize seeks. - - - - io.sort.record.percent - 0.05 - The percentage of io.sort.mb dedicated to tracking record - boundaries. Let this value be r, io.sort.mb be x. The maximum number - of records collected before the collection thread must block is equal - to (r * x) / 4 - - - - io.sort.spill.percent - 0.80 - The soft limit in either the buffer or record collection - buffers. Once reached, a thread will begin to spill the contents to disk - in the background. Note that this does not imply any chunking of data to - the spill. A value less than 0.5 is not recommended. - - - - io.file.buffer.size - 4096 - The size of buffer for use in sequence files. - The size of this buffer should probably be a multiple of hardware - page size (4096 on Intel x86), and it determines how much data is - buffered during read and write operations. - - - - io.bytes.per.checksum - 512 - The number of bytes per checksum. Must not be larger than - io.file.buffer.size. - - - - io.skip.checksum.errors - false - If true, when a checksum error is encountered while - reading a sequence file, entries are skipped, instead of throwing an - exception. - - - - io.map.index.skip - 0 - Number of index entries to skip between each entry. - Zero by default. Setting this to values larger than zero can - facilitate opening large map files using less memory. - - - - io.compression.codecs - org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec - A list of the compression codec classes that can be used - for compression/decompression. - - - - io.serializations - org.apache.hadoop.io.serializer.WritableSerialization - A list of serialization classes that can be used for - obtaining serializers and deserializers. - - - - - - fs.default.name - file:/// - The name of the default file system. A URI whose - scheme and authority determine the FileSystem implementation. The - uri's scheme determines the config property (fs.SCHEME.impl) naming - the FileSystem implementation class. The uri's authority is used to - determine the host, port, etc. for a filesystem. - - - - fs.trash.interval - 0 - Number of minutes between trash checkpoints. - If zero, the trash feature is disabled. - - - - - fs.file.impl - org.apache.hadoop.fs.LocalFileSystem - The FileSystem for file: uris. - - - - fs.hdfs.impl - org.apache.hadoop.hdfs.DistributedFileSystem - The FileSystem for hdfs: uris. - - - - fs.s3.impl - org.apache.hadoop.fs.s3.S3FileSystem - The FileSystem for s3: uris. - - - - fs.s3n.impl - org.apache.hadoop.fs.s3native.NativeS3FileSystem - The FileSystem for s3n: (Native S3) uris. - - - - fs.kfs.impl - org.apache.hadoop.fs.kfs.KosmosFileSystem - The FileSystem for kfs: uris. 
- - - - fs.hftp.impl - org.apache.hadoop.hdfs.HftpFileSystem - - - - fs.hsftp.impl - org.apache.hadoop.hdfs.HsftpFileSystem - - - - fs.ftp.impl - org.apache.hadoop.fs.ftp.FTPFileSystem - The FileSystem for ftp: uris. - - - - fs.ramfs.impl - org.apache.hadoop.fs.InMemoryFileSystem - The FileSystem for ramfs: uris. - - - - fs.har.impl - org.apache.hadoop.fs.HarFileSystem - The filesystem for Hadoop archives. - - - - fs.checkpoint.dir - ${hadoop.tmp.dir}/dfs/namesecondary - Determines where on the local filesystem the DFS secondary - name node should store the temporary images to merge. - If this is a comma-delimited list of directories then the image is - replicated in all of the directories for redundancy. - - - - - fs.checkpoint.edits.dir - ${fs.checkpoint.dir} - Determines where on the local filesystem the DFS secondary - name node should store the temporary edits to merge. - If this is a comma-delimited list of directoires then teh edits is - replicated in all of the directoires for redundancy. - Default value is same as fs.checkpoint.dir - - - - - fs.checkpoint.period - 3600 - The number of seconds between two periodic checkpoints. - - - - - fs.checkpoint.size - 67108864 - The size of the current edit log (in bytes) that triggers - a periodic checkpoint even if the fs.checkpoint.period hasn't expired. - - - - - dfs.secondary.http.address - 0.0.0.0:50090 - - The secondary namenode http server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.address - 0.0.0.0:50010 - - The address where the datanode server will listen to. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.http.address - 0.0.0.0:50075 - - The datanode http server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.ipc.address - 0.0.0.0:50020 - - The datanode ipc server address and port. - If the port is 0 then the server will start on a free port. - - - - - dfs.datanode.handler.count - 3 - The number of server threads for the datanode. - - - - dfs.http.address - 0.0.0.0:50070 - - The address and the base port where the dfs namenode web ui will listen on. - If the port is 0 then the server will start on a free port. - - - - - dfs.https.enable - false - Decide if HTTPS(SSL) is supported on HDFS - - - - - dfs.https.need.client.auth - false - Whether SSL client certificate authentication is required - - - - - dfs.https.server.keystore.resource - ssl-server.xml - Resource file from which ssl server keystore - information will be extracted - - - - - dfs.https.client.keystore.resource - ssl-client.xml - Resource file from which ssl client keystore - information will be extracted - - - - - dfs.datanode.https.address - 0.0.0.0:50475 - - - - dfs.https.address - 0.0.0.0:50470 - - - - dfs.datanode.dns.interface - default - The name of the Network Interface from which a data node should - report its IP address. - - - - - dfs.datanode.dns.nameserver - default - The host name or IP address of the name server (DNS) - which a DataNode should use to determine the host name used by the - NameNode for communication and display purposes. - - - - - dfs.replication.considerLoad - true - Decide if chooseTarget considers the target's load or not - - - - dfs.default.chunk.view.size - 32768 - The number of bytes to view for a file on the browser. - - - - - dfs.datanode.du.reserved - 0 - Reserved space in bytes per volume. Always leave this much space free for non dfs use. 
- - - - - dfs.name.dir - ${hadoop.tmp.dir}/dfs/name - Determines where on the local filesystem the DFS name node - should store the name table(fsimage). If this is a comma-delimited list - of directories then the name table is replicated in all of the - directories, for redundancy. - - - - dfs.name.edits.dir - ${dfs.name.dir} - Determines where on the local filesystem the DFS name node - should store the transaction (edits) file. If this is a comma-delimited list - of directories then the transaction file is replicated in all of the - directories, for redundancy. Default value is same as dfs.name.dir - - - - dfs.web.ugi - webuser,webgroup - The user account used by the web interface. - Syntax: USERNAME,GROUP1,GROUP2, ... - - - - - dfs.permissions - true - - If "true", enable permission checking in HDFS. - If "false", permission checking is turned off, - but all other behavior is unchanged. - Switching from one parameter value to the other does not change the mode, - owner or group of files or directories. - - - - - dfs.permissions.supergroup - supergroup - The name of the group of super-users. - - - - dfs.data.dir - ${hadoop.tmp.dir}/dfs/data - Determines where on the local filesystem an DFS data node - should store its blocks. If this is a comma-delimited - list of directories, then data will be stored in all named - directories, typically on different devices. - Directories that do not exist are ignored. - - - - - dfs.replication - 3 - Default block replication. - The actual number of replications can be specified when the file is created. - The default is used if replication is not specified in create time. - - - - - dfs.replication.max - 512 - Maximal block replication. - - - - - dfs.replication.min - 1 - Minimal block replication. - - - - - dfs.block.size - 67108864 - The default block size for new files. - - - - dfs.df.interval - 60000 - Disk usage statistics refresh interval in msec. - - - - dfs.client.block.write.retries - 3 - The number of retries for writing blocks to the data nodes, - before we signal failure to the application. - - - - - dfs.blockreport.intervalMsec - 3600000 - Determines block reporting interval in milliseconds. - - - - dfs.blockreport.initialDelay 0 - Delay for first block report in seconds. - - - - dfs.heartbeat.interval - 3 - Determines datanode heartbeat interval in seconds. - - - - dfs.namenode.handler.count - 10 - The number of server threads for the namenode. - - - - dfs.safemode.threshold.pct - 0.999f - - Specifies the percentage of blocks that should satisfy - the minimal replication requirement defined by dfs.replication.min. - Values less than or equal to 0 mean not to start in safe mode. - Values greater than 1 will make safe mode permanent. - - - - - dfs.safemode.extension - 30000 - - Determines extension of safe mode in milliseconds - after the threshold level is reached. - - - - - dfs.balance.bandwidthPerSec - 1048576 - - Specifies the maximum amount of bandwidth that each datanode - can utilize for the balancing purpose in term of - the number of bytes per second. - - - - - dfs.hosts - - Names a file that contains a list of hosts that are - permitted to connect to the namenode. The full pathname of the file - must be specified. If the value is empty, all hosts are - permitted. - - - - dfs.hosts.exclude - - Names a file that contains a list of hosts that are - not permitted to connect to the namenode. The full pathname of the - file must be specified. If the value is empty, no hosts are - excluded. 
- - - - dfs.max.objects - 0 - The maximum number of files, directories and blocks - dfs supports. A value of zero indicates no limit to the number - of objects that dfs supports. - - - - - dfs.namenode.decommission.interval - 300 - Namenode periodicity in seconds to check if decommission is - complete. - - - - dfs.replication.interval - 3 - The periodicity in seconds with which the namenode computes - repliaction work for datanodes. - - - - dfs.access.time.precision - 3600000 - The access time for HDFS file is precise upto this value. - The default value is 1 hour. Setting a value of 0 disables - access times for HDFS. - - - - - fs.s3.block.size - 67108864 - Block size to use when writing files to S3. - - - - fs.s3.buffer.dir - ${hadoop.tmp.dir}/s3 - Determines where on the local filesystem the S3 filesystem - should store files before sending them to S3 - (or after retrieving them from S3). - - - - - fs.s3.maxRetries - 4 - The maximum number of retries for reading or writing files to S3, - before we signal failure to the application. - - - - - fs.s3.sleepTimeSeconds - 10 - The number of seconds to sleep between each S3 retry. - - - - - - - mapred.job.tracker - local - The host and port that the MapReduce job tracker runs - at. If "local", then jobs are run in-process as a single map - and reduce task. - - - - - mapred.job.tracker.http.address - 0.0.0.0:50030 - - The job tracker http server address and port the server will listen on. - If the port is 0 then the server will start on a free port. - - - - - mapred.job.tracker.handler.count - 10 - - The number of server threads for the JobTracker. This should be roughly - 4% of the number of tasktracker nodes. - - - - - mapred.task.tracker.report.address - 127.0.0.1:0 - The interface and port that task tracker server listens on. - Since it is only connected to by the tasks, it uses the local interface. - EXPERT ONLY. Should only be changed if your host does not have the loopback - interface. - - - - mapred.local.dir - ${hadoop.tmp.dir}/mapred/local - The local directory where MapReduce stores intermediate - data files. May be a comma-separated list of - directories on different devices in order to spread disk i/o. - Directories that do not exist are ignored. - - - - - local.cache.size - 10737418240 - The limit on the size of cache you want to keep, set by default - to 10GB. This will act as a soft limit on the cache directory for out of band data. - - - - - mapred.system.dir - ${hadoop.tmp.dir}/mapred/system - The shared directory where MapReduce stores control files. - - - - - mapred.temp.dir - ${hadoop.tmp.dir}/mapred/temp - A shared directory for temporary files. - - - - - mapred.local.dir.minspacestart - 0 - If the space in mapred.local.dir drops under this, - do not ask for more tasks. - Value in bytes. - - - - - mapred.local.dir.minspacekill - 0 - If the space in mapred.local.dir drops under this, - do not ask more tasks until all the current ones have finished and - cleaned up. Also, to save the rest of the tasks we have running, - kill one of them, to clean up some space. Start with the reduce tasks, - then go with the ones that have finished the least. - Value in bytes. - - - - - mapred.tasktracker.expiry.interval - 600000 - Expert: The time-interval, in miliseconds, after which - a tasktracker is declared 'lost' if it doesn't send heartbeats. - - - - - mapred.tasktracker.instrumentation - org.apache.hadoop.mapred.TaskTrackerMetricsInst - Expert: The instrumentation class to associate with each TaskTracker. 
- - - - - mapred.tasktracker.taskmemorymanager.monitoring-interval - 5000 - The interval, in milliseconds, for which the tasktracker waits - between two cycles of monitoring its tasks' memory usage. Used only if - tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory. - - - - - mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill - 5000 - The time, in milliseconds, the tasktracker waits for sending a - SIGKILL to a process that has overrun memory limits, after it has been sent - a SIGTERM. Used only if tasks' memory management is enabled via - mapred.tasktracker.tasks.maxmemory. - - - - mapred.map.tasks - 2 - The default number of map tasks per job. Typically set - to a prime several times greater than number of available hosts. - Ignored when mapred.job.tracker is "local". - - - - - mapred.reduce.tasks - 1 - The default number of reduce tasks per job. Typically set - to a prime close to the number of available hosts. Ignored when - mapred.job.tracker is "local". - - - - - mapred.jobtracker.restart.recover - false - "true" to enable (job) recovery upon restart, - "false" to start afresh - - - - - mapred.jobtracker.job.history.block.size - 3145728 - The block size of the job history file. Since the job recovery - uses job history, its important to dump job history to disk as - soon as possible. Note that this is an expert level parameter. - The default value is set to 3 MB. - - - - - mapred.jobtracker.taskScheduler - org.apache.hadoop.mapred.JobQueueTaskScheduler - The class responsible for scheduling the tasks. - - - - mapred.jobtracker.taskScheduler.maxRunningTasksPerJob - - The maximum number of running tasks for a job before - it gets preempted. No limits if undefined. - - - - - mapred.map.max.attempts - 4 - Expert: The maximum number of attempts per map task. - In other words, framework will try to execute a map task these many number - of times before giving up on it. - - - - - mapred.reduce.max.attempts - 4 - Expert: The maximum number of attempts per reduce task. - In other words, framework will try to execute a reduce task these many number - of times before giving up on it. - - - - - mapred.reduce.parallel.copies - 5 - The default number of parallel transfers run by reduce - during the copy(shuffle) phase. - - - - - mapred.reduce.copy.backoff - 300 - The maximum amount of time (in seconds) a reducer spends on - fetching one map output before declaring it as failed. - - - - - mapred.task.timeout - 600000 - The number of milliseconds before a task will be - terminated if it neither reads an input, writes an output, nor - updates its status string. - - - - - mapred.tasktracker.map.tasks.maximum - 2 - The maximum number of map tasks that will be run - simultaneously by a task tracker. - - - - - mapred.tasktracker.reduce.tasks.maximum - 2 - The maximum number of reduce tasks that will be run - simultaneously by a task tracker. - - - - - mapred.jobtracker.completeuserjobs.maximum - 100 - The maximum number of complete jobs per user to keep around - before delegating them to the job history. - - - - mapred.jobtracker.instrumentation - org.apache.hadoop.mapred.JobTrackerMetricsInst - Expert: The instrumentation class to associate with each JobTracker. - - - - - mapred.child.java.opts - -Xmx200m - Java opts for the task tracker child processes. - The following symbol, if present, will be interpolated: @taskid@ is replaced - by current TaskID. Any other occurrences of '@' will go unchanged. 
- For example, to enable verbose gc logging to a file named for the taskid in - /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: - -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc - - The configuration variable mapred.child.ulimit can be used to control the - maximum virtual memory of the child processes. - - - - - mapred.child.ulimit - - The maximum virtual memory, in KB, of a process launched by the - Map-Reduce framework. This can be used to control both the Mapper/Reducer - tasks and applications using Hadoop Pipes, Hadoop Streaming etc. - By default it is left unspecified to let cluster admins control it via - limits.conf and other such relevant mechanisms. - - Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to - JavaVM, else the VM might not start. - - - - - mapred.child.tmp - ./tmp - To set the value of tmp directory for map and reduce tasks. - If the value is an absolute path, it is directly assigned. Otherwise, it is - prepended with task's working directory. The java tasks are executed with - option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and - streaming are set with environment variable, - TMPDIR='the absolute path of the tmp dir' - - - - - mapred.inmem.merge.threshold - 1000 - The threshold, in terms of the number of files - for the in-memory merge process. When we accumulate threshold number of files - we initiate the in-memory merge and spill to disk. A value of 0 or less than - 0 indicates we want to DON'T have any threshold and instead depend only on - the ramfs's memory consumption to trigger the merge. - - - - - mapred.job.shuffle.merge.percent - 0.66 - The usage threshold at which an in-memory merge will be - initiated, expressed as a percentage of the total memory allocated to - storing in-memory map outputs, as defined by - mapred.job.shuffle.input.buffer.percent. - - - - - mapred.job.shuffle.input.buffer.percent - 0.70 - The percentage of memory to be allocated from the maximum heap - size to storing map outputs during the shuffle. - - - - - mapred.job.reduce.input.buffer.percent - 0.0 - The percentage of memory- relative to the maximum heap size- to - retain map outputs during the reduce. When the shuffle is concluded, any - remaining map outputs in memory must consume less than this threshold before - the reduce can begin. - - - - - mapred.map.tasks.speculative.execution - true - If true, then multiple instances of some map tasks - may be executed in parallel. - - - - mapred.reduce.tasks.speculative.execution - true - If true, then multiple instances of some reduce tasks - may be executed in parallel. - - - - mapred.job.reuse.jvm.num.tasks - 1 - How many tasks to run per jvm. If set to -1, there is - no limit. - - - - - mapred.min.split.size - 0 - The minimum size chunk that map input should be split - into. Note that some file formats may have minimum split sizes that - take priority over this setting. - - - - mapred.jobtracker.maxtasks.per.job - -1 - The maximum number of tasks for a single job. - A value of -1 indicates that there is no maximum. - - - - mapred.submit.replication - 10 - The replication level for submitted job files. This - should be around the square root of the number of nodes. - - - - - - mapred.tasktracker.dns.interface - default - The name of the Network Interface from which a task - tracker should report its IP address. 
- - - - - mapred.tasktracker.dns.nameserver - default - The host name or IP address of the name server (DNS) - which a TaskTracker should use to determine the host name used by - the JobTracker for communication and display purposes. - - - - - tasktracker.http.threads - 40 - The number of worker threads that for the http server. This is - used for map output fetching - - - - - mapred.task.tracker.http.address - 0.0.0.0:50060 - - The task tracker http server address and port. - If the port is 0 then the server will start on a free port. - - - - - keep.failed.task.files - false - Should the files for failed tasks be kept. This should only be - used on jobs that are failing, because the storage is never - reclaimed. It also prevents the map outputs from being erased - from the reduce directory as they are consumed. - - - - - - mapred.output.compress - false - Should the job outputs be compressed? - - - - - mapred.output.compression.type - RECORD - If the job outputs are to compressed as SequenceFiles, how should - they be compressed? Should be one of NONE, RECORD or BLOCK. - - - - - mapred.output.compression.codec - org.apache.hadoop.io.compress.DefaultCodec - If the job outputs are compressed, how should they be compressed? - - - - - mapred.compress.map.output - false - Should the outputs of the maps be compressed before being - sent across the network. Uses SequenceFile compression. - - - - - mapred.map.output.compression.codec - org.apache.hadoop.io.compress.DefaultCodec - If the map outputs are compressed, how should they be - compressed? - - - - - io.seqfile.compress.blocksize - 1000000 - The minimum block size for compression in block compressed - SequenceFiles. - - - - - io.seqfile.lazydecompress - true - Should values of block-compressed SequenceFiles be decompressed - only when necessary. - - - - - io.seqfile.sorter.recordlimit - 1000000 - The limit on number of records to be kept in memory in a spill - in SequenceFiles.Sorter - - - - - map.sort.class - org.apache.hadoop.util.QuickSort - The default sort class for sorting keys. - - - - - mapred.userlog.limit.kb - 0 - The maximum size of user-logs of each task in KB. 0 disables the cap. - - - - - mapred.userlog.retain.hours - 24 - The maximum time, in hours, for which the user-logs are to be - retained. - - - - - mapred.hosts - - Names a file that contains the list of nodes that may - connect to the jobtracker. If the value is empty, all hosts are - permitted. - - - - mapred.hosts.exclude - - Names a file that contains the list of hosts that - should be excluded by the jobtracker. If the value is empty, no - hosts are excluded. - - - - mapred.max.tracker.failures - 4 - The number of task-failures on a tasktracker of a given job - after which new tasks of that job aren't assigned to it. - - - - - jobclient.output.filter - FAILED - The filter for controlling the output of the task's userlogs sent - to the console of the JobClient. - The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and - ALL. - - - - - mapred.job.tracker.persist.jobstatus.active - false - Indicates if persistency of job status information is - active or not. - - - - - mapred.job.tracker.persist.jobstatus.hours - 0 - The number of hours job status information is persisted in DFS. - The job status information will be available after it drops of the memory - queue and between jobtracker restarts. With a zero value the job status - information is not persisted at all in DFS. 
- - - - - mapred.job.tracker.persist.jobstatus.dir - /jobtracker/jobsInfo - The directory where the job status information is persisted - in a file system to be available after it drops of the memory queue and - between jobtracker restarts. - - - - - mapred.task.profile - false - To set whether the system should collect profiler - information for some of the tasks in this job? The information is stored - in the the user log directory. The value is "true" if task profiling - is enabled. - - - - mapred.task.profile.maps - 0-2 - To set the ranges of map tasks to profile. - mapred.task.profile has to be set to true for the value to be accounted. - - - - - mapred.task.profile.reduces - 0-2 - To set the ranges of reduce tasks to profile. - mapred.task.profile has to be set to true for the value to be accounted. - - - - - mapred.line.input.format.linespermap - 1 - Number of lines per split in NLineInputFormat. - - - - - mapred.skip.attempts.to.start.skipping - 2 - The number of Task attempts AFTER which skip mode - will be kicked off. When skip mode is kicked off, the - tasks reports the range of records which it will process - next, to the TaskTracker. So that on failures, TT knows which - ones are possibly the bad records. On further executions, - those are skipped. - - - - - mapred.skip.map.auto.incr.proc.count - true - The flag which if set to true, - SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented - by MapRunner after invoking the map function. This value must be set to - false for applications which process the records asynchronously - or buffer the input records. For example streaming. - In such cases applications should increment this counter on their own. - - - - - mapred.skip.reduce.auto.incr.proc.count - true - The flag which if set to true, - SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented - by framework after invoking the reduce function. This value must be set to - false for applications which process the records asynchronously - or buffer the input records. For example streaming. - In such cases applications should increment this counter on their own. - - - - - mapred.skip.out.dir - - If no value is specified here, the skipped records are - written to the output directory at _logs/skip. - User can stop writing skipped records by giving the value "none". - - - - - mapred.skip.map.max.skip.records - 0 - The number of acceptable skip records surrounding the bad - record PER bad record in mapper. The number includes the bad record as well. - To turn the feature of detection/skipping of bad records off, set the - value to 0. - The framework tries to narrow down the skipped range by retrying - until this threshold is met OR all attempts get exhausted for this task. - Set the value to Long.MAX_VALUE to indicate that framework need not try to - narrow down. Whatever records(depends on application) get skipped are - acceptable. - - - - - mapred.skip.reduce.max.skip.groups - 0 - The number of acceptable skip groups surrounding the bad - group PER bad group in reducer. The number includes the bad group as well. - To turn the feature of detection/skipping of bad groups off, set the - value to 0. - The framework tries to narrow down the skipped range by retrying - until this threshold is met OR all attempts get exhausted for this task. - Set the value to Long.MAX_VALUE to indicate that framework need not try to - narrow down. Whatever groups(depends on application) get skipped are - acceptable. 
- - - - - - - ipc.client.idlethreshold - 4000 - Defines the threshold number of connections after which - connections will be inspected for idleness. - - - - - ipc.client.kill.max - 10 - Defines the maximum number of clients to disconnect in one go. - - - - - ipc.client.connection.maxidletime - 10000 - The maximum time in msec after which a client will bring down the - connection to the server. - - - - - ipc.client.connect.max.retries - 10 - Indicates the number of retries a client will make to establish - a server connection. - - - - - ipc.server.listen.queue.size - 128 - Indicates the length of the listen queue for servers accepting - client connections. - - - - - ipc.server.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the server. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - - ipc.client.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the client. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - - - - - - job.end.retry.attempts - 0 - Indicates how many times hadoop should attempt to contact the - notification URL - - - - job.end.retry.interval - 30000 - Indicates time in milliseconds between notification URL retry - calls - - - - - - webinterface.private.actions - false - If set to true, the web interfaces of JT and NN may contain - actions, such as kill job, delete file, etc., that should - not be exposed to public. Enable this option if the interfaces - are only reachable by those who have the right authorization. - - - - - - - hadoop.rpc.socket.factory.class.default - org.apache.hadoop.net.StandardSocketFactory - Default SocketFactory to use. This parameter is expected to be - formatted as "package.FactoryClassName". - - - - - hadoop.rpc.socket.factory.class.ClientProtocol - - SocketFactory to use to connect to a DFS. If null or empty, use - hadoop.rpc.socket.class.default. This socket factory is also used by - DFSClient to create sockets to DataNodes. - - - - - hadoop.rpc.socket.factory.class.JobSubmissionProtocol - - SocketFactory to use to connect to a Map/Reduce master - (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default. - - - - - hadoop.socks.server - - Address (host:port) of the SOCKS server to be used by the - SocksSocketFactory. - - - - - - - topology.node.switch.mapping.impl - org.apache.hadoop.net.ScriptBasedMapping - The default implementation of the DNSToSwitchMapping. It - invokes a script specified in topology.script.file.name to resolve - node names. If the value for topology.script.file.name is not set, the - default value of DEFAULT_RACK is returned for all node names. - - - - - topology.script.file.name - - The script name that should be invoked to resolve DNS names to - NetworkTopology names. Example: the script would take host.foo.bar as an - argument, and return /rack1 as the output. - - - - - topology.script.number.args - 100 - The max number of args that the script configured with - topology.script.file.name should be run with. Each arg is an - IP address. - - - - - mapred.task.cache.levels - 2 - This is the max level of the task cache. For example, if - the level is 2, the tasks cached are at the host level and at the rack - level. - - - - - mapred.queue.names - default - Comma separated list of queues configured for this jobtracker. 
- Jobs are added to queues and schedulers can configure different - scheduling properties for the various queues. To configure a property - for a queue, the name of the queue must match the name specified in this - value. Queue properties that are common to all schedulers are configured - here with the naming convention, mapred.queue.$QUEUE-NAME.$PROPERTY-NAME, - for e.g. mapred.queue.default.submit-job-acl. - The number of queues configured in this parameter could depend on the - type of scheduler being used, as specified in - mapred.jobtracker.taskScheduler. For example, the JobQueueTaskScheduler - supports only a single queue, which is the default configured here. - Before adding more queues, ensure that the scheduler you've configured - supports multiple queues. - - - - - mapred.acls.enabled - false - Specifies whether ACLs are enabled, and should be checked - for various operations. - - - - - mapred.queue.default.acl-submit-job - * - Comma separated list of user and group names that are allowed - to submit jobs to the 'default' queue. The user list and the group list - are separated by a blank. For e.g. alice,bob group1,group2. - If set to the special value '*', it means all users are allowed to - submit jobs. - - - - - mapred.queue.default.acl-administer-jobs - * - Comma separated list of user and group names that are allowed - to delete jobs or modify job's priority for jobs not owned by the current - user in the 'default' queue. The user list and the group list - are separated by a blank. For e.g. alice,bob group1,group2. - If set to the special value '*', it means all users are allowed to do - this operation. - - - - - mapred.job.queue.name - default - Queue to which a job is submitted. This must match one of the - queues defined in mapred.queue.names for the system. Also, the ACL setup - for the queue must allow the current user to submit a job to the queue. - Before specifying a queue, ensure that the system is configured with - the queue, and access is allowed for submitting jobs to the queue. - - - - - mapred.tasktracker.indexcache.mb - 10 - The maximum memory that a task tracker allows for the - index cache that is used when serving map outputs to reducers. - - - - Index: hadoopcore/conf/slaves =================================================================== --- hadoopcore/conf/slaves (revision 723901) +++ hadoopcore/conf/slaves (working copy) @@ -1 +0,0 @@ -localhost Index: hadoopcore/conf/ssl-server.xml.example =================================================================== --- hadoopcore/conf/ssl-server.xml.example (revision 723901) +++ hadoopcore/conf/ssl-server.xml.example (working copy) @@ -1,55 +0,0 @@ - - - - - - - ssl.server.truststore.location - - Truststore to be used by NN and DN. Must be specified. - - - - - ssl.server.truststore.password - - Optional. Default value is "". - - - - - ssl.server.truststore.type - jks - Optional. Default value is "jks". - - - - - ssl.server.keystore.location - - Keystore to be used by NN and DN. Must be specified. - - - - - ssl.server.keystore.password - - Must be specified. - - - - - ssl.server.keystore.keypassword - - Must be specified. - - - - - ssl.server.keystore.type - jks - Optional. Default value is "jks". 
- - - - Index: hadoopcore/conf/ssl-client.xml.example =================================================================== --- hadoopcore/conf/ssl-client.xml.example (revision 723901) +++ hadoopcore/conf/ssl-client.xml.example (working copy) @@ -1,57 +0,0 @@ - - - - - - - ssl.client.truststore.location - - Truststore to be used by clients like distcp. Must be - specified. - - - - - ssl.client.truststore.password - - Optional. Default value is "". - - - - - ssl.client.truststore.type - jks - Optional. Default value is "jks". - - - - - ssl.client.keystore.location - - Keystore to be used by clients like distcp. Must be - specified. - - - - - ssl.client.keystore.password - - Optional. Default value is "". - - - - - ssl.client.keystore.keypassword - - Optional. Default value is "". - - - - - ssl.client.keystore.type - jks - Optional. Default value is "jks". - - - - Index: hadoopcore/conf/slaves.template =================================================================== --- hadoopcore/conf/slaves.template (revision 723901) +++ hadoopcore/conf/slaves.template (working copy) @@ -1 +0,0 @@ -localhost Index: hadoopcore/conf/capacity-scheduler.xml =================================================================== --- hadoopcore/conf/capacity-scheduler.xml (revision 723901) +++ hadoopcore/conf/capacity-scheduler.xml (working copy) @@ -1,77 +0,0 @@ - - - - - - - - - - - mapred.capacity-scheduler.queue.default.guaranteed-capacity - 100 - Percentage of the number of slots in the cluster that are - guaranteed to be available for jobs in this queue. - - - - - mapred.capacity-scheduler.queue.default.reclaim-time-limit - 300 - The amount of time, in seconds, before which - resources distributed to other queues will be reclaimed. - - - - - mapred.capacity-scheduler.queue.default.supports-priority - false - If true, priorities of jobs will be taken into - account in scheduling decisions. - - - - - mapred.capacity-scheduler.queue.default.minimum-user-limit-percent - 100 - Each queue enforces a limit on the percentage of resources - allocated to a user at any given time, if there is competition for them. - This user limit can vary between a minimum and maximum value. The former - depends on the number of users who have submitted jobs, and the latter is - set to this property value. For example, suppose the value of this - property is 25. If two users have submitted jobs to a queue, no single - user can use more than 50% of the queue resources. If a third user submits - a job, no single user can use more than 33% of the queue resources. With 4 - or more users, no user can use more than 25% of the queue's resources. A - value of 100 implies no user limits are imposed. - - - - - - - - mapred.capacity-scheduler.default-reclaim-time-limit - 300 - The amount of time, in seconds, before which - resources distributed to other queues will be reclaimed by default - in a job queue. - - - - - mapred.capacity-scheduler.default-supports-priority - false - If true, priorities of jobs will be taken into - account in scheduling decisions by default in a job queue. - - - - - mapred.capacity-scheduler.default-minimum-user-limit-percent - 100 - The percentage of the resources limited to a particular user - for the job queue at any given point of time by default. 
- - - Index: hadoopcore/conf/hadoop-env.sh =================================================================== --- hadoopcore/conf/hadoop-env.sh (revision 723901) +++ hadoopcore/conf/hadoop-env.sh (working copy) @@ -1,54 +0,0 @@ -# Set Hadoop-specific environment variables here. - -# The only required environment variable is JAVA_HOME. All others are -# optional. When running a distributed configuration it is best to -# set JAVA_HOME in this file, so that it is correctly defined on -# remote nodes. - -# The java implementation to use. Required. -# export JAVA_HOME=/usr/lib/j2sdk1.5-sun - -# Extra Java CLASSPATH elements. Optional. -# export HADOOP_CLASSPATH= - -# The maximum amount of heap to use, in MB. Default is 1000. -# export HADOOP_HEAPSIZE=2000 - -# Extra Java runtime options. Empty by default. -# export HADOOP_OPTS=-server - -# Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS" -export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS" -export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS" -export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS" -export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS" -# export HADOOP_TASKTRACKER_OPTS= -# The following applies to multiple commands (fs, dfs, fsck, distcp etc) -# export HADOOP_CLIENT_OPTS - -# Extra ssh options. Empty by default. -# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR" - -# Where log files are stored. $HADOOP_HOME/logs by default. -# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs - -# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default. -# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves - -# host:path where hadoop code should be rsync'd from. Unset by default. -# export HADOOP_MASTER=master:/home/$USER/src/hadoop - -# Seconds to sleep between slave commands. Unset by default. This -# can be useful in large clusters, where, e.g., slave rsyncs can -# otherwise arrive faster than the master can service them. -# export HADOOP_SLAVE_SLEEP=0.1 - -# The directory where pid files are stored. /tmp by default. -# export HADOOP_PID_DIR=/var/hadoop/pids - -# A string representing this instance of hadoop. $USER by default. -# export HADOOP_IDENT_STRING=$USER - -# The scheduling priority for daemon processes. See 'man nice'. 
-# export HADOOP_NICENESS=10 Index: hadoopcore/conf/hadoop-metrics.properties =================================================================== --- hadoopcore/conf/hadoop-metrics.properties (revision 723901) +++ hadoopcore/conf/hadoop-metrics.properties (working copy) @@ -1,40 +0,0 @@ -# Configuration of the "dfs" context for null -dfs.class=org.apache.hadoop.metrics.spi.NullContext - -# Configuration of the "dfs" context for file -#dfs.class=org.apache.hadoop.metrics.file.FileContext -#dfs.period=10 -#dfs.fileName=/tmp/dfsmetrics.log - -# Configuration of the "dfs" context for ganglia -# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext -# dfs.period=10 -# dfs.servers=localhost:8649 - - -# Configuration of the "mapred" context for null -mapred.class=org.apache.hadoop.metrics.spi.NullContext - -# Configuration of the "mapred" context for file -#mapred.class=org.apache.hadoop.metrics.file.FileContext -#mapred.period=10 -#mapred.fileName=/tmp/mrmetrics.log - -# Configuration of the "mapred" context for ganglia -# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext -# mapred.period=10 -# mapred.servers=localhost:8649 - - -# Configuration of the "jvm" context for null -jvm.class=org.apache.hadoop.metrics.spi.NullContext - -# Configuration of the "jvm" context for file -#jvm.class=org.apache.hadoop.metrics.file.FileContext -#jvm.period=10 -#jvm.fileName=/tmp/jvmmetrics.log - -# Configuration of the "jvm" context for ganglia -# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext -# jvm.period=10 -# jvm.servers=localhost:8649 Index: hadoopcore/conf/log4j.properties =================================================================== --- hadoopcore/conf/log4j.properties (revision 723901) +++ hadoopcore/conf/log4j.properties (working copy) @@ -1,94 +0,0 @@ -# Define some default values that can be overridden by system properties -hadoop.root.logger=INFO,console -hadoop.log.dir=. -hadoop.log.file=hadoop.log - -# Define the root logger to the system property "hadoop.root.logger". 
-log4j.rootLogger=${hadoop.root.logger}, EventCounter - -# Logging Threshold -log4j.threshhold=ALL - -# -# Daily Rolling File Appender -# - -log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# Rollver at midnight -log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - -# 30-day backup -#log4j.appender.DRFA.MaxBackupIndex=30 -log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout - -# Pattern format: Date LogLevel LoggerName LogMessage -log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -# Debugging Pattern format -#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - -# -# console -# Add "console" to rootlogger above if you want to use this -# - -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n - -# -# TaskLog Appender -# - -#Default values -hadoop.tasklog.taskid=null -hadoop.tasklog.noKeepSplits=4 -hadoop.tasklog.totalLogFileSize=100 -hadoop.tasklog.purgeLogSplits=true -hadoop.tasklog.logsRetainHours=12 - -log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender -log4j.appender.TLA.taskId=${hadoop.tasklog.taskid} -log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} - -log4j.appender.TLA.layout=org.apache.log4j.PatternLayout -log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - -# -# Rolling File Appender -# - -#log4j.appender.RFA=org.apache.log4j.RollingFileAppender -#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# Logfile size and and 30-day backups -#log4j.appender.RFA.MaxFileSize=1MB -#log4j.appender.RFA.MaxBackupIndex=30 - -#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - -# -# FSNamesystem Audit logging -# All audit events are logged at INFO level -# -log4j.logger.org.apache.hadoop.fs.FSNamesystem.audit=WARN - -# Custom Logging levels - -#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG -#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG -#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG - -# Jets3t library -log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR - -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. -# -log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter Index: hadoopcore/conf/hadoop-site.xml =================================================================== --- hadoopcore/conf/hadoop-site.xml (revision 723901) +++ hadoopcore/conf/hadoop-site.xml (working copy) @@ -1,8 +0,0 @@ - - - - - - - - Index: hadoopcore/conf/configuration.xsl =================================================================== --- hadoopcore/conf/configuration.xsl (revision 723901) +++ hadoopcore/conf/configuration.xsl (working copy) @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - - - - - - -
name value description
- - -
-
Index: hadoopcore/conf/masters =================================================================== --- hadoopcore/conf/masters (revision 723901) +++ hadoopcore/conf/masters (working copy) @@ -1 +0,0 @@ -localhost Index: hadoopcore/lib/kfs-0.2.LICENSE.txt =================================================================== --- hadoopcore/lib/kfs-0.2.LICENSE.txt (revision 723901) +++ hadoopcore/lib/kfs-0.2.LICENSE.txt (working copy) @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. Index: hadoopcore/lib/commons-logging-api-1.0.4.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/slf4j-LICENSE.txt =================================================================== --- hadoopcore/lib/slf4j-LICENSE.txt (revision 723901) +++ hadoopcore/lib/slf4j-LICENSE.txt (working copy) @@ -1,24 +0,0 @@ -Copyright (c) 2004-2008 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - Index: hadoopcore/lib/xmlenc-0.52.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/commons-cli-2.0-SNAPSHOT.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/jets3t-0.6.1.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/kfs-0.2.2.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-ext/jasper-runtime.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-ext/commons-el.jar =================================================================== Cannot display: file marked as a binary type. 
svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-ext/jasper-compiler.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-ext/jsp-api.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/hsqldb-1.8.0.10.LICENSE.txt =================================================================== --- hadoopcore/lib/hsqldb-1.8.0.10.LICENSE.txt (revision 723901) +++ hadoopcore/lib/hsqldb-1.8.0.10.LICENSE.txt (working copy) @@ -1,66 +0,0 @@ -/* Copyright (c) 1995-2000, The Hypersonic SQL Group. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of the Hypersonic SQL Group nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE HYPERSONIC SQL GROUP, - * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This software consists of voluntary contributions made by many individuals - * on behalf of the Hypersonic SQL Group. - * - * - * For work added by the HSQL Development Group: - * - * Copyright (c) 2001-2004, The HSQL Development Group - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of the HSQL Development Group nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG, - * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - Index: hadoopcore/lib/junit-3.8.1.LICENSE.txt =================================================================== --- hadoopcore/lib/junit-3.8.1.LICENSE.txt (revision 723901) +++ hadoopcore/lib/junit-3.8.1.LICENSE.txt (working copy) @@ -1,100 +0,0 @@ -Common Public License Version 1.0 - -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - - i) changes to the Program, and - - ii) additions to the Program; - - where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -"Program" means the Contributions distributed in accordance with this Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. - - b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. 
The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - - c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. - - d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -3. REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - - a) it complies with the terms and conditions of this Agreement; and - - b) its license agreement: - - i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - - ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - - iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - - iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -When the Program is made available in source code form: - - a) it must be made available under this Agreement; and - - b) a copy of this Agreement must be included with each copy of the Program. - -Contributors may not remove or alter any copyright notices contained within the Program. - -Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors.
Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against a Contributor with respect to a patent applicable to software (including a cross-claim or counterclaim in a lawsuit), then any patent licenses granted by that Contributor to such Recipient under this Agreement shall terminate as of the date such litigation is filed. In addition, if Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. IBM is the initial Agreement Steward. IBM may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. Index: hadoopcore/lib/commons-httpclient-3.0.1.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/commons-codec-1.3.jar =================================================================== Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-5.1.4.LICENSE.txt =================================================================== --- hadoopcore/lib/jetty-5.1.4.LICENSE.txt (revision 723901) +++ hadoopcore/lib/jetty-5.1.4.LICENSE.txt (working copy) @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. Index: hadoopcore/lib/hadoop-0.20.0-dev-core.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/slf4j-api-1.4.3.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/servlet-api.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/commons-logging-1.0.4.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/junit-3.8.1.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/hsqldb-1.8.0.10.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/commons-net-1.4.1.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/log4j-1.2.15.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/jetty-5.1.4.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/oro-2.0.8.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/lib/slf4j-log4j12-1.4.3.jar =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Index: hadoopcore/bin/start-dfs.sh =================================================================== --- hadoopcore/bin/start-dfs.sh (revision 723901) +++ hadoopcore/bin/start-dfs.sh (working copy) @@ -1,52 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -# Start hadoop dfs daemons. -# Optionally upgrade or rollback dfs state. -# Run this on master node. - -usage="Usage: start-dfs.sh [-upgrade|-rollback]" - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# get arguments -if [ $# -ge 1 ]; then - nameStartOpt=$1 - shift - case $nameStartOpt in - (-upgrade) - ;; - (-rollback) - dataStartOpt=$nameStartOpt - ;; - (*) - echo $usage - exit 1 - ;; - esac -fi - -# start dfs daemons -# start namenode after datanodes, to minimize time namenode is up w/o data -# note: datanodes will log connection errors until namenode starts -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start namenode $nameStartOpt -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start datanode $dataStartOpt -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters start secondarynamenode Index: hadoopcore/bin/stop-balancer.sh =================================================================== --- hadoopcore/bin/stop-balancer.sh (revision 723901) +++ hadoopcore/bin/stop-balancer.sh (working copy) @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# Stop balancer daemon. -# Run this on the machine where the balancer is running - -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop balancer Index: hadoopcore/bin/hadoop-daemon.sh =================================================================== --- hadoopcore/bin/hadoop-daemon.sh (revision 723901) +++ hadoopcore/bin/hadoop-daemon.sh (working copy) @@ -1,143 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Runs a Hadoop command as a daemon. -# -# Environment Variables -# -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# HADOOP_LOG_DIR Where log files are stored. PWD by default.
-# HADOOP_MASTER host:path where hadoop code should be rsync'd from -# HADOOP_PID_DIR Where the pid files are stored. /tmp by default. -# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default -# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0. -## - -usage="Usage: hadoop-daemon.sh [--config confdir] [--hosts hostlistfile] (start|stop) command args..." - -# if no args specified, show usage -if [ $# -le 1 ]; then - echo $usage - exit 1 -fi - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# get arguments -startStop=$1 -shift -command=$1 -shift - -hadoop_rotate_log () -{ - log=$1; - num=5; - if [ -n "$2" ]; then - num=$2 - fi - if [ -f "$log" ]; then # rotate logs - while [ $num -gt 1 ]; do - prev=`expr $num - 1` - [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num" - num=$prev - done - mv "$log" "$log.$num"; - fi -} - -if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then - . "${HADOOP_CONF_DIR}/hadoop-env.sh" -fi - -# get log directory -if [ "$HADOOP_LOG_DIR" = "" ]; then - export HADOOP_LOG_DIR="$HADOOP_HOME/logs" -fi -mkdir -p "$HADOOP_LOG_DIR" - -if [ "$HADOOP_PID_DIR" = "" ]; then - HADOOP_PID_DIR=/tmp -fi - -if [ "$HADOOP_IDENT_STRING" = "" ]; then - export HADOOP_IDENT_STRING="$USER" -fi - -# some variables -export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log -export HADOOP_ROOT_LOGGER="INFO,DRFA" -log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out -pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid - -# Set default scheduling priority -if [ "$HADOOP_NICENESS" = "" ]; then - export HADOOP_NICENESS=0 -fi - -case $startStop in - - (start) - - mkdir -p "$HADOOP_PID_DIR" - - if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then - echo $command running as process `cat $pid`. Stop it first. - exit 1 - fi - fi - - if [ "$HADOOP_MASTER" != "" ]; then - echo rsync from $HADOOP_MASTER - rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME" - fi - - hadoop_rotate_log $log - echo starting $command, logging to $log - cd "$HADOOP_HOME" - nohup nice -n $HADOOP_NICENESS "$HADOOP_HOME"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null & - echo $! > $pid - sleep 1; head "$log" - ;; - - (stop) - - if [ -f $pid ]; then - if kill -0 `cat $pid` > /dev/null 2>&1; then - echo stopping $command - kill `cat $pid` - else - echo no $command to stop - fi - else - echo no $command to stop - fi - ;; - - (*) - echo $usage - exit 1 - ;; - -esac - - Index: hadoopcore/bin/stop-all.sh =================================================================== --- hadoopcore/bin/stop-all.sh (revision 723901) +++ hadoopcore/bin/stop-all.sh (working copy) @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - - -# Stop all hadoop daemons. Run this on master node. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -"$bin"/stop-mapred.sh --config $HADOOP_CONF_DIR -"$bin"/stop-dfs.sh --config $HADOOP_CONF_DIR Index: hadoopcore/bin/stop-mapred.sh =================================================================== --- hadoopcore/bin/stop-mapred.sh (revision 723901) +++ hadoopcore/bin/stop-mapred.sh (working copy) @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Stop hadoop map reduce daemons. Run this on master node. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop jobtracker -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop tasktracker - Index: hadoopcore/bin/hadoop-config.sh =================================================================== --- hadoopcore/bin/hadoop-config.sh (revision 723901) +++ hadoopcore/bin/hadoop-config.sh (working copy) @@ -1,68 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# included in all the hadoop scripts with source command -# should not be executable directly -# also should not be passed any arguments, since we need original $* - -# resolve links - $0 may be a softlink - -this="$0" -while [ -h "$this" ]; do - ls=`ls -ld "$this"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '.*/.*' > /dev/null; then - this="$link" - else - this=`dirname "$this"`/"$link" - fi -done - -# convert relative path to absolute path -bin=`dirname "$this"` -script=`basename "$this"` -bin=`cd "$bin"; pwd` -this="$bin/$script" - -# the root of the Hadoop installation -export HADOOP_HOME=`dirname "$this"`/.. - -#check to see if the conf dir is given as an optional argument -if [ $# -gt 1 ] -then - if [ "--config" = "$1" ] - then - shift - confdir=$1 - shift - HADOOP_CONF_DIR=$confdir - fi -fi - -# Allow alternate conf dir location. 
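# Editor's note (illustrative sketch, not part of the original script): the
# assignment below uses bash default expansion, ${VAR:-fallback}, so a value
# inherited from the environment or set via --config above wins, and
# $HADOOP_HOME/conf is used only when HADOOP_CONF_DIR is unset or empty.
# For example (path hypothetical):
#   unset HADOOP_CONF_DIR; HADOOP_HOME=/opt/hadoop
#   echo "${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"   # prints /opt/hadoop/conf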
-HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}" - -# check whether the slaves or the -# masters file was specified -if [ $# -gt 1 ] -then - if [ "--hosts" = "$1" ] - then - shift - slavesfile=$1 - shift - export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile" - fi -fi Index: hadoopcore/bin/slaves.sh =================================================================== --- hadoopcore/bin/slaves.sh (revision 723901) +++ hadoopcore/bin/slaves.sh (working copy) @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a shell command on all slave hosts. -# -# Environment Variables -# -# HADOOP_SLAVES File naming remote hosts. -# Default is ${HADOOP_CONF_DIR}/slaves. -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands. -# HADOOP_SSH_OPTS Options passed to ssh when running remote commands. -## - -usage="Usage: slaves.sh [--config confdir] command..." - -# if no args specified, show usage -if [ $# -le 0 ]; then - echo $usage - exit 1 -fi - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# If the slaves file is specified in the command line, -# then it takes precedence over the definition in -# hadoop-env.sh. Save it here. -HOSTLIST=$HADOOP_SLAVES - -if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then - . "${HADOOP_CONF_DIR}/hadoop-env.sh" -fi - -if [ "$HOSTLIST" = "" ]; then - if [ "$HADOOP_SLAVES" = "" ]; then - export HOSTLIST="${HADOOP_CONF_DIR}/slaves" - else - export HOSTLIST="${HADOOP_SLAVES}" - fi -fi - -for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do - ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \ - 2>&1 | sed "s/^/$slave: /" & - if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then - sleep $HADOOP_SLAVE_SLEEP - fi -done - -wait Index: hadoopcore/bin/hadoop-daemons.sh =================================================================== --- hadoopcore/bin/hadoop-daemons.sh (revision 723901) +++ hadoopcore/bin/hadoop-daemons.sh (working copy) @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a Hadoop command on all slave hosts. - -usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..." - -# if no args specified, show usage -if [ $# -le 1 ]; then - echo $usage - exit 1 -fi - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. $bin/hadoop-config.sh - -exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@" Index: hadoopcore/bin/rcc =================================================================== --- hadoopcore/bin/rcc (revision 723901) +++ hadoopcore/bin/rcc (working copy) @@ -1,99 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# The Hadoop record compiler -# -# Environment Variables -# -# JAVA_HOME The java implementation to use. Overrides JAVA_HOME. -# -# HADOOP_OPTS Extra Java runtime options. -# -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then - . "${HADOOP_CONF_DIR}/hadoop-env.sh" -fi - -# some Java parameters -if [ "$JAVA_HOME" != "" ]; then - #echo "run java in $JAVA_HOME" - JAVA_HOME=$JAVA_HOME -fi - -if [ "$JAVA_HOME" = "" ]; then - echo "Error: JAVA_HOME is not set." 
- exit 1 -fi - -JAVA=$JAVA_HOME/bin/java -JAVA_HEAP_MAX=-Xmx1000m - -# CLASSPATH initially contains $HADOOP_CONF_DIR -CLASSPATH="${HADOOP_CONF_DIR}" -CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar - -# for developers, add Hadoop classes to CLASSPATH -if [ -d "$HADOOP_HOME/build/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes -fi -if [ -d "$HADOOP_HOME/build/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build -fi -if [ -d "$HADOOP_HOME/build/test/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes -fi - -# so that filenames w/ spaces are handled correctly in loops below -IFS= - -# for releases, add core hadoop jar & webapps to CLASSPATH -if [ -d "$HADOOP_HOME/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME -fi -for f in $HADOOP_HOME/hadoop-*-core.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -# add libs to CLASSPATH -for f in $HADOOP_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -# restore ordinary behaviour -unset IFS - -CLASS='org.apache.hadoop.record.compiler.generated.Rcc' - -# cygwin path translation -if expr `uname` : 'CYGWIN*' > /dev/null; then - CLASSPATH=`cygpath -p -w "$CLASSPATH"` -fi - -# run it -exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" Index: hadoopcore/bin/stop-dfs.sh =================================================================== --- hadoopcore/bin/stop-dfs.sh (revision 723901) +++ hadoopcore/bin/stop-dfs.sh (working copy) @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Stop hadoop DFS daemons. Run this on master node. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop namenode -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop datanode -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters stop secondarynamenode - Index: hadoopcore/bin/hadoop =================================================================== --- hadoopcore/bin/hadoop (revision 723901) +++ hadoopcore/bin/hadoop (working copy) @@ -1,273 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# The Hadoop command script -# -# Environment Variables -# -# JAVA_HOME The java implementation to use. Overrides JAVA_HOME. -# -# HADOOP_CLASSPATH Extra Java CLASSPATH entries. -# -# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB. -# Default is 1000. -# -# HADOOP_OPTS Extra Java runtime options. -# -# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS -# HADOOP_CLIENT_OPTS when the respective command is run. -# HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker -# for e.g. HADOOP_CLIENT_OPTS applies to -# more than one command (fs, dfs, fsck, -# dfsadmin etc) -# -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# -# HADOOP_ROOT_LOGGER The root appender. Default is INFO,console -# - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -cygwin=false -case "`uname`" in -CYGWIN*) cygwin=true;; -esac - -# if no args specified, show usage -if [ $# = 0 ]; then - echo "Usage: hadoop [--config confdir] COMMAND" - echo "where COMMAND is one of:" - echo " namenode -format format the DFS filesystem" - echo " secondarynamenode run the DFS secondary namenode" - echo " namenode run the DFS namenode" - echo " datanode run a DFS datanode" - echo " dfsadmin run a DFS admin client" - echo " fsck run a DFS filesystem checking utility" - echo " fs run a generic filesystem user client" - echo " balancer run a cluster balancing utility" - echo " jobtracker run the MapReduce job Tracker node" - echo " pipes run a Pipes job" - echo " tasktracker run a MapReduce task Tracker node" - echo " job manipulate MapReduce jobs" - echo " queue get information regarding JobQueues" - echo " version print the version" - echo " jar run a jar file" - echo " distcp copy file or directories recursively" - echo " archive -archiveName NAME * create a hadoop archive" - echo " daemonlog get/set the log level for each daemon" - echo " or" - echo " CLASSNAME run the class named CLASSNAME" - echo "Most commands print help when invoked w/o parameters." - exit 1 -fi - -# get arguments -COMMAND=$1 -shift - -if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then - . "${HADOOP_CONF_DIR}/hadoop-env.sh" -fi - -# some Java parameters -if [ "$JAVA_HOME" != "" ]; then - #echo "run java in $JAVA_HOME" - JAVA_HOME=$JAVA_HOME -fi - -if [ "$JAVA_HOME" = "" ]; then - echo "Error: JAVA_HOME is not set." 
- exit 1 -fi - -JAVA=$JAVA_HOME/bin/java -JAVA_HEAP_MAX=-Xmx1000m - -# check envvars which might override default args -if [ "$HADOOP_HEAPSIZE" != "" ]; then - #echo "run with heapsize $HADOOP_HEAPSIZE" - JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m" - #echo $JAVA_HEAP_MAX -fi - -# CLASSPATH initially contains $HADOOP_CONF_DIR -CLASSPATH="${HADOOP_CONF_DIR}" -CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar - -# for developers, add Hadoop classes to CLASSPATH -if [ -d "$HADOOP_HOME/build/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes -fi -if [ -d "$HADOOP_HOME/build/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build -fi -if [ -d "$HADOOP_HOME/build/test/classes" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes -fi -if [ -d "$HADOOP_HOME/build/tools" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools -fi - -# so that filenames w/ spaces are handled correctly in loops below -IFS= - -# for releases, add core hadoop jar & webapps to CLASSPATH -if [ -d "$HADOOP_HOME/webapps" ]; then - CLASSPATH=${CLASSPATH}:$HADOOP_HOME -fi -for f in $HADOOP_HOME/hadoop-*-core.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -# add libs to CLASSPATH -for f in $HADOOP_HOME/lib/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do - CLASSPATH=${CLASSPATH}:$f; -done - -for f in $HADOOP_HOME/hadoop-*-tools.jar; do - TOOL_PATH=${TOOL_PATH}:$f; -done -for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do - TOOL_PATH=${TOOL_PATH}:$f; -done - -# add user-specified CLASSPATH last -if [ "$HADOOP_CLASSPATH" != "" ]; then - CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH} -fi - -# default log directory & file -if [ "$HADOOP_LOG_DIR" = "" ]; then - HADOOP_LOG_DIR="$HADOOP_HOME/logs" -fi -if [ "$HADOOP_LOGFILE" = "" ]; then - HADOOP_LOGFILE='hadoop.log' -fi - -# restore ordinary behaviour -unset IFS - -# figure out which class to run -if [ "$COMMAND" = "namenode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS" -elif [ "$COMMAND" = "secondarynamenode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS" -elif [ "$COMMAND" = "datanode" ] ; then - CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode' - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS" -elif [ "$COMMAND" = "fs" ] ; then - CLASS=org.apache.hadoop.fs.FsShell - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "dfs" ] ; then - CLASS=org.apache.hadoop.fs.FsShell - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "dfsadmin" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "fsck" ] ; then - CLASS=org.apache.hadoop.hdfs.tools.DFSck - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "balancer" ] ; then - CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS" -elif [ "$COMMAND" = "jobtracker" ] ; then - CLASS=org.apache.hadoop.mapred.JobTracker - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS" -elif [ "$COMMAND" = "tasktracker" ] ; then - CLASS=org.apache.hadoop.mapred.TaskTracker - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS" -elif [ "$COMMAND" = "job" ] ; then - CLASS=org.apache.hadoop.mapred.JobClient -elif [ "$COMMAND" = "queue" ] ; then - CLASS=org.apache.hadoop.mapred.JobQueueClient -elif [ "$COMMAND" = "pipes" ] ; then - 
CLASS=org.apache.hadoop.mapred.pipes.Submitter - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "version" ] ; then - CLASS=org.apache.hadoop.util.VersionInfo - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "jar" ] ; then - CLASS=org.apache.hadoop.mapred.JobShell -elif [ "$COMMAND" = "distcp" ] ; then - CLASS=org.apache.hadoop.tools.DistCp - CLASSPATH=${CLASSPATH}:${TOOL_PATH} - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "daemonlog" ] ; then - CLASS=org.apache.hadoop.log.LogLevel - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "archive" ] ; then - CLASS=org.apache.hadoop.tools.HadoopArchives - CLASSPATH=${CLASSPATH}:${TOOL_PATH} - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -elif [ "$COMMAND" = "sampler" ] ; then - CLASS=org.apache.hadoop.mapred.lib.InputSampler - HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" -else - CLASS=$COMMAND -fi - -# cygwin path translation -if $cygwin; then - CLASSPATH=`cygpath -p -w "$CLASSPATH"` - HADOOP_HOME=`cygpath -d "$HADOOP_HOME"` - HADOOP_LOG_DIR=`cygpath -d "$HADOOP_LOG_DIR"` - TOOL_PATH=`cygpath -p -w "$TOOL_PATH"` -fi -# setup 'java.library.path' for native-hadoop code if necessary -JAVA_LIBRARY_PATH='' -if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then - JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"` - - if [ -d "$HADOOP_HOME/build/native" ]; then - JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib - fi - - if [ -d "${HADOOP_HOME}/lib/native" ]; then - if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then - JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} - else - JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM} - fi - fi -fi - -# cygwin path translation -if $cygwin; then - JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"` -fi - -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" -if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then - HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" -fi - -# run it -exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@" Index: hadoopcore/bin/start-balancer.sh =================================================================== --- hadoopcore/bin/start-balancer.sh (revision 723901) +++ hadoopcore/bin/start-balancer.sh (working copy) @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. 
"$bin"/hadoop-config.sh - -# Start balancer daemon. - -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start balancer $@ Index: hadoopcore/bin/start-all.sh =================================================================== --- hadoopcore/bin/start-all.sh (revision 723901) +++ hadoopcore/bin/start-all.sh (working copy) @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Start all hadoop daemons. Run this on master node. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# start dfs daemons -"$bin"/start-dfs.sh --config $HADOOP_CONF_DIR - -# start mapred daemons -"$bin"/start-mapred.sh --config $HADOOP_CONF_DIR Index: hadoopcore/bin/start-mapred.sh =================================================================== --- hadoopcore/bin/start-mapred.sh (revision 723901) +++ hadoopcore/bin/start-mapred.sh (working copy) @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Start hadoop map reduce daemons. Run this on master node. - -bin=`dirname "$0"` -bin=`cd "$bin"; pwd` - -. "$bin"/hadoop-config.sh - -# start mapred daemons -# start jobtracker first to minimize connection errors at startup -"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start jobtracker -"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start tasktracker Index: build-common.xml =================================================================== --- build-common.xml (revision 723901) +++ build-common.xml (working copy) @@ -18,7 +18,7 @@ --> - + @@ -26,15 +26,19 @@ - - + + + + + + @@ -68,6 +72,21 @@ + + + + + + + + + + + + + + + @@ -107,6 +126,7 @@ + @@ -121,7 +141,7 @@ - + --> + Index: conf/hive-default.xml =================================================================== --- conf/hive-default.xml (revision 723901) +++ conf/hive-default.xml (working copy) @@ -9,21 +9,6 @@ - - - hadoop.bin.path - ${user.dir}/hadoopcore/bin/hadoop - - Path to hadoop binary. 
Assumes that by default we are executing from hive - - - - hadoop.config.dir - ${user.dir}/hadoopcore/conf - - Path to hadoop configuration. Again assumes that by default we are executing from hive/ - - hive.exec.scratchdir Index: common/ivy.xml =================================================================== --- common/ivy.xml (revision 0) +++ common/ivy.xml (revision 0) @@ -0,0 +1,8 @@ + + + + + + + + Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java =================================================================== --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 723901) +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy) @@ -53,7 +53,7 @@ // hadoop stuff HADOOPBIN("hadoop.bin.path", System.getenv("HADOOP_HOME") + "/bin/hadoop"), - HADOOPCONF("hadoop.config.dir", System.getProperty("user.dir") + "/../../../conf"), + HADOOPCONF("hadoop.config.dir", System.getenv("HADOOP_HOME") + "/conf"), HADOOPFS("fs.default.name", "file:///"), HADOOPMAPFILENAME("map.input.file", null), HADOOPJT("mapred.job.tracker", "local"), Index: serde/ivy.xml =================================================================== --- serde/ivy.xml (revision 0) +++ serde/ivy.xml (revision 0) @@ -0,0 +1,8 @@ + + + + + + + + Index: metastore/ivy.xml =================================================================== --- metastore/ivy.xml (revision 0) +++ metastore/ivy.xml (revision 0) @@ -0,0 +1,8 @@ + + + + + + + + Index: ivy/get_ivy.xml =================================================================== --- ivy/get_ivy.xml (revision 0) +++ ivy/get_ivy.xml (revision 0) @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Index: ivy/ivysettings.xml =================================================================== --- ivy/ivysettings.xml (revision 0) +++ ivy/ivysettings.xml (revision 0) @@ -0,0 +1,28 @@ + + + + + + + + + + Index: cli/ivy.xml =================================================================== --- cli/ivy.xml (revision 0) +++ cli/ivy.xml (revision 0) @@ -0,0 +1,8 @@ + + + + + + + + Index: data/conf/hive-site.xml =================================================================== --- data/conf/hive-site.xml (revision 723901) +++ data/conf/hive-site.xml (working copy) @@ -9,20 +9,6 @@ - - - - hadoop.bin.path - ${user.dir}/../hadoopcore/bin/hadoop - Path to hadoop binary. Assumes that by default we are executing from hive/ - - - - hadoop.config.dir - ${user.dir}/../hadoopcore/conf - Path to hadoop configuration. 
Again assumes that by default we are executing from hive/ - - hive.exec.scratchdir Index: ql/ivy.xml =================================================================== --- ql/ivy.xml (revision 0) +++ ql/ivy.xml (revision 0) @@ -0,0 +1,8 @@ + + + + + + + + Index: ql/src/test/results/clientpositive/input3_limit.q.out =================================================================== --- ql/src/test/results/clientpositive/input3_limit.q.out (revision 723901) +++ ql/src/test/results/clientpositive/input3_limit.q.out (working copy) @@ -48,23 +48,23 @@ name: t2 +128 val_128 +150 val_150 +165 val_165 +193 val_193 +213 val_213 +224 val_224 238 val_238 -86 val_86 -311 val_311 -27 val_27 -165 val_165 -409 val_409 255 val_255 -278 val_278 -98 val_98 -484 val_484 265 val_265 -193 val_193 -401 val_401 -150 val_150 +27 val_27 273 val_273 -224 val_224 +278 val_278 +311 val_311 369 val_369 +401 val_401 +409 val_409 +484 val_484 66 val_66 -128 val_128 -213 val_213 +86 val_86 +98 val_98 Index: ql/src/test/results/clientpositive/mapreduce2.q.out =================================================================== --- ql/src/test/results/clientpositive/mapreduce2.q.out (revision 723901) +++ ql/src/test/results/clientpositive/mapreduce2.q.out (working copy) @@ -63,503 +63,503 @@ name: dest1 -238 23 8 val_238 -86 8 6 val_86 -311 31 1 val_311 -27 2 7 val_27 -165 16 5 val_165 -409 40 9 val_409 -255 25 5 val_255 -278 27 8 val_278 -98 9 8 val_98 -484 48 4 val_484 -265 26 5 val_265 -193 19 3 val_193 -401 40 1 val_401 -150 15 0 val_150 -273 27 3 val_273 -224 22 4 val_224 -369 36 9 val_369 -66 6 6 val_66 +0 0 0 val_0 +0 0 0 val_0 +0 0 0 val_0 +10 1 0 val_10 +100 10 0 val_100 +100 10 0 val_100 +103 10 3 val_103 +103 10 3 val_103 +104 10 4 val_104 +104 10 4 val_104 +105 10 5 val_105 +11 1 1 val_11 +111 11 1 val_111 +113 11 3 val_113 +113 11 3 val_113 +114 11 4 val_114 +116 11 6 val_116 +118 11 8 val_118 +118 11 8 val_118 +119 11 9 val_119 +119 11 9 val_119 +119 11 9 val_119 +12 1 2 val_12 +12 1 2 val_12 +120 12 0 val_120 +120 12 0 val_120 +125 12 5 val_125 +125 12 5 val_125 +126 12 6 val_126 128 12 8 val_128 -213 21 3 val_213 +128 12 8 val_128 +128 12 8 val_128 +129 12 9 val_129 +129 12 9 val_129 +131 13 1 val_131 +133 13 3 val_133 +134 13 4 val_134 +134 13 4 val_134 +136 13 6 val_136 +137 13 7 val_137 +137 13 7 val_137 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +138 13 8 val_138 +143 14 3 val_143 +145 14 5 val_145 146 14 6 val_146 -406 40 6 val_406 -429 42 9 val_429 -374 37 4 val_374 +146 14 6 val_146 +149 14 9 val_149 +149 14 9 val_149 +15 1 5 val_15 +15 1 5 val_15 +150 15 0 val_150 152 15 2 val_152 -469 46 9 val_469 -145 14 5 val_145 -495 49 5 val_495 -37 3 7 val_37 -327 32 7 val_327 -281 28 1 val_281 -277 27 7 val_277 -209 20 9 val_209 -15 1 5 val_15 -82 8 2 val_82 -403 40 3 val_403 +152 15 2 val_152 +153 15 3 val_153 +155 15 5 val_155 +156 15 6 val_156 +157 15 7 val_157 +158 15 8 val_158 +160 16 0 val_160 +162 16 2 val_162 +163 16 3 val_163 +164 16 4 val_164 +164 16 4 val_164 +165 16 5 val_165 +165 16 5 val_165 166 16 6 val_166 -417 41 7 val_417 -430 43 0 val_430 -252 25 2 val_252 -292 29 2 val_292 -219 21 9 val_219 -287 28 7 val_287 -153 15 3 val_153 -193 19 3 val_193 -338 33 8 val_338 -446 44 6 val_446 -459 45 9 val_459 -394 39 4 val_394 -237 23 7 val_237 -482 48 2 val_482 +167 16 7 val_167 +167 16 7 val_167 +167 16 7 val_167 +168 16 8 val_168 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +169 16 9 val_169 +17 1 7 val_17 +170 17 0 val_170 +172 17 2 val_172 +172 17 2 val_172 174 17 4 val_174 
-413 41 3 val_413 -494 49 4 val_494 -207 20 7 val_207 -199 19 9 val_199 -466 46 6 val_466 -208 20 8 val_208 174 17 4 val_174 -399 39 9 val_399 -396 39 6 val_396 -247 24 7 val_247 -417 41 7 val_417 -489 48 9 val_489 -162 16 2 val_162 -377 37 7 val_377 -397 39 7 val_397 -309 30 9 val_309 -365 36 5 val_365 -266 26 6 val_266 -439 43 9 val_439 -342 34 2 val_342 -367 36 7 val_367 -325 32 5 val_325 -167 16 7 val_167 +175 17 5 val_175 +175 17 5 val_175 +176 17 6 val_176 +176 17 6 val_176 +177 17 7 val_177 +178 17 8 val_178 +179 17 9 val_179 +179 17 9 val_179 +18 1 8 val_18 +18 1 8 val_18 +180 18 0 val_180 +181 18 1 val_181 +183 18 3 val_183 +186 18 6 val_186 +187 18 7 val_187 +187 18 7 val_187 +187 18 7 val_187 +189 18 9 val_189 +19 1 9 val_19 +190 19 0 val_190 +191 19 1 val_191 +191 19 1 val_191 +192 19 2 val_192 +193 19 3 val_193 +193 19 3 val_193 +193 19 3 val_193 +194 19 4 val_194 195 19 5 val_195 -475 47 5 val_475 -17 1 7 val_17 -113 11 3 val_113 -155 15 5 val_155 +195 19 5 val_195 +196 19 6 val_196 +197 19 7 val_197 +197 19 7 val_197 +199 19 9 val_199 +199 19 9 val_199 +199 19 9 val_199 +2 0 2 val_2 +20 2 0 val_20 +200 20 0 val_200 +200 20 0 val_200 +201 20 1 val_201 +202 20 2 val_202 203 20 3 val_203 -339 33 9 val_339 -0 0 0 val_0 -455 45 5 val_455 -128 12 8 val_128 -311 31 1 val_311 -316 31 6 val_316 -57 5 7 val_57 -302 30 2 val_302 +203 20 3 val_203 205 20 5 val_205 -149 14 9 val_149 -438 43 8 val_438 -345 34 5 val_345 -129 12 9 val_129 -170 17 0 val_170 -20 2 0 val_20 -489 48 9 val_489 -157 15 7 val_157 -378 37 8 val_378 -221 22 1 val_221 -92 9 2 val_92 -111 11 1 val_111 -47 4 7 val_47 -72 7 2 val_72 -4 0 4 val_4 -280 28 0 val_280 -35 3 5 val_35 -427 42 7 val_427 -277 27 7 val_277 +205 20 5 val_205 +207 20 7 val_207 +207 20 7 val_207 208 20 8 val_208 -356 35 6 val_356 -399 39 9 val_399 -169 16 9 val_169 -382 38 2 val_382 -498 49 8 val_498 -125 12 5 val_125 -386 38 6 val_386 -437 43 7 val_437 -469 46 9 val_469 -192 19 2 val_192 -286 28 6 val_286 -187 18 7 val_187 -176 17 6 val_176 -54 5 4 val_54 -459 45 9 val_459 -51 5 1 val_51 -138 13 8 val_138 -103 10 3 val_103 -239 23 9 val_239 +208 20 8 val_208 +208 20 8 val_208 +209 20 9 val_209 +209 20 9 val_209 213 21 3 val_213 +213 21 3 val_213 +214 21 4 val_214 216 21 6 val_216 -430 43 0 val_430 -278 27 8 val_278 -176 17 6 val_176 -289 28 9 val_289 +216 21 6 val_216 +217 21 7 val_217 +217 21 7 val_217 +218 21 8 val_218 +219 21 9 val_219 +219 21 9 val_219 221 22 1 val_221 -65 6 5 val_65 -318 31 8 val_318 -332 33 2 val_332 -311 31 1 val_311 -275 27 5 val_275 -137 13 7 val_137 -241 24 1 val_241 -83 8 3 val_83 -333 33 3 val_333 -180 18 0 val_180 -284 28 4 val_284 -12 1 2 val_12 +221 22 1 val_221 +222 22 2 val_222 +223 22 3 val_223 +223 22 3 val_223 +224 22 4 val_224 +224 22 4 val_224 +226 22 6 val_226 +228 22 8 val_228 +229 22 9 val_229 +229 22 9 val_229 230 23 0 val_230 -181 18 1 val_181 -67 6 7 val_67 -260 26 0 val_260 -404 40 4 val_404 -384 38 4 val_384 -489 48 9 val_489 -353 35 3 val_353 -373 37 3 val_373 -272 27 2 val_272 -138 13 8 val_138 -217 21 7 val_217 -84 8 4 val_84 -348 34 8 val_348 -466 46 6 val_466 -58 5 8 val_58 -8 0 8 val_8 -411 41 1 val_411 230 23 0 val_230 -208 20 8 val_208 -348 34 8 val_348 +230 23 0 val_230 +230 23 0 val_230 +230 23 0 val_230 +233 23 3 val_233 +233 23 3 val_233 +235 23 5 val_235 +237 23 7 val_237 +237 23 7 val_237 +238 23 8 val_238 +238 23 8 val_238 +239 23 9 val_239 +239 23 9 val_239 24 2 4 val_24 -463 46 3 val_463 -431 43 1 val_431 -179 17 9 val_179 -172 17 2 val_172 -42 4 2 val_42 -129 12 9 val_129 -158 15 8 
val_158 -119 11 9 val_119 -496 49 6 val_496 -0 0 0 val_0 -322 32 2 val_322 -197 19 7 val_197 -468 46 8 val_468 -393 39 3 val_393 -454 45 4 val_454 -100 10 0 val_100 -298 29 8 val_298 -199 19 9 val_199 -191 19 1 val_191 -418 41 8 val_418 -96 9 6 val_96 +24 2 4 val_24 +241 24 1 val_241 +242 24 2 val_242 +242 24 2 val_242 +244 24 4 val_244 +247 24 7 val_247 +248 24 8 val_248 +249 24 9 val_249 +252 25 2 val_252 +255 25 5 val_255 +255 25 5 val_255 +256 25 6 val_256 +256 25 6 val_256 +257 25 7 val_257 +258 25 8 val_258 26 2 6 val_26 -165 16 5 val_165 -327 32 7 val_327 -230 23 0 val_230 -205 20 5 val_205 -120 12 0 val_120 -131 13 1 val_131 -51 5 1 val_51 -404 40 4 val_404 -43 4 3 val_43 -436 43 6 val_436 -156 15 6 val_156 -469 46 9 val_469 -468 46 8 val_468 -308 30 8 val_308 -95 9 5 val_95 -196 19 6 val_196 -288 28 8 val_288 -481 48 1 val_481 -457 45 7 val_457 -98 9 8 val_98 +26 2 6 val_26 +260 26 0 val_260 +262 26 2 val_262 +263 26 3 val_263 +265 26 5 val_265 +265 26 5 val_265 +266 26 6 val_266 +27 2 7 val_27 +272 27 2 val_272 +272 27 2 val_272 +273 27 3 val_273 +273 27 3 val_273 +273 27 3 val_273 +274 27 4 val_274 +275 27 5 val_275 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +277 27 7 val_277 +278 27 8 val_278 +278 27 8 val_278 +28 2 8 val_28 +280 28 0 val_280 +280 28 0 val_280 +281 28 1 val_281 +281 28 1 val_281 282 28 2 val_282 -197 19 7 val_197 -187 18 7 val_187 -318 31 8 val_318 -318 31 8 val_318 -409 40 9 val_409 -470 47 0 val_470 -137 13 7 val_137 -369 36 9 val_369 -316 31 6 val_316 -169 16 9 val_169 -413 41 3 val_413 -85 8 5 val_85 -77 7 7 val_77 -0 0 0 val_0 -490 49 0 val_490 -87 8 7 val_87 -364 36 4 val_364 -179 17 9 val_179 -118 11 8 val_118 -134 13 4 val_134 -395 39 5 val_395 282 28 2 val_282 -138 13 8 val_138 -238 23 8 val_238 -419 41 9 val_419 -15 1 5 val_15 -118 11 8 val_118 -72 7 2 val_72 -90 9 0 val_90 +283 28 3 val_283 +284 28 4 val_284 +285 28 5 val_285 +286 28 6 val_286 +287 28 7 val_287 +288 28 8 val_288 +288 28 8 val_288 +289 28 9 val_289 +291 29 1 val_291 +292 29 2 val_292 +296 29 6 val_296 +298 29 8 val_298 +298 29 8 val_298 +298 29 8 val_298 +30 3 0 val_30 +302 30 2 val_302 +305 30 5 val_305 +306 30 6 val_306 307 30 7 val_307 -19 1 9 val_19 -435 43 5 val_435 -10 1 0 val_10 -277 27 7 val_277 -273 27 3 val_273 -306 30 6 val_306 -224 22 4 val_224 +307 30 7 val_307 +308 30 8 val_308 309 30 9 val_309 -389 38 9 val_389 +309 30 9 val_309 +310 31 0 val_310 +311 31 1 val_311 +311 31 1 val_311 +311 31 1 val_311 +315 31 5 val_315 +316 31 6 val_316 +316 31 6 val_316 +316 31 6 val_316 +317 31 7 val_317 +317 31 7 val_317 +318 31 8 val_318 +318 31 8 val_318 +318 31 8 val_318 +321 32 1 val_321 +321 32 1 val_321 +322 32 2 val_322 +322 32 2 val_322 +323 32 3 val_323 +325 32 5 val_325 +325 32 5 val_325 327 32 7 val_327 -242 24 2 val_242 -369 36 9 val_369 -392 39 2 val_392 -272 27 2 val_272 +327 32 7 val_327 +327 32 7 val_327 +33 3 3 val_33 331 33 1 val_331 -401 40 1 val_401 -242 24 2 val_242 -452 45 2 val_452 -177 17 7 val_177 -226 22 6 val_226 -5 0 5 val_5 -497 49 7 val_497 -402 40 2 val_402 -396 39 6 val_396 -317 31 7 val_317 -395 39 5 val_395 -58 5 8 val_58 -35 3 5 val_35 +331 33 1 val_331 +332 33 2 val_332 +333 33 3 val_333 +333 33 3 val_333 +335 33 5 val_335 336 33 6 val_336 -95 9 5 val_95 -11 1 1 val_11 -168 16 8 val_168 +338 33 8 val_338 +339 33 9 val_339 34 3 4 val_34 -229 22 9 val_229 -233 23 3 val_233 -143 14 3 val_143 -472 47 2 val_472 -322 32 2 val_322 -498 49 8 val_498 -160 16 0 val_160 -195 19 5 val_195 -42 4 2 val_42 -321 32 1 val_321 -430 43 0 val_430 -119 11 9 
val_119 -489 48 9 val_489 -458 45 8 val_458 -78 7 8 val_78 -76 7 6 val_76 -41 4 1 val_41 -223 22 3 val_223 -492 49 2 val_492 -149 14 9 val_149 -449 44 9 val_449 -218 21 8 val_218 -228 22 8 val_228 -138 13 8 val_138 -453 45 3 val_453 -30 3 0 val_30 -209 20 9 val_209 -64 6 4 val_64 -468 46 8 val_468 -76 7 6 val_76 -74 7 4 val_74 +341 34 1 val_341 342 34 2 val_342 -69 6 9 val_69 -230 23 0 val_230 -33 3 3 val_33 -368 36 8 val_368 -103 10 3 val_103 -296 29 6 val_296 -113 11 3 val_113 -216 21 6 val_216 -367 36 7 val_367 +342 34 2 val_342 344 34 4 val_344 -167 16 7 val_167 -274 27 4 val_274 -219 21 9 val_219 -239 23 9 val_239 -485 48 5 val_485 -116 11 6 val_116 -223 22 3 val_223 -256 25 6 val_256 -263 26 3 val_263 -70 7 0 val_70 -487 48 7 val_487 -480 48 0 val_480 -401 40 1 val_401 -288 28 8 val_288 -191 19 1 val_191 -5 0 5 val_5 -244 24 4 val_244 -438 43 8 val_438 -128 12 8 val_128 -467 46 7 val_467 -432 43 2 val_432 -202 20 2 val_202 -316 31 6 val_316 -229 22 9 val_229 -469 46 9 val_469 -463 46 3 val_463 -280 28 0 val_280 -2 0 2 val_2 +344 34 4 val_344 +345 34 5 val_345 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 +348 34 8 val_348 35 3 5 val_35 -283 28 3 val_283 -331 33 1 val_331 -235 23 5 val_235 -80 8 0 val_80 -44 4 4 val_44 -193 19 3 val_193 -321 32 1 val_321 -335 33 5 val_335 -104 10 4 val_104 -466 46 6 val_466 +35 3 5 val_35 +35 3 5 val_35 +351 35 1 val_351 +353 35 3 val_353 +353 35 3 val_353 +356 35 6 val_356 +360 36 0 val_360 +362 36 2 val_362 +364 36 4 val_364 +365 36 5 val_365 366 36 6 val_366 -175 17 5 val_175 +367 36 7 val_367 +367 36 7 val_367 +368 36 8 val_368 +369 36 9 val_369 +369 36 9 val_369 +369 36 9 val_369 +37 3 7 val_37 +37 3 7 val_37 +373 37 3 val_373 +374 37 4 val_374 +375 37 5 val_375 +377 37 7 val_377 +378 37 8 val_378 +379 37 9 val_379 +382 38 2 val_382 +382 38 2 val_382 +384 38 4 val_384 +384 38 4 val_384 +384 38 4 val_384 +386 38 6 val_386 +389 38 9 val_389 +392 39 2 val_392 +393 39 3 val_393 +394 39 4 val_394 +395 39 5 val_395 +395 39 5 val_395 +396 39 6 val_396 +396 39 6 val_396 +396 39 6 val_396 +397 39 7 val_397 +397 39 7 val_397 +399 39 9 val_399 +399 39 9 val_399 +4 0 4 val_4 +400 40 0 val_400 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +401 40 1 val_401 +402 40 2 val_402 403 40 3 val_403 -483 48 3 val_483 -53 5 3 val_53 -105 10 5 val_105 -257 25 7 val_257 +403 40 3 val_403 +403 40 3 val_403 +404 40 4 val_404 +404 40 4 val_404 406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +406 40 6 val_406 +407 40 7 val_407 409 40 9 val_409 -190 19 0 val_190 -406 40 6 val_406 -401 40 1 val_401 -114 11 4 val_114 -258 25 8 val_258 -90 9 0 val_90 -203 20 3 val_203 -262 26 2 val_262 -348 34 8 val_348 +409 40 9 val_409 +409 40 9 val_409 +41 4 1 val_41 +411 41 1 val_411 +413 41 3 val_413 +413 41 3 val_413 +414 41 4 val_414 +414 41 4 val_414 +417 41 7 val_417 +417 41 7 val_417 +417 41 7 val_417 +418 41 8 val_418 +419 41 9 val_419 +42 4 2 val_42 +42 4 2 val_42 +421 42 1 val_421 424 42 4 val_424 -12 1 2 val_12 -396 39 6 val_396 -201 20 1 val_201 -217 21 7 val_217 -164 16 4 val_164 +424 42 4 val_424 +427 42 7 val_427 +429 42 9 val_429 +429 42 9 val_429 +43 4 3 val_43 +430 43 0 val_430 +430 43 0 val_430 +430 43 0 val_430 431 43 1 val_431 -454 45 4 val_454 -478 47 8 val_478 -298 29 8 val_298 -125 12 5 val_125 431 43 1 val_431 -164 16 4 val_164 -424 42 4 val_424 -187 18 7 val_187 -382 38 2 val_382 -5 0 5 val_5 -70 7 0 val_70 -397 39 7 val_397 -480 48 0 val_480 -291 29 1 val_291 -24 2 4 val_24 -351 35 1 val_351 -255 25 5 val_255 
-104 10 4 val_104 -70 7 0 val_70 -163 16 3 val_163 +431 43 1 val_431 +432 43 2 val_432 +435 43 5 val_435 +436 43 6 val_436 +437 43 7 val_437 438 43 8 val_438 -119 11 9 val_119 -414 41 4 val_414 -200 20 0 val_200 -491 49 1 val_491 -237 23 7 val_237 +438 43 8 val_438 +438 43 8 val_438 439 43 9 val_439 -360 36 0 val_360 -248 24 8 val_248 -479 47 9 val_479 -305 30 5 val_305 -417 41 7 val_417 -199 19 9 val_199 +439 43 9 val_439 +44 4 4 val_44 +443 44 3 val_443 444 44 4 val_444 -120 12 0 val_120 -429 42 9 val_429 -169 16 9 val_169 -443 44 3 val_443 -323 32 3 val_323 -325 32 5 val_325 -277 27 7 val_277 -230 23 0 val_230 -478 47 8 val_478 -178 17 8 val_178 -468 46 8 val_468 -310 31 0 val_310 -317 31 7 val_317 -333 33 3 val_333 -493 49 3 val_493 +446 44 6 val_446 +448 44 8 val_448 +449 44 9 val_449 +452 45 2 val_452 +453 45 3 val_453 +454 45 4 val_454 +454 45 4 val_454 +454 45 4 val_454 +455 45 5 val_455 +457 45 7 val_457 +458 45 8 val_458 +458 45 8 val_458 +459 45 9 val_459 +459 45 9 val_459 460 46 0 val_460 -207 20 7 val_207 -249 24 9 val_249 -265 26 5 val_265 -480 48 0 val_480 -83 8 3 val_83 -136 13 6 val_136 -353 35 3 val_353 -172 17 2 val_172 -214 21 4 val_214 462 46 2 val_462 -233 23 3 val_233 -406 40 6 val_406 -133 13 3 val_133 -175 17 5 val_175 -189 18 9 val_189 -454 45 4 val_454 -375 37 5 val_375 -401 40 1 val_401 -421 42 1 val_421 -407 40 7 val_407 -384 38 4 val_384 -256 25 6 val_256 -26 2 6 val_26 -134 13 4 val_134 -67 6 7 val_67 -384 38 4 val_384 -379 37 9 val_379 -18 1 8 val_18 462 46 2 val_462 +463 46 3 val_463 +463 46 3 val_463 +466 46 6 val_466 +466 46 6 val_466 +466 46 6 val_466 +467 46 7 val_467 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +468 46 8 val_468 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +469 46 9 val_469 +47 4 7 val_47 +470 47 0 val_470 +472 47 2 val_472 +475 47 5 val_475 +477 47 7 val_477 +478 47 8 val_478 +478 47 8 val_478 +479 47 9 val_479 +480 48 0 val_480 +480 48 0 val_480 +480 48 0 val_480 +481 48 1 val_481 +482 48 2 val_482 +483 48 3 val_483 +484 48 4 val_484 +485 48 5 val_485 +487 48 7 val_487 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +489 48 9 val_489 +490 49 0 val_490 +491 49 1 val_491 492 49 2 val_492 -100 10 0 val_100 -298 29 8 val_298 -9 0 9 val_9 -341 34 1 val_341 +492 49 2 val_492 +493 49 3 val_493 +494 49 4 val_494 +495 49 5 val_495 +496 49 6 val_496 +497 49 7 val_497 498 49 8 val_498 -146 14 6 val_146 -458 45 8 val_458 -362 36 2 val_362 -186 18 6 val_186 -285 28 5 val_285 -348 34 8 val_348 -167 16 7 val_167 -18 1 8 val_18 -273 27 3 val_273 -183 18 3 val_183 -281 28 1 val_281 -344 34 4 val_344 -97 9 7 val_97 -469 46 9 val_469 -315 31 5 val_315 +498 49 8 val_498 +498 49 8 val_498 +5 0 5 val_5 +5 0 5 val_5 +5 0 5 val_5 +51 5 1 val_51 +51 5 1 val_51 +53 5 3 val_53 +54 5 4 val_54 +57 5 7 val_57 +58 5 8 val_58 +58 5 8 val_58 +64 6 4 val_64 +65 6 5 val_65 +66 6 6 val_66 +67 6 7 val_67 +67 6 7 val_67 +69 6 9 val_69 +70 7 0 val_70 +70 7 0 val_70 +70 7 0 val_70 +72 7 2 val_72 +72 7 2 val_72 +74 7 4 val_74 +76 7 6 val_76 +76 7 6 val_76 +77 7 7 val_77 +78 7 8 val_78 +8 0 8 val_8 +80 8 0 val_80 +82 8 2 val_82 +83 8 3 val_83 +83 8 3 val_83 84 8 4 val_84 -28 2 8 val_28 -37 3 7 val_37 -448 44 8 val_448 -152 15 2 val_152 -348 34 8 val_348 -307 30 7 val_307 -194 19 4 val_194 -414 41 4 val_414 -477 47 7 val_477 -222 22 2 val_222 -126 12 6 val_126 +84 8 4 val_84 +85 8 5 val_85 +86 8 6 val_86 +87 8 7 val_87 +9 0 9 val_9 90 9 0 val_90 -169 16 9 val_169 -403 40 3 val_403 -400 40 0 val_400 -200 20 0 val_200 +90 9 0 val_90 +90 
9 0 val_90 +92 9 2 val_92 +95 9 5 val_95 +95 9 5 val_95 +96 9 6 val_96 97 9 7 val_97 +97 9 7 val_97 +98 9 8 val_98 +98 9 8 val_98 Index: ql/src/test/queries/clientpositive/union.q =================================================================== --- ql/src/test/queries/clientpositive/union.q (revision 723901) +++ ql/src/test/queries/clientpositive/union.q (working copy) @@ -13,4 +13,4 @@ ) unioninput INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*; -!../hadoopcore/bin/hadoop dfs -cat ../build/ql/test/data/warehouse/union.out/*; +dfs -cat ../build/ql/test/data/warehouse/union.out/*; Index: ql/src/test/queries/clientpositive/input3_limit.q =================================================================== --- ql/src/test/queries/clientpositive/input3_limit.q (revision 723901) +++ ql/src/test/queries/clientpositive/input3_limit.q (working copy) @@ -12,8 +12,7 @@ INSERT OVERWRITE TABLE T2 SELECT a.key, a.value from T1 a LIMIT 20; -SELECT * FROM T2; +SELECT * FROM (SELECT * FROM T2 DISTRIBUTE BY key SORT BY key, value) T; - DROP TABLE T1; DROP TABLE T2; Index: ql/src/test/queries/clientpositive/mapreduce2.q =================================================================== --- ql/src/test/queries/clientpositive/mapreduce2.q (revision 723901) +++ ql/src/test/queries/clientpositive/mapreduce2.q (working copy) @@ -14,4 +14,4 @@ USING '/bin/cat' AS (tkey, ten, one, tvalue) DISTRIBUTE BY tvalue, tkey; -SELECT dest1.* FROM dest1; +SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T; Index: ql/src/test/queries/clientpositive/input13.q =================================================================== --- ql/src/test/queries/clientpositive/input13.q (revision 723901) +++ ql/src/test/queries/clientpositive/input13.q (working copy) @@ -18,4 +18,4 @@ SELECT dest1.* FROM dest1; SELECT dest2.* FROM dest2; SELECT dest3.* FROM dest3; -!../hadoopcore/bin/hadoop dfs -cat ../build/ql/test/data/warehouse/dest4.out/*; +dfs -cat ../build/ql/test/data/warehouse/dest4.out/*; Index: ql/src/test/queries/clientpositive/subq.q =================================================================== --- ql/src/test/queries/clientpositive/subq.q (revision 723901) +++ ql/src/test/queries/clientpositive/subq.q (working copy) @@ -9,5 +9,5 @@ ) unioninput INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*; -!../hadoopcore/bin/hadoop dfs -cat ../build/ql/test/data/warehouse/union.out/*; +dfs -cat ../build/ql/test/data/warehouse/union.out/*; Index: ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (revision 723901) +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (working copy) @@ -219,6 +219,30 @@ return result.toArray(new HiveInputSplit[result.size()]); } +//[exclude_0_19] + public void validateInput(JobConf job) throws IOException { + + init(job); + + Path[] dirs = FileInputFormat.getInputPaths(job); + if (dirs.length == 0) { + throw new IOException("No input paths specified in job"); + } + JobConf newjob = new JobConf(job); + + // for each dir, get the InputFormat, and do validateInput. 
+    for(Path dir: dirs) {
+      tableDesc table = getTableDescFromPath(dir);
+      // create a new InputFormat instance if this is the first time to see this class
+      InputFormat inputFormat = getInputFormatFromCache(table.getInputFileFormatClass());
+
+      FileInputFormat.setInputPaths(newjob, dir);
+      newjob.setInputFormat(inputFormat.getClass());
+      inputFormat.validateInput(newjob);
+    }
+  }
+//[endexclude_0_19]
+
   private tableDesc getTableDescFromPath(Path dir) throws IOException {
     partitionDesc partDesc = pathToPartitionInfo.get(dir.toString());
Index: ql/build.xml
===================================================================
--- ql/build.xml	(revision 723901)
+++ ql/build.xml	(working copy)
[XML hunk body lost in extraction -- the element content was stripped, leaving only the +/- markers; hunk header retained below]
@@ -94,12 +94,36 @@
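
[Editor's note -- not part of the patch: the //[exclude_0_19] ... //[endexclude_0_19]
markers around the new HiveInputFormat.validateInput suggest the build strips this
method when compiling against Hadoop 0.19, where validateInput was dropped from the
InputFormat API; the ql/build.xml hunk that presumably wires this up did not survive
extraction. On the 0.17/0.18 API the method is exercised at job-submission time.
A hedged usage sketch -- the input path and class name ValidateInputSketch are
illustrative; only HiveInputFormat.validateInput(JobConf) comes from the patch:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.HiveInputFormat;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class ValidateInputSketch {
      public static void main(String[] args) throws IOException {
        // Submission-side check: HiveInputFormat fans validateInput out to the
        // InputFormat of each input directory's table, so a bad path fails fast.
        JobConf job = new JobConf();
        job.setInputFormat(HiveInputFormat.class);
        FileInputFormat.setInputPaths(job, new Path("/user/hive/warehouse/t1")); // illustrative
        new HiveInputFormat().validateInput(job); // throws IOException if a directory is invalid
      }
    }
]
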