diff --git a/bin/setup-hadoop.bat b/bin/setup-hadoop.bat
index c4c73b3..a11ef8c 100644
--- a/bin/setup-hadoop.bat
+++ b/bin/setup-hadoop.bat
@@ -23,6 +23,6 @@ if "%OS%" == "Windows_NT" setlocal
-set MAIN_CLASS=org.apache.ignite.hadoop.GridHadoopSetup
+set MAIN_CLASS=org.apache.ignite.internal.processors.hadoop.HadoopSetup
 
 call "%~dp0\ignite.bat" %*
diff --git a/bin/setup-hadoop.sh b/bin/setup-hadoop.sh
index 8969dfa..d66353f 100755
--- a/bin/setup-hadoop.sh
+++ b/bin/setup-hadoop.sh
@@ -54,7 +54,7 @@ setIgniteHome
 #
 # Set utility environment.
 #
-export MAIN_CLASS=org.apache.ignite.internal.processors.hadoop.GridHadoopSetup
+export MAIN_CLASS=org.apache.ignite.internal.processors.hadoop.HadoopSetup
 
 #
 # Start utility.
diff --git a/config/hadoop/default-config.xml b/config/hadoop/default-config.xml
index a264749..65a281e 100644
--- a/config/hadoop/default-config.xml
+++ b/config/hadoop/default-config.xml
@@ -52,7 +52,7 @@
-
+
@@ -94,7 +94,7 @@ Apache Hadoop Accelerator configuration. -->
-
+
@@ -112,9 +112,9 @@
-
+
-
+
@@ -135,7 +135,7 @@
-
+
diff --git a/docs/core-site.ignite.xml b/docs/core-site.ignite.xml
index ed11a79..8b8e634 100644
--- a/docs/core-site.ignite.xml
+++ b/docs/core-site.ignite.xml
@@ -48,7 +48,7 @@
 -->
     fs.igfs.impl
-    org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem
+    org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem
 
     fs.AbstractFileSystem.igfs.impl
-    org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem
+    org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem
diff --git a/examples/config/filesystem/core-site.xml b/examples/config/filesystem/core-site.xml
index 7c6cfaa..b6f0291 100644
--- a/examples/config/filesystem/core-site.xml
+++ b/examples/config/filesystem/core-site.xml
@@ -31,12 +31,12 @@
     fs.igfs.impl
-    org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem
+    org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem
 
     fs.AbstractFileSystem.igfs.impl
-    org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem
+    org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem
diff --git a/examples/config/filesystem/example-igfs.xml b/examples/config/filesystem/example-igfs.xml
index 30cf51e..d8ccd34 100644
--- a/examples/config/filesystem/example-igfs.xml
+++ b/examples/config/filesystem/example-igfs.xml
@@ -63,9 +63,9 @@
-
+
-
+
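Editorial note, not part of the patch: the XML bodies of the config hunks above were stripped during extraction, so only the diff markers and surviving text nodes remain. The surviving values show the intent, which is to point Hadoop at the renamed adapter classes. A minimal Java sketch of the same wiring, assuming a default IGFS named "igfs" on localhost (the igfs://igfs@localhost authority is an assumption for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class IgfsHadoopWiringSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // The renamed v1 (FileSystem) and v2 (AbstractFileSystem) adapters from this patch.
        conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
        conf.set("fs.AbstractFileSystem.igfs.impl", "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

        // Obtain the IGFS-backed Hadoop file system and probe the root directory.
        FileSystem fs = FileSystem.get(new URI("igfs://igfs@localhost"), conf);

        System.out.println(fs.exists(new Path("/")));
    }
}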
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java
index 56f3fa4..8bfd38d 100644
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsExample.java
@@ -25,7 +25,7 @@ import java.io.*;
 import java.util.*;
 
 /**
- * Example that shows usage of {@link org.apache.ignite.IgniteFs} API. It starts a node with {@code IgniteFs}
+ * Example that shows usage of {@link org.apache.ignite.IgniteFileSystem} API. It starts a node with {@code IgniteFs}
  * configured and performs several file system operations (create, write, append, read and delete
  * files, create, list and delete directories).
  *
@@ -50,7 +50,7 @@ public final class IgfsExample {
         try {
             // Get an instance of Ignite File System.
-            IgniteFs fs = ignite.fileSystem("igfs");
+            IgniteFileSystem fs = ignite.fileSystem("igfs");
 
             // Working directory path.
             IgfsPath workDir = new IgfsPath("/examples/fs");
@@ -107,7 +107,7 @@ public final class IgfsExample {
      * @param path File or directory path.
      * @throws IgniteException In case of error.
      */
-    private static void delete(IgniteFs fs, IgfsPath path) throws IgniteException {
+    private static void delete(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
         assert fs != null;
         assert path != null;
@@ -139,7 +139,7 @@ public final class IgfsExample {
      * @param path Directory path.
      * @throws IgniteException In case of error.
      */
-    private static void mkdirs(IgniteFs fs, IgfsPath path) throws IgniteException {
+    private static void mkdirs(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
         assert fs != null;
         assert path != null;
@@ -166,7 +166,7 @@ public final class IgfsExample {
      * @throws IgniteException If file can't be created.
      * @throws IOException If data can't be written.
      */
-    private static void create(IgniteFs fs, IgfsPath path, @Nullable byte[] data)
+    private static void create(IgniteFileSystem fs, IgfsPath path, @Nullable byte[] data)
         throws IgniteException, IOException {
         assert fs != null;
         assert path != null;
@@ -195,7 +195,7 @@ public final class IgfsExample {
      * @throws IgniteException If file can't be created.
      * @throws IOException If data can't be written.
      */
-    private static void append(IgniteFs fs, IgfsPath path, byte[] data) throws IgniteException, IOException {
+    private static void append(IgniteFileSystem fs, IgfsPath path, byte[] data) throws IgniteException, IOException {
        assert fs != null;
        assert path != null;
        assert data != null;
@@ -220,7 +220,7 @@ public final class IgfsExample {
      * @throws IgniteException If file can't be opened.
      * @throws IOException If data can't be read.
      */
-    private static void read(IgniteFs fs, IgfsPath path) throws IgniteException, IOException {
+    private static void read(IgniteFileSystem fs, IgfsPath path) throws IgniteException, IOException {
        assert fs != null;
        assert path != null;
        assert fs.info(path).isFile();
@@ -242,7 +242,7 @@ public final class IgfsExample {
      * @param path Directory path.
      * @throws IgniteException In case of error.
      */
-    private static void list(IgniteFs fs, IgfsPath path) throws IgniteException {
+    private static void list(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
        assert fs != null;
        assert path != null;
        assert fs.info(path).isDirectory();
@@ -271,7 +271,7 @@ public final class IgfsExample {
      * @param path File or directory path.
      * @throws IgniteException In case of error.
      */
-    private static void printInfo(IgniteFs fs, IgfsPath path) throws IgniteException {
+    private static void printInfo(IgniteFileSystem fs, IgfsPath path) throws IgniteException {
        System.out.println();
        System.out.println("Information for " + path + ": " + fs.info(path));
    }
diff --git a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java
index 22a66a3..ed0abe4 100644
--- a/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/igfs/IgfsMapReduceExample.java
@@ -61,7 +61,7 @@ public class IgfsMapReduceExample {
         String regexStr = args[1];
 
         // Get an instance of Ignite File System.
-        IgniteFs fs = ignite.fileSystem("igfs");
+        IgniteFileSystem fs = ignite.fileSystem("igfs");
 
         // Working directory path.
         IgfsPath workDir = new IgfsPath("/examples/fs");
@@ -94,7 +94,7 @@ public class IgfsMapReduceExample {
      * @param file File to write.
      * @throws Exception In case of exception.
      */
-    private static void writeFile(IgniteFs fs, IgfsPath fsPath, File file) throws Exception {
+    private static void writeFile(IgniteFileSystem fs, IgfsPath fsPath, File file) throws Exception {
         System.out.println();
         System.out.println("Copying file to IGFS: " + file);
@@ -173,7 +173,7 @@ public class IgfsMapReduceExample {
     }
 
     /** {@inheritDoc} */
-    @Override public Object execute(IgniteFs igfs, IgfsRangeInputStream in) throws IgniteException, IOException {
+    @Override public Object execute(IgniteFileSystem igfs, IgfsRangeInputStream in) throws IgniteException, IOException {
         Collection res = null;
 
         long start = in.startOffset();
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java
index 8851d8f..31b827e 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignite.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java
@@ -43,7 +43,7 @@ import java.util.concurrent.*;
 * <ul>
 * <li>{@link org.apache.ignite.cache.GridCache} - functionality for in-memory distributed cache.</li>
 * <li>{@link IgniteDataLoader} - functionality for loading data large amounts of data into cache.</li>
-* <li>{@link IgniteFs} - functionality for distributed Hadoop-compliant in-memory file system and map-reduce.</li>
+* <li>{@link IgniteFileSystem} - functionality for distributed Hadoop-compliant in-memory file system and map-reduce.</li>
 * <li>{@link IgniteStreamer} - functionality for streaming events workflow with queries and indexes into rolling windows.</li>
 * <li>{@link IgniteScheduler} - functionality for scheduling jobs using UNIX Cron syntax.</li>
 * <li>{@link IgniteCompute} - functionality for executing tasks and closures on all grid nodes (inherited form {@link ClusterGroup}).</li>
@@ -213,7 +213,7 @@ public interface Ignite extends AutoCloseable {
     public IgniteDataLoader dataLoader(@Nullable String cacheName);
 
     /**
-     * Gets an instance of IGFS - Ignite In-Memory File System, if one is not
+     * Gets an instance of IGFS (Ignite In-Memory File System). If one is not
      * configured then {@link IllegalArgumentException} will be thrown.
      * <p>
      * IGFS is fully compliant with Hadoop {@code FileSystem} APIs and can
@@ -222,15 +222,16 @@ public interface Ignite extends AutoCloseable {
      *
      * @param name IGFS name.
      * @return IGFS instance.
+     * @throws IllegalArgumentException If IGFS with such name is not configured.
      */
-    public IgniteFs fileSystem(String name);
+    public IgniteFileSystem fileSystem(String name);
 
     /**
-     * Gets all instances of the grid file systems.
+     * Gets all instances of IGFS (Ignite In-Memory File System).
      *
-     * @return Collection of grid file systems instances.
+     * @return Collection of IGFS instances.
      */
-    public Collection<IgniteFs> fileSystems();
+    public Collection<IgniteFileSystem> fileSystems();
 
     /**
      * Gets an instance of streamer by name, if one does not exist then
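For reference, a hedged usage sketch of the changed lookup contract (the config path and the "igfs" name are taken from the examples above): fileSystem(String) now returns IgniteFileSystem and throws IllegalArgumentException for an unknown name rather than returning null.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;

public class FileSystemLookupSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            System.out.println("IGFS name: " + fs.name());

            // Unknown names are rejected instead of yielding null.
            try {
                ignite.fileSystem("no-such-igfs");
            }
            catch (IllegalArgumentException e) {
                System.out.println("Not configured: " + e.getMessage());
            }
        }
    }
}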
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java b/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
similarity index 70%
rename from modules/core/src/main/java/org/apache/ignite/IgniteFs.java
rename to modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
index dc11973..d221ae2 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteFs.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteFileSystem.java
@@ -51,18 +51,15 @@
 * <p>
 * <h1 class="header">Integration With Hadoop</h1>
 * <p>
 * In addition to direct file system API, {@code IGFS} can be integrated with {@code Hadoop} by
 * plugging in as {@code Hadoop FileSystem}. Refer to
-* {@code org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem} or
-* {@code org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem} for more information.
+* {@code org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem} or
+* {@code org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem} for more information.
 * <p>
 * NOTE: integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
 */
-public interface IgniteFs extends Igfs, IgniteAsyncSupport {
+public interface IgniteFileSystem extends IgniteAsyncSupport {
     /** IGFS scheme name. */
     public static final String IGFS_SCHEME = "igfs";
 
-    /** File property: prefer writes to local node. */
-    public static final String PROP_PREFER_LOCAL_WRITES = "locWrite";
-
     /**
      * Gets IGFS name.
      *
@@ -75,7 +72,7 @@
      *
      * @return IGFS configuration.
      */
-    public IgfsConfiguration configuration();
+    public FileSystemConfiguration configuration();
 
     /**
      * Gets summary (total number of files, total number of directories and total length)
@@ -107,7 +104,7 @@
      * @throws IgniteException In case of error.
      * @throws org.apache.ignite.igfs.IgfsFileNotFoundException If path doesn't exist.
      */
-    @Override public IgfsInputStream open(IgfsPath path, int bufSize) throws IgniteException;
+    public IgfsInputStream open(IgfsPath path, int bufSize) throws IgniteException;
 
     /**
      * Opens a file for reading.
@@ -129,7 +126,7 @@
      * @return File output stream to write data to.
      * @throws IgniteException In case of error.
      */
-    @Override public IgfsOutputStream create(IgfsPath path, boolean overwrite) throws IgniteException;
+    public IgfsOutputStream create(IgfsPath path, boolean overwrite) throws IgniteException;
 
     /**
      * Creates a file and opens it for writing.
@@ -143,7 +140,7 @@
      * @return File output stream to write data to.
      * @throws IgniteException In case of error.
      */
-    @Override public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
+    public IgfsOutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication,
         long blockSize, @Nullable Map props) throws IgniteException;
 
     /**
@@ -186,8 +183,8 @@
      * @throws IgniteException In case of error.
      * @throws org.apache.ignite.igfs.IgfsFileNotFoundException If path doesn't exist and create flag is {@code false}.
      */
-    @Override public IgfsOutputStream append(IgfsPath path, int bufSize, boolean create,
-        @Nullable Map props) throws IgniteException;
+    public IgfsOutputStream append(IgfsPath path, int bufSize, boolean create, @Nullable Map props)
+        throws IgniteException;
 
     /**
      * Sets last access time and last modification time for a given path. If argument is {@code null},
@@ -284,7 +281,7 @@
     /**
      * Executes IGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *
@@ -322,7 +319,7 @@
     /**
      * Executes IGFS task with overridden maximum range length (see
-     * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information).
+     * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
      * <p>
      * Supports asynchronous execution (see {@link IgniteAsyncSupport}).
      *
@@ -341,6 +338,120 @@
         @Nullable IgfsRecordResolver rslvr, Collection paths, boolean skipNonExistentFiles,
         long maxRangeLen, @Nullable T arg) throws IgniteException;
 
+    /**
+     * Checks if the specified path exists in the file system.
+     *
+     * @param path Path to check for existence in the file system.
+     * @return {@code True} if such file exists, otherwise - {@code false}.
+     * @throws IgniteException In case of error.
+     */
+    public boolean exists(IgfsPath path);
+
+    /**
+     * Updates file information for the specified path. Existent properties, not listed in the passed collection,
+     * will not be affected. Other properties will be added or overwritten. Passed properties with {@code null} values
+     * will be removed from the stored properties or ignored if they don't exist in the file info.
+     * <p>
+     * When working in {@code DUAL_SYNC} or {@code DUAL_ASYNC} modes only the following properties will be propagated
+     * to the secondary file system:
+     * <ul>
+     * <li>{@code usrName} - file owner name;</li>
+     * <li>{@code grpName} - file owner group;</li>
+     * <li>{@code permission} - Unix-style string representing file permissions.</li>
+     * </ul>
+     *
+     * @param path File path to set properties for.
+     * @param props Properties to update.
+     * @return File information for specified path or {@code null} if such path does not exist.
+     * @throws IgniteException In case of error.
+     */
+    public IgfsFile update(IgfsPath path, Map<String, String> props) throws IgniteException;
+
+    /**
+     * Renames/moves a file.
+     * <p>
+     * You are free to rename/move data files as you wish, but directories can be only renamed.
+     * You cannot move the directory between different parent directories.
+     * <p>
+     * Examples:
+     * <ul>
+     *     <li>"/work/file.txt" => "/home/project/Presentation Scenario.txt"</li>
+     *     <li>"/work" => "/work-2012.bkp"</li>
+     *     <li>"/work" => "/backups/work" - such operation is restricted for directories.</li>
+     * </ul>
+     *
+     * @param src Source file path to rename.
+     * @param dest Destination file path. If destination path is a directory, then source file will be placed
+     *     into destination directory with original name.
+     * @throws IgniteException In case of error.
+     * @throws IgfsFileNotFoundException If source file doesn't exist.
+     */
+    public void rename(IgfsPath src, IgfsPath dest) throws IgniteException;
+
+    /**
+     * Deletes file.
+     *
+     * @param path File path to delete.
+     * @param recursive Delete non-empty directories recursively.
+     * @return {@code True} in case of success, {@code false} otherwise.
+     * @throws IgniteException In case of error.
+     */
+    public boolean delete(IgfsPath path, boolean recursive) throws IgniteException;
+
+    /**
+     * Creates directories under specified path.
+     *
+     * @param path Path of directories chain to create.
+     * @throws IgniteException In case of error.
+     */
+    public void mkdirs(IgfsPath path) throws IgniteException;
+
+    /**
+     * Creates directories under specified path with the specified properties.
+     *
+     * @param path Path of directories chain to create.
+     * @param props Metadata properties to set on created directories.
+     * @throws IgniteException In case of error.
+     */
+    public void mkdirs(IgfsPath path, @Nullable Map<String, String> props) throws IgniteException;
+
+    /**
+     * Lists file paths under the specified path.
+     *
+     * @param path Path to list files under.
+     * @return List of files under the specified path.
+     * @throws IgniteException In case of error.
+     * @throws IgfsFileNotFoundException If path doesn't exist.
+     */
+    public Collection<IgfsPath> listPaths(IgfsPath path) throws IgniteException;
+
+    /**
+     * Lists files under the specified path.
+     *
+     * @param path Path to list files under.
+     * @return List of files under the specified path.
+     * @throws IgniteException In case of error.
+     * @throws IgfsFileNotFoundException If path doesn't exist.
+     */
+    public Collection<IgfsFile> listFiles(IgfsPath path) throws IgniteException;
+
+    /**
+     * Gets file information for the specified path.
+     *
+     * @param path Path to get information for.
+     * @return File information for specified path or {@code null} if such path does not exist.
+     * @throws IgniteException In case of error.
+     */
+    public IgfsFile info(IgfsPath path) throws IgniteException;
+
+    /**
+     * Gets used space in bytes.
+     *
+     * @return Used space in bytes.
+     * @throws IgniteException In case of error.
+     */
+    public long usedSpaceSize() throws IgniteException;
+
     /** {@inheritDoc} */
-    @Override public IgniteFs withAsync();
+    @Override public IgniteFileSystem withAsync();
 }
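The methods added above used to live on the removed Igfs super-interface and are now declared directly on IgniteFileSystem. A minimal sketch exercising a few of them; paths and the property value are illustrative only, and the config path is reused from the examples:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

import java.util.Collections;

public class FileSystemOpsSketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath dir = new IgfsPath("/examples/fs");
            IgfsPath file = new IgfsPath(dir, "file.txt");

            // Create the directory chain if it is not there yet.
            if (!fs.exists(dir))
                fs.mkdirs(dir);

            // Write a small file, overwriting any previous content.
            try (IgfsOutputStream out = fs.create(file, true)) {
                out.write("hello".getBytes());
            }

            // Only usrName/grpName/permission propagate to a secondary FS in DUAL modes.
            fs.update(file, Collections.singletonMap("usrName", "ignite"));

            // Files may be moved freely; directories can only be renamed in place.
            fs.rename(file, new IgfsPath(dir, "file-renamed.txt"));

            System.out.println(fs.listFiles(dir));
        }
    }
}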
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
similarity index 96%
rename from modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
rename to modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
index 308471d..f679fc0 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgfsConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/FileSystemConfiguration.java
@@ -18,6 +18,7 @@
 package org.apache.ignite.configuration;
 
 import org.apache.ignite.igfs.*;
+import org.apache.ignite.igfs.secondary.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
 import org.jetbrains.annotations.*;
 
@@ -26,13 +27,10 @@ import java.util.concurrent.*;
 
 /**
  * {@code IGFS} configuration. More than one file system can be configured within grid.
- * {@code IGFS} configuration is provided via {@link org.apache.ignite.configuration.IgniteConfiguration#getIgfsConfiguration()}
+ * {@code IGFS} configuration is provided via {@link IgniteConfiguration#getFileSystemConfiguration()}
  * method.
- * <p>
- * Refer to {@code config/hadoop/default-config.xml} or {@code config/hadoop/default-config-client.xml}
- * configuration files under Ignite installation to see sample {@code IGFS} configuration.
  */
-public class IgfsConfiguration {
+public class FileSystemConfiguration {
     /** Default file system user name. */
     public static final String DFLT_USER_NAME = System.getProperty("user.name", "anonymous");
@@ -121,7 +119,7 @@
     private int mgmtPort = DFLT_MGMT_PORT;
 
     /** Secondary file system */
-    private Igfs secondaryFs;
+    private IgfsSecondaryFileSystem secondaryFs;
 
     /** IGFS mode. */
     private IgfsMode dfltMode = DFLT_MODE;
@@ -165,7 +163,7 @@
     /**
      * Constructs default configuration.
      */
-    public IgfsConfiguration() {
+    public FileSystemConfiguration() {
         // No-op.
     }
 
@@ -174,7 +172,7 @@
      *
      * @param cfg Configuration to copy.
      */
-    public IgfsConfiguration(IgfsConfiguration cfg) {
+    public FileSystemConfiguration(FileSystemConfiguration cfg) {
         assert cfg != null;
 
         /*
@@ -313,8 +311,8 @@
      * Default is {@code 0} which means that pre-fetching will start right away.
      * <p>
      * <h1 class="header">Integration With Hadoop</h1>
      * <p>
      * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
+     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
+     * MapReduce task.
      * <p>
      * NOTE: Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
      *
@@ -333,8 +331,8 @@
      * Default is {@code 0} which means that pre-fetching will start right away.
      * <p>
      * <h1 class="header">Integration With Hadoop</h1>
      * <p>
      * This parameter can be also overridden for individual Hadoop MapReduce tasks by passing
-     * {@code org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH}
-     * configuration property directly to Hadoop MapReduce task.
+     * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} configuration property directly to Hadoop
+     * MapReduce task.
      * <p>
      * NOTE: Integration with Hadoop is available only in {@code In-Memory Accelerator For Hadoop} edition.
      *
@@ -517,7 +515,7 @@
      *
      * @return Secondary file system.
      */
-    public Igfs getSecondaryFileSystem() {
+    public IgfsSecondaryFileSystem getSecondaryFileSystem() {
         return secondaryFs;
     }
 
@@ -527,7 +525,7 @@
      *
      * @param fileSystem
      */
-    public void setSecondaryFileSystem(Igfs fileSystem) {
+    public void setSecondaryFileSystem(IgfsSecondaryFileSystem fileSystem) {
         secondaryFs = fileSystem;
     }
 
@@ -802,6 +800,6 @@
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(IgfsConfiguration.class, this);
+        return S.toString(FileSystemConfiguration.class, this);
     }
 }
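A sketch of the renamed configuration entry points, assuming the usual setters on FileSystemConfiguration (name, cache names, prefetch threshold); the cache definitions backing the meta/data caches are deliberately elided:

import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class FileSystemConfigSketch {
    public static void main(String[] args) {
        FileSystemConfiguration fsCfg = new FileSystemConfiguration();

        fsCfg.setName("igfs");
        fsCfg.setMetaCacheName("igfs-meta");
        fsCfg.setDataCacheName("igfs-data");

        // Start prefetching after 4 sequential block reads. Per-MapReduce-job
        // override (per the javadoc above): fs.igfs.[name].open.sequential_reads_before_prefetch.
        fsCfg.setSequentialReadsBeforePrefetch(4);

        IgniteConfiguration cfg = new IgniteConfiguration();

        // Renamed from setIgfsConfiguration(...) in this patch.
        cfg.setFileSystemConfiguration(fsCfg);
    }
}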
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
similarity index 90%
rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopConfiguration.java
rename to modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
index f66b95a..01ef8b0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/HadoopConfiguration.java
@@ -15,14 +15,15 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.internal.processors.hadoop;
+package org.apache.ignite.configuration;
 
+import org.apache.ignite.internal.processors.hadoop.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
 
 /**
- * Hadoop configuration.
+ * Ignite Hadoop Accelerator configuration.
 */
-public class GridHadoopConfiguration {
+public class HadoopConfiguration {
     /** Default finished job info time-to-live. */
     public static final long DFLT_FINISHED_JOB_INFO_TTL = 10_000;
@@ -36,7 +37,7 @@
     public static final int DFLT_MAX_TASK_QUEUE_SIZE = 1000;
 
     /** Map reduce planner. */
-    private GridHadoopMapReducePlanner planner;
+    private HadoopMapReducePlanner planner;
 
     /** */
     private boolean extExecution = DFLT_EXTERNAL_EXECUTION;
@@ -53,7 +54,7 @@
     /**
      * Default constructor.
      */
-    public GridHadoopConfiguration() {
+    public HadoopConfiguration() {
         // No-op.
     }
 
@@ -62,7 +63,7 @@
      *
      * @param cfg Configuration to copy.
      */
-    public GridHadoopConfiguration(GridHadoopConfiguration cfg) {
+    public HadoopConfiguration(HadoopConfiguration cfg) {
         // Preserve alphabetic order.
         extExecution = cfg.isExternalExecution();
         finishedJobInfoTtl = cfg.getFinishedJobInfoTtl();
@@ -151,7 +152,7 @@
      *
      * @return Map-reduce planner.
      */
-    public GridHadoopMapReducePlanner getMapReducePlanner() {
+    public HadoopMapReducePlanner getMapReducePlanner() {
         return planner;
     }
 
@@ -161,12 +162,12 @@
      *
      * @param planner Map-reduce planner.
      */
-    public void setMapReducePlanner(GridHadoopMapReducePlanner planner) {
+    public void setMapReducePlanner(HadoopMapReducePlanner planner) {
         this.planner = planner;
     }
 
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopConfiguration.class, this, super.toString());
+        return S.toString(HadoopConfiguration.class, this, super.toString());
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index cf88778..8bd2f83 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -20,7 +20,6 @@ package org.apache.ignite.configuration;
 import org.apache.ignite.*;
 import org.apache.ignite.events.*;
 import org.apache.ignite.internal.managers.eventstorage.*;
-import org.apache.ignite.internal.processors.hadoop.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
 import org.apache.ignite.lang.*;
 import org.apache.ignite.lifecycle.*;
@@ -345,7 +344,7 @@
     private Map<IgnitePredicate<? extends Event>, int[]> lsnrs;
 
     /** IGFS configuration. */
-    private IgfsConfiguration[] igfsCfg;
+    private FileSystemConfiguration[] igfsCfg;
 
     /** Streamer configuration. */
     private StreamerConfiguration[] streamerCfg;
@@ -354,7 +353,7 @@
     private ServiceConfiguration[] svcCfgs;
 
     /** Hadoop configuration. */
-    private GridHadoopConfiguration hadoopCfg;
+    private HadoopConfiguration hadoopCfg;
 
     /** Client access configuration. */
     private ConnectorConfiguration connectorCfg = new ConnectorConfiguration();
@@ -418,7 +417,7 @@
         ggHome = cfg.getIgniteHome();
         ggWork = cfg.getWorkDirectory();
         gridName = cfg.getGridName();
-        igfsCfg = cfg.getIgfsConfiguration();
+        igfsCfg = cfg.getFileSystemConfiguration();
         igfsPoolSize = cfg.getIgfsThreadPoolSize();
         hadoopCfg = cfg.getHadoopConfiguration();
         inclEvtTypes = cfg.getIncludeEventTypes();
@@ -1728,20 +1727,20 @@
     }
 
     /**
-     * Gets IGFS configurations.
+     * Gets IGFS (Ignite In-Memory File System) configurations.
      *
      * @return IGFS configurations.
      */
-    public IgfsConfiguration[] getIgfsConfiguration() {
+    public FileSystemConfiguration[] getFileSystemConfiguration() {
         return igfsCfg;
     }
 
     /**
-     * Sets IGFS configurations.
+     * Sets IGFS (Ignite In-Memory File System) configurations.
      *
      * @param igfsCfg IGFS configurations.
      */
-    public void setIgfsConfiguration(IgfsConfiguration... igfsCfg) {
+    public void setFileSystemConfiguration(FileSystemConfiguration... igfsCfg) {
         this.igfsCfg = igfsCfg;
     }
 
@@ -1768,7 +1767,7 @@
      *
      * @return Hadoop configuration.
      */
-    public GridHadoopConfiguration getHadoopConfiguration() {
+    public HadoopConfiguration getHadoopConfiguration() {
         return hadoopCfg;
     }
 
@@ -1777,7 +1776,7 @@
      *
      * @param hadoopCfg Hadoop configuration.
      */
-    public void setHadoopConfiguration(GridHadoopConfiguration hadoopCfg) {
+    public void setHadoopConfiguration(HadoopConfiguration hadoopCfg) {
         this.hadoopCfg = hadoopCfg;
     }
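Same pattern for the Hadoop module, now configured from the public org.apache.ignite.configuration package; a setFinishedJobInfoTtl mutator matching the getter copied above is assumed:

import org.apache.ignite.configuration.HadoopConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class HadoopConfigSketch {
    public static void main(String[] args) {
        HadoopConfiguration hadoopCfg = new HadoopConfiguration();

        // Keep finished-job info for 30 seconds instead of the 10-second default.
        hadoopCfg.setFinishedJobInfoTtl(30_000);

        IgniteConfiguration cfg = new IgniteConfiguration();

        // Renamed from GridHadoopConfiguration in this patch.
        cfg.setHadoopConfiguration(hadoopCfg);
    }
}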
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
index c4f28c6..afd0314 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsBlockLocation.java
@@ -22,7 +22,7 @@ import java.util.*;
 /**
  * {@code IGFS} file's data block location in the grid. It is used to determine
  * node affinity of a certain file block within the Grid by calling
- * {@link org.apache.ignite.IgniteFs#affinity(IgfsPath, long, long)} method.
+ * {@link org.apache.ignite.IgniteFileSystem#affinity(IgfsPath, long, long)} method.
 */
 public interface IgfsBlockLocation {
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
index 172dca1..550679a 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsFile.java
@@ -77,7 +77,7 @@
     /**
      * Gets file last access time. File last access time is not updated automatically due to
      * performance considerations and can be updated on demand with
-     * {@link org.apache.ignite.IgniteFs#setTimes(IgfsPath, long, long)} method.
+     * {@link org.apache.ignite.IgniteFileSystem#setTimes(IgfsPath, long, long)} method.
      * <p>
      * By default last access time equals file creation time.
      *
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
index 308dbcb..c2ddbb0 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsInputStream.java
@@ -17,13 +17,15 @@
 
 package org.apache.ignite.igfs;
 
+import org.apache.ignite.igfs.secondary.*;
+
 import java.io.*;
 
 /**
  * {@code IGFS} input stream to read data from the file system.
  * It provides several additional methods for asynchronous access.
 */
-public abstract class IgfsInputStream extends InputStream implements IgfsReader {
+public abstract class IgfsInputStream extends InputStream {
     /**
      * Gets file length during file open.
      *
@@ -76,5 +78,5 @@
      * @return Total number of bytes read into the buffer, or -1 if there is no more data (EOF).
      * @throws IOException In case of IO exception.
      */
-    @Override public abstract int read(long pos, byte[] buf, int off, int len) throws IOException;
+    public abstract int read(long pos, byte[] buf, int off, int len) throws IOException;
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
index afdae1a..50b5435 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java
@@ -33,7 +33,7 @@
     /**
      * Gets maximum amount of data that can be stored on local node. This metrics is either
-     * equal to {@link org.apache.ignite.configuration.IgfsConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
+     * equal to {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaxSpaceSize()}, or, if it is {@code 0}, equal to
      * {@code 80%} of maximum heap size allocated for JVM.
      *
      * @return Maximum IGFS local space size.
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
index 3c440ab..2c9fcdd 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMode.java
@@ -24,7 +24,7 @@ import org.jetbrains.annotations.*;
  * Secondary Hadoop file system is provided for pass-through, write-through, and
  * read-through purposes.
  * <p>
- * This mode is configured via {@link org.apache.ignite.configuration.IgfsConfiguration#getDefaultMode()}
+ * This mode is configured via {@link org.apache.ignite.configuration.FileSystemConfiguration#getDefaultMode()}
  * configuration property.
 */
 public enum IgfsMode {
@@ -39,7 +39,7 @@
      * through to secondary Hadoop file system. If this mode is enabled, then
      * secondary Hadoop file system must be configured.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
      */
     PROXY,
 
@@ -50,7 +50,7 @@
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
      */
     DUAL_SYNC,
 
@@ -61,7 +61,7 @@
      * If secondary Hadoop file system is not configured, then this mode behaves like
      * {@link #PRIMARY} mode.
      *
-     * @see org.apache.ignite.configuration.IgfsConfiguration#getSecondaryFileSystem()
+     * @see org.apache.ignite.configuration.FileSystemConfiguration#getSecondaryFileSystem()
     */
     DUAL_ASYNC;
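IgfsBlockLocation's javadoc now points at IgniteFileSystem#affinity(IgfsPath, long, long). A small usage sketch; the file path is assumed to exist (for instance, one created by the earlier sketch):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsBlockLocation;
import org.apache.ignite.igfs.IgfsPath;

import java.util.Collection;

public class BlockLocationSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath path = new IgfsPath("/examples/fs/file-renamed.txt");

            // Resolve which nodes host each block of the whole file.
            Collection<IgfsBlockLocation> blocks = fs.affinity(path, 0, fs.info(path).length());

            for (IgfsBlockLocation block : blocks)
                System.out.println(block.start() + "+" + block.length() + " -> " + block.nodeIds());
        }
    }
}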
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java
index bf2636f..d7fd866 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsInputStreamJobAdapter.java
@@ -32,7 +32,7 @@ import java.io.*;
 */
 public abstract class IgfsInputStreamJobAdapter extends IgfsJobAdapter {
     /** {@inheritDoc} */
-    @Override public final Object execute(IgniteFs igfs, IgfsFileRange range, IgfsInputStream in)
+    @Override public final Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in)
         throws IgniteException, IOException {
         in.seek(range.start());
 
@@ -48,5 +48,5 @@
      * @throws IgniteException If execution failed.
      * @throws IOException If IO exception encountered while working with stream.
      */
-    public abstract Object execute(IgniteFs igfs, IgfsRangeInputStream in) throws IgniteException, IOException;
+    public abstract Object execute(IgniteFileSystem igfs, IgfsRangeInputStream in) throws IgniteException, IOException;
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java
index d12fc79..0fdc0f2 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsJob.java
@@ -26,7 +26,7 @@ import java.io.*;
 * Defines executable unit for {@link IgfsTask}. Before this job is executed, it is assigned one of the
 * ranges provided by the {@link IgfsRecordResolver} passed to one of the {@code IgniteFs.execute(...)} methods.
 * <p>
- * {@link #execute(org.apache.ignite.IgniteFs, IgfsFileRange, org.apache.ignite.igfs.IgfsInputStream)} method is given {@link IgfsFileRange} this
+ * {@link #execute(org.apache.ignite.IgniteFileSystem, IgfsFileRange, org.apache.ignite.igfs.IgfsInputStream)} method is given {@link IgfsFileRange} this
 * job is expected to operate on, and already opened {@link org.apache.ignite.igfs.IgfsInputStream} for the file this range belongs to.
 * <p>
 * Note that provided input stream has position already adjusted to range start. However, it will not
@@ -52,7 +52,7 @@
      * @throws IgniteException If execution failed.
      * @throws IOException If file system operation resulted in IO exception.
      */
-    public Object execute(IgniteFs igfs, IgfsFileRange range, IgfsInputStream in) throws IgniteException,
+    public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in) throws IgniteException,
         IOException;
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java
index 52afeda..cc1d73f 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsRecordResolver.java
@@ -51,6 +51,6 @@
      * @throws IgniteException If resolve failed.
      * @throws IOException If resolve failed.
      */
-    @Nullable public IgfsFileRange resolveRecords(IgniteFs fs, IgfsInputStream stream,
+    @Nullable public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
         IgfsFileRange suggestedRecord) throws IgniteException, IOException;
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
index e42b012..9936140 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTask.java
@@ -37,7 +37,7 @@ import java.util.*;
 * <p>
 * Each file participating in IGFS task is split into {@link IgfsFileRange}s first. Normally range is a number of
 * consequent bytes located on a single node (see {@code IgfssGroupDataBlocksKeyMapper}). In case maximum range size
- * is provided (either through {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} or {@code IgniteFs.execute()}
+ * is provided (either through {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} or {@code IgniteFs.execute()}
 * argument), then ranges could be further divided into smaller chunks.
 * <p>
 * Once file is split into ranges, each range is passed to {@code IgfsTask.createJob()} method in order to create a
@@ -88,7 +88,7 @@ public abstract class IgfsTask extends ComputeTaskAdapter,
         assert ignite != null;
         assert args != null;
 
-        IgniteFs fs = ignite.fileSystem(args.igfsName());
+        IgniteFileSystem fs = ignite.fileSystem(args.igfsName());
 
         IgfsProcessorAdapter igfsProc = ((IgniteKernal) ignite).context().igfs();
 
         Map<ComputeJob, ClusterNode> splitMap = new HashMap<>();
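A hedged end-to-end sketch of the map-reduce API after the rename: IgfsJob.execute(...) now receives IgniteFileSystem. The task below counts bytes per assigned range; passing a null record resolver processes the suggested ranges as-is, and the target file is assumed to exist.

import org.apache.ignite.*;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.mapreduce.*;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

public class ByteCountTaskSketch {
    /** Job that reports how many bytes fall into its assigned range. */
    static class ByteCountJob extends IgfsJobAdapter {
        @Override public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in)
            throws IgniteException, IOException {
            return range.length();
        }
    }

    /** Task that spawns one ByteCountJob per file range and sums the partial counts. */
    static class ByteCountTask extends IgfsTask<Object, Long> {
        @Override public IgfsJob createJob(IgfsPath path, IgfsFileRange range, IgfsTaskArgs<Object> args) {
            return new ByteCountJob();
        }

        @Override public Long reduce(List<ComputeJobResult> results) {
            long total = 0;

            for (ComputeJobResult res : results)
                total += res.<Long>getData();

            return total;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start("examples/config/filesystem/example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            Long total = fs.execute(new ByteCountTask(), null,
                Collections.singleton(new IgfsPath("/examples/fs/file.txt")), null);

            System.out.println("Total bytes: " + total);
        }
    }
}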
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
index 7db26ec..5ef5352 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/IgfsTaskArgs.java
@@ -33,7 +33,7 @@ import java.util.*;
 * <ul>
 *     <li>{@link IgfsRecordResolver} for that task</li>
 *     <li>Flag indicating whether to skip non-existent file paths or throw an exception</li>
 *     <li>User-defined task argument</li>
-*     <li>Maximum file range length for that task (see {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()})</li>
+*     <li>Maximum file range length for that task (see {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()})</li>
 * </ul>
 */
 public interface IgfsTaskArgs {
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java
index c1c15a5..c9ed821 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsByteDelimiterRecordResolver.java
@@ -20,7 +20,6 @@ package org.apache.ignite.igfs.mapreduce.records;
 import org.apache.ignite.*;
 import org.apache.ignite.igfs.*;
 import org.apache.ignite.igfs.mapreduce.*;
-import org.apache.ignite.internal.util.*;
 import org.apache.ignite.internal.util.tostring.*;
 import org.apache.ignite.internal.util.typedef.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
@@ -78,7 +77,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public IgfsFileRange resolveRecords(IgniteFs fs, IgfsInputStream stream,
+    @Override public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
         IgfsFileRange suggestedRecord) throws IgniteException, IOException {
         long suggestedStart = suggestedRecord.start();
         long suggestedEnd = suggestedStart + suggestedRecord.length();
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java
index fe22627..bcc8f69 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/mapreduce/records/IgfsFixedLengthRecordResolver.java
@@ -52,7 +52,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public IgfsFileRange resolveRecords(IgniteFs fs, IgfsInputStream stream,
+    @Override public IgfsFileRange resolveRecords(IgniteFileSystem fs, IgfsInputStream stream,
         IgfsFileRange suggestedRecord) throws IgniteException, IOException {
         long suggestedEnd = suggestedRecord.start() + suggestedRecord.length();
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
similarity index 88%
rename from modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java
rename to modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
index 48b9b58..089a8e3 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/Igfs.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystem.java
@@ -15,38 +15,23 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.igfs;
+package org.apache.ignite.igfs.secondary;
 
 import org.apache.ignite.*;
+import org.apache.ignite.igfs.*;
 import org.jetbrains.annotations.*;
 
 import java.io.*;
 import java.util.*;
 
 /**
- * Common file system interface. It provides a typical generalized "view" of any file system:
- * <ul>
- *     <li>list directories or get information for a single path</li>
- *     <li>create/move/delete files or directories</li>
- *     <li>write/read data streams into/from files</li>
- * </ul>
- *
- * This is the minimum of functionality that is needed to work as secondary file system in dual modes of IGFS.
+ * Secondary file system interface.
 */
-public interface Igfs {
-    /** File property: user name. */
-    public static final String PROP_USER_NAME = "usrName";
-
-    /** File property: group name. */
-    public static final String PROP_GROUP_NAME = "grpName";
-
-    /** File property: permission. */
-    public static final String PROP_PERMISSION = "permission";
-
+public interface IgfsSecondaryFileSystem {
     /**
-     * Checks if the specified path exists in the file system.
+     * Checks if the specified path exists.
      *
-     * @param path Path to check for existence in the file system.
+     * @param path Path to check for existence.
      * @return {@code True} if such file exists, otherwise - {@code false}.
      * @throws IgniteException In case of error.
      */
@@ -149,7 +134,7 @@
      * @throws IgniteException In case of error.
      * @throws IgfsFileNotFoundException If path doesn't exist.
      */
-    public IgfsReader open(IgfsPath path, int bufSize) throws IgniteException;
+    public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) throws IgniteException;
 
     /**
      * Creates a file and opens it for writing.
diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsReader.java b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
similarity index 90%
rename from modules/core/src/main/java/org/apache/ignite/igfs/IgfsReader.java
rename to modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
index fff8ca3..3d36236 100644
--- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsReader.java
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/IgfsSecondaryFileSystemPositionedReadable.java
@@ -15,14 +15,14 @@
  * limitations under the License.
  */
 
-package org.apache.ignite.igfs;
+package org.apache.ignite.igfs.secondary;
 
 import java.io.*;
 
 /**
- * The simplest data input interface to read from secondary file system in dual modes.
+ * The simplest data input interface to read from secondary file system.
 */
-public interface IgfsReader extends Closeable {
+public interface IgfsSecondaryFileSystemPositionedReadable extends Closeable {
     /**
      * Read up to the specified number of bytes, from a given position within a file, and return the number of bytes
      * read.
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/package.html
similarity index 95%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html
rename to modules/core/src/main/java/org/apache/ignite/igfs/secondary/package.html
index 4b070d3..8bd668d 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/package.html
+++ b/modules/core/src/main/java/org/apache/ignite/igfs/secondary/package.html
@@ -19,6 +19,6 @@
-    Contains IGFS client and common classes.
+    Contains APIs for IGFS secondary file system.
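A sketch of plugging in a secondary file system through the relocated interface. IgniteHadoopIgfsSecondaryFileSystem (from the ignite-hadoop module) and the HDFS URI are assumptions for illustration; any IgfsSecondaryFileSystem implementation can be supplied.

import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
import org.apache.ignite.igfs.IgfsMode;

public class SecondaryFsSketch {
    public static void main(String[] args) throws Exception {
        FileSystemConfiguration fsCfg = new FileSystemConfiguration();

        fsCfg.setName("igfs");

        // DUAL_SYNC writes through to the secondary file system synchronously;
        // only usrName/grpName/permission properties propagate (see update() above).
        fsCfg.setDefaultMode(IgfsMode.DUAL_SYNC);

        // Back IGFS by an assumed local HDFS instance.
        fsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem("hdfs://localhost:9000"));
    }
}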
*/ - public IgniteHadoopProcessorAdapter hadoop(); + public HadoopProcessorAdapter hadoop(); /** * Gets utility cache pool. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 756c16a..e80df0b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -230,7 +230,7 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ @GridToStringExclude - private IgniteHadoopProcessorAdapter hadoopProc; + private HadoopProcessorAdapter hadoopProc; /** */ @GridToStringExclude @@ -456,8 +456,8 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable streamProc = (GridStreamProcessor)comp; else if (comp instanceof GridContinuousProcessor) contProc = (GridContinuousProcessor)comp; - else if (comp instanceof IgniteHadoopProcessorAdapter) - hadoopProc = (IgniteHadoopProcessorAdapter)comp; + else if (comp instanceof HadoopProcessorAdapter) + hadoopProc = (HadoopProcessorAdapter)comp; else if (comp instanceof GridPortableProcessor) portableProc = (GridPortableProcessor)comp; else if (comp instanceof IgnitePluginProcessor) @@ -680,7 +680,7 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable } /** {@inheritDoc} */ - @Override public IgniteHadoopProcessorAdapter hadoop() { + @Override public HadoopProcessorAdapter hadoop() { return hadoopProc; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java index d0e487a..0e5c1cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComponentType.java @@ -35,8 +35,8 @@ public enum IgniteComponentType { /** Hadoop. */ HADOOP( - "org.apache.ignite.internal.processors.hadoop.IgniteHadoopNoopProcessor", - "org.apache.ignite.internal.processors.hadoop.IgniteHadoopProcessor", + "org.apache.ignite.internal.processors.hadoop.HadoopNoopProcessor", + "org.apache.ignite.internal.processors.hadoop.HadoopProcessor", "ignite-hadoop" ), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEx.java index 3c35a08..3ede8d5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEx.java @@ -117,14 +117,14 @@ public interface IgniteEx extends Ignite { * @param name IGFS name. * @return IGFS. */ - @Nullable public IgniteFs igfsx(@Nullable String name); + @Nullable public IgniteFileSystem igfsx(@Nullable String name); /** * Get Hadoop facade. * * @return Hadoop. */ - public GridHadoop hadoop(); + public Hadoop hadoop(); /** {@inheritDoc} */ @Override IgniteClusterEx cluster(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index b9abdb5..76c070d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -704,7 +704,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { // Starts lifecycle aware components. 
U.startLifecycleAware(lifecycleAwares(cfg)); - addHelper(ctx, IGFS_HELPER.create(F.isEmpty(cfg.getIgfsConfiguration()))); + addHelper(ctx, IGFS_HELPER.create(F.isEmpty(cfg.getFileSystemConfiguration()))); startProcessor(ctx, new IgnitePluginProcessor(ctx, cfg), attrs); @@ -756,7 +756,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { startProcessor(ctx, new GridRestProcessor(ctx), attrs); startProcessor(ctx, new GridDataLoaderProcessor(ctx), attrs); startProcessor(ctx, new GridStreamProcessor(ctx), attrs); - startProcessor(ctx, (GridProcessor) IGFS.create(ctx, F.isEmpty(cfg.getIgfsConfiguration())), attrs); + startProcessor(ctx, (GridProcessor) IGFS.create(ctx, F.isEmpty(cfg.getFileSystemConfiguration())), attrs); startProcessor(ctx, new GridContinuousProcessor(ctx), attrs); startProcessor(ctx, (GridProcessor)(cfg.isPeerClassLoadingEnabled() ? IgniteComponentType.HADOOP.create(ctx, true): // No-op when peer class loading is enabled. @@ -2349,11 +2349,11 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { } /** {@inheritDoc} */ - @Override public IgniteFs fileSystem(String name) { + @Override public IgniteFileSystem fileSystem(String name) { guard(); try{ - IgniteFs fs = ctx.igfs().igfs(name); + IgniteFileSystem fs = ctx.igfs().igfs(name); if (fs == null) throw new IllegalArgumentException("IGFS is not configured: " + name); @@ -2366,7 +2366,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { } /** {@inheritDoc} */ - @Nullable @Override public IgniteFs igfsx(@Nullable String name) { + @Nullable @Override public IgniteFileSystem igfsx(@Nullable String name) { guard(); try { @@ -2378,7 +2378,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { } /** {@inheritDoc} */ - @Override public Collection fileSystems() { + @Override public Collection fileSystems() { guard(); try { @@ -2390,7 +2390,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { } /** {@inheritDoc} */ - @Override public GridHadoop hadoop() { + @Override public Hadoop hadoop() { guard(); try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index cb2efbf..2e8cfc1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -1606,15 +1606,15 @@ public class IgnitionEx { if (myCfg.getPeerClassLoadingLocalClassPathExclude() == null) myCfg.setPeerClassLoadingLocalClassPathExclude(EMPTY_STR_ARR); - IgfsConfiguration[] igfsCfgs = myCfg.getIgfsConfiguration(); + FileSystemConfiguration[] igfsCfgs = myCfg.getFileSystemConfiguration(); if (igfsCfgs != null) { - IgfsConfiguration[] clone = igfsCfgs.clone(); + FileSystemConfiguration[] clone = igfsCfgs.clone(); for (int i = 0; i < igfsCfgs.length; i++) - clone[i] = new IgfsConfiguration(igfsCfgs[i]); + clone[i] = new FileSystemConfiguration(igfsCfgs[i]); - myCfg.setIgfsConfiguration(clone); + myCfg.setFileSystemConfiguration(clone); } StreamerConfiguration[] streamerCfgs = myCfg.getStreamerConfiguration(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java index 12ea535..fe88012 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java @@ -236,10 +236,10 @@ public abstract class GridCacheAdapter implements GridCache, mxBean = new CacheMetricsMXBeanImpl(this); - IgfsConfiguration[] igfsCfgs = gridCfg.getIgfsConfiguration(); + FileSystemConfiguration[] igfsCfgs = gridCfg.getFileSystemConfiguration(); if (igfsCfgs != null) { - for (IgfsConfiguration igfsCfg : igfsCfgs) { + for (FileSystemConfiguration igfsCfg : igfsCfgs) { if (F.eq(ctx.name(), igfsCfg.getDataCacheName())) { if (!ctx.isNear()) { igfsDataCache = true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index 3b2ca47..72c2b49 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -550,10 +550,10 @@ public class GridCacheProcessor extends GridProcessorAdapter { maxPreloadOrder = validatePreloadOrder(ctx.config().getCacheConfiguration()); // Internal caches which should not be returned to user. - IgfsConfiguration[] igfsCfgs = ctx.grid().configuration().getIgfsConfiguration(); + FileSystemConfiguration[] igfsCfgs = ctx.grid().configuration().getFileSystemConfiguration(); if (igfsCfgs != null) { - for (IgfsConfiguration igfsCfg : igfsCfgs) { + for (FileSystemConfiguration igfsCfg : igfsCfgs) { sysCaches.add(igfsCfg.getMetaCacheName()); sysCaches.add(igfsCfg.getDataCacheName()); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 1e67907..b07c14e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -1594,10 +1594,10 @@ public class GridCacheUtils { * @return {@code True} in this is IGFS data or meta cache. */ public static boolean isIgfsCache(IgniteConfiguration cfg, @Nullable String cacheName) { - IgfsConfiguration[] igfsCfgs = cfg.getIgfsConfiguration(); + FileSystemConfiguration[] igfsCfgs = cfg.getFileSystemConfiguration(); if (igfsCfgs != null) { - for (IgfsConfiguration igfsCfg : igfsCfgs) { + for (FileSystemConfiguration igfsCfg : igfsCfgs) { // IGFS config probably has not been validated yet => possible NPE, so we check for null. 
if (igfsCfg != null && (F.eq(cacheName, igfsCfg.getDataCacheName()) || F.eq(cacheName, igfsCfg.getMetaCacheName()))) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoop.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/Hadoop.java similarity index 77% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoop.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/Hadoop.java index c262d48..9efc4a9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoop.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/Hadoop.java @@ -18,26 +18,28 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; import org.jetbrains.annotations.*; /** * Hadoop facade providing access to Ignite Hadoop features. */ -public interface GridHadoop { +public interface Hadoop { /** * Gets Hadoop module configuration. * * @return Hadoop module configuration. */ - public GridHadoopConfiguration configuration(); + public HadoopConfiguration configuration(); /** * Generate next job ID. * * @return Next job ID. */ - public GridHadoopJobId nextJobId(); + public HadoopJobId nextJobId(); /** * Submits job to job tracker. @@ -46,7 +48,7 @@ public interface GridHadoop { * @param jobInfo Job info to submit. * @return Execution future. */ - public IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo); + public IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo jobInfo); /** * Gets Hadoop job execution status. @@ -55,7 +57,7 @@ public interface GridHadoop { * @return Job execution status or {@code null} in case job with the given ID is not found. * @throws IgniteCheckedException If failed. */ - @Nullable public GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException; + @Nullable public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException; /** * Returns job counters. @@ -64,7 +66,7 @@ public interface GridHadoop { * @return Job counters object. * @throws IgniteCheckedException If failed. */ - public GridHadoopCounters counters(GridHadoopJobId jobId) throws IgniteCheckedException; + public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException; /** * Gets Hadoop finish future for particular job. @@ -73,7 +75,7 @@ public interface GridHadoop { * @return Job finish future or {@code null} in case job with the given ID is not found. * @throws IgniteCheckedException If failed. */ - @Nullable public IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException; + @Nullable public IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException; /** * Kills job. @@ -82,5 +84,5 @@ public interface GridHadoop { * @return {@code True} if job was killed. * @throws IgniteCheckedException If failed. 
*/ - public boolean kill(GridHadoopJobId jobId) throws IgniteCheckedException; + public boolean kill(HadoopJobId jobId) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java similarity index 90% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileBlock.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java index fae111a..223e572 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileBlock.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java @@ -27,7 +27,7 @@ import java.util.*; /** * Hadoop file block. */ -public class GridHadoopFileBlock extends GridHadoopInputSplit { +public class HadoopFileBlock extends HadoopInputSplit { /** */ private static final long serialVersionUID = 0L; @@ -46,7 +46,7 @@ public class GridHadoopFileBlock extends GridHadoopInputSplit { /** * Creates new file block. */ - public GridHadoopFileBlock() { + public HadoopFileBlock() { // No-op. } @@ -58,7 +58,7 @@ public class GridHadoopFileBlock extends GridHadoopInputSplit { * @param start Start position of the block in the file. * @param len Length of the block. */ - public GridHadoopFileBlock(String[] hosts, URI file, long start, long len) { + public HadoopFileBlock(String[] hosts, URI file, long start, long len) { A.notNull(hosts, "hosts", file, "file"); this.hosts = hosts; @@ -137,10 +137,10 @@ public class GridHadoopFileBlock extends GridHadoopInputSplit { if (this == o) return true; - if (!(o instanceof GridHadoopFileBlock)) + if (!(o instanceof HadoopFileBlock)) return false; - GridHadoopFileBlock that = (GridHadoopFileBlock)o; + HadoopFileBlock that = (HadoopFileBlock)o; return len == that.len && start == that.start && file.equals(that.file); } @@ -157,6 +157,6 @@ public class GridHadoopFileBlock extends GridHadoopInputSplit { /** {@inheritDoc} */ public String toString() { - return S.toString(GridHadoopFileBlock.class, this, "hosts", Arrays.toString(hosts)); + return S.toString(HadoopFileBlock.class, this, "hosts", Arrays.toString(hosts)); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopInputSplit.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopInputSplit.java similarity index 95% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopInputSplit.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopInputSplit.java index e68a6f5..0c94012 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopInputSplit.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopInputSplit.java @@ -22,7 +22,7 @@ import java.io.*; /** * Abstract fragment of an input data source. 
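
Taken together, the renamed facade is driven exactly like the old GridHadoop one. A hedged sketch, assuming a Hadoop instance named hadoop and a prepared HadoopJobInfo named jobInfo; the generic parameter, stripped in this rendering, is restored as IgniteInternalFuture<?>:

    HadoopJobId jobId = hadoop.nextJobId();

    IgniteInternalFuture<?> fut = hadoop.submit(jobId, jobInfo);

    // status() returns null for unknown IDs, so guard before acting.
    if (hadoop.status(jobId) != null)
        hadoop.kill(jobId);
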
*/ -public abstract class GridHadoopInputSplit implements Externalizable { +public abstract class HadoopInputSplit implements Externalizable { /** */ protected String[] hosts; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJob.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJob.java similarity index 85% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJob.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJob.java index f7ea105..65cb48d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJob.java @@ -24,27 +24,27 @@ import java.util.*; /** * Hadoop job. */ -public interface GridHadoopJob { +public interface HadoopJob { /** * Gets job ID. * * @return Job ID. */ - public GridHadoopJobId id(); + public HadoopJobId id(); /** * Gets job information. * * @return Job information. */ - public GridHadoopJobInfo info(); + public HadoopJobInfo info(); /** * Gets collection of input splits for this job. * * @return Input splits. */ - public Collection input() throws IgniteCheckedException; + public Collection input() throws IgniteCheckedException; /** * Returns context for task execution. @@ -53,7 +53,7 @@ public interface GridHadoopJob { * @return Task Context. * @throws IgniteCheckedException If failed. */ - public GridHadoopTaskContext getTaskContext(GridHadoopTaskInfo info) throws IgniteCheckedException; + public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException; /** * Does all the needed initialization for the job. Will be called on each node where tasks for this job must @@ -85,7 +85,7 @@ public interface GridHadoopJob { * @param info Task info. * @throws IgniteCheckedException If failed. */ - public void prepareTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException; + public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException; /** * Cleans up local environment of the task. @@ -93,7 +93,7 @@ public interface GridHadoopJob { * @param info Task info. * @throws IgniteCheckedException If failed. */ - public void cleanupTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException; + public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException; /** * Cleans up the job staging directory. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobId.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobId.java similarity index 92% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobId.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobId.java index ffc2057..b0593a8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobId.java @@ -26,7 +26,7 @@ import java.util.*; /** * Job ID. 
*/ -public class GridHadoopJobId implements GridCacheInternal, Externalizable { +public class HadoopJobId implements GridCacheInternal, Externalizable { /** */ private static final long serialVersionUID = 0L; @@ -39,7 +39,7 @@ public class GridHadoopJobId implements GridCacheInternal, Externalizable { /** * For {@link Externalizable}. */ - public GridHadoopJobId() { + public HadoopJobId() { // No-op. } @@ -47,7 +47,7 @@ public class GridHadoopJobId implements GridCacheInternal, Externalizable { * @param nodeId Node ID. * @param jobId Job ID. */ - public GridHadoopJobId(UUID nodeId, int jobId) { + public HadoopJobId(UUID nodeId, int jobId) { this.nodeId = nodeId; this.jobId = jobId; } @@ -80,7 +80,7 @@ public class GridHadoopJobId implements GridCacheInternal, Externalizable { if (o == null || getClass() != o.getClass()) return false; - GridHadoopJobId that = (GridHadoopJobId) o; + HadoopJobId that = (HadoopJobId) o; if (jobId != that.jobId) return false; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobInfo.java similarity index 85% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobInfo.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobInfo.java index 9a891f4..51faf5d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobInfo.java @@ -25,7 +25,7 @@ import java.io.*; /** * Compact job description. */ -public interface GridHadoopJobInfo extends Serializable { +public interface HadoopJobInfo extends Serializable { /** * Gets optional configuration property for the job. * @@ -43,7 +43,7 @@ public interface GridHadoopJobInfo extends Serializable { /** * Checks whether job has reducer. - * Actual number of reducers will be in {@link GridHadoopMapReducePlan#reducers()}. + * Actual number of reducers will be in {@link HadoopMapReducePlan#reducers()}. * * @return Number of reducer. */ @@ -51,7 +51,7 @@ public interface GridHadoopJobInfo extends Serializable { /** * Creates new job instance for the given ID. - * {@link GridHadoopJobInfo} is reusable for multiple jobs while {@link GridHadoopJob} is for one job execution. + * {@link HadoopJobInfo} is reusable for multiple jobs while {@link HadoopJob} is for one job execution. * This method will be called once for the same ID on one node, though it can be called on the same host * multiple times from different processes (in case of multiple nodes on the same host or external execution). * @@ -60,7 +60,7 @@ public interface GridHadoopJobInfo extends Serializable { * @return Job. * @throws IgniteCheckedException If failed. */ - GridHadoopJob createJob(GridHadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException; + HadoopJob createJob(HadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException; /** * @return Number of reducers configured for job. 
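
The division of labor between the two renamed types deserves a concrete illustration: HadoopJobInfo is the reusable, serializable description, while HadoopJob represents one execution. A sketch under the assumption that jobInfo and log are in scope:

    // One HadoopJob per job ID per process; the info object is shared across jobs.
    HadoopJob job = jobInfo.createJob(new HadoopJobId(UUID.randomUUID(), 1), log);

    // Input splits drive mapper placement (Collection<HadoopInputSplit> in the
    // source; the type parameter was lost in this rendering).
    Collection<HadoopInputSplit> splits = job.input();
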
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobPhase.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobPhase.java similarity index 97% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobPhase.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobPhase.java index cc122bb..8c932bb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobPhase.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobPhase.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.hadoop; /** * Job run phase. */ -public enum GridHadoopJobPhase { +public enum HadoopJobPhase { /** Job is running setup task. */ PHASE_SETUP, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobProperty.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobProperty.java similarity index 91% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobProperty.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobProperty.java index 0ece051..1a58624 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobProperty.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobProperty.java @@ -22,7 +22,7 @@ import org.jetbrains.annotations.*; /** * Enumeration of optional properties supported by Ignite for Apache Hadoop. */ -public enum GridHadoopJobProperty { +public enum HadoopJobProperty { /** * Initial size for hashmap which stores output of mapper and will be used as input of combiner. *
@@ -89,7 +89,7 @@ public enum GridHadoopJobProperty { /** * */ - GridHadoopJobProperty() { + HadoopJobProperty() { ptyName = "ignite." + name().toLowerCase().replace('_', '.'); } @@ -106,7 +106,7 @@ public enum GridHadoopJobProperty { * @param dflt Default value. * @return Property value. */ - public static String get(GridHadoopJobInfo jobInfo, GridHadoopJobProperty pty, @Nullable String dflt) { + public static String get(HadoopJobInfo jobInfo, HadoopJobProperty pty, @Nullable String dflt) { String res = jobInfo.property(pty.propertyName()); return res == null ? dflt : res; @@ -118,7 +118,7 @@ public enum GridHadoopJobProperty { * @param dflt Default value. * @return Property value. */ - public static int get(GridHadoopJobInfo jobInfo, GridHadoopJobProperty pty, int dflt) { + public static int get(HadoopJobInfo jobInfo, HadoopJobProperty pty, int dflt) { String res = jobInfo.property(pty.propertyName()); return res == null ? dflt : Integer.parseInt(res); @@ -130,7 +130,7 @@ public enum GridHadoopJobProperty { * @param dflt Default value. * @return Property value. */ - public static boolean get(GridHadoopJobInfo jobInfo, GridHadoopJobProperty pty, boolean dflt) { + public static boolean get(HadoopJobInfo jobInfo, HadoopJobProperty pty, boolean dflt) { String res = jobInfo.property(pty.propertyName()); return res == null ? dflt : Boolean.parseBoolean(res); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobStatus.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobStatus.java similarity index 89% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobStatus.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobStatus.java index 02ea883..752556d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobStatus.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobStatus.java @@ -24,12 +24,12 @@ import java.io.*; /** * Hadoop job status. */ -public class GridHadoopJobStatus implements Externalizable { +public class HadoopJobStatus implements Externalizable { /** */ private static final long serialVersionUID = 0L; /** Job ID. */ - private GridHadoopJobId jobId; + private HadoopJobId jobId; /** Job name. */ private String jobName; @@ -49,7 +49,7 @@ public class GridHadoopJobStatus implements Externalizable { /** Total reducers count. */ private int totalReducerCnt; /** Phase. */ - private GridHadoopJobPhase jobPhase; + private HadoopJobPhase jobPhase; /** */ private boolean failed; @@ -60,7 +60,7 @@ public class GridHadoopJobStatus implements Externalizable { /** * {@link Externalizable} support. */ - public GridHadoopJobStatus() { + public HadoopJobStatus() { // No-op. } @@ -78,15 +78,15 @@ public class GridHadoopJobStatus implements Externalizable { * @param failed Failed. * @param ver Version. */ - public GridHadoopJobStatus( - GridHadoopJobId jobId, + public HadoopJobStatus( + HadoopJobId jobId, String jobName, String usr, int pendingMapperCnt, int pendingReducerCnt, int totalMapperCnt, int totalReducerCnt, - GridHadoopJobPhase jobPhase, + HadoopJobPhase jobPhase, boolean failed, long ver ) { @@ -105,7 +105,7 @@ public class GridHadoopJobStatus implements Externalizable { /** * @return Job ID. 
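
The typed getters above preserve the lookup-with-default idiom. A short usage sketch; COMBINER_HASHMAP_SIZE is the enum constant documented at the top of this file but not shown in these hunks, so treat the name as assumed:

    // Property names derive from the enum name:
    // COMBINER_HASHMAP_SIZE -> "ignite.combiner.hashmap.size".
    int hashmapSize = HadoopJobProperty.get(jobInfo, HadoopJobProperty.COMBINER_HASHMAP_SIZE, 8 * 1024);
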
*/ - public GridHadoopJobId jobId() { + public HadoopJobId jobId() { return jobId; } @@ -161,7 +161,7 @@ public class GridHadoopJobStatus implements Externalizable { /** * @return Job phase. */ - public GridHadoopJobPhase jobPhase() { + public HadoopJobPhase jobPhase() { return jobPhase; } @@ -174,7 +174,7 @@ public class GridHadoopJobStatus implements Externalizable { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopJobStatus.class, this); + return S.toString(HadoopJobStatus.class, this); } /** {@inheritDoc} */ @@ -193,14 +193,14 @@ public class GridHadoopJobStatus implements Externalizable { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - jobId = (GridHadoopJobId)in.readObject(); + jobId = (HadoopJobId)in.readObject(); jobName = U.readString(in); usr = U.readString(in); pendingMapperCnt = in.readInt(); pendingReducerCnt = in.readInt(); totalMapperCnt = in.readInt(); totalReducerCnt = in.readInt(); - jobPhase = (GridHadoopJobPhase)in.readObject(); + jobPhase = (HadoopJobPhase)in.readObject(); failed = in.readBoolean(); ver = in.readLong(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlan.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlan.java similarity index 94% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlan.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlan.java index 2fd5160..3da2fb1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlan.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlan.java @@ -25,14 +25,14 @@ import java.util.*; /** * Map-reduce job execution plan. */ -public interface GridHadoopMapReducePlan extends Serializable { +public interface HadoopMapReducePlan extends Serializable { /** * Gets collection of file blocks for which mappers should be executed. * * @param nodeId Node ID to check. * @return Collection of file blocks or {@code null} if no mappers should be executed on given node. */ - @Nullable public Collection mappers(UUID nodeId); + @Nullable public Collection mappers(UUID nodeId); /** * Gets reducer IDs that should be started on given node. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlanner.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlanner.java similarity index 84% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlanner.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlanner.java index 56c6913..ab885fe 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReducePlanner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReducePlanner.java @@ -26,7 +26,7 @@ import java.util.*; /** * Map-reduce execution planner. */ -public interface GridHadoopMapReducePlanner { +public interface HadoopMapReducePlanner { /** * Prepares map-reduce execution plan for the given job and topology. * @@ -35,6 +35,6 @@ public interface GridHadoopMapReducePlanner { * @param oldPlan Old plan in case of partial failure. * @return Map reduce plan. 
*/ - public GridHadoopMapReducePlan preparePlan(GridHadoopJob job, Collection top, - @Nullable GridHadoopMapReducePlan oldPlan) throws IgniteCheckedException; + public HadoopMapReducePlan preparePlan(HadoopJob job, Collection top, + @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopNoopProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java similarity index 66% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopNoopProcessor.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java index d0ef4ce..eb84d00 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopNoopProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopNoopProcessor.java @@ -18,57 +18,59 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; import org.apache.ignite.internal.util.future.*; /** * Hadoop processor. */ -public class IgniteHadoopNoopProcessor extends IgniteHadoopProcessorAdapter { +public class HadoopNoopProcessor extends HadoopProcessorAdapter { /** * @param ctx Kernal context. */ - public IgniteHadoopNoopProcessor(GridKernalContext ctx) { + public HadoopNoopProcessor(GridKernalContext ctx) { super(ctx); } /** {@inheritDoc} */ - @Override public GridHadoop hadoop() { + @Override public Hadoop hadoop() { throw new IllegalStateException("Hadoop module is not found in class path."); } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration config() { + @Override public HadoopConfiguration config() { return null; } /** {@inheritDoc} */ - @Override public GridHadoopJobId nextJobId() { + @Override public HadoopJobId nextJobId() { return null; } /** {@inheritDoc} */ - @Override public IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo) { + @Override public IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo jobInfo) { return new GridFinishedFutureEx<>(new IgniteCheckedException("Hadoop is not available.")); } /** {@inheritDoc} */ - @Override public GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException { return null; } /** {@inheritDoc} */ - @Override public GridHadoopCounters counters(GridHadoopJobId jobId) { + @Override public HadoopCounters counters(HadoopJobId jobId) { return null; } /** {@inheritDoc} */ - @Override public IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException { return null; } /** {@inheritDoc} */ - @Override public boolean kill(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException { return false; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPartitioner.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPartitioner.java similarity index 96% rename from 
modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPartitioner.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPartitioner.java index fcde424..ec94f81 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPartitioner.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopPartitioner.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.hadoop; /** * Partitioner. */ -public interface GridHadoopPartitioner { +public interface HadoopPartitioner { /** * Gets partition which is actually a reducer index for the given key and value pair. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessorAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessorAdapter.java similarity index 71% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessorAdapter.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessorAdapter.java index c2cf542..44ff8be 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessorAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessorAdapter.java @@ -18,34 +18,36 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; /** * Hadoop processor. */ -public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter { +public abstract class HadoopProcessorAdapter extends GridProcessorAdapter { /** * @param ctx Kernal context. */ - protected IgniteHadoopProcessorAdapter(GridKernalContext ctx) { + protected HadoopProcessorAdapter(GridKernalContext ctx) { super(ctx); } /** * @return Hadoop facade. */ - public abstract GridHadoop hadoop(); + public abstract Hadoop hadoop(); /** * @return Hadoop configuration. */ - public abstract GridHadoopConfiguration config(); + public abstract HadoopConfiguration config(); /** * @return Collection of generated IDs. */ - public abstract GridHadoopJobId nextJobId(); + public abstract HadoopJobId nextJobId(); /** * Submits job to job tracker. @@ -54,7 +56,7 @@ public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter * @param jobInfo Job info to submit. * @return Execution future. */ - public abstract IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo); + public abstract IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo jobInfo); /** * Gets Hadoop job execution status. @@ -63,7 +65,7 @@ public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter * @return Job execution status. * @throws IgniteCheckedException If failed. */ - public abstract GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException; + public abstract HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException; /** * Returns Hadoop job counters. @@ -72,7 +74,7 @@ public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter * @return Job counters. * @throws IgniteCheckedException If failed. 
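
HadoopNoopProcessor is the null-object fallback that the kernal wires in when the hadoop module is missing, so callers of this adapter see the following behavior (descriptive sketch only; proc stands for whatever HadoopProcessorAdapter the context exposes):

    try {
        Hadoop hadoop = proc.hadoop(); // throws if ignite-hadoop is not on the class path
    }
    catch (IllegalStateException e) {
        // "Hadoop module is not found in class path."
    }

    // The remaining methods degrade quietly: status(), counters() and
    // finishFuture() return null, kill() returns false, and submit()
    // returns an already-finished future carrying an IgniteCheckedException.
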
*/ - public abstract GridHadoopCounters counters(GridHadoopJobId jobId) throws IgniteCheckedException; + public abstract HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException; /** * Gets Hadoop job finish future. @@ -81,7 +83,7 @@ public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter * @return Job finish future or {@code null}. * @throws IgniteCheckedException If failed. */ - public abstract IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException; + public abstract IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException; /** * Kills job. @@ -90,5 +92,5 @@ public abstract class IgniteHadoopProcessorAdapter extends GridProcessorAdapter * @return {@code True} if job was killed. * @throws IgniteCheckedException If failed. */ - public abstract boolean kill(GridHadoopJobId jobId) throws IgniteCheckedException; + public abstract boolean kill(HadoopJobId jobId) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerialization.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSerialization.java similarity index 96% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerialization.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSerialization.java index 5bc8806..aab803b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerialization.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSerialization.java @@ -25,7 +25,7 @@ import java.io.*; /** * Hadoop serialization. Not thread safe object, must be created for each thread or correctly synchronized. */ -public interface GridHadoopSerialization extends AutoCloseable { +public interface HadoopSerialization extends AutoCloseable { /** * Writes the given object to output. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTask.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTask.java similarity index 84% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTask.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTask.java index be34f81..3ce83ae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTask.java @@ -24,16 +24,16 @@ import java.io.*; /** * Hadoop task. */ -public abstract class GridHadoopTask { +public abstract class HadoopTask { /** */ - private GridHadoopTaskInfo taskInfo; + private HadoopTaskInfo taskInfo; /** * Creates task. * * @param taskInfo Task info. */ - protected GridHadoopTask(GridHadoopTaskInfo taskInfo) { + protected HadoopTask(HadoopTaskInfo taskInfo) { assert taskInfo != null; this.taskInfo = taskInfo; @@ -43,7 +43,7 @@ public abstract class GridHadoopTask { * For {@link Externalizable}. */ @SuppressWarnings("ConstructorNotProtectedInAbstractClass") - public GridHadoopTask() { + public HadoopTask() { // No-op. } @@ -52,7 +52,7 @@ public abstract class GridHadoopTask { * * @return Task info. 
*/ - public GridHadoopTaskInfo info() { + public HadoopTaskInfo info() { return taskInfo; } @@ -63,7 +63,7 @@ public abstract class GridHadoopTask { * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted. * @throws IgniteCheckedException If failed. */ - public abstract void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException; + public abstract void run(HadoopTaskContext taskCtx) throws IgniteCheckedException; /** * Interrupts task execution. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskContext.java similarity index 77% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskContext.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskContext.java index bedd93b..371fd81 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskContext.java @@ -18,30 +18,31 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; import java.util.*; /** * Task context. */ -public abstract class GridHadoopTaskContext { +public abstract class HadoopTaskContext { /** */ - private final GridHadoopJob job; + private final HadoopJob job; /** */ - private GridHadoopTaskInput input; + private HadoopTaskInput input; /** */ - private GridHadoopTaskOutput output; + private HadoopTaskOutput output; /** */ - private GridHadoopTaskInfo taskInfo; + private HadoopTaskInfo taskInfo; /** * @param taskInfo Task info. * @param job Job. */ - protected GridHadoopTaskContext(GridHadoopTaskInfo taskInfo, GridHadoopJob job) { + protected HadoopTaskContext(HadoopTaskInfo taskInfo, HadoopJob job) { this.taskInfo = taskInfo; this.job = job; } @@ -51,7 +52,7 @@ public abstract class GridHadoopTaskContext { * * @return Task info. */ - public GridHadoopTaskInfo taskInfo() { + public HadoopTaskInfo taskInfo() { return taskInfo; } @@ -60,7 +61,7 @@ public abstract class GridHadoopTaskContext { * * @param info Task info. */ - public void taskInfo(GridHadoopTaskInfo info) { + public void taskInfo(HadoopTaskInfo info) { taskInfo = info; } @@ -69,7 +70,7 @@ public abstract class GridHadoopTaskContext { * * @return Task output. */ - public GridHadoopTaskOutput output() { + public HadoopTaskOutput output() { return output; } @@ -78,14 +79,14 @@ public abstract class GridHadoopTaskContext { * * @return Task input. */ - public GridHadoopTaskInput input() { + public HadoopTaskInput input() { return input; } /** * @return Job. */ - public GridHadoopJob job() { + public HadoopJob job() { return job; } @@ -96,21 +97,21 @@ public abstract class GridHadoopTaskContext { * @param name Counter name. * @return Counter. */ - public abstract T counter(String grp, String name, Class cls); + public abstract T counter(String grp, String name, Class cls); /** * Gets all known counters. * * @return Unmodifiable collection of counters. */ - public abstract GridHadoopCounters counters(); + public abstract HadoopCounters counters(); /** * Sets input of the task. * * @param in Input. 
*/ - public void input(GridHadoopTaskInput in) { + public void input(HadoopTaskInput in) { input = in; } @@ -119,7 +120,7 @@ public abstract class GridHadoopTaskContext { * * @param out Output. */ - public void output(GridHadoopTaskOutput out) { + public void output(HadoopTaskOutput out) { output = out; } @@ -129,7 +130,7 @@ public abstract class GridHadoopTaskContext { * @return Partitioner. * @throws IgniteCheckedException If failed. */ - public abstract GridHadoopPartitioner partitioner() throws IgniteCheckedException; + public abstract HadoopPartitioner partitioner() throws IgniteCheckedException; /** * Gets serializer for values. @@ -137,7 +138,7 @@ public abstract class GridHadoopTaskContext { * @return Serializer for keys. * @throws IgniteCheckedException If failed. */ - public abstract GridHadoopSerialization keySerialization() throws IgniteCheckedException; + public abstract HadoopSerialization keySerialization() throws IgniteCheckedException; /** * Gets serializer for values. @@ -145,7 +146,7 @@ public abstract class GridHadoopTaskContext { * @return Serializer for values. * @throws IgniteCheckedException If failed. */ - public abstract GridHadoopSerialization valueSerialization() throws IgniteCheckedException; + public abstract HadoopSerialization valueSerialization() throws IgniteCheckedException; /** * Gets sorting comparator. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInfo.java similarity index 78% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInfo.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInfo.java index 75e06ca..eb82cb4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInfo.java @@ -25,15 +25,15 @@ import java.io.*; /** * Task info. */ -public class GridHadoopTaskInfo implements Externalizable { +public class HadoopTaskInfo implements Externalizable { /** */ private static final long serialVersionUID = 0L; /** */ - private GridHadoopTaskType type; + private HadoopTaskType type; /** */ - private GridHadoopJobId jobId; + private HadoopJobId jobId; /** */ private int taskNum; @@ -42,12 +42,12 @@ public class GridHadoopTaskInfo implements Externalizable { private int attempt; /** */ - private GridHadoopInputSplit inputSplit; + private HadoopInputSplit inputSplit; /** * For {@link Externalizable}. */ - public GridHadoopTaskInfo() { + public HadoopTaskInfo() { // No-op. } @@ -60,8 +60,8 @@ public class GridHadoopTaskInfo implements Externalizable { * @param attempt Attempt for this task. * @param inputSplit Input split. 
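
To make the renamed task API concrete, here is a hedged sketch of a trivial task. The HadoopTaskInput/HadoopTaskOutput member names (next(), key(), values(), write()) and the abstract cancel() method come from the surrounding source, not from these hunks:

    public class EchoTask extends HadoopTask {
        public EchoTask(HadoopTaskInfo taskInfo) {
            super(taskInfo);
        }

        /** {@inheritDoc} */
        @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
            HadoopTaskInput in = taskCtx.input();
            HadoopTaskOutput out = taskCtx.output();

            // Copy each key with its first value to the output.
            while (in.next())
                out.write(in.key(), in.values().next());
        }

        /** {@inheritDoc} */
        @Override public void cancel() {
            // No-op for this sketch.
        }
    }
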
*/ - public GridHadoopTaskInfo(GridHadoopTaskType type, GridHadoopJobId jobId, int taskNum, int attempt, - @Nullable GridHadoopInputSplit inputSplit) { + public HadoopTaskInfo(HadoopTaskType type, HadoopJobId jobId, int taskNum, int attempt, + @Nullable HadoopInputSplit inputSplit) { this.type = type; this.jobId = jobId; this.taskNum = taskNum; @@ -80,24 +80,24 @@ public class GridHadoopTaskInfo implements Externalizable { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - type = GridHadoopTaskType.fromOrdinal(in.readByte()); - jobId = (GridHadoopJobId)in.readObject(); + type = HadoopTaskType.fromOrdinal(in.readByte()); + jobId = (HadoopJobId)in.readObject(); taskNum = in.readInt(); attempt = in.readInt(); - inputSplit = (GridHadoopInputSplit)in.readObject(); + inputSplit = (HadoopInputSplit)in.readObject(); } /** * @return Type. */ - public GridHadoopTaskType type() { + public HadoopTaskType type() { return type; } /** * @return Job id. */ - public GridHadoopJobId jobId() { + public HadoopJobId jobId() { return jobId; } @@ -118,7 +118,7 @@ public class GridHadoopTaskInfo implements Externalizable { /** * @return Input split. */ - @Nullable public GridHadoopInputSplit inputSplit() { + @Nullable public HadoopInputSplit inputSplit() { return inputSplit; } @@ -127,10 +127,10 @@ public class GridHadoopTaskInfo implements Externalizable { if (this == o) return true; - if (!(o instanceof GridHadoopTaskInfo)) + if (!(o instanceof HadoopTaskInfo)) return false; - GridHadoopTaskInfo that = (GridHadoopTaskInfo)o; + HadoopTaskInfo that = (HadoopTaskInfo)o; return attempt == that.attempt && taskNum == that.taskNum && jobId.equals(that.jobId) && type == that.type; } @@ -148,6 +148,6 @@ public class GridHadoopTaskInfo implements Externalizable { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopTaskInfo.class, this); + return S.toString(HadoopTaskInfo.class, this); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInput.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInput.java similarity index 95% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInput.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInput.java index 479cf6d..ad6446f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskInput.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskInput.java @@ -24,7 +24,7 @@ import java.util.*; /** * Task input. */ -public interface GridHadoopTaskInput extends AutoCloseable { +public interface HadoopTaskInput extends AutoCloseable { /** * Moves cursor to the next element. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskOutput.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskOutput.java similarity index 95% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskOutput.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskOutput.java index 6480d8d..41d9847 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskOutput.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskOutput.java @@ -22,7 +22,7 @@ import org.apache.ignite.*; /** * Task output. */ -public interface GridHadoopTaskOutput extends AutoCloseable { +public interface HadoopTaskOutput extends AutoCloseable { /** * Writes key and value to the output. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskType.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskType.java similarity index 89% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskType.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskType.java index 404d6b8..a88e189 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskType.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskType.java @@ -22,7 +22,7 @@ import org.jetbrains.annotations.*; /** * Task type. */ -public enum GridHadoopTaskType { +public enum HadoopTaskType { /** Setup task. */ SETUP, @@ -42,7 +42,7 @@ public enum GridHadoopTaskType { ABORT; /** Enumerated values. */ - private static final GridHadoopTaskType[] VALS = values(); + private static final HadoopTaskType[] VALS = values(); /** * Efficiently gets enumerated value from its ordinal. @@ -50,7 +50,7 @@ public enum GridHadoopTaskType { * @param ord Ordinal value. * @return Enumerated value. */ - @Nullable public static GridHadoopTaskType fromOrdinal(byte ord) { + @Nullable public static HadoopTaskType fromOrdinal(byte ord) { return ord >= 0 && ord < VALS.length ? VALS[ord] : null; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounter.java similarity index 89% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounter.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounter.java index 83902dd..918c3bc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounter.java @@ -15,12 +15,12 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.hadoop; +package org.apache.ignite.internal.processors.hadoop.counter; /** * Hadoop counter. */ -public interface GridHadoopCounter { +public interface HadoopCounter { /** * Gets name. * @@ -40,5 +40,5 @@ public interface GridHadoopCounter { * * @param cntr Counter to merge into this counter. 
*/ - public void merge(GridHadoopCounter cntr); + public void merge(HadoopCounter cntr); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounterWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterWriter.java similarity index 81% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounterWriter.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterWriter.java index af72e69..ce67c57 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounterWriter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterWriter.java @@ -15,15 +15,16 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.hadoop; +package org.apache.ignite.internal.processors.hadoop.counter; import org.apache.ignite.*; +import org.apache.ignite.internal.processors.hadoop.*; /** * The object that writes some system counters to some storage for each running job. This operation is a part of * whole statistics collection process. */ -public interface GridHadoopCounterWriter { +public interface HadoopCounterWriter { /** * Writes counters of given job to some statistics storage. * @@ -32,5 +33,5 @@ public interface GridHadoopCounterWriter { * @param cntrs Counters. * @throws IgniteCheckedException If failed. */ - public void write(GridHadoopJobInfo jobInfo, GridHadoopJobId jobId, GridHadoopCounters cntrs) throws IgniteCheckedException; + public void write(HadoopJobInfo jobInfo, HadoopJobId jobId, HadoopCounters cntrs) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounters.java similarity index 84% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounters.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounters.java index 91eb8a1..706ba77 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCounters.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounters.java @@ -15,14 +15,14 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.hadoop; +package org.apache.ignite.internal.processors.hadoop.counter; import java.util.*; /** * Counters store. */ -public interface GridHadoopCounters { +public interface HadoopCounters { /** * Returns counter for the specified group and counter name. Creates new if it does not exist. * @@ -31,19 +31,19 @@ public interface GridHadoopCounters { * @param cls Class for new instance creation if it's needed. * @return The counter that was found or added or {@code null} if create is false. */ - T counter(String grp, String name, Class cls); + T counter(String grp, String name, Class cls); /** * Returns all existing counters. * * @return Collection of counters. */ - Collection all(); + Collection all(); /** * Merges all counters from another store with existing counters. * * @param other Counters to merge with. 
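
A usage sketch for the relocated counter store. HadoopLongCounter is assumed as a concrete HadoopCounter implementation (it is not part of these hunks), and counter() is generic in the source even though the type parameters were stripped in this rendering:

    // Fetch-or-create a counter from a task context, then bump it.
    HadoopLongCounter written = taskCtx.counter("fs", "bytesWritten", HadoopLongCounter.class);

    written.increment();
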
*/ - void merge(GridHadoopCounters other); + void merge(HadoopCounters other); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsAsyncImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsAsyncImpl.java index 1479a88..48a32f4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsAsyncImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsAsyncImpl.java @@ -21,6 +21,7 @@ import org.apache.ignite.*; import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; import org.apache.ignite.igfs.mapreduce.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.typedef.internal.*; import org.apache.ignite.lang.*; @@ -32,7 +33,7 @@ import java.util.*; /** * Igfs supporting asynchronous operations. */ -public class IgfsAsyncImpl extends AsyncSupportAdapter implements IgfsEx { +public class IgfsAsyncImpl extends AsyncSupportAdapter implements IgfsEx { /** */ private final IgfsImpl igfs; @@ -192,7 +193,7 @@ public class IgfsAsyncImpl extends AsyncSupportAdapter implements Igfs } /** {@inheritDoc} */ - @Override public IgfsConfiguration configuration() { + @Override public FileSystemConfiguration configuration() { return igfs.configuration(); } @@ -310,7 +311,7 @@ public class IgfsAsyncImpl extends AsyncSupportAdapter implements Igfs } /** {@inheritDoc} */ - @Override public Map properties() { - return igfs.properties(); + @Override public IgfsSecondaryFileSystem asSecondary() { + return igfs.asSecondary(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsContext.java index 245a1dd..475a7fc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsContext.java @@ -37,7 +37,7 @@ public class IgfsContext { private final GridKernalContext ctx; /** Configuration. */ - private final IgfsConfiguration cfg; + private final FileSystemConfiguration cfg; /** Managers. */ private List mgrs = new LinkedList<>(); @@ -68,7 +68,7 @@ public class IgfsContext { */ public IgfsContext( GridKernalContext ctx, - IgfsConfiguration cfg, + FileSystemConfiguration cfg, IgfsMetaManager metaMgr, IgfsDataManager dataMgr, IgfsServerManager srvMgr, @@ -102,7 +102,7 @@ public class IgfsContext { /** * @return IGFS configuration. 
*/ - public IgfsConfiguration configuration() { + public FileSystemConfiguration configuration() { return cfg; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java index e960422..72bd60a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java @@ -24,6 +24,7 @@ import org.apache.ignite.cluster.*; import org.apache.ignite.configuration.*; import org.apache.ignite.events.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.cluster.*; import org.apache.ignite.internal.managers.communication.*; @@ -306,7 +307,7 @@ public class IgfsDataManager extends IgfsManager { IgniteDataLoader ldr = igfsCtx.kernalContext().dataLoad().dataLoader(dataCachePrj.name()); - IgfsConfiguration cfg = igfsCtx.configuration(); + FileSystemConfiguration cfg = igfsCtx.configuration(); if (cfg.getPerNodeBatchSize() > 0) ldr.perNodeBufferSize(cfg.getPerNodeBatchSize()); @@ -382,7 +383,7 @@ public class IgfsDataManager extends IgfsManager { * @throws IgniteCheckedException If failed. */ @Nullable public IgniteInternalFuture dataBlock(final IgfsFileInfo fileInfo, final IgfsPath path, - final long blockIdx, @Nullable final IgfsReader secReader) + final long blockIdx, @Nullable final IgfsSecondaryFileSystemPositionedReadable secReader) throws IgniteCheckedException { //assert validTxState(any); // Allow this method call for any transaction state. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java index a380a6d..0c5debd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsEx.java @@ -19,6 +19,7 @@ package org.apache.ignite.internal.processors.igfs; import org.apache.ignite.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.lang.*; import org.jetbrains.annotations.*; @@ -26,14 +27,26 @@ import org.jetbrains.annotations.*; import java.net.*; /** - * Internal API extension for {@link org.apache.ignite.IgniteFs}. + * Internal API extension for {@link org.apache.ignite.IgniteFileSystem}. */ -public interface IgfsEx extends IgniteFs { +public interface IgfsEx extends IgniteFileSystem { + /** File property: user name. */ + public static final String PROP_USER_NAME = "usrName"; + + /** File property: group name. */ + public static final String PROP_GROUP_NAME = "grpName"; + + /** File property: permission. */ + public static final String PROP_PERMISSION = "permission"; + + /** File property: prefer writes to local node. */ + public static final String PROP_PREFER_LOCAL_WRITES = "locWrite"; + /** Property name for path to Hadoop configuration. */ - String SECONDARY_FS_CONFIG_PATH = "SECONDARY_FS_CONFIG_PATH"; + public static final String SECONDARY_FS_CONFIG_PATH = "SECONDARY_FS_CONFIG_PATH"; /** Property name for URI of file system. */ - String SECONDARY_FS_URI = "SECONDARY_FS_URI"; + public static final String SECONDARY_FS_URI = "SECONDARY_FS_URI"; /** * Stops IGFS cleaning all used resources. 
@@ -146,4 +159,11 @@ public interface IgfsEx extends IgniteFs { * @return {@code True} if proxy. */ public boolean isProxy(URI path); + + /** + * Return the given IGFS as a secondary file system. + * + * @return Secondary file system wrapper. + */ + public IgfsSecondaryFileSystem asSecondary(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java index 43def03..e88503b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java @@ -99,13 +99,13 @@ public final class IgfsFileInfo implements Externalizable { } /** - * Constructs directory or file info with {@link org.apache.ignite.configuration.IgfsConfiguration#DFLT_BLOCK_SIZE default} block size. + * Constructs directory or file info with {@link org.apache.ignite.configuration.FileSystemConfiguration#DFLT_BLOCK_SIZE default} block size. * * @param isDir Constructs directory info if {@code true} or file info if {@code false}. * @param props Meta properties to set. */ public IgfsFileInfo(boolean isDir, @Nullable Map props) { - this(isDir, null, isDir ? 0 : IgfsConfiguration.DFLT_BLOCK_SIZE, 0, null, null, props, null, false, + this(isDir, null, isDir ? 0 : FileSystemConfiguration.DFLT_BLOCK_SIZE, 0, null, null, props, null, false, System.currentTimeMillis(), false); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java index eff987e..614815f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java @@ -26,6 +26,7 @@ import org.apache.ignite.configuration.*; import org.apache.ignite.events.*; import org.apache.ignite.igfs.*; import org.apache.ignite.igfs.mapreduce.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.managers.communication.*; import org.apache.ignite.internal.managers.eventstorage.*; @@ -71,7 +72,7 @@ public final class IgfsImpl implements IgfsEx { private IgfsDataManager data; /** FS configuration. */ - private IgfsConfiguration cfg; + private FileSystemConfiguration cfg; /** IGFS context. */ private IgfsContext igfsCtx; @@ -89,7 +90,7 @@ public final class IgfsImpl implements IgfsEx { private final IgfsModeResolver modeRslvr; /** Connection to the secondary file system. */ - private Igfs secondaryFs; + private IgfsSecondaryFileSystem secondaryFs; /** Busy lock. */ private final GridSpinBusyLock busyLock = new GridSpinBusyLock(); @@ -124,6 +125,7 @@ public final class IgfsImpl implements IgfsEx { * @param igfsCtx Context. * @throws IgniteCheckedException In case of error. 
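
The Java-side counterpart of the XML rename: a minimal sketch that wires one file system into a node configuration. Setter names are assumed to mirror the getters used in this patch; the cache names are illustrative:

    FileSystemConfiguration fsCfg = new FileSystemConfiguration(); // was IgfsConfiguration

    fsCfg.setName("igfs");
    fsCfg.setMetaCacheName("igfs-meta");
    fsCfg.setDataCacheName("igfs-data");
    fsCfg.setBlockSize(FileSystemConfiguration.DFLT_BLOCK_SIZE);

    IgniteConfiguration cfg = new IgniteConfiguration();

    cfg.setFileSystemConfiguration(fsCfg); // was setIgfsConfiguration
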
*/ + @SuppressWarnings("ConstantConditions") IgfsImpl(IgfsContext igfsCtx) throws IgniteCheckedException { assert igfsCtx != null; @@ -376,7 +378,7 @@ public final class IgfsImpl implements IgfsEx { } /** {@inheritDoc} */ - @Override public IgfsConfiguration configuration() { + @Override public FileSystemConfiguration configuration() { return cfg; } @@ -938,6 +940,7 @@ public final class IgfsImpl implements IgfsEx { } /** {@inheritDoc} */ + @SuppressWarnings("unchecked") @Override public Collection listPaths(final IgfsPath path) { if (enterBusy()) { try { @@ -1067,11 +1070,6 @@ public final class IgfsImpl implements IgfsEx { } /** {@inheritDoc} */ - @Override public Map properties() { - return Collections.emptyMap(); - } - - /** {@inheritDoc} */ @Override public IgfsInputStreamAdapter open(IgfsPath path) { return open(path, cfg.getStreamBufferSize(), cfg.getSequentialReadsBeforePrefetch()); } @@ -1193,7 +1191,7 @@ public final class IgfsImpl implements IgfsEx { IgfsMode mode = modeRslvr.resolveMode(path); - IgfsFileWorkerBatch batch = null; + IgfsFileWorkerBatch batch; if (mode == PROXY) throw new IgniteException("PROXY mode cannot be used in IGFS directly: " + path); @@ -1250,6 +1248,8 @@ public final class IgfsImpl implements IgfsEx { IgfsFileInfo oldInfo = meta.info(oldId); + assert oldInfo != null; + if (oldInfo.isDirectory()) throw new IgfsPathAlreadyExistsException("Failed to create file (path points to a " + "directory): " + path); @@ -1268,7 +1268,7 @@ public final class IgfsImpl implements IgfsEx { info = meta.lock(info.id()); IgfsEventAwareOutputStream os = new IgfsEventAwareOutputStream(path, info, parentId, - bufSize == 0 ? cfg.getStreamBufferSize() : bufSize, mode, batch); + bufSize == 0 ? cfg.getStreamBufferSize() : bufSize, mode, null); if (evts.isRecordable(EVT_IGFS_FILE_OPENED_WRITE)) evts.record(new IgfsEvent(path, localNode(), EVT_IGFS_FILE_OPENED_WRITE)); @@ -1339,8 +1339,7 @@ public final class IgfsImpl implements IgfsEx { if (parentId == null) throw new IgfsInvalidPathException("Failed to resolve parent directory: " + path); - info = new IgfsFileInfo(cfg.getBlockSize(), /**affinity key*/null, evictExclude(path, - mode == PRIMARY), props); + info = new IgfsFileInfo(cfg.getBlockSize(), /**affinity key*/null, evictExclude(path, true), props); IgniteUuid oldId = meta.putIfAbsent(parentId, path.name(), info); @@ -1351,6 +1350,8 @@ public final class IgfsImpl implements IgfsEx { evts.record(new IgfsEvent(path, localNode(), EVT_IGFS_FILE_CREATED)); } + assert info != null; + if (!info.isFile()) throw new IgfsInvalidPathException("Failed to open file (not a file): " + path); @@ -1788,7 +1789,7 @@ public final class IgfsImpl implements IgfsEx { /** * Executes IGFS task with overridden maximum range length (see - * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information). + * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information). * * @param task Task to execute. * @param rslvr Optional resolver to control split boundaries. @@ -1822,7 +1823,7 @@ public final class IgfsImpl implements IgfsEx { /** * Executes IGFS task asynchronously with overridden maximum range length (see - * {@link org.apache.ignite.configuration.IgfsConfiguration#getMaximumTaskRangeLength()} for more information). + * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information). * * @param taskCls Task class to execute. 
* @param rslvr Optional resolver to control split boundaries. @@ -1833,6 +1834,7 @@ public final class IgfsImpl implements IgfsEx { * @param arg Optional task argument. * @return Execution future. */ + @SuppressWarnings("unchecked") IgniteInternalFuture executeAsync(Class> taskCls, @Nullable IgfsRecordResolver rslvr, Collection paths, boolean skipNonExistentFiles, long maxRangeLen, @Nullable T arg) { @@ -1898,7 +1900,7 @@ public final class IgfsImpl implements IgfsEx { } /** {@inheritDoc} */ - @Override public IgniteFs withAsync() { + @Override public IgniteFileSystem withAsync() { return new IgfsAsyncImpl(this); } @@ -2033,7 +2035,7 @@ public final class IgfsImpl implements IgfsEx { * @param metrics Metrics. */ IgfsEventAwareInputStream(IgfsContext igfsCtx, IgfsPath path, IgfsFileInfo fileInfo, - int prefetchBlocks, int seqReadsBeforePrefetch, @Nullable IgfsReader secReader, + int prefetchBlocks, int seqReadsBeforePrefetch, @Nullable IgfsSecondaryFileSystemPositionedReadable secReader, IgfsLocalMetrics metrics) { super(igfsCtx, path, fileInfo, prefetchBlocks, seqReadsBeforePrefetch, secReader, metrics); @@ -2083,7 +2085,7 @@ public final class IgfsImpl implements IgfsEx { private Ignite g; @Nullable @Override public IgniteBiTuple execute() { - IgniteFs igfs = ((IgniteKernal)g).context().igfs().igfs(igfsName); + IgniteFileSystem igfs = ((IgniteKernal)g).context().igfs().igfs(igfsName); if (igfs == null) return F.t(0L, 0L); @@ -2129,6 +2131,7 @@ public final class IgfsImpl implements IgfsEx { */ private class FormatMessageListener implements GridMessageListener { /** {@inheritDoc} */ + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") @Override public void onMessage(UUID nodeId, Object msg) { if (msg instanceof IgfsDeleteMessage) { ClusterNode node = igfsCtx.kernalContext().discovery().node(nodeId); @@ -2219,4 +2222,9 @@ public final class IgfsImpl implements IgfsEx { return mode == PROXY; } + + /** {@inheritDoc} */ + @Override public IgfsSecondaryFileSystem asSecondary() { + return new IgfsSecondaryFileSystemImpl(this); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamAdapter.java index 51e57db..fc333b5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamAdapter.java @@ -18,13 +18,15 @@ package org.apache.ignite.internal.processors.igfs; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import java.io.*; /** * Implementation adapter providing necessary methods. 
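The withAsync() override above hands out the IgfsAsyncImpl facade. A sketch of the IgniteAsyncSupport calling convention it enables, assuming format() is among the async-enabled operations on this facade:

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.lang.IgniteFuture;

public final class IgfsAsyncSketch {
    public static void formatAsync(IgniteFileSystem fs) {
        IgniteFileSystem asyncFs = fs.withAsync(); // async facade over the same IGFS

        asyncFs.format(); // returns immediately in async mode

        IgniteFuture<Void> fut = asyncFs.future(); // future of the last async call

        fut.get(); // block only where completion is actually required
    }
}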
*/ -public abstract class IgfsInputStreamAdapter extends IgfsInputStream { +public abstract class IgfsInputStreamAdapter extends IgfsInputStream + implements IgfsSecondaryFileSystemPositionedReadable { /** {@inheritDoc} */ @Override public long length() { return fileInfo().length(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamImpl.java index 92b4383..5afa523 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsInputStreamImpl.java @@ -19,6 +19,7 @@ package org.apache.ignite.internal.processors.igfs; import org.apache.ignite.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -45,7 +46,7 @@ public class IgfsInputStreamImpl extends IgfsInputStreamAdapter { /** Secondary file system reader. */ @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") - private final IgfsReader secReader; + private final IgfsSecondaryFileSystemPositionedReadable secReader; /** Logger. */ private IgniteLogger log; @@ -110,7 +111,7 @@ public class IgfsInputStreamImpl extends IgfsInputStreamAdapter { * @param metrics Local IGFS metrics. */ IgfsInputStreamImpl(IgfsContext igfsCtx, IgfsPath path, IgfsFileInfo fileInfo, int prefetchBlocks, - int seqReadsBeforePrefetch, @Nullable IgfsReader secReader, IgfsLocalMetrics metrics) { + int seqReadsBeforePrefetch, @Nullable IgfsSecondaryFileSystemPositionedReadable secReader, IgfsLocalMetrics metrics) { assert igfsCtx != null; assert path != null; assert fileInfo != null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsJobImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsJobImpl.java index f6b17bf..fa90e21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsJobImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsJobImpl.java @@ -79,7 +79,7 @@ public class IgfsJobImpl implements ComputeJob, GridInternalWrapper { /** {@inheritDoc} */ @Override public Object execute() { - IgniteFs fs = ignite.fileSystem(igfsName); + IgniteFileSystem fs = ignite.fileSystem(igfsName); try (IgfsInputStream in = fs.open(path)) { IgfsFileRange split = new IgfsFileRange(path, start, len); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManager.java index 9d1795a..adc0254 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManager.java @@ -23,6 +23,7 @@ import org.apache.ignite.cluster.*; import org.apache.ignite.configuration.*; import org.apache.ignite.events.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.managers.eventstorage.*; import org.apache.ignite.internal.processors.cache.*; @@ -51,7 +52,7 @@ import static org.apache.ignite.transactions.TransactionIsolation.*; @SuppressWarnings("all") public class IgfsMetaManager extends 
IgfsManager { /** IGFS configuration. */ - private IgfsConfiguration cfg; + private FileSystemConfiguration cfg; /** Metadata cache. */ private GridCache metaCache; @@ -1588,7 +1589,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Output stream descriptor. * @throws IgniteCheckedException If file creation failed. */ - public IgfsSecondaryOutputStreamDescriptor createDual(final Igfs fs, + public IgfsSecondaryOutputStreamDescriptor createDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final boolean simpleCreate, @Nullable final Map props, @@ -1752,7 +1753,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Output stream descriptor. * @throws IgniteCheckedException If output stream open for append has failed. */ - public IgfsSecondaryOutputStreamDescriptor appendDual(final Igfs fs, final IgfsPath path, + public IgfsSecondaryOutputStreamDescriptor appendDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final int bufSize) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { @@ -1783,7 +1784,7 @@ public class IgfsMetaManager extends IgfsManager { if (remainder > 0) { int blockIdx = (int)(len / blockSize); - IgfsReader reader = fs.open(path, bufSize); + IgfsSecondaryFileSystemPositionedReadable reader = fs.open(path, bufSize); try { igfsCtx.data().dataBlock(info, path, blockIdx, reader).get(); @@ -1832,7 +1833,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Input stream descriptor. * @throws IgniteCheckedException If input stream open has failed. */ - public IgfsSecondaryInputStreamDescriptor openDual(final Igfs fs, final IgfsPath path, + public IgfsSecondaryInputStreamDescriptor openDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final int bufSize) throws IgniteCheckedException { if (busyLock.enterBusy()) { @@ -1893,7 +1894,7 @@ public class IgfsMetaManager extends IgfsManager { * @return File info or {@code null} if file not found. * @throws IgniteCheckedException If sync task failed. */ - @Nullable public IgfsFileInfo synchronizeFileDual(final Igfs fs, final IgfsPath path) + @Nullable public IgfsFileInfo synchronizeFileDual(final IgfsSecondaryFileSystem fs, final IgfsPath path) throws IgniteCheckedException { assert fs != null; assert path != null; @@ -1941,7 +1942,7 @@ public class IgfsMetaManager extends IgfsManager { * @return {@code True} in case rename was successful. * @throws IgniteCheckedException If directory creation failed. */ - public boolean mkdirsDual(final Igfs fs, final IgfsPath path, final Map props) + public boolean mkdirsDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final Map props) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { @@ -2025,7 +2026,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Operation result. * @throws IgniteCheckedException If failed. */ - public boolean renameDual(final Igfs fs, final IgfsPath src, final IgfsPath dest) throws + public boolean renameDual(final IgfsSecondaryFileSystem fs, final IgfsPath src, final IgfsPath dest) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { @@ -2124,7 +2125,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Operation result. * @throws IgniteCheckedException If delete failed. 
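The appendDual() hunk above re-reads the trailing partial block from the secondary file system before appending to it. The arithmetic it relies on, restated as a standalone helper; the names are local stand-ins, not patch code:

public final class TailBlockSketch {
    /**
     * Index of the trailing partial block that appendDual() must re-fetch,
     * or -1 when the file ends exactly on a block boundary.
     */
    public static int trailingBlockIndex(long fileLen, int blockSize) {
        return fileLen % blockSize > 0 ? (int)(fileLen / blockSize) : -1;
    }

    public static void main(String[] args) {
        assert trailingBlockIndex(10, 4) == 2;  // blocks 0 and 1 full, block 2 holds 2 bytes
        assert trailingBlockIndex(8, 4) == -1;  // block-aligned file: nothing to re-read
    }
}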
*/ - public boolean deleteDual(final Igfs fs, final IgfsPath path, final boolean recursive) + public boolean deleteDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final boolean recursive) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { @@ -2190,7 +2191,7 @@ public class IgfsMetaManager extends IgfsManager { * @return Update file info. * @throws IgniteCheckedException If update failed. */ - public IgfsFileInfo updateDual(final Igfs fs, final IgfsPath path, final Map props) + public IgfsFileInfo updateDual(final IgfsSecondaryFileSystem fs, final IgfsPath path, final Map props) throws IgniteCheckedException { assert fs != null; assert path != null; @@ -2243,7 +2244,7 @@ public class IgfsMetaManager extends IgfsManager { * @return File info of the end path. * @throws IgniteCheckedException If failed. */ - private IgfsFileInfo synchronize(Igfs fs, + private IgfsFileInfo synchronize(IgfsSecondaryFileSystem fs, IgfsPath startPath, IgfsFileInfo startPathInfo, IgfsPath endPath, @@ -2328,7 +2329,7 @@ public class IgfsMetaManager extends IgfsManager { * @throws IgniteCheckedException If failed. */ private T synchronizeAndExecute(SynchronizationTask task, - Igfs fs, + IgfsSecondaryFileSystem fs, boolean strict, IgfsPath... paths) throws IgniteCheckedException @@ -2349,7 +2350,7 @@ public class IgfsMetaManager extends IgfsManager { * @throws IgniteCheckedException If failed. */ private T synchronizeAndExecute(SynchronizationTask task, - Igfs fs, + IgfsSecondaryFileSystem fs, boolean strict, @Nullable Collection extraLockIds, IgfsPath... paths) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsNoopProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsNoopProcessor.java index a299fc4..41dcc31 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsNoopProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsNoopProcessor.java @@ -49,12 +49,12 @@ public class IgfsNoopProcessor extends IgfsProcessorAdapter { } /** {@inheritDoc} */ - @Override public Collection igfss() { + @Override public Collection igfss() { return Collections.emptyList(); } /** {@inheritDoc} */ - @Nullable @Override public IgniteFs igfs(@Nullable String name) { + @Nullable @Override public IgniteFileSystem igfs(@Nullable String name) { return null; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsOutputStreamImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsOutputStreamImpl.java index 5941579..6c6dd9d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsOutputStreamImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsOutputStreamImpl.java @@ -405,7 +405,7 @@ class IgfsOutputStreamImpl extends IgfsOutputStreamAdapter { if (!igfsCtx.configuration().isFragmentizerEnabled()) return null; - if (!Boolean.parseBoolean(fileInfo.properties().get(IgniteFs.PROP_PREFER_LOCAL_WRITES))) + if (!Boolean.parseBoolean(fileInfo.properties().get(IgfsEx.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java index 847cd50..e1b5114 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java 
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessor.java @@ -51,8 +51,8 @@ public class IgfsProcessor extends IgfsProcessorAdapter { private static final String NULL_NAME = UUID.randomUUID().toString(); /** Converts context to IGFS. */ - private static final IgniteClosure CTX_TO_IGFS = new C1() { - @Override public IgniteFs apply(IgfsContext igfsCtx) { + private static final IgniteClosure CTX_TO_IGFS = new C1() { + @Override public IgniteFileSystem apply(IgfsContext igfsCtx) { return igfsCtx.igfs(); } }; @@ -73,17 +73,17 @@ public class IgfsProcessor extends IgfsProcessorAdapter { if (ctx.config().isDaemon()) return; - IgfsConfiguration[] cfgs = ctx.config().getIgfsConfiguration(); + FileSystemConfiguration[] cfgs = ctx.config().getFileSystemConfiguration(); assert cfgs != null && cfgs.length > 0; validateLocalIgfsConfigurations(cfgs); // Start IGFS instances. - for (IgfsConfiguration cfg : cfgs) { + for (FileSystemConfiguration cfg : cfgs) { IgfsContext igfsCtx = new IgfsContext( ctx, - new IgfsConfiguration(cfg), + new FileSystemConfiguration(cfg), new IgfsMetaManager(), new IgfsDataManager(), new IgfsServerManager(), @@ -167,12 +167,12 @@ public class IgfsProcessor extends IgfsProcessorAdapter { /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public Collection igfss() { + @Override public Collection igfss() { return F.viewReadOnly(igfsCache.values(), CTX_TO_IGFS); } /** {@inheritDoc} */ - @Override @Nullable public IgniteFs igfs(@Nullable String name) { + @Override @Nullable public IgniteFileSystem igfs(@Nullable String name) { IgfsContext igfsCtx = igfsCache.get(maskName(name)); return igfsCtx == null ? null : igfsCtx.igfs(); @@ -202,7 +202,7 @@ public class IgfsProcessor extends IgfsProcessorAdapter { // is daemon; // doesn't have configured IGFS; // doesn't have configured caches. - if (gridCfg.isDaemon() || F.isEmpty(gridCfg.getIgfsConfiguration()) || + if (gridCfg.isDaemon() || F.isEmpty(gridCfg.getFileSystemConfiguration()) || F.isEmpty(gridCfg.getCacheConfiguration())) return; @@ -216,9 +216,9 @@ public class IgfsProcessor extends IgfsProcessorAdapter { Collection attrVals = new ArrayList<>(); - assert gridCfg.getIgfsConfiguration() != null; + assert gridCfg.getFileSystemConfiguration() != null; - for (IgfsConfiguration igfsCfg : gridCfg.getIgfsConfiguration()) { + for (FileSystemConfiguration igfsCfg : gridCfg.getFileSystemConfiguration()) { CacheConfiguration cacheCfg = cacheCfgs.get(igfsCfg.getDataCacheName()); if (cacheCfg == null) @@ -258,10 +258,10 @@ public class IgfsProcessor extends IgfsProcessorAdapter { * @param cfgs IGFS configurations * @throws IgniteCheckedException If any of IGFS configurations is invalid. */ - private void validateLocalIgfsConfigurations(IgfsConfiguration[] cfgs) throws IgniteCheckedException { + private void validateLocalIgfsConfigurations(FileSystemConfiguration[] cfgs) throws IgniteCheckedException { Collection cfgNames = new HashSet<>(); - for (IgfsConfiguration cfg : cfgs) { + for (FileSystemConfiguration cfg : cfgs) { String name = cfg.getName(); if (cfgNames.contains(name)) @@ -333,7 +333,7 @@ public class IgfsProcessor extends IgfsProcessorAdapter { if (secondary) { // When working in any mode except primary, secondary FS config must be provided.
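The relaxed assertion message reflects that every default mode other than PRIMARY now requires a secondary file system. A configuration sketch of the DUAL_SYNC case; asSecondary() is the internal IgfsEx hook this patch introduces, used the same way by the test setups below:

import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.igfs.IgfsMode;
import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;

public final class DualModeConfigSketch {
    public static FileSystemConfiguration dualSync(IgfsSecondaryFileSystem secondary) {
        FileSystemConfiguration fsCfg = new FileSystemConfiguration();

        fsCfg.setName("igfs-dual");
        fsCfg.setDefaultMode(IgfsMode.DUAL_SYNC);

        // Must be non-null for any mode other than PRIMARY,
        // or validateLocalIgfsConfigurations() rejects the node start.
        fsCfg.setSecondaryFileSystem(secondary);

        return fsCfg;
    }
}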
assertParameter(cfg.getSecondaryFileSystem() != null, - "secondaryFileSystem cannot be null when mode is SECONDARY"); + "secondaryFileSystem cannot be null when mode is not " + IgfsMode.PRIMARY); } cfgNames.add(name); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorAdapter.java index b695104..ee38ab3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorAdapter.java @@ -46,7 +46,7 @@ public abstract class IgfsProcessorAdapter extends GridProcessorAdapter { * * @return Collection of IGFS instances. */ - public abstract Collection igfss(); + public abstract Collection igfss(); /** * Gets IGFS instance. @@ -54,7 +54,7 @@ public abstract class IgfsProcessorAdapter extends GridProcessorAdapter { * @param name (Nullable) IGFS name. * @return IGFS instance. */ - @Nullable public abstract IgniteFs igfs(@Nullable String name); + @Nullable public abstract IgniteFileSystem igfs(@Nullable String name); /** * Gets server endpoints for particular IGFS. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java new file mode 100644 index 0000000..683b317 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryFileSystemImpl.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.igfs; + +import org.apache.ignite.*; +import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; +import org.jetbrains.annotations.*; + +import java.io.*; +import java.util.*; + +/** + * Secondary file system over native IGFS. + */ +class IgfsSecondaryFileSystemImpl implements IgfsSecondaryFileSystem { + /** Delegate. */ + private final IgfsImpl igfs; + + /** + * Constructor. + * + * @param igfs Delegate. 
+ */ + IgfsSecondaryFileSystemImpl(IgfsImpl igfs) { + this.igfs = igfs; + } + + /** {@inheritDoc} */ + @Override public boolean exists(IgfsPath path) { + return igfs.exists(path); + } + + /** {@inheritDoc} */ + @Override public IgfsFile update(IgfsPath path, Map props) throws IgniteException { + return igfs.update(path, props); + } + + /** {@inheritDoc} */ + @Override public void rename(IgfsPath src, IgfsPath dest) throws IgniteException { + igfs.rename(src, dest); + } + + /** {@inheritDoc} */ + @Override public boolean delete(IgfsPath path, boolean recursive) throws IgniteException { + return igfs.delete(path, recursive); + } + + /** {@inheritDoc} */ + @Override public void mkdirs(IgfsPath path) throws IgniteException { + igfs.mkdirs(path); + } + + /** {@inheritDoc} */ + @Override public void mkdirs(IgfsPath path, @Nullable Map props) throws IgniteException { + igfs.mkdirs(path, props); + } + + /** {@inheritDoc} */ + @Override public Collection listPaths(IgfsPath path) throws IgniteException { + return igfs.listPaths(path); + } + + /** {@inheritDoc} */ + @Override public Collection listFiles(IgfsPath path) throws IgniteException { + return igfs.listFiles(path); + } + + /** {@inheritDoc} */ + @Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) + throws IgniteException { + return igfs.open(path, bufSize); + } + + /** {@inheritDoc} */ + @Override public OutputStream create(IgfsPath path, boolean overwrite) throws IgniteException { + return igfs.create(path, overwrite); + } + + /** {@inheritDoc} */ + @Override public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication, + long blockSize, @Nullable Map props) throws IgniteException { + return igfs.create(path, bufSize, overwrite, replication, blockSize, props); + } + + /** {@inheritDoc} */ + @Override public OutputStream append(IgfsPath path, int bufSize, boolean create, + @Nullable Map props) throws IgniteException { + return igfs.append(path, bufSize, create, props); + } + + /** {@inheritDoc} */ + @Override public IgfsFile info(IgfsPath path) throws IgniteException { + return igfs.info(path); + } + + /** {@inheritDoc} */ + @Override public long usedSpaceSize() throws IgniteException { + return igfs.usedSpaceSize(); + } + + /** {@inheritDoc} */ + @Override public Map properties() { + return Collections.emptyMap(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryInputStreamDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryInputStreamDescriptor.java index 6e48103..e9ba6f4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryInputStreamDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsSecondaryInputStreamDescriptor.java @@ -17,7 +17,7 @@ package org.apache.ignite.internal.processors.igfs; -import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; /** * Descriptor of an input stream opened to the secondary file system. @@ -27,7 +27,7 @@ public class IgfsSecondaryInputStreamDescriptor { private final IgfsFileInfo info; /** Secondary file system input stream wrapper. */ - private final IgfsReader secReader; + private final IgfsSecondaryFileSystemPositionedReadable secReader; /** * Constructor. @@ -35,7 +35,7 @@ public class IgfsSecondaryInputStreamDescriptor { * @param info File info in the primary file system. * @param secReader Secondary file system reader. 
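IgfsSecondaryFileSystemImpl.open() above hands back an IgfsSecondaryFileSystemPositionedReadable. A defensive read helper over that interface, assuming its single method is int read(long pos, byte[] buf, int off, int len) throws IOException, consistent with how appendDual() consumes the reader; treat the signature as an assumption:

import java.io.IOException;

import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystemPositionedReadable;

public final class ReadFullySketch {
    /** Fills buf starting at file position pos, looping over short reads. */
    public static void readFully(IgfsSecondaryFileSystemPositionedReadable in, long pos, byte[] buf)
        throws IOException {
        int off = 0;

        while (off < buf.length) {
            int n = in.read(pos + off, buf, off, buf.length - off);

            if (n < 0)
                throw new IOException("Unexpected end of data at position: " + (pos + off));

            off += n;
        }
    }
}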
*/ - IgfsSecondaryInputStreamDescriptor(IgfsFileInfo info, IgfsReader secReader) { + IgfsSecondaryInputStreamDescriptor(IgfsFileInfo info, IgfsSecondaryFileSystemPositionedReadable secReader) { assert info != null; assert secReader != null; @@ -53,7 +53,7 @@ public class IgfsSecondaryInputStreamDescriptor { /** * @return Secondary file system reader. */ - IgfsReader reader() { + IgfsSecondaryFileSystemPositionedReadable reader() { return secReader; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsServerManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsServerManager.java index e1f4a0f..643eeff 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsServerManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsServerManager.java @@ -29,7 +29,7 @@ import org.apache.ignite.thread.*; import java.util.*; import java.util.concurrent.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; /** * IGFS server manager. @@ -49,7 +49,7 @@ public class IgfsServerManager extends IgfsManager { /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = igfsCtx.configuration(); + FileSystemConfiguration igfsCfg = igfsCtx.configuration(); Map cfg = igfsCfg.getIpcEndpointConfiguration(); if (F.isEmpty(cfg)) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfs.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfs.java index 2258d4d..edcde6a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfs.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfs.java @@ -24,7 +24,7 @@ import org.apache.ignite.internal.util.typedef.internal.*; import java.io.*; /** - * Data transfer object for {@link org.apache.ignite.IgniteFs}. + * Data transfer object for {@link org.apache.ignite.IgniteFileSystem}. */ public class VisorIgfs implements Serializable { /** */ @@ -66,7 +66,7 @@ public class VisorIgfs implements Serializable { * @param igfs Source IGFS. * @return Data transfer object for given IGFS. 
*/ - public static VisorIgfs from(IgniteFs igfs) { + public static VisorIgfs from(IgniteFileSystem igfs) { assert igfs != null; return new VisorIgfs( diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfsProfilerClearTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfsProfilerClearTask.java index f63e669..814fc50 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfsProfilerClearTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/igfs/VisorIgfsProfilerClearTask.java @@ -59,7 +59,7 @@ public class VisorIgfsProfilerClearTask extends VisorOneNodeTask(getenv()); sysProps = getProperties(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java index 78943a2..9064ea6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorIgfsConfiguration.java @@ -19,6 +19,7 @@ package org.apache.ignite.internal.visor.node; import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.util.typedef.internal.*; import org.jetbrains.annotations.*; import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; @@ -120,7 +121,7 @@ public class VisorIgfsConfiguration implements Serializable { * @param igfs IGFS configuration. * @return Data transfer object for IGFS configuration properties. */ - public static VisorIgfsConfiguration from(IgfsConfiguration igfs) { + public static VisorIgfsConfiguration from(FileSystemConfiguration igfs) { VisorIgfsConfiguration cfg = new VisorIgfsConfiguration(); cfg.name = igfs.getName(); @@ -132,7 +133,7 @@ public class VisorIgfsConfiguration implements Serializable { cfg.perNodeBatchSize = igfs.getPerNodeBatchSize(); cfg.perNodeParallelBatchCnt = igfs.getPerNodeParallelBatchCount(); - Igfs secFs = igfs.getSecondaryFileSystem(); + IgfsSecondaryFileSystem secFs = igfs.getSecondaryFileSystem(); if (secFs != null) { Map props = secFs.properties(); @@ -171,13 +172,13 @@ public class VisorIgfsConfiguration implements Serializable { * @param igfss Igfs configurations. * @return igfs configurations properties. 
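VisorIgfs.from() and the profiler tasks above build monitoring DTOs from the IGFS instances deployed on a node. A sketch of that enumeration from user code; the plural fileSystems() accessor on Ignite is an assumption, since only the singular fileSystem(name) appears in this patch:

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;

public final class ListIgfsSketch {
    public static void printFileSystems(Ignite ignite) {
        Collection<IgniteFileSystem> fss = ignite.fileSystems();

        for (IgniteFileSystem fs : fss)
            System.out.println("IGFS deployed on node: " + fs.name());
    }
}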
*/ - public static Iterable list(IgfsConfiguration[] igfss) { + public static Iterable list(FileSystemConfiguration[] igfss) { if (igfss == null) return Collections.emptyList(); final Collection cfgs = new ArrayList<>(igfss.length); - for (IgfsConfiguration igfs : igfss) + for (FileSystemConfiguration igfs : igfss) cfgs.add(from(igfs)); return cfgs; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java index 43a987c..b86ea5a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java @@ -151,7 +151,7 @@ public class VisorNodeDataCollectorJob extends VisorJob fs.igfs.impl - org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem fs.AbstractFileSystem.igfs.impl - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem diff --git a/modules/core/src/test/config/hadoop/core-site-loopback.xml b/modules/core/src/test/config/hadoop/core-site-loopback.xml index 63902fa..e1d1320 100644 --- a/modules/core/src/test/config/hadoop/core-site-loopback.xml +++ b/modules/core/src/test/config/hadoop/core-site-loopback.xml @@ -29,12 +29,12 @@ fs.igfs.impl - org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem fs.AbstractFileSystem.igfs.impl - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem diff --git a/modules/core/src/test/config/hadoop/core-site-secondary.xml b/modules/core/src/test/config/hadoop/core-site-secondary.xml index 648df36..fa301be 100644 --- a/modules/core/src/test/config/hadoop/core-site-secondary.xml +++ b/modules/core/src/test/config/hadoop/core-site-secondary.xml @@ -29,12 +29,12 @@ fs.igfs.impl - org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem fs.AbstractFileSystem.igfs.impl - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem diff --git a/modules/core/src/test/config/hadoop/core-site.xml b/modules/core/src/test/config/hadoop/core-site.xml index eb16ce7..0a9eee7 100644 --- a/modules/core/src/test/config/hadoop/core-site.xml +++ b/modules/core/src/test/config/hadoop/core-site.xml @@ -29,11 +29,11 @@ fs.igfs.impl - org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem fs.AbstractFileSystem.igfs.impl - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem diff --git a/modules/core/src/test/config/igfs-loopback.xml b/modules/core/src/test/config/igfs-loopback.xml index 81ce501..46377d9 100644 --- a/modules/core/src/test/config/igfs-loopback.xml +++ b/modules/core/src/test/config/igfs-loopback.xml @@ -78,9 +78,9 @@ - + - + diff --git a/modules/core/src/test/config/igfs-shmem.xml b/modules/core/src/test/config/igfs-shmem.xml index d30051b..c08f78d 100644 --- a/modules/core/src/test/config/igfs-shmem.xml +++ b/modules/core/src/test/config/igfs-shmem.xml @@ -78,9 +78,9 @@ - + - + diff --git a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsEventsAbstractSelfTest.java 
b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsEventsAbstractSelfTest.java index 364eaa1..2d9d269 100644 --- a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsEventsAbstractSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsEventsAbstractSelfTest.java @@ -40,7 +40,7 @@ import static org.apache.ignite.events.EventType.*; import static org.apache.ignite.testframework.GridTestUtils.*; /** - * Tests events, generated by {@link org.apache.ignite.IgniteFs} implementation. + * Tests events, generated by {@link org.apache.ignite.IgniteFileSystem} implementation. */ public abstract class IgfsEventsAbstractSelfTest extends GridCommonAbstractTest { /** IGFS. */ @@ -84,8 +84,8 @@ public abstract class IgfsEventsAbstractSelfTest extends GridCommonAbstractTest /** * @return IGFS configuration for this test. */ - protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -109,7 +109,7 @@ public abstract class IgfsEventsAbstractSelfTest extends GridCommonAbstractTest * @return Grid configuration. * @throws Exception If failed. */ - protected IgniteConfiguration getConfiguration(String gridName, IgfsConfiguration igfsCfg) throws Exception { + protected IgniteConfiguration getConfiguration(String gridName, FileSystemConfiguration igfsCfg) throws Exception { IgniteConfiguration cfg = IgnitionEx.loadConfiguration("config/hadoop/default-config.xml").get1(); assert cfg != null; @@ -118,7 +118,7 @@ public abstract class IgfsEventsAbstractSelfTest extends GridCommonAbstractTest cfg.setIncludeEventTypes(concat(EVTS_IGFS, EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED)); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setCacheConfiguration(getCacheConfiguration(gridName)); diff --git a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerAbstractSelfTest.java index 6ae9c86..3fa0804 100644 --- a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerAbstractSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerAbstractSelfTest.java @@ -63,7 +63,7 @@ public class IgfsFragmentizerAbstractSelfTest extends IgfsCommonAbstractTest { cfg.setCacheConfiguration(metaConfiguration(), dataConfiguration()); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setName("igfs"); igfsCfg.setMetaCacheName(META_CACHE_NAME); @@ -76,7 +76,7 @@ public class IgfsFragmentizerAbstractSelfTest extends IgfsCommonAbstractTest { igfsCfg.setFragmentizerThrottlingBlockLength(16 * IGFS_BLOCK_SIZE); igfsCfg.setFragmentizerThrottlingDelay(10); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerSelfTest.java index 99a40c4..b51db0c 100644 --- a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerSelfTest.java @@ -36,7 +36,7 @@ public class 
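The events test above enables EVTS_IGFS via setIncludeEventTypes(). A listener sketch for the same event family; localListen and IgfsEvent are the standard Ignite eventing types, used here as I understand their contract:

import org.apache.ignite.Ignite;
import org.apache.ignite.events.EventType;
import org.apache.ignite.events.IgfsEvent;
import org.apache.ignite.lang.IgnitePredicate;

public final class IgfsEventListenerSketch {
    public static void listen(Ignite ignite) {
        // Requires cfg.setIncludeEventTypes(EventType.EVTS_IGFS) at startup,
        // as in the test configuration above.
        ignite.events().localListen(new IgnitePredicate<IgfsEvent>() {
            @Override public boolean apply(IgfsEvent evt) {
                System.out.println("IGFS event: " + evt.name() + " -> " + evt.path());

                return true; // keep the listener subscribed
            }
        }, EventType.EVTS_IGFS);
    }
}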
IgfsFragmentizerSelfTest extends IgfsFragmentizerAbstractSelfTest { * @throws Exception If failed. */ public void testReadFragmentizing() throws Exception { - IgniteFs igfs = grid(0).fileSystem("igfs"); + IgniteFileSystem igfs = grid(0).fileSystem("igfs"); IgfsPath path = new IgfsPath("/someFile"); @@ -108,7 +108,7 @@ public class IgfsFragmentizerSelfTest extends IgfsFragmentizerAbstractSelfTest { int fileSize = 30 * IGFS_GROUP_SIZE * IGFS_BLOCK_SIZE; while (written < fileSize) { - IgniteFs igfs = grid(igfsIdx).fileSystem("igfs"); + IgniteFileSystem igfs = grid(igfsIdx).fileSystem("igfs"); try (IgfsOutputStream out = igfs.append(path, true)) { byte[] data = new byte[chunkSize]; @@ -131,7 +131,7 @@ public class IgfsFragmentizerSelfTest extends IgfsFragmentizerAbstractSelfTest { } } - IgniteFs igfs = grid(0).fileSystem("igfs"); + IgniteFileSystem igfs = grid(0).fileSystem("igfs"); try (IgfsInputStream in = igfs.open(path)) { i = 0; @@ -180,7 +180,7 @@ public class IgfsFragmentizerSelfTest extends IgfsFragmentizerAbstractSelfTest { int fileSize = 50 * IGFS_GROUP_SIZE * IGFS_BLOCK_SIZE; - IgniteFs igfs = grid(0).fileSystem("igfs"); + IgniteFileSystem igfs = grid(0).fileSystem("igfs"); byte[] chunk = new byte[chunkSize]; diff --git a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerTopologySelfTest.java b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerTopologySelfTest.java index 59faa42..cb7b63a 100644 --- a/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerTopologySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/igfs/IgfsFragmentizerTopologySelfTest.java @@ -33,7 +33,7 @@ public class IgfsFragmentizerTopologySelfTest extends IgfsFragmentizerAbstractSe try { IgfsPath path = new IgfsPath("/someFile"); - IgniteFs igfs = grid(1).fileSystem("igfs"); + IgniteFileSystem igfs = grid(1).fileSystem("igfs"); try (IgfsOutputStream out = igfs.create(path, true)) { for (int i = 0; i < 10 * IGFS_GROUP_SIZE; i++) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java index a7e6780..9a99611 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsAbstractSelfTest.java @@ -21,6 +21,7 @@ import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.lang.*; import org.apache.ignite.internal.util.typedef.*; @@ -37,11 +38,11 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.IgniteFs.*; import static org.apache.ignite.cache.CacheAtomicityMode.*; import static org.apache.ignite.cache.CacheMemoryMode.*; import static org.apache.ignite.cache.CacheMode.*; import static org.apache.ignite.igfs.IgfsMode.*; +import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; /** * Test fo regular igfs operations. 
@@ -154,7 +155,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { igfsSecondary = (IgfsImpl) igniteSecondary.fileSystem("igfs-secondary"); - Ignite ignite = startGridWithIgfs("ignite", "igfs", mode, igfsSecondary, PRIMARY_REST_CFG); + Ignite ignite = startGridWithIgfs("ignite", "igfs", mode, igfsSecondary.asSecondary(), PRIMARY_REST_CFG); igfs = (IgfsImpl) ignite.fileSystem("igfs"); } @@ -181,8 +182,8 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode, - @Nullable Igfs secondaryFs, @Nullable Map restCfg) throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable Map restCfg) throws Exception { + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -225,7 +226,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -262,7 +263,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ public void testExists() throws Exception { - create(igfs, paths(DIR), null); + create(igfs.asSecondary(), paths(DIR), null); checkExist(igfs, igfsSecondary, DIR); } @@ -373,7 +374,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { IgfsPath file1 = new IgfsPath("/file1"); IgfsPath file2 = new IgfsPath("/file2"); - create(igfs, null, paths(file1)); + create(igfs.asSecondary(), null, paths(file1)); igfs.rename(file1, file2); @@ -405,7 +406,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { IgfsPath dir1 = new IgfsPath("/dir1"); IgfsPath dir2 = new IgfsPath("/dir2"); - create(igfs, paths(dir1), null); + create(igfs.asSecondary(), paths(dir1), null); igfs.rename(dir1, dir2); @@ -434,7 +435,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ public void testMoveFileDestinationRoot() throws Exception { - create(igfs, paths(DIR, SUBDIR), paths(FILE)); + create(igfs.asSecondary(), paths(DIR, SUBDIR), paths(FILE)); igfs.rename(FILE, new IgfsPath()); @@ -526,7 +527,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. 
*/ public void testMoveDirectoryDestinationRoot() throws Exception { - create(igfs, paths(DIR, SUBDIR, SUBSUBDIR), null); + create(igfs.asSecondary(), paths(DIR, SUBDIR, SUBSUBDIR), null); igfs.rename(SUBSUBDIR, new IgfsPath()); @@ -542,7 +543,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testMoveDirectorySourceParentRoot() throws Exception { IgfsPath dir = new IgfsPath("/" + SUBSUBDIR.name()); - create(igfs, paths(DIR_NEW, SUBDIR_NEW, dir), null); + create(igfs.asSecondary(), paths(DIR_NEW, SUBDIR_NEW, dir), null); igfs.rename(dir, SUBDIR_NEW); @@ -851,7 +852,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testOpen() throws Exception { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, true, chunk); + createFile(igfs.asSecondary(), FILE, true, chunk); checkFileContent(igfs, FILE, chunk); } @@ -886,9 +887,9 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ public void testCreate() throws Exception { - create(igfs, paths(DIR, SUBDIR), null); + create(igfs.asSecondary(), paths(DIR, SUBDIR), null); - createFile(igfs, FILE, true, chunk); + createFile(igfs.asSecondary(), FILE, true, chunk); checkFile(igfs, igfsSecondary, FILE, chunk); } @@ -901,7 +902,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testCreateParentRoot() throws Exception { IgfsPath file = new IgfsPath("/" + FILE.name()); - createFile(igfs, file, true, chunk); + createFile(igfs.asSecondary(), file, true, chunk); checkFile(igfs, igfsSecondary, file, chunk); } @@ -1092,7 +1093,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { assert igfs.exists(path); } - awaitFileClose(igfs, path); + awaitFileClose(igfs.asSecondary(), path); checkFileContent(igfs, path, chunk); } @@ -1166,7 +1167,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { fut.get(); - awaitFileClose(igfs, FILE); + awaitFileClose(igfs.asSecondary(), FILE); if (err.get() != null) throw err.get(); @@ -1212,7 +1213,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testAppendNoClose() throws Exception { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); GridTestUtils.assertThrowsInherited(log(), new Callable() { @Override public Object call() throws Exception { @@ -1241,7 +1242,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testAppendRenameNoClose() throws Exception { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); IgfsOutputStream os = null; @@ -1263,9 +1264,9 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. 
*/ public void testAppendRenameParentNoClose() throws Exception { - create(igfs, paths(DIR, SUBDIR), null); + create(igfs.asSecondary(), paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); IgfsOutputStream os = null; @@ -1289,7 +1290,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testAppendDeleteNoClose() throws Exception { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { @@ -1321,7 +1322,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { public void testAppendDeleteParentNoClose() throws Exception { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { @@ -1360,7 +1361,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { create(igfs, paths(DIR, SUBDIR), null); - createFile(igfs, FILE, false); + createFile(igfs.asSecondary(), FILE, false); IgfsOutputStream os = null; @@ -1388,7 +1389,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { int threadCnt = 10; for (int i = 0; i < threadCnt; i++) - createFile(igfs, new IgfsPath("/file" + i), false); + createFile(igfs.asSecondary(), new IgfsPath("/file" + i), false); multithreaded(new Runnable() { @Override public void run() { @@ -1411,7 +1412,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { assert igfs.exists(path); } - awaitFileClose(igfs, path); + awaitFileClose(igfs.asSecondary(), path); checkFileContent(igfs, path, chunks); } @@ -1483,7 +1484,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { fut.get(); - awaitFileClose(igfs, FILE); + awaitFileClose(igfs.asSecondary(), FILE); if (err.get() != null) throw err.get(); @@ -2137,7 +2138,19 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param files Files. * @throws Exception If failed. */ - public static void create(Igfs igfs, @Nullable IgfsPath[] dirs, @Nullable IgfsPath[] files) + public static void create(IgfsImpl igfs, @Nullable IgfsPath[] dirs, @Nullable IgfsPath[] files) throws Exception { + create(igfs.asSecondary(), dirs, files); + } + + /** + * Create the given directories and files in the given IGFS. + * + * @param igfs IGFS. + * @param dirs Directories. + * @param files Files. + * @throws Exception If failed. + */ + public static void create(IgfsSecondaryFileSystem igfs, @Nullable IgfsPath[] dirs, @Nullable IgfsPath[] files) throws Exception { if (dirs != null) { for (IgfsPath dir : dirs) @@ -2162,7 +2175,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param chunks Data chunks. * @throws IOException In case of IO exception. */ - protected static void createFile(Igfs igfs, IgfsPath file, boolean overwrite, @Nullable byte[]... chunks) + protected static void createFile(IgfsSecondaryFileSystem igfs, IgfsPath file, boolean overwrite, @Nullable byte[]... chunks) throws IOException { OutputStream os = null; @@ -2188,7 +2201,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param chunks Data chunks. * @throws Exception If failed. 
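The test helpers reworked here were retyped from Igfs/IgniteFs to IgfsSecondaryFileSystem, so one code path now serves both a real secondary file system and a primary IgfsImpl wrapped via asSecondary(). The essence of that reworked helper, condensed:

import java.io.IOException;
import java.io.OutputStream;

import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem;

public final class CreateFileSketch {
    /** Writes the given chunks to a file on any IgfsSecondaryFileSystem. */
    public static void createFile(IgfsSecondaryFileSystem fs, IgfsPath file, boolean overwrite, byte[]... chunks)
        throws IOException {
        // create(path, overwrite) is the plain-OutputStream entry point of the
        // secondary file system interface introduced by this patch.
        try (OutputStream os = fs.create(file, overwrite)) {
            for (byte[] chunk : chunks)
                os.write(chunk);
        }
    }
}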
*/ - protected void createFile(IgniteFs igfs, IgfsPath file, boolean overwrite, long blockSize, + protected void createFile(IgfsImpl igfs, IgfsPath file, boolean overwrite, long blockSize, @Nullable byte[]... chunks) throws Exception { IgfsOutputStream os = null; @@ -2200,7 +2213,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { finally { U.closeQuiet(os); - awaitFileClose(igfs, file); + awaitFileClose(igfs.asSecondary(), file); } } @@ -2212,7 +2225,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param chunks Data chunks. * @throws Exception If failed. */ - protected void appendFile(IgniteFs igfs, IgfsPath file, @Nullable byte[]... chunks) + protected void appendFile(IgfsImpl igfs, IgfsPath file, @Nullable byte[]... chunks) throws Exception { IgfsOutputStream os = null; @@ -2224,7 +2237,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { finally { U.closeQuiet(os); - awaitFileClose(igfs, file); + awaitFileClose(igfs.asSecondary(), file); } } @@ -2248,7 +2261,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param igfs IGFS. * @param file File. */ - public static void awaitFileClose(Igfs igfs, IgfsPath file) { + public static void awaitFileClose(IgfsSecondaryFileSystem igfs, IgfsPath file) { try { igfs.update(file, Collections.singletonMap("prop", "val")); } @@ -2418,7 +2431,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param igfsSecondary Second IGFS. * @throws Exception If failed. */ - protected void clear(IgniteFs igfs, IgniteFs igfsSecondary) throws Exception { + protected void clear(IgniteFileSystem igfs, IgniteFileSystem igfsSecondary) throws Exception { clear(igfs); if (dual) @@ -2431,7 +2444,7 @@ public abstract class IgfsAbstractSelfTest extends IgfsCommonAbstractTest { * @param igfs IGFS. * @throws Exception If failed. */ - public static void clear(IgniteFs igfs) throws Exception { + public static void clear(IgniteFileSystem igfs) throws Exception { Field workerMapFld = IgfsImpl.class.getDeclaredField("workerMap"); workerMapFld.setAccessible(true); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/GridCacheIgfsPerBlockLruEvictionPolicySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCachePerBlockLruEvictionPolicySelfTest.java similarity index 96% rename from modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/GridCacheIgfsPerBlockLruEvictionPolicySelfTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCachePerBlockLruEvictionPolicySelfTest.java index f0e2c70..fd590f5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/GridCacheIgfsPerBlockLruEvictionPolicySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCachePerBlockLruEvictionPolicySelfTest.java @@ -42,7 +42,7 @@ import static org.apache.ignite.igfs.IgfsMode.*; * Tests for IGFS per-block LR eviction policy. */ @SuppressWarnings({"ConstantConditions", "ThrowableResultOfMethodCallIgnored"}) -public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAbstractTest { +public class IgfsCachePerBlockLruEvictionPolicySelfTest extends IgfsCommonAbstractTest { /** Primary IGFS name. 
*/ private static final String IGFS_PRIMARY = "igfs-primary"; @@ -65,7 +65,7 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb private static IgfsImpl igfsPrimary; /** Secondary IGFS instance. */ - private static IgniteFs secondaryFs; + private static IgfsImpl secondaryFs; /** Primary file system data cache. */ private static GridCacheAdapter dataCache; @@ -79,7 +79,7 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb * @throws Exception If failed. */ private void startPrimary() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -88,7 +88,7 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb igfsCfg.setDefaultMode(PRIMARY); igfsCfg.setPrefetchBlocks(1); igfsCfg.setSequentialReadsBeforePrefetch(Integer.MAX_VALUE); - igfsCfg.setSecondaryFileSystem(secondaryFs); + igfsCfg.setSecondaryFileSystem(secondaryFs.asSecondary()); Map pathModes = new HashMap<>(); @@ -130,7 +130,7 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -149,7 +149,7 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb * @throws Exception If failed. */ private void startSecondary() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -188,14 +188,14 @@ public class GridCacheIgfsPerBlockLruEvictionPolicySelfTest extends IgfsCommonAb cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); Ignite g = G.start(cfg); - secondaryFs = g.fileSystem(IGFS_SECONDARY); + secondaryFs = (IgfsImpl)g.fileSystem(IGFS_SECONDARY); } /** {@inheritDoc} */ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCacheSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCacheSelfTest.java index bde395b..255e319 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCacheSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsCacheSelfTest.java @@ -57,13 +57,13 @@ public class IgfsCacheSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setMetaCacheName(META_CACHE_NAME); igfsCfg.setDataCacheName(DATA_CACHE_NAME); igfsCfg.setName("igfs"); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDataManagerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDataManagerSelfTest.java index 2e48648..297cc7b 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDataManagerSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDataManagerSelfTest.java @@ -24,7 +24,6 @@ import org.apache.ignite.igfs.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.cache.*; import org.apache.ignite.internal.processors.cache.transactions.*; -import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; import org.apache.ignite.lang.*; @@ -96,7 +95,7 @@ public class IgfsDataManagerSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setMetaCacheName(META_CACHE_NAME); igfsCfg.setDataCacheName(DATA_CACHE_NAME); @@ -104,7 +103,7 @@ public class IgfsDataManagerSelfTest extends IgfsCommonAbstractTest { igfsCfg.setName("igfs"); igfsCfg.setBlockSize(BLOCK_SIZE); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDualAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDualAbstractSelfTest.java index 327d7fa..f44a988 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDualAbstractSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsDualAbstractSelfTest.java @@ -28,8 +28,8 @@ import java.io.*; import java.util.*; import java.util.concurrent.*; -import static org.apache.ignite.IgniteFs.*; import static org.apache.ignite.igfs.IgfsMode.*; +import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; /** * Tests for IGFS working in mode when remote file system exists: DUAL_SYNC, DUAL_ASYNC. @@ -1120,7 +1120,7 @@ public abstract class IgfsDualAbstractSelfTest extends IgfsAbstractSelfTest { create(igfsSecondary, paths(DIR, SUBDIR), null); create(igfs, null, null); - createFile(igfsSecondary, FILE, true, chunk); + createFile(igfsSecondary.asSecondary(), FILE, true, chunk); checkFileContent(igfs, FILE, chunk); } @@ -1148,7 +1148,7 @@ public abstract class IgfsDualAbstractSelfTest extends IgfsAbstractSelfTest { out.close(); - awaitFileClose(igfsSecondary, FILE); + awaitFileClose(igfsSecondary.asSecondary(), FILE); // Read the first block. int totalRead = 0; @@ -1227,7 +1227,7 @@ public abstract class IgfsDualAbstractSelfTest extends IgfsAbstractSelfTest { out.close(); - awaitFileClose(igfsSecondary, FILE); + awaitFileClose(igfsSecondary.asSecondary(), FILE); // Read the first two blocks. int totalRead = 0; @@ -1292,7 +1292,7 @@ public abstract class IgfsDualAbstractSelfTest extends IgfsAbstractSelfTest { igfsSecondary.update(SUBDIR, props); - createFile(igfs, FILE, true, chunk); + createFile(igfs.asSecondary(), FILE, true, chunk); // Ensure that directory structure was created. 
checkExist(igfs, igfsSecondary, SUBDIR); @@ -1317,7 +1317,7 @@ public abstract class IgfsDualAbstractSelfTest extends IgfsAbstractSelfTest { igfsSecondary.update(DIR, propsDir); igfsSecondary.update(SUBDIR, propsSubDir); - createFile(igfs, FILE, true, chunk); + createFile(igfs.asSecondary(), FILE, true, chunk); checkExist(igfs, igfsSecondary, SUBDIR); checkFile(igfs, igfsSecondary, FILE, chunk); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManagerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManagerSelfTest.java index 1a52b7c..11d6cc5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManagerSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetaManagerSelfTest.java @@ -75,13 +75,13 @@ public class IgfsMetaManagerSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setMetaCacheName(META_CACHE_NAME); igfsCfg.setDataCacheName(DATA_CACHE_NAME); igfsCfg.setName("igfs"); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetricsSelfTest.java index be5afe7..0af1dea 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsMetricsSelfTest.java @@ -56,10 +56,10 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); /** Primary IGFS instances. */ - private static IgniteFs[] igfsPrimary; + private static IgniteFileSystem[] igfsPrimary; /** Secondary IGFS instance. */ - private static IgniteFs igfsSecondary; + private static IgfsImpl igfsSecondary; /** Primary file system block size. */ public static final int PRIMARY_BLOCK_SIZE = 512; @@ -84,7 +84,7 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ private void startPrimary() throws Exception { - igfsPrimary = new IgniteFs[NODES_CNT]; + igfsPrimary = new IgniteFileSystem[NODES_CNT]; for (int i = 0; i < NODES_CNT; i++) { Ignite g = G.start(primaryConfiguration(i)); @@ -101,14 +101,14 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. 
*/ private IgniteConfiguration primaryConfiguration(int idx) throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); igfsCfg.setName(IGFS_PRIMARY); igfsCfg.setBlockSize(PRIMARY_BLOCK_SIZE); igfsCfg.setDefaultMode(PRIMARY); - igfsCfg.setSecondaryFileSystem(igfsSecondary); + igfsCfg.setSecondaryFileSystem(igfsSecondary.asSecondary()); Map pathModes = new HashMap<>(); @@ -146,7 +146,7 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); @@ -159,7 +159,7 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ private void startSecondary() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -198,18 +198,18 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); Ignite g = G.start(cfg); - igfsSecondary = g.fileSystem(IGFS_SECONDARY); + igfsSecondary = (IgfsImpl)g.fileSystem(IGFS_SECONDARY); } /** @throws Exception If failed. */ public void testMetrics() throws Exception { - IgniteFs fs = igfsPrimary[0]; + IgniteFileSystem fs = igfsPrimary[0]; assertNotNull(fs); @@ -349,7 +349,7 @@ public class IgfsMetricsSelfTest extends IgfsCommonAbstractTest { /** @throws Exception If failed. 
*/ public void testMultipleClose() throws Exception { - IgniteFs fs = igfsPrimary[0]; + IgniteFileSystem fs = igfsPrimary[0]; IgfsOutputStream out = fs.create(new IgfsPath("/file"), false); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsModesSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsModesSelfTest.java index 2b501f7..4a58285 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsModesSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsModesSelfTest.java @@ -82,7 +82,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { private void startUp() throws Exception { startUpSecondary(); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -97,7 +97,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { igfsCfg.setPathModes(pathModes); if (setSecondaryFs) - igfsCfg.setSecondaryFileSystem(igfsSecondary); + igfsCfg.setSecondaryFileSystem(igfsSecondary.asSecondary()); CacheConfiguration cacheCfg = defaultCacheConfiguration(); @@ -128,7 +128,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -144,7 +144,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { * @throws Exception If failed. */ private void startUpSecondary() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -185,7 +185,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -378,7 +378,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { } assertTrue(errMsg.startsWith( - "Grid configuration parameter invalid: secondaryFileSystem cannot be null when mode is SECONDARY")); + "Grid configuration parameter invalid: secondaryFileSystem cannot be null when mode is not PRIMARY")); } /** @@ -444,7 +444,7 @@ public class IgfsModesSelfTest extends IgfsCommonAbstractTest { } assertTrue(errMsg.startsWith( - "Grid configuration parameter invalid: secondaryFileSystem cannot be null when mode is SECONDARY")); + "Grid configuration parameter invalid: secondaryFileSystem cannot be null when mode is not PRIMARY")); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorSelfTest.java index ead7511..0e03aa8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorSelfTest.java @@ -59,7 +59,7 @@ public class IgfsProcessorSelfTest extends IgfsCommonAbstractTest { protected final SecureRandom rnd = new SecureRandom(); /** File system. 
*/ - protected IgniteFs igfs; + protected IgniteFileSystem igfs; /** Meta cache. */ private GridCache metaCache; @@ -73,7 +73,7 @@ public class IgfsProcessorSelfTest extends IgfsCommonAbstractTest { igfs = grid.fileSystem(igfsName()); - IgfsConfiguration[] cfgs = grid.configuration().getIgfsConfiguration(); + FileSystemConfiguration[] cfgs = grid.configuration().getFileSystemConfiguration(); assert cfgs.length == 1; @@ -111,13 +111,13 @@ public class IgfsProcessorSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setMetaCacheName(META_CACHE_NAME); igfsCfg.setDataCacheName(DATA_CACHE_NAME); igfsCfg.setName("igfs"); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } @@ -157,7 +157,7 @@ public class IgfsProcessorSelfTest extends IgfsCommonAbstractTest { /** @throws Exception If failed. */ public void testigfsEnabled() throws Exception { - IgniteFs igfs = grid(0).fileSystem(igfsName()); + IgniteFileSystem igfs = grid(0).fileSystem(igfsName()); assertNotNull(igfs); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorValidationSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorValidationSelfTest.java index 6f444c0..dcd0413 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorValidationSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsProcessorValidationSelfTest.java @@ -37,7 +37,7 @@ import static org.apache.ignite.igfs.IgfsMode.*; * Tests for node validation logic in {@link IgfsProcessor}. *
<p>
* Tests starting with "testLocal" are checking - * {@link IgfsProcessor#validateLocalIgfsConfigurations(org.apache.ignite.configuration.IgfsConfiguration[])}. + * {@link IgfsProcessor#validateLocalIgfsConfigurations(org.apache.ignite.configuration.FileSystemConfiguration[])}. *
<p>
* Tests starting with "testRemote" are checking {@link IgfsProcessor#checkIgfsOnRemoteNode(org.apache.ignite.cluster.ClusterNode)}. */ @@ -61,10 +61,10 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { private static final String metaCache2Name = "metaCache2"; /** First IGFS config in grid #1. */ - private IgfsConfiguration g1IgfsCfg1 = new IgfsConfiguration(); + private FileSystemConfiguration g1IgfsCfg1 = new FileSystemConfiguration(); /** Second IGFS config in grid#1. */ - private IgfsConfiguration g1IgfsCfg2 = new IgfsConfiguration(); + private FileSystemConfiguration g1IgfsCfg2 = new FileSystemConfiguration(); /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { @@ -89,7 +89,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g1IgfsCfg2.setDataCacheName(dataCache2Name); g1IgfsCfg2.setMetaCacheName(metaCache2Name); - cfg.setIgfsConfiguration(g1IgfsCfg1, g1IgfsCfg2); + cfg.setFileSystemConfiguration(g1IgfsCfg1, g1IgfsCfg2); cfg.setLocalHost("127.0.0.1"); @@ -266,7 +266,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g1IgfsCfg2.setDefaultMode(PROXY); - checkGridStartFails(g1Cfg, "secondaryFileSystem cannot be null when mode is SECONDARY", true); + checkGridStartFails(g1Cfg, "secondaryFileSystem cannot be null when mode is not PRIMARY", true); } /** @@ -278,11 +278,11 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g1Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); g2Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); g2IgfsCfg1.setBlockSize(g2IgfsCfg1.getBlockSize() + 100); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g1IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g1IgfsCfg2); G.start(g1Cfg); @@ -310,8 +310,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfMetaCacheNameDiffers() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g2IgfsCfg1.setMetaCacheName("g2MetaCache1"); g2IgfsCfg2.setMetaCacheName("g2MetaCache2"); @@ -320,7 +320,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g2Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches("g2MetaCache1", "g2MetaCache2"), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); @@ -333,8 +333,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfMetaCacheNameEquals() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g2IgfsCfg1.setName("g2IgfsCfg1"); g2IgfsCfg2.setName("g2IgfsCfg2"); 
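Taken together, the hunks above rename the node-side file system API: IgfsConfiguration becomes FileSystemConfiguration, IgniteConfiguration.setIgfsConfiguration becomes setFileSystemConfiguration, and IgniteFs becomes IgniteFileSystem. For orientation, here is a minimal sketch of a node started against the renamed classes; the cache and file system names are illustrative, and the cache settings mirror what the tests above configure (transactional atomicity, block-group affinity mapper) rather than a documented minimum.

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.Ignition;
    import org.apache.ignite.cache.CacheAtomicityMode;
    import org.apache.ignite.configuration.CacheConfiguration;
    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper;
    import org.apache.ignite.igfs.IgfsPath;

    public class FileSystemConfigurationSketch {
        public static void main(String[] args) throws Exception {
            // Was IgfsConfiguration before this patch.
            FileSystemConfiguration fsCfg = new FileSystemConfiguration();

            fsCfg.setName("igfs");
            fsCfg.setDataCacheName("dataCache");
            fsCfg.setMetaCacheName("metaCache");

            // IGFS caches: both transactional, data cache with the block-group
            // affinity mapper, as in the tests above.
            CacheConfiguration dataCacheCfg = new CacheConfiguration("dataCache");
            dataCacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            dataCacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));

            CacheConfiguration metaCacheCfg = new CacheConfiguration("metaCache");
            metaCacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteConfiguration cfg = new IgniteConfiguration();
            cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg);
            cfg.setFileSystemConfiguration(fsCfg); // was setIgfsConfiguration

            try (Ignite ignite = Ignition.start(cfg)) {
                IgniteFileSystem fs = ignite.fileSystem("igfs"); // was IgniteFs
                fs.mkdirs(new IgfsPath("/sanity"));
            }
        }
    }

Note also the tightened validation message exercised above: a non-PRIMARY default mode (DUAL_SYNC, DUAL_ASYNC or PROXY) now requires a secondary file system, hence "cannot be null when mode is not PRIMARY" rather than the old SECONDARY wording.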
@@ -346,7 +346,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g2Cfg.setCacheConfiguration(concat(dataCaches(1024, "g2DataCache1", "g2DataCache2"), metaCaches(), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); @@ -359,8 +359,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfDataCacheNameDiffers() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g2IgfsCfg1.setDataCacheName("g2DataCache1"); g2IgfsCfg2.setDataCacheName("g2DataCache2"); @@ -369,7 +369,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g2Cfg.setCacheConfiguration(concat(dataCaches(1024, "g2DataCache1", "g2DataCache2"), metaCaches(), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); @@ -382,8 +382,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfDataCacheNameEquals() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g2IgfsCfg1.setName("g2IgfsCfg1"); g2IgfsCfg2.setName("g2IgfsCfg2"); @@ -395,7 +395,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g2Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches("g2MetaCache1", "g2MetaCache2"), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); @@ -408,8 +408,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfDefaultModeDiffers() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g1IgfsCfg1.setDefaultMode(DUAL_ASYNC); g1IgfsCfg2.setDefaultMode(DUAL_ASYNC); @@ -420,7 +420,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g1Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); g2Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); @@ -433,8 +433,8 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { public void testRemoteIfPathModeDiffers() throws Exception { IgniteConfiguration g2Cfg = getConfiguration("g2"); - IgfsConfiguration g2IgfsCfg1 = new IgfsConfiguration(g1IgfsCfg1); - IgfsConfiguration 
g2IgfsCfg2 = new IgfsConfiguration(g1IgfsCfg2); + FileSystemConfiguration g2IgfsCfg1 = new FileSystemConfiguration(g1IgfsCfg1); + FileSystemConfiguration g2IgfsCfg2 = new FileSystemConfiguration(g1IgfsCfg2); g2IgfsCfg1.setPathModes(Collections.singletonMap("/somePath", DUAL_SYNC)); g2IgfsCfg2.setPathModes(Collections.singletonMap("/somePath", DUAL_SYNC)); @@ -442,7 +442,7 @@ public class IgfsProcessorValidationSelfTest extends IgfsCommonAbstractTest { g1Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); g2Cfg.setCacheConfiguration(concat(dataCaches(1024), metaCaches(), CacheConfiguration.class)); - g2Cfg.setIgfsConfiguration(g2IgfsCfg1, g2IgfsCfg2); + g2Cfg.setFileSystemConfiguration(g2IgfsCfg1, g2IgfsCfg2); G.start(g1Cfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest.java index ee8e7bd..694d5c3 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest.java @@ -36,7 +36,7 @@ import java.util.*; import java.util.concurrent.atomic.*; import static org.apache.ignite.cache.CacheAtomicityMode.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; /** * Base test class for {@link IgfsServer} checking IPC endpoint registrations. @@ -58,7 +58,7 @@ public abstract class IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest e public void testLoopbackEndpointsRegistration() throws Exception { IgniteConfiguration cfg = gridConfiguration(); - cfg.setIgfsConfiguration( + cfg.setFileSystemConfiguration( igfsConfiguration("tcp", DFLT_IPC_PORT, null) ); @@ -77,7 +77,7 @@ public abstract class IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest e public void testLoopbackEndpointsCustomHostRegistration() throws Exception { IgniteConfiguration cfg = gridConfiguration(); - cfg.setIgfsConfiguration( + cfg.setFileSystemConfiguration( igfsConfiguration("tcp", DFLT_IPC_PORT, "127.0.0.1"), igfsConfiguration("tcp", DFLT_IPC_PORT + 1, U.getLocalHost().getHostName())); @@ -154,7 +154,7 @@ public abstract class IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest e * @param endPntHost End point host. * @return test-purposed IgfsConfiguration. 
*/ - protected IgfsConfiguration igfsConfiguration(@Nullable String endPntType, @Nullable Integer endPntPort, + protected FileSystemConfiguration igfsConfiguration(@Nullable String endPntType, @Nullable Integer endPntPort, @Nullable String endPntHost) throws IgniteCheckedException { HashMap endPntCfg = null; @@ -170,7 +170,7 @@ public abstract class IgfsServerManagerIpcEndpointRegistrationAbstractSelfTest e endPntCfg.put("host", endPntHost); } - IgfsConfiguration igfsConfiguration = new IgfsConfiguration(); + FileSystemConfiguration igfsConfiguration = new FileSystemConfiguration(); igfsConfiguration.setDataCacheName("partitioned"); igfsConfiguration.setMetaCacheName("replicated"); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.java index d7319f4..57f10d9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.igfs; import org.apache.ignite.configuration.*; import org.apache.ignite.internal.util.typedef.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; /** * Tests for {@link IgfsServer} that checks all IPC endpoint registration types @@ -34,7 +34,7 @@ public class IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest public void testLoopbackAndShmemEndpointsRegistration() throws Exception { IgniteConfiguration cfg = gridConfiguration(); - cfg.setIgfsConfiguration( + cfg.setFileSystemConfiguration( igfsConfiguration(null, null, null), // Check null IPC endpoint config won't bring any hassles. 
igfsConfiguration("tcp", DFLT_IPC_PORT + 1, null), igfsConfiguration("shmem", DFLT_IPC_PORT + 2, null)); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest.java index 259f31a..4f18aff 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest.java @@ -40,7 +40,7 @@ public class IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest @Override public Object call() throws Exception { IgniteConfiguration cfg = gridConfiguration(); - cfg.setIgfsConfiguration(igfsConfiguration("shmem", IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT, + cfg.setFileSystemConfiguration(igfsConfiguration("shmem", IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT, null)); return G.start(cfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java index b212f02..c62e759 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java @@ -111,7 +111,7 @@ public class IgfsSizeSelfTest extends IgfsCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName(DATA_CACHE_NAME); igfsCfg.setMetaCacheName(META_CACHE_NAME); @@ -154,7 +154,7 @@ public class IgfsSizeSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCfg, dataCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsStreamsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsStreamsSelfTest.java index 054665d..25816c7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsStreamsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsStreamsSelfTest.java @@ -74,7 +74,7 @@ public class IgfsStreamsSelfTest extends IgfsCommonAbstractTest { public static final int ASSERT_RETRY_INTERVAL = 100; /** File system to test. 
*/ - private IgniteFs fs; + private IgniteFileSystem fs; /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { @@ -110,7 +110,7 @@ public class IgfsStreamsSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setMetaCacheName(META_CACHE_NAME); igfsCfg.setDataCacheName(DATA_CACHE_NAME); @@ -118,7 +118,7 @@ public class IgfsStreamsSelfTest extends IgfsCommonAbstractTest { igfsCfg.setBlockSize(CFG_BLOCK_SIZE); igfsCfg.setFragmentizerEnabled(true); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } @@ -229,12 +229,12 @@ public class IgfsStreamsSelfTest extends IgfsCommonAbstractTest { IgfsPath path = new IgfsPath("/file"); try { - IgniteFs fs0 = grid(0).fileSystem("igfs"); - IgniteFs fs1 = grid(1).fileSystem("igfs"); - IgniteFs fs2 = grid(2).fileSystem("igfs"); + IgniteFileSystem fs0 = grid(0).fileSystem("igfs"); + IgniteFileSystem fs1 = grid(1).fileSystem("igfs"); + IgniteFileSystem fs2 = grid(2).fileSystem("igfs"); try (IgfsOutputStream out = fs0.create(path, 128, false, 1, CFG_GRP_SIZE, - F.asMap(IgniteFs.PROP_PREFER_LOCAL_WRITES, "true"))) { + F.asMap(IgfsEx.PROP_PREFER_LOCAL_WRITES, "true"))) { // 1.5 blocks byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2]; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsTaskSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsTaskSelfTest.java index 5b7a636..d1778df 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsTaskSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsTaskSelfTest.java @@ -68,7 +68,7 @@ public class IgfsTaskSelfTest extends IgfsCommonAbstractTest { private static final int REPEAT_CNT = 10; /** IGFS. 
*/ - private static IgniteFs igfs; + private static IgniteFileSystem igfs; /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { @@ -97,7 +97,7 @@ public class IgfsTaskSelfTest extends IgfsCommonAbstractTest { * @return Grid configuration */ private IgniteConfiguration config(int idx) { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -133,7 +133,7 @@ public class IgfsTaskSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setGridName("node-" + idx); @@ -172,7 +172,7 @@ public class IgfsTaskSelfTest extends IgfsCommonAbstractTest { assertFalse(igfs.isAsync()); - IgniteFs igfsAsync = igfs.withAsync(); + IgniteFileSystem igfsAsync = igfs.withAsync(); assertTrue(igfsAsync.isAsync()); @@ -269,7 +269,7 @@ public class IgfsTaskSelfTest extends IgfsCommonAbstractTest { private ComputeJobContext ctx; /** {@inheritDoc} */ - @Override public Object execute(IgniteFs igfs, IgfsFileRange range, IgfsInputStream in) + @Override public Object execute(IgniteFileSystem igfs, IgfsFileRange range, IgfsInputStream in) throws IOException { assert ignite != null; assert ses != null; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/split/IgfsAbstractRecordResolverSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/split/IgfsAbstractRecordResolverSelfTest.java index 05de61a..487d391 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/split/IgfsAbstractRecordResolverSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/split/IgfsAbstractRecordResolverSelfTest.java @@ -46,11 +46,11 @@ public class IgfsAbstractRecordResolverSelfTest extends GridCommonAbstractTest { private final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); /** IGFS. 
*/ - protected static IgniteFs igfs; + protected static IgniteFileSystem igfs; /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -87,7 +87,7 @@ public class IgfsAbstractRecordResolverSelfTest extends GridCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); Ignite g = G.start(cfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNodeStartup.java b/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNodeStartup.java index 400bbb1..8051a3e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNodeStartup.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/util/ipc/shmem/IpcSharedMemoryNodeStartup.java @@ -41,7 +41,7 @@ public class IpcSharedMemoryNodeStartup { public static void main(String[] args) throws Exception{ IgniteConfiguration cfg = new IgniteConfiguration(); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); TcpDiscoverySpi discoSpi = new TcpDiscoverySpi(); @@ -60,7 +60,7 @@ public class IpcSharedMemoryNodeStartup { igfsCfg.setMetaCacheName("partitioned"); igfsCfg.setName("igfs"); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); CacheConfiguration cacheCfg = new CacheConfiguration(); diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java index 58478d3..220d5d6 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java @@ -176,12 +176,12 @@ public class IgniteMock implements Ignite { } /** {@inheritDoc} */ - @Override public IgniteFs fileSystem(String name) { + @Override public IgniteFileSystem fileSystem(String name) { return null; } /** {@inheritDoc} */ - @Override public Collection fileSystems() { + @Override public Collection fileSystems() { return null; } diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteIgfsTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteIgfsTestSuite.java index 8f06117..cea510a 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteIgfsTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteIgfsTestSuite.java @@ -48,7 +48,7 @@ public class IgniteIgfsTestSuite extends TestSuite { if (U.isWindows()) suite.addTest(new TestSuite(IgfsServerManagerIpcEndpointRegistrationOnWindowsSelfTest.class)); - suite.addTest(new TestSuite(GridCacheIgfsPerBlockLruEvictionPolicySelfTest.class)); + suite.addTest(new TestSuite(IgfsCachePerBlockLruEvictionPolicySelfTest.class)); suite.addTest(new TestSuite(IgfsStreamsSelfTest.class)); suite.addTest(new TestSuite(IgfsModesSelfTest.class)); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopFSCounterWriter.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java similarity index 83% rename 
from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopFSCounterWriter.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java index 55dcc4c..66e9761 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopFSCounterWriter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopFileSystemCounterWriter.java @@ -15,13 +15,15 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.hadoop.counter; +package org.apache.ignite.hadoop.fs; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.*; import org.apache.ignite.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.util.typedef.*; import java.io.*; @@ -30,7 +32,7 @@ import java.util.*; /** * Statistic writer implementation that writes info into any Hadoop file system. */ -public class GridHadoopFSCounterWriter implements GridHadoopCounterWriter { +public class IgniteHadoopFileSystemCounterWriter implements HadoopCounterWriter { /** */ public static final String PERFORMANCE_COUNTER_FILE_NAME = "performance"; @@ -47,12 +49,12 @@ public class GridHadoopFSCounterWriter implements GridHadoopCounterWriter { private static final String DEFAULT_COUNTER_WRITER_DIR = "/user/" + USER_MACRO; /** {@inheritDoc} */ - @Override public void write(GridHadoopJobInfo jobInfo, GridHadoopJobId jobId, GridHadoopCounters cntrs) + @Override public void write(HadoopJobInfo jobInfo, HadoopJobId jobId, HadoopCounters cntrs) throws IgniteCheckedException { Configuration hadoopCfg = new Configuration(); - for (Map.Entry e : ((GridHadoopDefaultJobInfo)jobInfo).properties().entrySet()) + for (Map.Entry e : ((HadoopDefaultJobInfo)jobInfo).properties().entrySet()) hadoopCfg.set(e.getKey(), e.getValue()); String user = jobInfo.user(); @@ -67,7 +69,7 @@ public class GridHadoopFSCounterWriter implements GridHadoopCounterWriter { Path jobStatPath = new Path(new Path(dir.replace(USER_MACRO, user)), jobId.toString()); - GridHadoopPerformanceCounter perfCntr = GridHadoopPerformanceCounter.getCounter(cntrs, null); + HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null); try { FileSystem fs = jobStatPath.getFileSystem(hadoopCfg); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopFileSystemWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopFileSystemWrapper.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java index bdab61d..98f2e46 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopFileSystemWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/IgniteHadoopIgfsSecondaryFileSystem.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.ignite.igfs.hadoop; +package org.apache.ignite.hadoop.fs; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; @@ -23,21 +23,23 @@ import org.apache.hadoop.fs.permission.*; import org.apache.hadoop.ipc.*; import org.apache.ignite.*; import org.apache.ignite.igfs.*; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.jetbrains.annotations.*; -import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; import java.io.*; import java.net.*; import java.util.*; +import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; + /** - * Adapter to use any Hadoop file system {@link org.apache.hadoop.fs.FileSystem} as {@link org.apache.ignite.igfs.Igfs}. + * Adapter to use any Hadoop file system {@link FileSystem} as {@link IgfsSecondaryFileSystem}. */ -public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { +public class IgniteHadoopIgfsSecondaryFileSystem implements IgfsSecondaryFileSystem, AutoCloseable { /** Hadoop file system. */ private final FileSystem fileSys; @@ -51,7 +53,7 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { * @param uri URI of file system. * @throws IgniteCheckedException In case of error. */ - public IgfsHadoopFileSystemWrapper(String uri) throws IgniteCheckedException { + public IgniteHadoopIgfsSecondaryFileSystem(String uri) throws IgniteCheckedException { this(uri, null); } @@ -62,7 +64,7 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { * @param cfgPath Additional path to Hadoop configuration. * @throws IgniteCheckedException In case of error. */ - public IgfsHadoopFileSystemWrapper(@Nullable String uri, @Nullable String cfgPath) throws IgniteCheckedException { + public IgniteHadoopIgfsSecondaryFileSystem(@Nullable String uri, @Nullable String cfgPath) throws IgniteCheckedException { try { SecondaryFileSystemProvider secProvider = new SecondaryFileSystemProvider(uri, cfgPath); @@ -100,16 +102,14 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { * @param detailMsg Detailed error message. * @return Appropriate exception. */ + @SuppressWarnings({"ThrowableResultOfMethodCallIgnored", "unchecked"}) private IgfsException handleSecondaryFsError(IOException e, String detailMsg) { boolean wrongVer = X.hasCause(e, RemoteException.class) || (e.getMessage() != null && e.getMessage().contains("Failed on local")); - IgfsException igfsErr = !wrongVer ? cast(detailMsg, e) : + return !wrongVer ? cast(detailMsg, e) : new IgfsInvalidHdfsVersionException("HDFS version you are connecting to differs from local " + - "version.", e); - - return igfsErr; - } + "version.", e); } /** * Cast IO exception to IGFS exception. 
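The adapter renamed here is the piece users touch when an external Hadoop file system backs IGFS: it now implements the public IgfsSecondaryFileSystem contract instead of the old Igfs interface. A sketch of the intended wiring, using the constructor shown in this hunk; the namenode URI is a placeholder and the cache names are illustrative.

    import org.apache.ignite.IgniteCheckedException;
    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;
    import org.apache.ignite.igfs.IgfsMode;

    public class SecondaryFileSystemSketch {
        /** Builds a DUAL_SYNC IGFS configuration backed by an HDFS cluster. */
        public static FileSystemConfiguration hdfsBackedIgfs() throws IgniteCheckedException {
            FileSystemConfiguration fsCfg = new FileSystemConfiguration();

            fsCfg.setName("igfs");
            fsCfg.setDataCacheName("dataCache");
            fsCfg.setMetaCacheName("metaCache");

            // Writes go through to HDFS synchronously; reads fall back to HDFS on a miss.
            fsCfg.setDefaultMode(IgfsMode.DUAL_SYNC);

            // Was IgfsHadoopFileSystemWrapper (implementing Igfs) before this patch.
            fsCfg.setSecondaryFileSystem(
                new IgniteHadoopIgfsSecondaryFileSystem("hdfs://namenode:9000"));

            return fsCfg;
        }
    }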
@@ -158,7 +158,7 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { /** {@inheritDoc} */ @Nullable @Override public IgfsFile update(IgfsPath path, Map props) { - IgfsHadoopFSProperties props0 = new IgfsHadoopFSProperties(props); + HadoopIgfsProperties props0 = new HadoopIgfsProperties(props); try { if (props0.userName() != null || props0.groupName() != null) @@ -212,7 +212,7 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { /** {@inheritDoc} */ @Override public void mkdirs(IgfsPath path, @Nullable Map props) { try { - if (!fileSys.mkdirs(convert(path), new IgfsHadoopFSProperties(props).permission())) + if (!fileSys.mkdirs(convert(path), new HadoopIgfsProperties(props).permission())) throw new IgniteException("Failed to make directories [path=" + path + ", props=" + props + "]"); } catch (IOException e) { @@ -272,8 +272,8 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { } /** {@inheritDoc} */ - @Override public IgfsReader open(IgfsPath path, int bufSize) { - return new IgfsHadoopReader(fileSys, convert(path), bufSize); + @Override public IgfsSecondaryFileSystemPositionedReadable open(IgfsPath path, int bufSize) { + return new HadoopIgfsSecondaryFileSystemPositionedReadable(fileSys, convert(path), bufSize); } /** {@inheritDoc} */ @@ -289,8 +289,8 @@ public class IgfsHadoopFileSystemWrapper implements Igfs, AutoCloseable { /** {@inheritDoc} */ @Override public OutputStream create(IgfsPath path, int bufSize, boolean overwrite, int replication, long blockSize, @Nullable Map props) { - IgfsHadoopFSProperties props0 = - new IgfsHadoopFSProperties(props != null ? props : Collections.emptyMap()); + HadoopIgfsProperties props0 = + new HadoopIgfsProperties(props != null ? props : Collections.emptyMap()); try { return fileSys.create(convert(path), props0.permission(), overwrite, bufSize, (short)replication, blockSize, diff --git a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/package.html b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package.html similarity index 95% rename from modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/package.html rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package.html index f182598..1d78952 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/package.html +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/package.html @@ -19,6 +19,6 @@ - Ignite Hadoop client protocol. + Ignite Hadoop Accelerator file system API. diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java index c4d2f5e..93c0df4 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/IgfsHadoopFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/IgniteHadoopFileSystem.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.ignite.igfs.hadoop.v1; +package org.apache.ignite.hadoop.fs.v1; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; @@ -27,8 +27,8 @@ import org.apache.hadoop.util.*; import org.apache.ignite.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.igfs.common.*; -import org.apache.ignite.internal.igfs.hadoop.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -39,11 +39,10 @@ import java.net.*; import java.util.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.IgniteFs.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; import static org.apache.ignite.igfs.IgfsMode.*; -import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; -import static org.apache.ignite.internal.igfs.hadoop.IgfsHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.*; +import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.*; import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; /** @@ -58,7 +57,7 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; * * <property> * <name>fs.igfs.impl</name> - * <value>org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem</value> + * <value>org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem</value> * </property> * * You should also add Ignite JAR and all libraries to Hadoop classpath. To @@ -82,7 +81,7 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml} * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation. */ -public class IgfsHadoopFileSystem extends FileSystem { +public class IgniteHadoopFileSystem extends FileSystem { /** Internal property to indicate management connection. */ public static final String IGFS_MANAGEMENT = "fs.igfs.management.connection"; @@ -96,7 +95,7 @@ public class IgfsHadoopFileSystem extends FileSystem { private final AtomicBoolean closeGuard = new AtomicBoolean(); /** Grid remote client. */ - private IgfsHadoopWrapper rmtClient; + private HadoopIgfsWrapper rmtClient; /** User name for each thread. */ private final ThreadLocal userName = new ThreadLocal(){ @@ -160,7 +159,7 @@ public class IgfsHadoopFileSystem extends FileSystem { /** {@inheritDoc} */ @Override public URI getUri() { if (uri == null) - throw new IllegalStateException("URI is null (was IgfsHadoopFileSystem properly initialized?)."); + throw new IllegalStateException("URI is null (was IgniteHadoopFileSystem properly initialized?)."); return uri; } @@ -193,6 +192,7 @@ public class IgfsHadoopFileSystem extends FileSystem { } /** {@inheritDoc} */ + @SuppressWarnings("ConstantConditions") @Override public void initialize(URI name, Configuration cfg) throws IOException { enterBusy(); @@ -244,7 +244,7 @@ public class IgfsHadoopFileSystem extends FileSystem { String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null; - rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG); + rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG); // Handshake. 
IgfsHandshakeResponse handshake = rmtClient.handshake(logDir); @@ -487,13 +487,13 @@ public class IgfsHadoopFileSystem extends FileSystem { clientLog.logOpen(logId, path, PROXY, bufSize, size); - return new FSDataInputStream(new IgfsHadoopProxyInputStream(is, clientLog, logId)); + return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId)); } else return is; } else { - IgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride ? + HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ? rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path); long logId = -1; @@ -508,7 +508,7 @@ public class IgfsHadoopFileSystem extends FileSystem { LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path + ", bufSize=" + bufSize + ']'); - IgfsHadoopInputStream igfsIn = new IgfsHadoopInputStream(stream, stream.length(), + HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId); if (LOG.isDebugEnabled()) @@ -555,14 +555,14 @@ public class IgfsHadoopFileSystem extends FileSystem { clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize); - return new FSDataOutputStream(new IgfsHadoopProxyOutputStream(os, clientLog, logId)); + return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId)); } else return os; } else { // Create stream and close it in the 'finally' section if any sequential operation failed. - IgfsHadoopStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites, + HadoopIgfsStreamDelegate stream = rmtClient.create(path, overwrite, colocateFileWrites, replication, blockSize, F.asMap(PROP_PERMISSION, toString(perm), PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites))); @@ -579,7 +579,7 @@ public class IgfsHadoopFileSystem extends FileSystem { if (LOG.isDebugEnabled()) LOG.debug("Opened output stream in create [path=" + path + ", delegate=" + stream + ']'); - IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, + HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); @@ -632,13 +632,13 @@ public class IgfsHadoopFileSystem extends FileSystem { clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID. 
- return new FSDataOutputStream(new IgfsHadoopProxyOutputStream(os, clientLog, logId)); + return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId)); } else return os; } else { - IgfsHadoopStreamDelegate stream = rmtClient.append(path, false, null); + HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null); assert stream != null; @@ -653,7 +653,7 @@ public class IgfsHadoopFileSystem extends FileSystem { if (LOG.isDebugEnabled()) LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']'); - IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, clientLog, + HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); @@ -669,6 +669,7 @@ public class IgfsHadoopFileSystem extends FileSystem { } /** {@inheritDoc} */ + @SuppressWarnings("unchecked") @Override public boolean rename(Path src, Path dst) throws IOException { A.notNull(src, "src"); A.notNull(dst, "dst"); @@ -722,6 +723,7 @@ public class IgfsHadoopFileSystem extends FileSystem { } /** {@inheritDoc} */ + @SuppressWarnings("unchecked") @Override public boolean delete(Path f, boolean recursive) throws IOException { A.notNull(f, "f"); @@ -881,6 +883,7 @@ public class IgfsHadoopFileSystem extends FileSystem { } /** {@inheritDoc} */ + @SuppressWarnings("unchecked") @Override public boolean mkdirs(Path f, FsPermission perm) throws IOException { A.notNull(f, "f"); @@ -1229,6 +1232,6 @@ public class IgfsHadoopFileSystem extends FileSystem { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(IgfsHadoopFileSystem.class, this); + return S.toString(IgniteHadoopFileSystem.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/package.html b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package.html similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/package.html rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package.html index 137055b..69801af 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/package.html +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v1/package.html @@ -19,6 +19,6 @@ - Contains common files for Hadoop 1.x and Hadoop 2.x distros. + Contains Ignite Hadoop 1.x FileSystem implementation. diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java similarity index 95% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java index 0759203..70ad99f 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/IgfsHadoopFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/IgniteHadoopFileSystem.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.ignite.igfs.hadoop.v2; +package org.apache.ignite.hadoop.fs.v2; import org.apache.commons.logging.*; import org.apache.hadoop.conf.*; @@ -27,8 +27,8 @@ import org.apache.hadoop.util.*; import org.apache.ignite.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.igfs.common.*; -import org.apache.ignite.internal.igfs.hadoop.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -39,11 +39,10 @@ import java.net.*; import java.util.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.IgniteFs.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; import static org.apache.ignite.igfs.IgfsMode.*; -import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; -import static org.apache.ignite.internal.igfs.hadoop.IgfsHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.*; +import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.*; import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; /** @@ -58,7 +57,7 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; * * <property> * <name>fs.igfs.impl</name> - * <value>org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem</value> + * <value>org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem</value> * </property> * * You should also add Ignite JAR and all libraries to Hadoop classpath. To @@ -82,15 +81,15 @@ import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; * For sample client and data node configuration refer to {@code config/hadoop/default-config-client.xml} * and {@code config/hadoop/default-config.xml} configuration files in Ignite installation. */ -public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeable { +public class IgniteHadoopFileSystem extends AbstractFileSystem implements Closeable { /** Logger. */ - private static final Log LOG = LogFactory.getLog(IgfsHadoopFileSystem.class); + private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystem.class); /** Ensures that close routine is invoked at most once. */ private final AtomicBoolean closeGuard = new AtomicBoolean(); /** Grid remote client. */ - private IgfsHadoopWrapper rmtClient; + private HadoopIgfsWrapper rmtClient; /** Working directory. */ private IgfsPath workingDir; @@ -137,8 +136,8 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl * @throws URISyntaxException if name has invalid syntax. * @throws IOException If initialization failed. */ - public IgfsHadoopFileSystem(URI name, Configuration cfg) throws URISyntaxException, IOException { - super(IgfsHadoopEndpoint.normalize(name), IGFS_SCHEME, false, -1); + public IgniteHadoopFileSystem(URI name, Configuration cfg) throws URISyntaxException, IOException { + super(HadoopIgfsEndpoint.normalize(name), IGFS_SCHEME, false, -1); uri = name; @@ -203,6 +202,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl * @param cfg Configuration passed to constructor. * @throws IOException If initialization failed. 
*/ + @SuppressWarnings("ConstantConditions") private void initialize(URI name, Configuration cfg) throws IOException { enterBusy(); @@ -240,7 +240,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null; - rmtClient = new IgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG); + rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG); // Handshake. IgfsHandshakeResponse handshake = rmtClient.handshake(logDir); @@ -420,13 +420,13 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl clientLog.logOpen(logId, path, PROXY, bufSize, size); - return new FSDataInputStream(new IgfsHadoopProxyInputStream(is, clientLog, logId)); + return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId)); } else return is; } else { - IgfsHadoopStreamDelegate stream = seqReadsBeforePrefetchOverride ? + HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ? rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path); long logId = -1; @@ -441,7 +441,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() + ", path=" + path + ", bufSize=" + bufSize + ']'); - IgfsHadoopInputStream igfsIn = new IgfsHadoopInputStream(stream, stream.length(), + HadoopIgfsInputStream igfsIn = new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId); if (LOG.isDebugEnabled()) @@ -498,7 +498,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl else clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize); - return new FSDataOutputStream(new IgfsHadoopProxyOutputStream(os, clientLog, logId)); + return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId)); } else return os; @@ -508,7 +508,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites)); // Create stream and close it in the 'finally' section if any sequential operation failed. - IgfsHadoopStreamDelegate stream; + HadoopIgfsStreamDelegate stream; long logId = -1; @@ -540,7 +540,7 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl assert stream != null; - IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(stream, LOG, + HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); @@ -977,6 +977,6 @@ public class IgfsHadoopFileSystem extends AbstractFileSystem implements Closeabl /** {@inheritDoc} */ @Override public String toString() { - return S.toString(IgfsHadoopFileSystem.class, this); + return S.toString(IgniteHadoopFileSystem.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package.html b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package.html new file mode 100644 index 0000000..8adac96 --- /dev/null +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/fs/v2/package.html @@ -0,0 +1,24 @@ + + + + + + + Contains Ignite Hadoop 2.x FileSystem implementation. 
+ + diff --git a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolProvider.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolProvider.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java index a9a1c9d..83053ce 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolProvider.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopClientProtocolProvider.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.client.hadoop; +package org.apache.ignite.hadoop.mapreduce; import org.apache.hadoop.conf.*; import org.apache.hadoop.mapreduce.*; @@ -24,6 +24,7 @@ import org.apache.ignite.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.client.*; import org.apache.ignite.internal.client.marshaller.optimized.*; +import org.apache.ignite.internal.processors.hadoop.proto.*; import org.apache.ignite.internal.util.future.*; import org.apache.ignite.internal.util.typedef.*; @@ -33,13 +34,13 @@ import java.util.*; import java.util.concurrent.*; import static org.apache.ignite.internal.client.GridClientProtocol.*; -import static org.apache.ignite.client.hadoop.GridHadoopClientProtocol.*; +import static org.apache.ignite.internal.processors.hadoop.proto.HadoopClientProtocol.*; /** - * Grid Hadoop client protocol provider. + * Ignite Hadoop client protocol provider. */ -public class GridHadoopClientProtocolProvider extends ClientProtocolProvider { +public class IgniteHadoopClientProtocolProvider extends ClientProtocolProvider { /** Clients. */ private static final ConcurrentHashMap> cliMap = new ConcurrentHashMap<>(); @@ -84,7 +85,7 @@ public class GridHadoopClientProtocolProvider extends ClientProtocolProvider { * @throws IOException If failed. */ private static ClientProtocol createProtocol(String addr, Configuration conf) throws IOException { - return new GridHadoopClientProtocol(conf, client(addr)); + return new HadoopClientProtocol(conf, client(addr)); } /** diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java index 342cbab..9a6b4d8 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlanner.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/IgniteHadoopMapReducePlanner.java @@ -15,13 +15,14 @@ * limitations under the License. 
*/ -package org.apache.ignite.internal.processors.hadoop.planner; +package org.apache.ignite.hadoop.mapreduce; import org.apache.ignite.*; import org.apache.ignite.cluster.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.*; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; +import org.apache.ignite.internal.processors.hadoop.planner.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.util.typedef.*; @@ -31,12 +32,12 @@ import org.jetbrains.annotations.*; import java.util.*; -import static org.apache.ignite.IgniteFs.*; +import static org.apache.ignite.IgniteFileSystem.*; /** * Default map-reduce planner implementation. */ -public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePlanner { +public class IgniteHadoopMapReducePlanner implements HadoopMapReducePlanner { /** Injected grid. */ @IgniteInstanceResource private Ignite ignite; @@ -47,15 +48,15 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla private IgniteLogger log; /** {@inheritDoc} */ - @Override public GridHadoopMapReducePlan preparePlan(GridHadoopJob job, Collection top, - @Nullable GridHadoopMapReducePlan oldPlan) throws IgniteCheckedException { + @Override public HadoopMapReducePlan preparePlan(HadoopJob job, Collection top, + @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException { // Convert collection of topology nodes to collection of topology node IDs. Collection topIds = new HashSet<>(top.size(), 1.0f); for (ClusterNode topNode : top) topIds.add(topNode.id()); - Map> mappers = mappers(top, topIds, job.input()); + Map> mappers = mappers(top, topIds, job.input()); int rdcCnt = job.info().reducers(); @@ -64,7 +65,7 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla Map reducers = reducers(top, mappers, rdcCnt); - return new GridHadoopDefaultMapReducePlan(mappers, reducers); + return new HadoopDefaultMapReducePlan(mappers, reducers); } /** @@ -76,9 +77,9 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla * @return Mappers map. * @throws IgniteCheckedException If failed. */ - private Map> mappers(Collection top, Collection topIds, - Iterable splits) throws IgniteCheckedException { - Map> mappers = new HashMap<>(); + private Map> mappers(Collection top, Collection topIds, + Iterable splits) throws IgniteCheckedException { + Map> mappers = new HashMap<>(); Map> nodes = hosts(top); @@ -87,13 +88,13 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla for (UUID nodeId : topIds) nodeLoads.put(nodeId, 0); - for (GridHadoopInputSplit split : splits) { + for (HadoopInputSplit split : splits) { UUID nodeId = nodeForSplit(split, topIds, nodes, nodeLoads); if (log.isDebugEnabled()) log.debug("Mapped split to node [split=" + split + ", nodeId=" + nodeId + ']'); - Collection nodeSplits = mappers.get(nodeId); + Collection nodeSplits = mappers.get(nodeId); if (nodeSplits == null) { nodeSplits = new ArrayList<>(); @@ -147,13 +148,13 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla * @return Node ID. 
*/ @SuppressWarnings("unchecked") - private UUID nodeForSplit(GridHadoopInputSplit split, Collection topIds, Map> nodes, + private UUID nodeForSplit(HadoopInputSplit split, Collection topIds, Map> nodes, Map nodeLoads) throws IgniteCheckedException { - if (split instanceof GridHadoopFileBlock) { - GridHadoopFileBlock split0 = (GridHadoopFileBlock)split; + if (split instanceof HadoopFileBlock) { + HadoopFileBlock split0 = (HadoopFileBlock)split; if (IGFS_SCHEME.equalsIgnoreCase(split0.file().getScheme())) { - IgfsHadoopEndpoint endpoint = new IgfsHadoopEndpoint(split0.file().getAuthority()); + HadoopIgfsEndpoint endpoint = new HadoopIgfsEndpoint(split0.file().getAuthority()); IgfsEx igfs = null; @@ -293,14 +294,14 @@ public class GridHadoopDefaultMapReducePlanner implements GridHadoopMapReducePla * @return Reducers map. */ private Map reducers(Collection top, - Map> mappers, int reducerCnt) { + Map> mappers, int reducerCnt) { // Determine initial node weights. int totalWeight = 0; List nodes = new ArrayList<>(top.size()); for (ClusterNode node : top) { - Collection split = mappers.get(node.id()); + Collection split = mappers.get(node.id()); int weight = reducerNodeWeight(node, split != null ? split.size() : 0); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/package.html b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package.html similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/package.html rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package.html index ec38a21..e289841 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/package.html +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/mapreduce/package.html @@ -19,6 +19,6 @@ - Contains IGnite File System APIs. + Ignite Hadoop Accelerator map-reduce classes. diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/package.html similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html rename to modules/hadoop/src/main/java/org/apache/ignite/hadoop/package.html index ec380f2..d687e32 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/package.html +++ b/modules/hadoop/src/main/java/org/apache/ignite/hadoop/package.html @@ -19,6 +19,6 @@ - Contains IGFS client classes. + Ignite Hadoop Accelerator API. diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/package.html b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/package.html deleted file mode 100644 index 4b62db1..0000000 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v1/package.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - Contains Hadoop 1.x FileSystem wrapper for Ignite file system. - - diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/package.html b/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/package.html deleted file mode 100644 index 6df66f4..0000000 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/v2/package.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - Contains Hadoop 2.x FileSystem wrapper for Ignite file system. 
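// --- Editor's aside: illustrative configuration sketch, not part of the patch. It wires the
// planner, now public as org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner, into
// a node. setMapReducePlanner(...) is taken from initializeDefaults(...) in HadoopProcessor
// further below; setHadoopConfiguration(...) on IgniteConfiguration is assumed from the
// getHadoopConfiguration() call in the same file.
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.HadoopConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.hadoop.mapreduce.IgniteHadoopMapReducePlanner;

public class PlannerWiringSketch {
    public static void main(String[] args) {
        HadoopConfiguration hadoopCfg = new HadoopConfiguration();

        // Explicitly set what initializeDefaults(...) would otherwise pick as the default.
        hadoopCfg.setMapReducePlanner(new IgniteHadoopMapReducePlanner());

        IgniteConfiguration cfg = new IgniteConfiguration();
        cfg.setHadoopConfiguration(hadoopCfg);

        Ignition.start(cfg);
    }
}
// --- End of editor's aside. ----------------------------------------------------------------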
- - diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java index bc4c0bb..1856e41 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoader.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoader.java @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.*; * Also supports class parsing for finding dependencies which contain transitive dependencies * unavailable for parent. */ -public class GridHadoopClassLoader extends URLClassLoader { +public class HadoopClassLoader extends URLClassLoader { /** * We are very parallel capable. */ @@ -44,7 +44,7 @@ public class GridHadoopClassLoader extends URLClassLoader { } /** */ - private static final URLClassLoader APP_CLS_LDR = (URLClassLoader)GridHadoopClassLoader.class.getClassLoader(); + private static final URLClassLoader APP_CLS_LDR = (URLClassLoader)HadoopClassLoader.class.getClassLoader(); /** */ private static final Collection appJars = F.asList(APP_CLS_LDR.getURLs()); @@ -61,10 +61,10 @@ public class GridHadoopClassLoader extends URLClassLoader { /** * @param urls Urls. */ - public GridHadoopClassLoader(URL[] urls) { + public HadoopClassLoader(URL[] urls) { super(addHadoopUrls(urls), APP_CLS_LDR); - assert !(getParent() instanceof GridHadoopClassLoader); + assert !(getParent() instanceof HadoopClassLoader); } /** @@ -73,7 +73,7 @@ public class GridHadoopClassLoader extends URLClassLoader { * @param cls Class name. * @return {@code true} if we need to check this class. */ - private static boolean isIgfsHadoop(String cls) { + private static boolean isHadoopIgfs(String cls) { String ignitePackagePrefix = "org.apache.ignite"; int len = ignitePackagePrefix.length(); @@ -93,14 +93,14 @@ public class GridHadoopClassLoader extends URLClassLoader { try { if (isHadoop(name)) { // Always load Hadoop classes explicitly, since Hadoop can be available in App classpath. if (name.endsWith(".util.ShutdownHookManager")) // Dirty hack to get rid of Hadoop shutdown hooks. - return loadFromBytes(name, GridHadoopShutdownHookManager.class.getName()); + return loadFromBytes(name, HadoopShutdownHookManager.class.getName()); else if (name.endsWith(".util.NativeCodeLoader")) - return loadFromBytes(name, GridHadoopNativeCodeLoader.class.getName()); + return loadFromBytes(name, HadoopNativeCodeLoader.class.getName()); return loadClassExplicitly(name, resolve); } - if (isIgfsHadoop(name)) { // For Ignite Hadoop and IGFS classes we have to check if they depend on Hadoop. + if (isHadoopIgfs(name)) { // For Ignite Hadoop and IGFS classes we have to check if they depend on Hadoop. Boolean hasDeps = cache.get(name); if (hasDeps == null) { @@ -224,7 +224,7 @@ public class GridHadoopClassLoader extends URLClassLoader { if (in == null) // The class is external itself, it must be loaded from this class loader. return true; - if (!isIgfsHadoop(clsName)) // Other classes should not have external dependencies. + if (!isHadoopIgfs(clsName)) // Other classes should not have external dependencies. 
return false; final ClassReader rdr; @@ -508,7 +508,7 @@ public class GridHadoopClassLoader extends URLClassLoader { if (hadoopUrls != null) return hadoopUrls; - synchronized (GridHadoopClassLoader.class) { + synchronized (HadoopClassLoader.class) { hadoopUrls = hadoopJars; if (hadoopUrls != null) diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopComponent.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopComponent.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java index 337bfe9..cea11eb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopComponent.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopComponent.java @@ -22,9 +22,9 @@ import org.apache.ignite.*; /** * Abstract class for all hadoop components. */ -public abstract class GridHadoopComponent { +public abstract class HadoopComponent { /** Hadoop context. */ - protected GridHadoopContext ctx; + protected HadoopContext ctx; /** Logger. */ protected IgniteLogger log; @@ -32,7 +32,7 @@ public abstract class GridHadoopComponent { /** * @param ctx Hadoop context. */ - public void start(GridHadoopContext ctx) throws IgniteCheckedException { + public void start(HadoopContext ctx) throws IgniteCheckedException { this.ctx = ctx; log = ctx.kernalContext().log(getClass()); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopContext.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopContext.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java index 3160e3d..68f0baf 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopContext.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopContext.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.cluster.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.hadoop.jobtracker.*; import org.apache.ignite.internal.processors.hadoop.shuffle.*; @@ -29,34 +30,34 @@ import java.util.*; /** * Hadoop accelerator context. */ -public class GridHadoopContext { +public class HadoopContext { /** Kernal context. */ private GridKernalContext ctx; /** Hadoop configuration. */ - private GridHadoopConfiguration cfg; + private HadoopConfiguration cfg; /** Job tracker. */ - private GridHadoopJobTracker jobTracker; + private HadoopJobTracker jobTracker; /** External task executor. */ - private GridHadoopTaskExecutorAdapter taskExecutor; + private HadoopTaskExecutorAdapter taskExecutor; /** */ - private GridHadoopShuffle shuffle; + private HadoopShuffle shuffle; /** Managers list. */ - private List components = new ArrayList<>(); + private List components = new ArrayList<>(); /** * @param ctx Kernal context. 
*/ - public GridHadoopContext( + public HadoopContext( GridKernalContext ctx, - GridHadoopConfiguration cfg, - GridHadoopJobTracker jobTracker, - GridHadoopTaskExecutorAdapter taskExecutor, - GridHadoopShuffle shuffle + HadoopConfiguration cfg, + HadoopJobTracker jobTracker, + HadoopTaskExecutorAdapter taskExecutor, + HadoopShuffle shuffle ) { this.ctx = ctx; this.cfg = cfg; @@ -71,7 +72,7 @@ public class GridHadoopContext { * * @return List of managers. */ - public List components() { + public List components() { return components; } @@ -89,7 +90,7 @@ public class GridHadoopContext { * * @return Hadoop configuration. */ - public GridHadoopConfiguration configuration() { + public HadoopConfiguration configuration() { return cfg; } @@ -143,13 +144,13 @@ public class GridHadoopContext { * @param meta Job metadata. * @return {@code true} If local node is participating in job execution. */ - public boolean isParticipating(GridHadoopJobMetadata meta) { + public boolean isParticipating(HadoopJobMetadata meta) { UUID locNodeId = localNodeId(); if (locNodeId.equals(meta.submitNodeId())) return true; - GridHadoopMapReducePlan plan = meta.mapReducePlan(); + HadoopMapReducePlan plan = meta.mapReducePlan(); return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) || jobUpdateLeader(); } @@ -157,28 +158,28 @@ public class GridHadoopContext { /** * @return Job tracker instance. */ - public GridHadoopJobTracker jobTracker() { + public HadoopJobTracker jobTracker() { return jobTracker; } /** * @return Task executor. */ - public GridHadoopTaskExecutorAdapter taskExecutor() { + public HadoopTaskExecutorAdapter taskExecutor() { return taskExecutor; } /** * @return Shuffle. */ - public GridHadoopShuffle shuffle() { + public HadoopShuffle shuffle() { return shuffle; } /** * @return Map-reduce planner. */ - public GridHadoopMapReducePlanner planner() { + public HadoopMapReducePlanner planner() { return cfg.getMapReducePlanner(); } @@ -188,7 +189,7 @@ public class GridHadoopContext { * @param c Component to add. * @return Added manager. */ - private C add(C c) { + private C add(C c) { components.add(c); return c; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultJobInfo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java similarity index 85% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultJobInfo.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java index 555c573..77eb6d2 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultJobInfo.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultJobInfo.java @@ -29,7 +29,7 @@ import java.util.*; /** * Hadoop job info based on default Hadoop configuration. */ -public class GridHadoopDefaultJobInfo implements GridHadoopJobInfo, Externalizable { +public class HadoopDefaultJobInfo implements HadoopJobInfo, Externalizable { /** */ private static final long serialVersionUID = 5489900236464999951L; @@ -54,7 +54,7 @@ public class GridHadoopDefaultJobInfo implements GridHadoopJobInfo, Externalizab /** * Default constructor required by {@link Externalizable}. */ - public GridHadoopDefaultJobInfo() { + public HadoopDefaultJobInfo() { // No-op.
} @@ -67,7 +67,7 @@ public class GridHadoopDefaultJobInfo implements GridHadoopJobInfo, Externalizab * @param numReduces Number of reducers configured for job. * @param props All other properties of the job. */ - public GridHadoopDefaultJobInfo(String jobName, String user, boolean hasCombiner, int numReduces, + public HadoopDefaultJobInfo(String jobName, String user, boolean hasCombiner, int numReduces, Map props) { this.jobName = jobName; this.user = user; @@ -82,24 +82,24 @@ public class GridHadoopDefaultJobInfo implements GridHadoopJobInfo, Externalizab } /** {@inheritDoc} */ - @Override public GridHadoopJob createJob(GridHadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException { + @Override public HadoopJob createJob(HadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException { try { Class jobCls0 = jobCls; if (jobCls0 == null) { // It is enough to have only one class loader with only Hadoop classes. - synchronized (GridHadoopDefaultJobInfo.class) { + synchronized (HadoopDefaultJobInfo.class) { if ((jobCls0 = jobCls) == null) { - GridHadoopClassLoader ldr = new GridHadoopClassLoader(null); + HadoopClassLoader ldr = new HadoopClassLoader(null); - jobCls = jobCls0 = ldr.loadClass(GridHadoopV2Job.class.getName()); + jobCls = jobCls0 = ldr.loadClass(HadoopV2Job.class.getName()); } } } - Constructor constructor = jobCls0.getConstructor(GridHadoopJobId.class, GridHadoopDefaultJobInfo.class, + Constructor constructor = jobCls0.getConstructor(HadoopJobId.class, HadoopDefaultJobInfo.class, IgniteLogger.class); - return (GridHadoopJob)constructor.newInstance(jobId, this, log); + return (HadoopJob)constructor.newInstance(jobId, this, log); } // NB: java.lang.NoClassDefFoundError may be thrown from Class#getConstructor() call. catch (Throwable t) { diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopImpl.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java similarity index 81% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopImpl.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java index 55e3690..27542a1 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopImpl.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopImpl.java @@ -18,16 +18,18 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.util.*; import org.jetbrains.annotations.*; /** * Hadoop facade implementation. */ -public class GridHadoopImpl implements GridHadoop { +public class HadoopImpl implements Hadoop { /** Hadoop processor. */ - private final IgniteHadoopProcessor proc; + private final HadoopProcessor proc; /** Busy lock. */ private final GridSpinBusyLock busyLock = new GridSpinBusyLock(); @@ -37,17 +39,17 @@ public class GridHadoopImpl implements GridHadoop { * * @param proc Hadoop processor. 
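// --- Editor's aside: illustrative sketch, not part of the patch. createJob(...) above loads
// HadoopV2Job through a dedicated HadoopClassLoader so that Hadoop-dependent classes stay
// isolated from the application class path. A minimal demonstration of that isolation,
// assuming ignite-hadoop and the Hadoop JARs are resolvable at runtime:
import org.apache.ignite.internal.processors.hadoop.HadoopClassLoader;

public class ClassLoaderIsolationSketch {
    public static void main(String[] args) throws Exception {
        HadoopClassLoader ldrA = new HadoopClassLoader(null);
        HadoopClassLoader ldrB = new HadoopClassLoader(null);

        // Each loader defines its own copy of Hadoop-dependent classes, so the same binary
        // name resolves to two distinct Class objects.
        Class<?> a = ldrA.loadClass("org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job");
        Class<?> b = ldrB.loadClass("org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job");

        System.out.println("Isolated copies: " + (a != b));
    }
}
// --- End of editor's aside. ----------------------------------------------------------------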
*/ - GridHadoopImpl(IgniteHadoopProcessor proc) { + HadoopImpl(HadoopProcessor proc) { this.proc = proc; } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration configuration() { + @Override public HadoopConfiguration configuration() { return proc.config(); } /** {@inheritDoc} */ - @Override public GridHadoopJobId nextJobId() { + @Override public HadoopJobId nextJobId() { if (busyLock.enterBusy()) { try { return proc.nextJobId(); @@ -61,7 +63,7 @@ public class GridHadoopImpl implements GridHadoop { } /** {@inheritDoc} */ - @Override public IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo) { + @Override public IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo jobInfo) { if (busyLock.enterBusy()) { try { return proc.submit(jobId, jobInfo); @@ -75,7 +77,7 @@ public class GridHadoopImpl implements GridHadoop { } /** {@inheritDoc} */ - @Nullable @Override public GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { return proc.status(jobId); @@ -89,7 +91,7 @@ public class GridHadoopImpl implements GridHadoop { } /** {@inheritDoc} */ - @Nullable @Override public GridHadoopCounters counters(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { return proc.counters(jobId); @@ -103,7 +105,7 @@ public class GridHadoopImpl implements GridHadoop { } /** {@inheritDoc} */ - @Nullable @Override public IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable @Override public IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { return proc.finishFuture(jobId); @@ -117,7 +119,7 @@ public class GridHadoopImpl implements GridHadoop { } /** {@inheritDoc} */ - @Override public boolean kill(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException { if (busyLock.enterBusy()) { try { return proc.kill(jobId); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounterGroup.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounterGroup.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java index 37cd28d..b0c2d3e 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounterGroup.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounterGroup.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.client.hadoop.counter; +package org.apache.ignite.internal.processors.hadoop; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.counters.*; @@ -24,11 +24,11 @@ import java.io.*; import java.util.*; /** - * Hadoop Client API Counters adapter. + * Hadoop counter group adapter. */ -class GridHadoopClientCounterGroup implements CounterGroup { +class HadoopMapReduceCounterGroup implements CounterGroup { /** Counters.
*/ - private final GridHadoopClientCounters cntrs; + private final HadoopMapReduceCounters cntrs; /** Group name. */ private final String name; @@ -39,7 +39,7 @@ class GridHadoopClientCounterGroup implements CounterGroup { * @param cntrs Client counters instance. * @param name Group name. */ - GridHadoopClientCounterGroup(GridHadoopClientCounters cntrs, String name) { + HadoopMapReduceCounterGroup(HadoopMapReduceCounters cntrs, String name) { this.cntrs = cntrs; this.name = name; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounters.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounters.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java index 9f4ec02..c2c9e2a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/counter/GridHadoopClientCounters.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceCounters.java @@ -15,11 +15,10 @@ * limitations under the License. */ -package org.apache.ignite.client.hadoop.counter; +package org.apache.ignite.internal.processors.hadoop; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.counters.*; -import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.processors.hadoop.counter.*; import org.apache.ignite.internal.processors.hadoop.v2.*; import org.apache.ignite.internal.util.typedef.*; @@ -28,21 +27,21 @@ import java.io.*; import java.util.*; /** - * Hadoop Client API Counters adapter. + * Hadoop counters adapter. */ -public class GridHadoopClientCounters extends Counters { +public class HadoopMapReduceCounters extends Counters { /** */ - private final Map,GridHadoopLongCounter> cntrs = new HashMap<>(); + private final Map,HadoopLongCounter> cntrs = new HashMap<>(); /** * Creates new instance based on given counters. * * @param cntrs Counters to adapt. 
*/ - public GridHadoopClientCounters(GridHadoopCounters cntrs) { - for (GridHadoopCounter cntr : cntrs.all()) - if (cntr instanceof GridHadoopLongCounter) - this.cntrs.put(new T2<>(cntr.group(), cntr.name()), (GridHadoopLongCounter) cntr); + public HadoopMapReduceCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters cntrs) { + for (HadoopCounter cntr : cntrs.all()) + if (cntr instanceof HadoopLongCounter) + this.cntrs.put(new T2<>(cntr.group(), cntr.name()), (HadoopLongCounter) cntr); } /** {@inheritDoc} */ @@ -52,7 +51,7 @@ public class GridHadoopClientCounters extends Counters { /** {@inheritDoc} */ @Override public CounterGroup addGroup(String name, String displayName) { - return new GridHadoopClientCounterGroup(this, name); + return new HadoopMapReduceCounterGroup(this, name); } /** {@inheritDoc} */ @@ -74,7 +73,7 @@ public class GridHadoopClientCounters extends Counters { @Override public synchronized Iterable getGroupNames() { Collection res = new HashSet<>(); - for (GridHadoopCounter counter : cntrs.values()) + for (HadoopCounter counter : cntrs.values()) res.add(counter.group()); return res; @@ -93,7 +92,7 @@ public class GridHadoopClientCounters extends Counters { if (!hasNext()) throw new NoSuchElementException(); - return new GridHadoopClientCounterGroup(GridHadoopClientCounters.this, iter.next()); + return new HadoopMapReduceCounterGroup(HadoopMapReduceCounters.this, iter.next()); } @Override public void remove() { @@ -104,7 +103,7 @@ public class GridHadoopClientCounters extends Counters { /** {@inheritDoc} */ @Override public synchronized CounterGroup getGroup(String grpName) { - return new GridHadoopClientCounterGroup(this, grpName); + return new HadoopMapReduceCounterGroup(this, grpName); } /** {@inheritDoc} */ @@ -133,10 +132,10 @@ public class GridHadoopClientCounters extends Counters { /** {@inheritDoc} */ @Override public boolean equals(Object genericRight) { - if (!(genericRight instanceof GridHadoopClientCounters)) + if (!(genericRight instanceof HadoopMapReduceCounters)) return false; - return cntrs.equals(((GridHadoopClientCounters) genericRight).cntrs); + return cntrs.equals(((HadoopMapReduceCounters) genericRight).cntrs); } /** {@inheritDoc} */ @@ -168,7 +167,7 @@ public class GridHadoopClientCounters extends Counters { public int groupSize(String grpName) { int res = 0; - for (GridHadoopCounter counter : cntrs.values()) { + for (HadoopCounter counter : cntrs.values()) { if (grpName.equals(counter.group())) res++; } @@ -185,9 +184,9 @@ public class GridHadoopClientCounters extends Counters { public Iterator iterateGroup(String grpName) { Collection grpCounters = new ArrayList<>(); - for (GridHadoopLongCounter counter : cntrs.values()) { + for (HadoopLongCounter counter : cntrs.values()) { if (grpName.equals(counter.group())) - grpCounters.add(new GridHadoopV2Counter(counter)); + grpCounters.add(new HadoopV2Counter(counter)); } return grpCounters.iterator(); @@ -204,14 +203,14 @@ public class GridHadoopClientCounters extends Counters { public Counter findCounter(String grpName, String cntrName, boolean create) { T2 key = new T2<>(grpName, cntrName); - GridHadoopLongCounter internalCntr = cntrs.get(key); + HadoopLongCounter internalCntr = cntrs.get(key); if (internalCntr == null & create) { - internalCntr = new GridHadoopLongCounter(grpName,cntrName); + internalCntr = new HadoopLongCounter(grpName,cntrName); - cntrs.put(key, new GridHadoopLongCounter(grpName,cntrName)); + cntrs.put(key, new HadoopLongCounter(grpName,cntrName)); } - return 
internalCntr == null ? null : new GridHadoopV2Counter(internalCntr); + return internalCntr == null ? null : new HadoopV2Counter(internalCntr); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java similarity index 65% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessor.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java index 4ef9e35..e0c5916 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/IgniteHadoopProcessor.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopProcessor.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; +import org.apache.ignite.hadoop.mapreduce.*; import org.apache.ignite.internal.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.processors.hadoop.jobtracker.*; -import org.apache.ignite.internal.processors.hadoop.planner.*; import org.apache.ignite.internal.processors.hadoop.shuffle.*; import org.apache.ignite.internal.processors.hadoop.taskexecutor.*; import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.*; @@ -30,27 +32,27 @@ import org.apache.ignite.internal.util.typedef.internal.*; import java.util.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopClassLoader.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopClassLoader.*; /** * Hadoop processor. */ -public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { +public class HadoopProcessor extends HadoopProcessorAdapter { /** Job ID counter. */ private final AtomicInteger idCtr = new AtomicInteger(); /** Hadoop context. */ @GridToStringExclude - private GridHadoopContext hctx; + private HadoopContext hctx; /** Hadoop facade for public API. */ @GridToStringExclude - private GridHadoop hadoop; + private Hadoop hadoop; /** * @param ctx Kernal context. */ - public IgniteHadoopProcessor(GridKernalContext ctx) { + public HadoopProcessor(GridKernalContext ctx) { super(ctx); } @@ -59,12 +61,12 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { if (ctx.isDaemon()) return; - GridHadoopConfiguration cfg = ctx.config().getHadoopConfiguration(); + HadoopConfiguration cfg = ctx.config().getHadoopConfiguration(); if (cfg == null) - cfg = new GridHadoopConfiguration(); + cfg = new HadoopConfiguration(); else - cfg = new GridHadoopConfiguration(cfg); + cfg = new HadoopConfiguration(cfg); initializeDefaults(cfg); @@ -85,24 +87,24 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { } if (ok) { - hctx = new GridHadoopContext( + hctx = new HadoopContext( ctx, cfg, - new GridHadoopJobTracker(), - cfg.isExternalExecution() ? new GridHadoopExternalTaskExecutor() : new GridHadoopEmbeddedTaskExecutor(), - new GridHadoopShuffle()); + new HadoopJobTracker(), + cfg.isExternalExecution() ? 
new HadoopExternalTaskExecutor() : new HadoopEmbeddedTaskExecutor(), + new HadoopShuffle()); - for (GridHadoopComponent c : hctx.components()) + for (HadoopComponent c : hctx.components()) c.start(hctx); - hadoop = new GridHadoopImpl(this); + hadoop = new HadoopImpl(this); } } /** {@inheritDoc} */ @Override public String toString() { - return S.toString(IgniteHadoopProcessor.class, this); + return S.toString(HadoopProcessor.class, this); } /** {@inheritDoc} */ @@ -112,10 +114,10 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { if (hctx == null) return; - List components = hctx.components(); + List components = hctx.components(); - for (ListIterator it = components.listIterator(components.size()); it.hasPrevious();) { - GridHadoopComponent c = it.previous(); + for (ListIterator it = components.listIterator(components.size()); it.hasPrevious();) { + HadoopComponent c = it.previous(); c.stop(cancel); } @@ -128,7 +130,7 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { if (hctx == null) return; - for (GridHadoopComponent c : hctx.components()) + for (HadoopComponent c : hctx.components()) c.onKernalStart(); } @@ -139,10 +141,10 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { if (hctx == null) return; - List components = hctx.components(); + List components = hctx.components(); - for (ListIterator it = components.listIterator(components.size()); it.hasPrevious();) { - GridHadoopComponent c = it.previous(); + for (ListIterator it = components.listIterator(components.size()); it.hasPrevious();) { + HadoopComponent c = it.previous(); c.onKernalStop(cancel); } @@ -153,12 +155,12 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { * * @return Hadoop context. 
*/ - public GridHadoopContext context() { + public HadoopContext context() { return hctx; } /** {@inheritDoc} */ - @Override public GridHadoop hadoop() { + @Override public Hadoop hadoop() { if (hadoop == null) throw new IllegalStateException("Hadoop accelerator is disabled (Hadoop is not in classpath, " + "is HADOOP_HOME environment variable set?)"); @@ -167,37 +169,37 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration config() { + @Override public HadoopConfiguration config() { return hctx.configuration(); } /** {@inheritDoc} */ - @Override public GridHadoopJobId nextJobId() { - return new GridHadoopJobId(ctx.localNodeId(), idCtr.incrementAndGet()); + @Override public HadoopJobId nextJobId() { + return new HadoopJobId(ctx.localNodeId(), idCtr.incrementAndGet()); } /** {@inheritDoc} */ - @Override public IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo) { + @Override public IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo jobInfo) { return hctx.jobTracker().submit(jobId, jobInfo); } /** {@inheritDoc} */ - @Override public GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException { return hctx.jobTracker().status(jobId); } /** {@inheritDoc} */ - @Override public GridHadoopCounters counters(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public HadoopCounters counters(HadoopJobId jobId) throws IgniteCheckedException { return hctx.jobTracker().jobCounters(jobId); } /** {@inheritDoc} */ - @Override public IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException { return hctx.jobTracker().finishFuture(jobId); } /** {@inheritDoc} */ - @Override public boolean kill(GridHadoopJobId jobId) throws IgniteCheckedException { + @Override public boolean kill(HadoopJobId jobId) throws IgniteCheckedException { return hctx.jobTracker().killJob(jobId); } @@ -206,9 +208,9 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { * * @param cfg Hadoop configuration. */ - private void initializeDefaults(GridHadoopConfiguration cfg) { + private void initializeDefaults(HadoopConfiguration cfg) { if (cfg.getMapReducePlanner() == null) - cfg.setMapReducePlanner(new GridHadoopDefaultMapReducePlanner()); + cfg.setMapReducePlanner(new IgniteHadoopMapReducePlanner()); } /** @@ -217,7 +219,7 @@ public class IgniteHadoopProcessor extends IgniteHadoopProcessorAdapter { * @param hadoopCfg Hadoop configuration. * @throws IgniteCheckedException If failed. 
*/ - private void validate(GridHadoopConfiguration hadoopCfg) throws IgniteCheckedException { + private void validate(HadoopConfiguration hadoopCfg) throws IgniteCheckedException { if (ctx.config().isPeerClassLoadingEnabled()) throw new IgniteCheckedException("Peer class loading cannot be used with Hadoop (disable it using " + "GridConfiguration.setPeerClassLoadingEnabled())."); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSetup.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java similarity index 99% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSetup.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java index 66b1db4..35df5da 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSetup.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopSetup.java @@ -31,7 +31,7 @@ import static org.apache.ignite.internal.IgniteVersionUtils.*; /** * Setup tool to configure Hadoop client. */ -public class GridHadoopSetup { +public class HadoopSetup { /** */ public static final String WINUTILS_EXE = "winutils.exe"; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskCancelledException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskCancelledException.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java index c762181..bb3d1cc 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskCancelledException.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskCancelledException.java @@ -22,14 +22,14 @@ import org.apache.ignite.*; /** * Exception that throws when the task is cancelling. */ -public class GridHadoopTaskCancelledException extends IgniteException { +public class HadoopTaskCancelledException extends IgniteException { /** */ private static final long serialVersionUID = 0L; /** * @param msg Exception message. */ - public GridHadoopTaskCancelledException(String msg) { + public HadoopTaskCancelledException(String msg) { super(msg); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopUtils.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java index 763f45a..00be422 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/GridHadoopUtils.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopUtils.java @@ -35,7 +35,7 @@ import java.util.*; /** * Hadoop utility methods. */ -public class GridHadoopUtils { +public class HadoopUtils { /** Property to store timestamp of new job id request. */ public static final String REQ_NEW_JOBID_TS_PROPERTY = "ignite.job.requestNewIdTs"; @@ -65,7 +65,7 @@ public class GridHadoopUtils { * @param hosts Hosts. 
* @throws IOException If failed. */ - public static GridHadoopSplitWrapper wrapSplit(int id, Object split, String[] hosts) throws IOException { + public static HadoopSplitWrapper wrapSplit(int id, Object split, String[] hosts) throws IOException { ByteArrayOutputStream arr = new ByteArrayOutputStream(); ObjectOutput out = new ObjectOutputStream(arr); @@ -75,7 +75,7 @@ public class GridHadoopUtils { out.flush(); - return new GridHadoopSplitWrapper(id, split.getClass().getName(), arr.toByteArray(), hosts); + return new HadoopSplitWrapper(id, split.getClass().getName(), arr.toByteArray(), hosts); } /** @@ -84,9 +84,9 @@ public class GridHadoopUtils { * @param o Wrapper. * @return Split. */ - public static Object unwrapSplit(GridHadoopSplitWrapper o) { + public static Object unwrapSplit(HadoopSplitWrapper o) { try { - Writable w = (Writable)GridHadoopUtils.class.getClassLoader().loadClass(o.className()).newInstance(); + Writable w = (Writable)HadoopUtils.class.getClassLoader().loadClass(o.className()).newInstance(); w.readFields(new ObjectInputStream(new ByteArrayInputStream(o.bytes()))); @@ -103,7 +103,7 @@ public class GridHadoopUtils { * @param status Ignite job status. * @return Hadoop job status. */ - public static JobStatus status(GridHadoopJobStatus status, Configuration conf) { + public static JobStatus status(HadoopJobStatus status, Configuration conf) { JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId()); float setupProgress = 0; @@ -200,7 +200,7 @@ public class GridHadoopUtils { * @return Job info. * @throws IgniteCheckedException If failed. */ - public static GridHadoopDefaultJobInfo createJobInfo(Configuration cfg) throws IgniteCheckedException { + public static HadoopDefaultJobInfo createJobInfo(Configuration cfg) throws IgniteCheckedException { JobConf jobConf = new JobConf(cfg); boolean hasCombiner = jobConf.get("mapred.combiner.class") != null @@ -255,7 +255,7 @@ public class GridHadoopUtils { for (Map.Entry entry : jobConf) props.put(entry.getKey(), entry.getValue()); - return new GridHadoopDefaultJobInfo(jobConf.getJobName(), jobConf.getUser(), hasCombiner, numReduces, props); + return new HadoopDefaultJobInfo(jobConf.getJobName(), jobConf.getUser(), hasCombiner, numReduces, props); } /** @@ -281,7 +281,7 @@ public class GridHadoopUtils { * @return Working directory for job. * @throws IgniteCheckedException If Failed. */ - public static File jobLocalDir(UUID locNodeId, GridHadoopJobId jobId) throws IgniteCheckedException { + public static File jobLocalDir(UUID locNodeId, HadoopJobId jobId) throws IgniteCheckedException { return new File(new File(U.resolveWorkDirectory("hadoop", false), "node-" + locNodeId), "job_" + jobId); } @@ -293,7 +293,7 @@ public class GridHadoopUtils { * @return Working directory for task. * @throws IgniteCheckedException If Failed. */ - public static File taskLocalDir(UUID locNodeId, GridHadoopTaskInfo info) throws IgniteCheckedException { + public static File taskLocalDir(UUID locNodeId, HadoopTaskInfo info) throws IgniteCheckedException { File jobLocDir = jobLocalDir(locNodeId, info.jobId()); return new File(jobLocDir, info.type() + "_" + info.taskNumber() + "_" + info.attempt()); @@ -302,7 +302,7 @@ public class GridHadoopUtils { /** * Constructor. */ - private GridHadoopUtils() { + private HadoopUtils() { // No-op. 
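// --- Editor's aside: illustrative round-trip sketch, not part of the patch. wrapSplit(...)
// and unwrapSplit(...) above serialize a Writable split by class name and replay it via
// readFields(...). FileSplit is the stock Hadoop mapreduce split; the path, offsets and host
// are made up, and the import packages follow the renamed internal processors.hadoop package.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.ignite.internal.processors.hadoop.HadoopSplitWrapper;
import org.apache.ignite.internal.processors.hadoop.HadoopUtils;

public class SplitWrapperSketch {
    public static void main(String[] args) throws Exception {
        FileSplit split = new FileSplit(new Path("igfs://igfs@localhost/input/part-0"), 0, 1024,
            new String[] {"host1"});

        // Wrap: records the split class name plus its Writable-serialized bytes.
        HadoopSplitWrapper w = HadoopUtils.wrapSplit(42, split, split.getLocations());

        // Unwrap: re-instantiates the class by name and reads the serialized fields back.
        FileSplit restored = (FileSplit)HadoopUtils.unwrapSplit(w);

        System.out.println("Restored length: " + restored.getLength());
    }
}
// --- End of editor's aside. ----------------------------------------------------------------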
} } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCounterAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCounterAdapter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java index 9e46846..c2ed5bb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCounterAdapter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCounterAdapter.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.processors.hadoop.counter; -import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.util.typedef.internal.*; import org.jetbrains.annotations.*; @@ -26,7 +25,7 @@ import java.io.*; /** * Default Hadoop counter implementation. */ -public abstract class GridHadoopCounterAdapter implements GridHadoopCounter, Externalizable { +public abstract class HadoopCounterAdapter implements HadoopCounter, Externalizable { /** */ private static final long serialVersionUID = 0L; @@ -39,7 +38,7 @@ public abstract class GridHadoopCounterAdapter implements GridHadoopCounter, Ext /** * Default constructor required by {@link Externalizable}. */ - protected GridHadoopCounterAdapter() { + protected HadoopCounterAdapter() { // No-op. } @@ -49,7 +48,7 @@ public abstract class GridHadoopCounterAdapter implements GridHadoopCounter, Ext * @param grp Counter group name. * @param name Counter name. */ - protected GridHadoopCounterAdapter(String grp, String name) { + protected HadoopCounterAdapter(String grp, String name) { assert grp != null : "counter must have group"; assert name != null : "counter must have name"; @@ -88,7 +87,7 @@ public abstract class GridHadoopCounterAdapter implements GridHadoopCounter, Ext if (o == null || getClass() != o.getClass()) return false; - GridHadoopCounterAdapter cntr = (GridHadoopCounterAdapter)o; + HadoopCounterAdapter cntr = (HadoopCounterAdapter)o; if (!grp.equals(cntr.grp)) return false; @@ -107,7 +106,7 @@ public abstract class GridHadoopCounterAdapter implements GridHadoopCounter, Ext /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopCounterAdapter.class, this); + return S.toString(HadoopCounterAdapter.class, this); } /** diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCountersImpl.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java similarity index 76% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCountersImpl.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java index 92d54af..78e1c26 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopCountersImpl.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopCountersImpl.java @@ -18,7 +18,6 @@ package org.apache.ignite.internal.processors.hadoop.counter; import org.apache.ignite.*; -import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.util.lang.*; import 
org.apache.ignite.internal.util.typedef.internal.*; import org.jdk8.backport.*; @@ -31,17 +30,17 @@ import java.util.concurrent.*; /** * Default in-memory counters store. */ -public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizable { +public class HadoopCountersImpl implements HadoopCounters, Externalizable { /** */ private static final long serialVersionUID = 0L; /** */ - private final ConcurrentMap cntrsMap = new ConcurrentHashMap8<>(); + private final ConcurrentMap cntrsMap = new ConcurrentHashMap8<>(); /** * Default constructor. Creates new instance without counters. */ - public GridHadoopCountersImpl() { + public HadoopCountersImpl() { // No-op. } @@ -50,7 +49,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl * * @param cntrs Counters to store. */ - public GridHadoopCountersImpl(Iterable cntrs) { + public HadoopCountersImpl(Iterable cntrs) { addCounters(cntrs, true); } @@ -59,7 +58,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl * * @param cntrs Counters to copy. */ - public GridHadoopCountersImpl(GridHadoopCounters cntrs) { + public HadoopCountersImpl(HadoopCounters cntrs) { this(cntrs.all()); } @@ -71,7 +70,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl * @param name Counter name. * @return Counter. */ - private T createCounter(Class cls, String grp, + private T createCounter(Class cls, String grp, String name) { try { Constructor constructor = cls.getConstructor(String.class, String.class); @@ -89,12 +88,12 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl * @param cntrs Counters to add. * @param cp Whether to copy counters or not. */ - private void addCounters(Iterable cntrs, boolean cp) { + private void addCounters(Iterable cntrs, boolean cp) { assert cntrs != null; - for (GridHadoopCounter cntr : cntrs) { + for (HadoopCounter cntr : cntrs) { if (cp) { - GridHadoopCounter cntrCp = createCounter(cntr.getClass(), cntr.group(), cntr.name()); + HadoopCounter cntrCp = createCounter(cntr.getClass(), cntr.group(), cntr.name()); cntrCp.merge(cntr); @@ -106,7 +105,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl } /** {@inheritDoc} */ - @Override public T counter(String grp, String name, Class cls) { + @Override public T counter(String grp, String name, Class cls) { assert cls != null; CounterKey mapKey = new CounterKey(cls, grp, name); @@ -126,13 +125,13 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl } /** {@inheritDoc} */ - @Override public Collection all() { + @Override public Collection all() { return cntrsMap.values(); } /** {@inheritDoc} */ - @Override public void merge(GridHadoopCounters other) { - for (GridHadoopCounter counter : other.all()) + @Override public void merge(HadoopCounters other) { + for (HadoopCounter counter : other.all()) counter(counter.group(), counter.name(), counter.getClass()).merge(counter); } @@ -144,7 +143,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - addCounters(U.readCollection(in), false); + addCounters(U.readCollection(in), false); } /** {@inheritDoc} */ @@ -155,7 +154,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl if (o == null || getClass() != o.getClass()) return false; - 
GridHadoopCountersImpl counters = (GridHadoopCountersImpl)o; + HadoopCountersImpl counters = (HadoopCountersImpl)o; return cntrsMap.equals(counters.cntrsMap); } @@ -167,13 +166,13 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopCountersImpl.class, this, "counters", cntrsMap.values()); + return S.toString(HadoopCountersImpl.class, this, "counters", cntrsMap.values()); } /** * The tuple of counter identifier components for more readable code. */ - private static class CounterKey extends GridTuple3, String, String> { + private static class CounterKey extends GridTuple3, String, String> { /** */ private static final long serialVersionUID = 0L; @@ -184,7 +183,7 @@ public class GridHadoopCountersImpl implements GridHadoopCounters, Externalizabl * @param grp Group name. * @param name Counter name. */ - private CounterKey(Class cls, String grp, String name) { + private CounterKey(Class cls, String grp, String name) { super(cls, grp, name); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopLongCounter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopLongCounter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java index 67af49f..ce86edb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopLongCounter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopLongCounter.java @@ -17,14 +17,12 @@ package org.apache.ignite.internal.processors.hadoop.counter; -import org.apache.ignite.internal.processors.hadoop.*; - import java.io.*; /** * Standard hadoop counter to use via original Hadoop API in Hadoop jobs. */ -public class GridHadoopLongCounter extends GridHadoopCounterAdapter { +public class HadoopLongCounter extends HadoopCounterAdapter { /** */ private static final long serialVersionUID = 0L; @@ -34,7 +32,7 @@ public class GridHadoopLongCounter extends GridHadoopCounterAdapter { /** * Default constructor required by {@link Externalizable}. */ - public GridHadoopLongCounter() { + public HadoopLongCounter() { // No-op. } @@ -44,7 +42,7 @@ public class GridHadoopLongCounter extends GridHadoopCounterAdapter { * @param grp Group name. * @param name Counter name. 
*/ - public GridHadoopLongCounter(String grp, String name) { + public HadoopLongCounter(String grp, String name) { super(grp, name); } @@ -59,8 +57,8 @@ public class GridHadoopLongCounter extends GridHadoopCounterAdapter { } /** {@inheritDoc} */ - @Override public void merge(GridHadoopCounter cntr) { - val += ((GridHadoopLongCounter)cntr).val; + @Override public void merge(HadoopCounter cntr) { + val += ((HadoopLongCounter)cntr).val; } /** diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopPerformanceCounter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopPerformanceCounter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java index d5ceebf..351839a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/GridHadoopPerformanceCounter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/counter/HadoopPerformanceCounter.java @@ -25,12 +25,12 @@ import org.jetbrains.annotations.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Counter for the job statistics accumulation. */ -public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { +public class HadoopPerformanceCounter extends HadoopCounterAdapter { /** */ private static final long serialVersionUID = 0L; @@ -58,7 +58,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { /** * Default constructor required by {@link Externalizable}. */ - public GridHadoopPerformanceCounter() { + public HadoopPerformanceCounter() { // No-op. } @@ -68,7 +68,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param grp Group name. * @param name Counter name. */ - public GridHadoopPerformanceCounter(String grp, String name) { + public HadoopPerformanceCounter(String grp, String name) { super(grp, name); } @@ -77,7 +77,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * * @param nodeId Id of the work node. */ - public GridHadoopPerformanceCounter(UUID nodeId) { + public HadoopPerformanceCounter(UUID nodeId) { this.nodeId = nodeId; } @@ -97,8 +97,8 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { } /** {@inheritDoc} */ - @Override public void merge(GridHadoopCounter cntr) { - evts.addAll(((GridHadoopPerformanceCounter)cntr).evts); + @Override public void merge(HadoopCounter cntr) { + evts.addAll(((HadoopPerformanceCounter)cntr).evts); } /** @@ -117,7 +117,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param evtType The type of the event. * @return String contains necessary event information. */ - private String eventName(GridHadoopTaskInfo info, String evtType) { + private String eventName(HadoopTaskInfo info, String evtType) { return eventName(info.type().toString(), info.taskNumber(), evtType); } @@ -141,7 +141,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param info Task info. * @param ts Timestamp of the event. 
*/ - public void onTaskSubmit(GridHadoopTaskInfo info, long ts) { + public void onTaskSubmit(HadoopTaskInfo info, long ts) { evts.add(new T2<>(eventName(info, "submit"), ts)); } @@ -151,7 +151,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param info Task info. * @param ts Timestamp of the event. */ - public void onTaskPrepare(GridHadoopTaskInfo info, long ts) { + public void onTaskPrepare(HadoopTaskInfo info, long ts) { evts.add(new T2<>(eventName(info, "prepare"), ts)); } @@ -161,8 +161,8 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param info Task info. * @param ts Timestamp of the event. */ - public void onTaskFinish(GridHadoopTaskInfo info, long ts) { - if (info.type() == GridHadoopTaskType.REDUCE && lastShuffleMsg != null) { + public void onTaskFinish(HadoopTaskInfo info, long ts) { + if (info.type() == HadoopTaskType.REDUCE && lastShuffleMsg != null) { evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "start"), firstShuffleMsg)); evts.add(new T2<>(eventName("SHUFFLE", reducerNum, "finish"), lastShuffleMsg)); @@ -178,7 +178,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param info Task info. * @param ts Timestamp of the event. */ - public void onTaskStart(GridHadoopTaskInfo info, long ts) { + public void onTaskStart(HadoopTaskInfo info, long ts) { evts.add(new T2<>(eventName(info, "start"), ts)); } @@ -209,7 +209,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * * @param info Job info. */ - public void clientSubmissionEvents(GridHadoopJobInfo info) { + public void clientSubmissionEvents(HadoopJobInfo info) { assert nodeId != null; addEventFromProperty("JOB requestId", info, REQ_NEW_JOBID_TS_PROPERTY); @@ -224,7 +224,7 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { * @param info Job info. * @param propName Property name to get timestamp. */ - private void addEventFromProperty(String evt, GridHadoopJobInfo info, String propName) { + private void addEventFromProperty(String evt, HadoopJobInfo info, String propName) { String val = info.property(propName); if (!F.isEmpty(val)) { @@ -253,19 +253,19 @@ public class GridHadoopPerformanceCounter extends GridHadoopCounterAdapter { } /** - * Gets system predefined performance counter from the GridHadoopCounters object. + * Gets system predefined performance counter from the HadoopCounters object. * - * @param cntrs GridHadoopCounters object. + * @param cntrs HadoopCounters object. * @param nodeId Node id for methods that adds events. It may be null if you don't use ones. * @return Predefined performance counter. 
*/ - public static GridHadoopPerformanceCounter getCounter(GridHadoopCounters cntrs, @Nullable UUID nodeId) { - GridHadoopPerformanceCounter cntr = cntrs.counter(GROUP_NAME, COUNTER_NAME, GridHadoopPerformanceCounter.class); + public static HadoopPerformanceCounter getCounter(HadoopCounters cntrs, @Nullable UUID nodeId) { + HadoopPerformanceCounter cntr = cntrs.counter(GROUP_NAME, COUNTER_NAME, HadoopPerformanceCounter.class); if (nodeId != null) cntr.nodeId(nodeId); - return cntrs.counter(GROUP_NAME, COUNTER_NAME, GridHadoopPerformanceCounter.class); + return cntrs.counter(GROUP_NAME, COUNTER_NAME, HadoopPerformanceCounter.class); } /** diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopDistributedFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopDistributedFileSystem.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopDistributedFileSystem.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopDistributedFileSystem.java index e9461e2..509f443 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopDistributedFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopDistributedFileSystem.java @@ -25,12 +25,12 @@ import org.apache.hadoop.mapreduce.*; import java.io.*; import java.net.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; /** * Wrapper of HDFS for support of separated working directory. */ -public class GridHadoopDistributedFileSystem extends DistributedFileSystem { +public class HadoopDistributedFileSystem extends DistributedFileSystem { /** User name for each thread. */ private final ThreadLocal<String> userName = new ThreadLocal<String>() { /** {@inheritDoc} */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopFileSystemsUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopFileSystemsUtils.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java index 52e7d29..f3f51d4 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopFileSystemsUtils.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopFileSystemsUtils.java @@ -20,12 +20,12 @@ package org.apache.ignite.internal.processors.hadoop.fs; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; import org.apache.hadoop.hdfs.protocol.*; -import org.apache.ignite.igfs.hadoop.v1.*; +import org.apache.ignite.hadoop.fs.v1.*; /** * Utilities for configuring file systems to support the separate working directory per each thread. */ -public class GridHadoopFileSystemsUtils { +public class HadoopFileSystemsUtils { /** Name of the property for setting working directory on create new local FS instance. */ public static final String LOC_FS_WORK_DIR_PROP = "fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".workDir"; @@ -36,10 +36,10 @@ public class GridHadoopFileSystemsUtils { * @param userName User name.
*/ public static void setUser(FileSystem fs, String userName) { - if (fs instanceof IgfsHadoopFileSystem) - ((IgfsHadoopFileSystem)fs).setUser(userName); - else if (fs instanceof GridHadoopDistributedFileSystem) - ((GridHadoopDistributedFileSystem)fs).setUser(userName); + if (fs instanceof IgniteHadoopFileSystem) + ((IgniteHadoopFileSystem)fs).setUser(userName); + else if (fs instanceof HadoopDistributedFileSystem) + ((HadoopDistributedFileSystem)fs).setUser(userName); } /** @@ -48,10 +48,10 @@ public class GridHadoopFileSystemsUtils { * @param cfg Config for setup. */ public static void setupFileSystems(Configuration cfg) { - cfg.set("fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl", GridHadoopLocalFileSystemV1.class.getName()); + cfg.set("fs." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl", HadoopLocalFileSystemV1.class.getName()); cfg.set("fs.AbstractFileSystem." + FsConstants.LOCAL_FS_URI.getScheme() + ".impl", - GridHadoopLocalFileSystemV2.class.getName()); + HadoopLocalFileSystemV2.class.getName()); - cfg.set("fs." + HdfsConstants.HDFS_URI_SCHEME + ".impl", GridHadoopDistributedFileSystem.class.getName()); + cfg.set("fs." + HdfsConstants.HDFS_URI_SCHEME + ".impl", HadoopDistributedFileSystem.class.getName()); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV1.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV1.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java index 28834d4..9cc5881 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV1.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV1.java @@ -24,16 +24,16 @@ import java.io.*; /** * Local file system replacement for Hadoop jobs. */ -public class GridHadoopLocalFileSystemV1 extends LocalFileSystem { +public class HadoopLocalFileSystemV1 extends LocalFileSystem { /** * Creates new local file system. */ - public GridHadoopLocalFileSystemV1() { - super(new GridHadoopRawLocalFileSystem()); + public HadoopLocalFileSystemV1() { + super(new HadoopRawLocalFileSystem()); } /** {@inheritDoc} */ @Override public File pathToFile(Path path) { - return ((GridHadoopRawLocalFileSystem)getRaw()).convert(path); + return ((HadoopRawLocalFileSystem)getRaw()).convert(path); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV2.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV2.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java index 62d7cea..15ddc5a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopLocalFileSystemV2.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopLocalFileSystemV2.java @@ -29,7 +29,7 @@ import static org.apache.hadoop.fs.FsConstants.*; /** * Local file system replacement for Hadoop jobs. 
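[Editor's sketch, Java] A minimal sketch of wiring the utility above into a job configuration, using only the two members visible in this hunk (setupFileSystems(Configuration) and LOC_FS_WORK_DIR_PROP); the work directory path is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopFileSystemsUtils;

    public class FileSystemSetupSketch {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            // Routes "file:" to HadoopLocalFileSystemV1/V2 and "hdfs:" to
            // HadoopDistributedFileSystem, per setupFileSystems() above.
            HadoopFileSystemsUtils.setupFileSystems(cfg);

            // Seeds the per-thread working directory; the constant expands
            // to "fs.file.workDir" per the definition above.
            cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, "/tmp/job-work-dir");
        }
    }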
*/ -public class GridHadoopLocalFileSystemV2 extends ChecksumFs { +public class HadoopLocalFileSystemV2 extends ChecksumFs { /** * Creates new local file system. * @@ -37,7 +37,7 @@ public class GridHadoopLocalFileSystemV2 extends ChecksumFs { * @throws IOException If failed. * @throws URISyntaxException If failed. */ - public GridHadoopLocalFileSystemV2(Configuration cfg) throws IOException, URISyntaxException { + public HadoopLocalFileSystemV2(Configuration cfg) throws IOException, URISyntaxException { super(new DelegateFS(cfg)); } @@ -49,7 +49,7 @@ public class GridHadoopLocalFileSystemV2 extends ChecksumFs { * @throws IOException If failed. * @throws URISyntaxException If failed. */ - public GridHadoopLocalFileSystemV2(URI uri, Configuration cfg) throws IOException, URISyntaxException { + public HadoopLocalFileSystemV2(URI uri, Configuration cfg) throws IOException, URISyntaxException { this(cfg); } @@ -65,7 +65,7 @@ public class GridHadoopLocalFileSystemV2 extends ChecksumFs { * @throws URISyntaxException If failed. */ public DelegateFS(Configuration cfg) throws IOException, URISyntaxException { - super(LOCAL_FS_URI, new GridHadoopRawLocalFileSystem(), cfg, LOCAL_FS_URI.getScheme(), false); + super(LOCAL_FS_URI, new HadoopRawLocalFileSystem(), cfg, LOCAL_FS_URI.getScheme(), false); } /** {@inheritDoc} */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopParameters.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopParameters.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java index 6237dd4..7edcec0 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/igfs/hadoop/IgfsHadoopParameters.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopParameters.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.fs; /** * This class lists parameters that can be specified in Hadoop configuration. @@ -24,7 +24,7 @@ package org.apache.ignite.igfs.hadoop; *

* <ul> * <li> * {@code fs.igfs.[name].open.sequential_reads_before_prefetch} - this parameter overrides - * the one specified in {@link org.apache.ignite.configuration.IgfsConfiguration#getSequentialReadsBeforePrefetch()} + * the one specified in {@link org.apache.ignite.configuration.FileSystemConfiguration#getSequentialReadsBeforePrefetch()} * IGFS data node configuration property. * </li> * <li>
@@ -72,7 +72,7 @@ package org.apache.ignite.igfs.hadoop; * If you want to use these parameters in code, then you have to substitute your file system name in it. The easiest * way to do that is {@code String.format(PARAM_IGFS_COLOCATED_WRITES, [name])}. */ -public class IgfsHadoopParameters { +public class HadoopParameters { /** Parameter name for control over file colocation write mode. */ public static final String PARAM_IGFS_COLOCATED_WRITES = "fs.igfs.%s.colocated.writes"; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopRawLocalFileSystem.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java similarity index 98% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopRawLocalFileSystem.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java index 29645f8..e5ec3f7 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/GridHadoopRawLocalFileSystem.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/fs/HadoopRawLocalFileSystem.java @@ -33,7 +33,7 @@ import java.nio.file.*; /** * Local file system implementation for Hadoop. */ -public class GridHadoopRawLocalFileSystem extends FileSystem { +public class HadoopRawLocalFileSystem extends FileSystem { /** Working directory for each thread. */ private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() { @Override protected Path initialValue() { @@ -74,7 +74,7 @@ public class GridHadoopRawLocalFileSystem extends FileSystem { setConf(conf); - String initWorkDir = conf.get(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP); + String initWorkDir = conf.get(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP); if (initWorkDir != null) setWorkingDirectory(new Path(initWorkDir)); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoop.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java similarity index 95% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoop.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java index 27d6e33..b3cb235 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoop.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfs.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.*; import org.apache.ignite.igfs.*; @@ -28,7 +28,7 @@ import java.util.*; /** * Facade for communication with grid. */ -public interface IgfsHadoop { +public interface HadoopIgfs { /** * Perform handshake. * @@ -158,7 +158,7 @@ public interface IgfsHadoop { * @return Future for open operation. * @throws IgniteCheckedException If failed. */ - public IgfsHadoopStreamDelegate open(IgfsPath path) throws IgniteCheckedException, IOException; + public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException, IOException; /** * Command to open file for reading. * @@ -167,7 +167,7 @@ public interface IgfsHadoop { * @return Future for open operation. * @throws IgniteCheckedException If failed.
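[Editor's sketch, Java] The HadoopParameters javadoc above prescribes String.format for substituting the file system name into a parameter key; a sketch of that recipe (the name "igfs" and the boolean value are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters;

    public class ColocatedWritesSketch {
        public static void main(String[] args) {
            Configuration cfg = new Configuration();

            // "fs.igfs.%s.colocated.writes" expands to "fs.igfs.igfs.colocated.writes".
            String key = String.format(HadoopParameters.PARAM_IGFS_COLOCATED_WRITES, "igfs");

            cfg.setBoolean(key, true);
        }
    }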
*/ - public IgfsHadoopStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException, + public HadoopIgfsStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException, IOException; /** @@ -181,7 +181,7 @@ public interface IgfsHadoop { * @return Stream descriptor. * @throws IgniteCheckedException If failed. */ - public IgfsHadoopStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, + public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, int replication, long blockSize, @Nullable Map props) throws IgniteCheckedException, IOException; /** @@ -193,6 +193,6 @@ public interface IgfsHadoop { * @return Stream descriptor. * @throws IgniteCheckedException If failed. */ - public IgfsHadoopStreamDelegate append(IgfsPath path, boolean create, + public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create, @Nullable Map props) throws IgniteCheckedException, IOException; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopCommunicationException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java similarity index 83% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopCommunicationException.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java index 03bf733..ff69478 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopCommunicationException.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsCommunicationException.java @@ -15,14 +15,14 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.*; /** * Communication exception indicating a problem between file system and IGFS instance. */ -public class IgfsHadoopCommunicationException extends IgniteCheckedException { +public class HadoopIgfsCommunicationException extends IgniteCheckedException { /** */ private static final long serialVersionUID = 0L; @@ -32,7 +32,7 @@ public class IgfsHadoopCommunicationException extends IgniteCheckedException { * * @param cause Non-null throwable cause. */ - public IgfsHadoopCommunicationException(Exception cause) { + public HadoopIgfsCommunicationException(Exception cause) { super(cause); } @@ -41,7 +41,7 @@ public class IgfsHadoopCommunicationException extends IgniteCheckedException { * * @param msg Error message. */ - public IgfsHadoopCommunicationException(String msg) { + public HadoopIgfsCommunicationException(String msg) { super(msg); } @@ -51,7 +51,7 @@ public class IgfsHadoopCommunicationException extends IgniteCheckedException { * @param msg Error message. * @param cause Cause. 
*/ - public IgfsHadoopCommunicationException(String msg, Exception cause) { + public HadoopIgfsCommunicationException(String msg, Exception cause) { super(msg, cause); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEndpoint.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEndpoint.java index 35638ea..7502f57 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEndpoint.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEndpoint.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.*; import org.apache.ignite.internal.util.typedef.*; @@ -26,12 +26,12 @@ import org.jetbrains.annotations.*; import java.io.*; import java.net.*; -import static org.apache.ignite.configuration.IgfsConfiguration.*; +import static org.apache.ignite.configuration.FileSystemConfiguration.*; /** * IGFS endpoint abstraction. */ -public class IgfsHadoopEndpoint { +public class HadoopIgfsEndpoint { /** Localhost. */ public static final String LOCALHOST = "127.0.0.1"; @@ -56,10 +56,10 @@ public class IgfsHadoopEndpoint { */ public static URI normalize(URI uri) throws IOException { try { - if (!F.eq(IgniteFs.IGFS_SCHEME, uri.getScheme())) + if (!F.eq(IgniteFileSystem.IGFS_SCHEME, uri.getScheme())) throw new IOException("Failed to normalize URI because it has non-IGFS scheme: " + uri); - IgfsHadoopEndpoint endpoint = new IgfsHadoopEndpoint(uri.getAuthority()); + HadoopIgfsEndpoint endpoint = new HadoopIgfsEndpoint(uri.getAuthority()); StringBuilder sb = new StringBuilder(); @@ -83,7 +83,7 @@ public class IgfsHadoopEndpoint { * @param connStr Connection string. * @throws IgniteCheckedException If failed to parse connection string. */ - public IgfsHadoopEndpoint(@Nullable String connStr) throws IgniteCheckedException { + public HadoopIgfsEndpoint(@Nullable String connStr) throws IgniteCheckedException { if (connStr == null) connStr = ""; @@ -205,6 +205,6 @@ /** {@inheritDoc} */ @Override public String toString() { - return S.toString(IgfsHadoopEndpoint.class, this); + return S.toString(HadoopIgfsEndpoint.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEx.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java similarity index 84% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEx.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java index da86e37..2200e78 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopEx.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsEx.java @@ -15,7 +15,7 @@ * limitations under the License.
*/ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.internal.util.lang.*; import org.jetbrains.annotations.*; @@ -25,7 +25,7 @@ import java.io.*; /** * Extended IGFS server interface. */ -public interface IgfsHadoopEx extends IgfsHadoop { +public interface HadoopIgfsEx extends HadoopIgfs { /** * Adds event listener that will be invoked when connection with server is lost or remote error has occurred. * If connection is closed already, callback will be invoked synchronously inside this method. @@ -33,14 +33,14 @@ public interface IgfsHadoopEx extends IgfsHadoop { * @param delegate Stream delegate. * @param lsnr Event listener. */ - public void addEventListener(IgfsHadoopStreamDelegate delegate, IgfsHadoopStreamEventListener lsnr); + public void addEventListener(HadoopIgfsStreamDelegate delegate, HadoopIgfsStreamEventListener lsnr); /** * Removes event listener that will be invoked when connection with server is lost or remote error has occurred. * * @param delegate Stream delegate. */ - public void removeEventListener(IgfsHadoopStreamDelegate delegate); + public void removeEventListener(HadoopIgfsStreamDelegate delegate); /** * Asynchronously reads specified amount of bytes from opened input stream. @@ -55,7 +55,7 @@ public interface IgfsHadoopEx extends IgfsHadoop { * @param outLen Output length. * @return Read data. */ - public GridPlainFuture readData(IgfsHadoopStreamDelegate delegate, long pos, int len, + public GridPlainFuture readData(HadoopIgfsStreamDelegate delegate, long pos, int len, @Nullable final byte[] outBuf, final int outOff, final int outLen); /** @@ -68,7 +68,7 @@ public interface IgfsHadoopEx extends IgfsHadoop { * @param len Length. * @throws IOException If failed. */ - public void writeData(IgfsHadoopStreamDelegate delegate, byte[] data, int off, int len) throws IOException; + public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len) throws IOException; /** * Close server stream. @@ -76,7 +76,7 @@ public interface IgfsHadoopEx extends IgfsHadoop { * @param delegate Stream delegate. * @throws IOException If failed. */ - public void closeStream(IgfsHadoopStreamDelegate delegate) throws IOException; + public void closeStream(HadoopIgfsStreamDelegate delegate) throws IOException; /** * Flush output stream. @@ -84,5 +84,5 @@ public interface IgfsHadoopEx extends IgfsHadoop { * @param delegate Stream delegate. * @throws IOException If failed. */ - public void flush(IgfsHadoopStreamDelegate delegate) throws IOException; + public void flush(HadoopIgfsStreamDelegate delegate) throws IOException; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFuture.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFuture.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java index 476641c..59a8f49 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFuture.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsFuture.java @@ -15,7 +15,7 @@ * limitations under the License. 
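[Editor's sketch, Java] For readers tracing the listener plumbing in HadoopIgfsEx above, a sketch of attaching a stream event listener. Only onClose() and onError(String) are visible in this patch, so the exact listener signatures (checked exceptions included) are assumptions, and igfs and delegate stand for surrounding client state:

    HadoopIgfsStreamEventListener lsnr = new HadoopIgfsStreamEventListener() {
        @Override public void onClose() {
            System.out.println("Server stream closed.");
        }

        @Override public void onError(String errMsg) {
            System.err.println("Server stream error: " + errMsg);
        }
    };

    igfs.addEventListener(delegate, lsnr);   // declared in HadoopIgfsEx above

    // ... stream I/O ...

    igfs.removeEventListener(delegate);      // also declared above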
*/ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.internal.util.lang.*; import org.jetbrains.annotations.*; @@ -23,7 +23,7 @@ import org.jetbrains.annotations.*; /** * IGFS client future that holds response parse closure. */ -public class IgfsHadoopFuture extends GridPlainFutureAdapter { +public class HadoopIgfsFuture extends GridPlainFutureAdapter { /** Output buffer. */ private byte[] outBuf; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java index 8245125..a8eb58c 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInProc.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInProc.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.ignite.*; @@ -31,7 +31,7 @@ import java.util.concurrent.*; /** * Communication with grid in the same process. */ -public class IgfsHadoopInProc implements IgfsHadoopEx { +public class HadoopIgfsInProc implements HadoopIgfsEx { /** Target IGFS. */ private final IgfsEx igfs; @@ -39,7 +39,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { private final int bufSize; /** Event listeners. */ - private final Map lsnrs = + private final Map lsnrs = new ConcurrentHashMap<>(); /** Logger. */ @@ -51,7 +51,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { * @param igfs Target IGFS. * @param log Log. */ - public IgfsHadoopInProc(IgfsEx igfs, Log log) { + public HadoopIgfsInProc(IgfsEx igfs, Log log) { this.igfs = igfs; this.log = log; @@ -69,7 +69,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { /** {@inheritDoc} */ @Override public void close(boolean force) { // Perform cleanup. 
- for (IgfsHadoopStreamEventListener lsnr : lsnrs.values()) { + for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) { try { lsnr.onClose(); } @@ -89,7 +89,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to get file info because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to get file info because Grid is stopping: " + path); } } @@ -102,7 +102,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to update file because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to update file because Grid is stopping: " + path); } } @@ -117,7 +117,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to set path times because Grid is stopping: " + + throw new HadoopIgfsCommunicationException("Failed to set path times because Grid is stopping: " + path); } } @@ -133,7 +133,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to rename path because Grid is stopping: " + src); + throw new HadoopIgfsCommunicationException("Failed to rename path because Grid is stopping: " + src); } } @@ -146,7 +146,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to delete path because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to delete path because Grid is stopping: " + path); } } @@ -156,7 +156,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { return igfs.globalSpace(); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to get file system status because Grid is " + + throw new HadoopIgfsCommunicationException("Failed to get file system status because Grid is " + "stopping."); } } @@ -170,7 +170,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to list paths because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to list paths because Grid is stopping: " + path); } } @@ -183,7 +183,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to list files because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to list files because Grid is stopping: " + path); } } @@ -198,7 +198,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to create directory because Grid is stopping: " + + throw new HadoopIgfsCommunicationException("Failed to create directory because Grid is stopping: " + path); } } @@ -212,7 +212,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { 
- throw new IgfsHadoopCommunicationException("Failed to get content summary because Grid is stopping: " + + throw new HadoopIgfsCommunicationException("Failed to get content summary because Grid is stopping: " + path); } } @@ -227,76 +227,76 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to get affinity because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to get affinity because Grid is stopping: " + path); } } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(IgfsPath path) throws IgniteCheckedException { + @Override public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException { try { IgfsInputStreamAdapter stream = igfs.open(path, bufSize); - return new IgfsHadoopStreamDelegate(this, stream, stream.fileInfo().length()); + return new HadoopIgfsStreamDelegate(this, stream, stream.fileInfo().length()); } catch (IgniteException e) { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to open file because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path); } } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) + @Override public HadoopIgfsStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException { try { IgfsInputStreamAdapter stream = igfs.open(path, bufSize, seqReadsBeforePrefetch); - return new IgfsHadoopStreamDelegate(this, stream, stream.fileInfo().length()); + return new HadoopIgfsStreamDelegate(this, stream, stream.fileInfo().length()); } catch (IgniteException e) { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to open file because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to open file because Grid is stopping: " + path); } } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, + @Override public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, int replication, long blockSize, @Nullable Map props) throws IgniteCheckedException { try { IgfsOutputStream stream = igfs.create(path, bufSize, overwrite, colocate ? 
igfs.nextAffinityKey() : null, replication, blockSize, props); - return new IgfsHadoopStreamDelegate(this, stream); + return new HadoopIgfsStreamDelegate(this, stream); } catch (IgniteException e) { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to create file because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to create file because Grid is stopping: " + path); } } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate append(IgfsPath path, boolean create, + @Override public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create, @Nullable Map props) throws IgniteCheckedException { try { IgfsOutputStream stream = igfs.append(path, bufSize, create, props); - return new IgfsHadoopStreamDelegate(this, stream); + return new HadoopIgfsStreamDelegate(this, stream); } catch (IgniteException e) { throw new IgniteCheckedException(e); } catch (IllegalStateException e) { - throw new IgfsHadoopCommunicationException("Failed to append file because Grid is stopping: " + path); + throw new HadoopIgfsCommunicationException("Failed to append file because Grid is stopping: " + path); } } /** {@inheritDoc} */ - @Override public GridPlainFuture readData(IgfsHadoopStreamDelegate delegate, long pos, int len, + @Override public GridPlainFuture readData(HadoopIgfsStreamDelegate delegate, long pos, int len, @Nullable byte[] outBuf, int outOff, int outLen) { IgfsInputStreamAdapter stream = delegate.target(); @@ -326,7 +326,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { return new GridPlainFutureAdapter<>(res); } catch (IllegalStateException | IOException e) { - IgfsHadoopStreamEventListener lsnr = lsnrs.get(delegate); + HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate); if (lsnr != null) lsnr.onError(e.getMessage()); @@ -336,7 +336,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { } /** {@inheritDoc} */ - @Override public void writeData(IgfsHadoopStreamDelegate delegate, byte[] data, int off, int len) + @Override public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len) throws IOException { try { IgfsOutputStream stream = delegate.target(); @@ -344,7 +344,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { stream.write(data, off, len); } catch (IllegalStateException | IOException e) { - IgfsHadoopStreamEventListener lsnr = lsnrs.get(delegate); + HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate); if (lsnr != null) lsnr.onError(e.getMessage()); @@ -357,14 +357,14 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { } /** {@inheritDoc} */ - @Override public void flush(IgfsHadoopStreamDelegate delegate) throws IOException { + @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException { try { IgfsOutputStream stream = delegate.target(); stream.flush(); } catch (IllegalStateException | IOException e) { - IgfsHadoopStreamEventListener lsnr = lsnrs.get(delegate); + HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate); if (lsnr != null) lsnr.onError(e.getMessage()); @@ -377,7 +377,7 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { } /** {@inheritDoc} */ - @Override public void closeStream(IgfsHadoopStreamDelegate desc) throws IOException { + @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException { Closeable closeable = desc.target(); try { @@ -389,9 +389,9 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { } /** {@inheritDoc} */ - 
@Override public void addEventListener(IgfsHadoopStreamDelegate delegate, - IgfsHadoopStreamEventListener lsnr) { - IgfsHadoopStreamEventListener lsnr0 = lsnrs.put(delegate, lsnr); + @Override public void addEventListener(HadoopIgfsStreamDelegate delegate, + HadoopIgfsStreamEventListener lsnr) { + HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(delegate, lsnr); assert lsnr0 == null || lsnr0 == lsnr; @@ -400,8 +400,8 @@ public class IgfsHadoopInProc implements IgfsHadoopEx { } /** {@inheritDoc} */ - @Override public void removeEventListener(IgfsHadoopStreamDelegate delegate) { - IgfsHadoopStreamEventListener lsnr0 = lsnrs.remove(delegate); + @Override public void removeEventListener(HadoopIgfsStreamDelegate delegate) { + HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(delegate); if (lsnr0 != null && log.isDebugEnabled()) log.debug("Removed stream event listener [delegate=" + delegate + ']'); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java similarity index 97% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInputStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java index efc5264..c335a62 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopInputStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsInputStream.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.hadoop.fs.*; @@ -31,13 +31,13 @@ import java.io.*; * IGFS input stream wrapper for hadoop interfaces. */ @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") -public final class IgfsHadoopInputStream extends InputStream implements Seekable, PositionedReadable, - IgfsHadoopStreamEventListener { +public final class HadoopIgfsInputStream extends InputStream implements Seekable, PositionedReadable, + HadoopIgfsStreamEventListener { /** Minimum buffer size. */ private static final int MIN_BUF_SIZE = 4 * 1024; /** Server stream delegate. */ - private IgfsHadoopStreamDelegate delegate; + private HadoopIgfsStreamDelegate delegate; /** Stream ID used by logger. */ private long logStreamId; @@ -90,7 +90,7 @@ public final class IgfsHadoopInputStream extends InputStream implements Seekable * @param log Log. * @param clientLog Client logger. 
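[Editor's sketch, Java] The in-process facade above repeats a single error-translation idiom in every operation; condensed here, with igfs and path standing for the surrounding field and argument:

    try {
        return igfs.info(path);                  // direct call into the same JVM
    }
    catch (IgniteException e) {
        throw new IgniteCheckedException(e);     // unchecked becomes checked
    }
    catch (IllegalStateException e) {
        // The grid is stopping: surfaced as a communication failure, as above.
        throw new HadoopIgfsCommunicationException(
            "Failed to get file info because Grid is stopping: " + path);
    }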
*/ - public IgfsHadoopInputStream(IgfsHadoopStreamDelegate delegate, long limit, int bufSize, Log log, + public HadoopIgfsInputStream(HadoopIgfsStreamDelegate delegate, long limit, int bufSize, Log log, IgfsLogger clientLog, long logStreamId) { assert limit >= 0; @@ -151,7 +151,7 @@ public final class IgfsHadoopInputStream extends InputStream implements Seekable return res; } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e); + throw HadoopIgfsUtils.cast(e); } finally { readEnd(); @@ -191,7 +191,7 @@ public final class IgfsHadoopInputStream extends InputStream implements Seekable return read; } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e); + throw HadoopIgfsUtils.cast(e); } finally { readEnd(); @@ -322,7 +322,7 @@ public final class IgfsHadoopInputStream extends InputStream implements Seekable clientLog.logRandomRead(logStreamId, position, len); } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e); + throw HadoopIgfsUtils.cast(e); } finally { readEnd(); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIo.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java index 46f5a6c..88dd896 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIo.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIo.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.*; import org.apache.ignite.internal.igfs.common.*; @@ -26,7 +26,7 @@ import org.jetbrains.annotations.*; * IO abstraction layer for IGFS client. Two kind of messages are expected to be sent: requests with response * and request without response. */ -public interface IgfsHadoopIo { +public interface HadoopIgfsIo { /** * Sends given IGFS client message and asynchronously awaits for response. * @@ -65,12 +65,12 @@ public interface IgfsHadoopIo { * * @param lsnr Event listener. */ - public void addEventListener(IgfsHadoopIpcIoListener lsnr); + public void addEventListener(HadoopIgfsIpcIoListener lsnr); /** * Removes event listener that will be invoked when connection with server is lost or remote error has occurred. * * @param lsnr Event listener. */ - public void removeEventListener(IgfsHadoopIpcIoListener lsnr); + public void removeEventListener(HadoopIgfsIpcIoListener lsnr); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIo.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIo.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java index d07f34d..c9c61fe 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIo.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIo.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.ignite.*; @@ -39,12 +39,12 @@ import java.util.concurrent.locks.*; * IO layer implementation based on blocking IPC streams. */ @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") -public class IgfsHadoopIpcIo implements IgfsHadoopIo { +public class HadoopIgfsIpcIo implements HadoopIgfsIo { /** Logger. */ private Log log; /** Request futures map. */ - private ConcurrentMap reqMap = + private ConcurrentMap reqMap = new ConcurrentHashMap8<>(); /** Request ID counter. */ @@ -75,11 +75,11 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { private final AtomicInteger activeCnt = new AtomicInteger(1); /** Event listeners. */ - private final Collection lsnrs = + private final Collection lsnrs = new GridConcurrentHashSet<>(); /** Cached connections. */ - private static final ConcurrentMap ipcCache = + private static final ConcurrentMap ipcCache = new ConcurrentHashMap8<>(); /** Striped lock that prevents multiple instance creation in {@link #get(Log, String)}. */ @@ -90,7 +90,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { * @param marsh Protocol. * @param log Logger to use. */ - public IgfsHadoopIpcIo(String endpointAddr, IgfsMarshaller marsh, Log log) { + public HadoopIgfsIpcIo(String endpointAddr, IgfsMarshaller marsh, Log log) { assert endpointAddr != null; assert marsh != null; @@ -108,9 +108,9 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { * @return New or existing cached instance, which is started and operational. * @throws IOException If new instance was created but failed to start. */ - public static IgfsHadoopIpcIo get(Log log, String endpoint) throws IOException { + public static HadoopIgfsIpcIo get(Log log, String endpoint) throws IOException { while (true) { - IgfsHadoopIpcIo clientIo = ipcCache.get(endpoint); + HadoopIgfsIpcIo clientIo = ipcCache.get(endpoint); if (clientIo != null) { if (clientIo.acquire()) @@ -136,7 +136,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { } // Otherwise try creating a new one. - clientIo = new IgfsHadoopIpcIo(endpoint, new IgfsMarshaller(), log); + clientIo = new HadoopIgfsIpcIo(endpoint, new IgfsMarshaller(), log); try { clientIo.start(); @@ -145,7 +145,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { throw new IOException(e.getMessage(), e); } - IgfsHadoopIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo); + HadoopIgfsIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo); // Put in exclusive lock. 
assert old == null; @@ -237,7 +237,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { try { endpoint = IpcEndpointFactory.connectEndpoint( - endpointAddr, new GridLoggerProxy(new IgfsHadoopJclLogger(log), null, null, "")); + endpointAddr, new GridLoggerProxy(new HadoopIgfsJclLogger(log), null, null, "")); out = new IgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream())); @@ -287,7 +287,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { } /** {@inheritDoc} */ - @Override public void addEventListener(IgfsHadoopIpcIoListener lsnr) { + @Override public void addEventListener(HadoopIgfsIpcIoListener lsnr) { if (!busyLock.readLock().tryLock()) { lsnr.onClose(); @@ -311,7 +311,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { } /** {@inheritDoc} */ - @Override public void removeEventListener(IgfsHadoopIpcIoListener lsnr) { + @Override public void removeEventListener(HadoopIgfsIpcIoListener lsnr) { lsnrs.remove(lsnr); } @@ -326,24 +326,24 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { assert outBuf == null || msg.command() == IgfsIpcCommand.READ_BLOCK; if (!busyLock.readLock().tryLock()) - throw new IgfsHadoopCommunicationException("Failed to send message (client is being concurrently " + + throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " + "closed)."); try { if (stopping) - throw new IgfsHadoopCommunicationException("Failed to send message (client is being concurrently " + + throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently " + "closed)."); long reqId = reqIdCnt.getAndIncrement(); - IgfsHadoopFuture fut = new IgfsHadoopFuture<>(); + HadoopIgfsFuture fut = new HadoopIgfsFuture<>(); fut.outputBuffer(outBuf); fut.outputOffset(outOff); fut.outputLength(outLen); fut.read(msg.command() == IgfsIpcCommand.READ_BLOCK); - IgfsHadoopFuture oldFut = reqMap.putIfAbsent(reqId, fut); + HadoopIgfsFuture oldFut = reqMap.putIfAbsent(reqId, fut); assert oldFut == null; @@ -365,7 +365,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { err = e; } catch (IOException e) { - err = new IgfsHadoopCommunicationException(e); + err = new HadoopIgfsCommunicationException(e); } if (err != null) { @@ -384,12 +384,12 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { /** {@inheritDoc} */ @Override public void sendPlain(IgfsMessage msg) throws IgniteCheckedException { if (!busyLock.readLock().tryLock()) - throw new IgfsHadoopCommunicationException("Failed to send message (client is being " + + throw new HadoopIgfsCommunicationException("Failed to send message (client is being " + "concurrently closed)."); try { if (stopping) - throw new IgfsHadoopCommunicationException("Failed to send message (client is being concurrently closed)."); + throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed)."); assert msg.command() == IgfsIpcCommand.WRITE_BLOCK; @@ -408,7 +408,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { } } catch (IOException e) { - throw new IgfsHadoopCommunicationException(e); + throw new HadoopIgfsCommunicationException(e); } finally { busyLock.readLock().unlock(); @@ -444,17 +444,17 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { endpoint.close(); // Unwind futures. We can safely iterate here because no more futures will be added. 
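[Editor's sketch, Java] Client-side shape of the connection cache shown above: get() either revives a cached, reference-counted HadoopIgfsIpcIo via acquire() or starts and caches a fresh one. The release() counterpart is an assumption inferred from the acquire()/activeCnt bookkeeping; it is not part of this hunk:

    HadoopIgfsIpcIo io = HadoopIgfsIpcIo.get(log, "127.0.0.1:10500"); // endpoint is a placeholder

    try {
        // ... io.send(msg) / io.sendPlain(msg), as declared in HadoopIgfsIo ...
    }
    finally {
        io.release(); // assumed counterpart of acquire()
    }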
- Iterator it = reqMap.values().iterator(); + Iterator it = reqMap.values().iterator(); while (it.hasNext()) { - IgfsHadoopFuture fut = it.next(); + HadoopIgfsFuture fut = it.next(); fut.onDone(err); it.remove(); } - for (IgfsHadoopIpcIoListener lsnr : lsnrs) + for (HadoopIgfsIpcIoListener lsnr : lsnrs) lsnr.onClose(); } @@ -495,11 +495,11 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { long streamId = dis.readLong(); - for (IgfsHadoopIpcIoListener lsnr : lsnrs) + for (HadoopIgfsIpcIoListener lsnr : lsnrs) lsnr.onError(streamId, errMsg); } else { - IgfsHadoopFuture fut = reqMap.remove(reqId); + HadoopIgfsFuture fut = reqMap.remove(reqId); if (fut == null) { String msg = "Failed to read response from server: response closure is unavailable for " + @@ -577,7 +577,7 @@ public class IgfsHadoopIpcIo implements IgfsHadoopIo { if (!stopping) log.error("Failed to read data (connection will be closed)", e); - err = new IgfsHadoopCommunicationException(e); + err = new HadoopIgfsCommunicationException(e); } catch (IgniteCheckedException e) { if (!stopping) diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIoListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java similarity index 87% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIoListener.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java index ffc58ba..c2dad82 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopIpcIoListener.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsIpcIoListener.java @@ -15,12 +15,12 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; /** - * Listens to the events of {@link IgfsHadoopIpcIo}. + * Listens to the events of {@link HadoopIgfsIpcIo}. */ -public interface IgfsHadoopIpcIoListener { +public interface HadoopIgfsIpcIoListener { /** * Callback invoked when the IO is being closed. */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopJclLogger.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java similarity index 87% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopJclLogger.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java index e43d77a..0c29454 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopJclLogger.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsJclLogger.java @@ -15,17 +15,20 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.ignite.*; +import org.apache.ignite.internal.util.tostring.*; +import org.apache.ignite.internal.util.typedef.internal.*; import org.jetbrains.annotations.*; /** * JCL logger wrapper for Hadoop. */ -public class IgfsHadoopJclLogger implements IgniteLogger { +public class HadoopIgfsJclLogger implements IgniteLogger { /** JCL implementation proxy. 
*/ + @GridToStringInclude private Log impl; /** @@ -33,7 +36,7 @@ public class IgfsHadoopJclLogger implements IgniteLogger { * * @param impl JCL implementation to use. */ - IgfsHadoopJclLogger(Log impl) { + HadoopIgfsJclLogger(Log impl) { assert impl != null; this.impl = impl; @@ -41,7 +44,7 @@ public class IgfsHadoopJclLogger implements IgniteLogger { /** {@inheritDoc} */ @Override public IgniteLogger getLogger(Object ctgr) { - return new IgfsHadoopJclLogger(LogFactory.getLog( + return new HadoopIgfsJclLogger(LogFactory.getLog( ctgr instanceof Class ? ((Class)ctgr).getName() : String.valueOf(ctgr))); } @@ -107,6 +110,6 @@ public class IgfsHadoopJclLogger implements IgniteLogger { /** {@inheritDoc} */ @Override public String toString() { - return "IgfsHadoopJclLogger [impl=" + impl + ']'; + return S.toString(HadoopIgfsJclLogger.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java similarity index 88% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java index 31183a8..662541a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutProc.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutProc.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.ignite.*; @@ -34,7 +34,7 @@ import static org.apache.ignite.internal.igfs.common.IgfsIpcCommand.*; /** * Communication with external process (TCP or shmem). */ -public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener { +public class HadoopIgfsOutProc implements HadoopIgfsEx, HadoopIgfsIpcIoListener { /** Expected result is boolean. */ private static final GridPlainClosure, Boolean> BOOL_RES = createClosure(); @@ -82,10 +82,10 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener private final Log log; /** Client IO. */ - private final IgfsHadoopIpcIo io; + private final HadoopIgfsIpcIo io; /** Event listeners. */ - private final Map lsnrs = new ConcurrentHashMap8<>(); + private final Map lsnrs = new ConcurrentHashMap8<>(); /** * Constructor for TCP endpoint. @@ -97,7 +97,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * @param log Client logger. * @throws IOException If failed. */ - public IgfsHadoopOutProc(String host, int port, String grid, String igfs, Log log) throws IOException { + public HadoopIgfsOutProc(String host, int port, String grid, String igfs, Log log) throws IOException { this(host, port, grid, igfs, false, log); } @@ -110,7 +110,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * @param log Client logger. * @throws IOException If failed. */ - public IgfsHadoopOutProc(int port, String grid, String igfs, Log log) throws IOException { + public HadoopIgfsOutProc(int port, String grid, String igfs, Log log) throws IOException { this(null, port, grid, igfs, true, log); } @@ -125,7 +125,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener * @param log Client logger. * @throws IOException If failed. 
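[Editor's sketch, Java] The two public constructors above encode the transport choice; a sketch with placeholder grid name, IGFS name, host and port (the private delegating constructor, shown just below, asserts that host is null exactly when shared memory is used):

    import java.io.IOException;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsOutProc;

    public class OutProcSketch {
        public static void main(String[] args) throws IOException {
            Log log = LogFactory.getLog("igfs-client");

            // TCP endpoint: host + port.
            HadoopIgfsOutProc overTcp = new HadoopIgfsOutProc("127.0.0.1", 10500, "grid", "igfs", log);

            // Shared-memory endpoint: port only.
            HadoopIgfsOutProc overShmem = new HadoopIgfsOutProc(10500, "grid", "igfs", log);
        }
    }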
*/ - private IgfsHadoopOutProc(String host, int port, String grid, String igfs, boolean shmem, Log log) + private HadoopIgfsOutProc(String host, int port, String grid, String igfs, boolean shmem, Log log) throws IOException { assert host != null && !shmem || host == null && shmem : "Invalid arguments [host=" + host + ", port=" + port + ", shmem=" + shmem + ']'; @@ -136,7 +136,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener this.igfs = igfs; this.log = log; - io = IgfsHadoopIpcIo.get(log, endpoint); + io = HadoopIgfsIpcIo.get(log, endpoint); io.addEventListener(this); } @@ -279,7 +279,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(IgfsPath path) throws IgniteCheckedException { + @Override public HadoopIgfsStreamDelegate open(IgfsPath path) throws IgniteCheckedException { final IgfsPathControlRequest msg = new IgfsPathControlRequest(); msg.command(OPEN_READ); @@ -288,11 +288,11 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get(); - return new IgfsHadoopStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length()); + return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length()); } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(IgfsPath path, + @Override public HadoopIgfsStreamDelegate open(IgfsPath path, int seqReadsBeforePrefetch) throws IgniteCheckedException { final IgfsPathControlRequest msg = new IgfsPathControlRequest(); @@ -303,11 +303,11 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener IgfsInputStreamDescriptor rmtDesc = io.send(msg).chain(STREAM_DESCRIPTOR_RES).get(); - return new IgfsHadoopStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length()); + return new HadoopIgfsStreamDelegate(this, rmtDesc.streamId(), rmtDesc.length()); } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, + @Override public HadoopIgfsStreamDelegate create(IgfsPath path, boolean overwrite, boolean colocate, int replication, long blockSize, @Nullable Map props) throws IgniteCheckedException { final IgfsPathControlRequest msg = new IgfsPathControlRequest(); @@ -321,11 +321,11 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener Long streamId = io.send(msg).chain(LONG_RES).get(); - return new IgfsHadoopStreamDelegate(this, streamId); + return new HadoopIgfsStreamDelegate(this, streamId); } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate append(IgfsPath path, boolean create, + @Override public HadoopIgfsStreamDelegate append(IgfsPath path, boolean create, @Nullable Map props) throws IgniteCheckedException { final IgfsPathControlRequest msg = new IgfsPathControlRequest(); @@ -336,11 +336,11 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener Long streamId = io.send(msg).chain(LONG_RES).get(); - return new IgfsHadoopStreamDelegate(this, streamId); + return new HadoopIgfsStreamDelegate(this, streamId); } /** {@inheritDoc} */ - @Override public GridPlainFuture readData(IgfsHadoopStreamDelegate desc, long pos, int len, + @Override public GridPlainFuture readData(HadoopIgfsStreamDelegate desc, long pos, int len, final @Nullable byte[] outBuf, final int outOff, final int outLen) { assert len > 0; @@ -360,7 +360,7 @@ public class 
IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener } /** {@inheritDoc} */ - @Override public void writeData(IgfsHadoopStreamDelegate desc, byte[] data, int off, int len) + @Override public void writeData(HadoopIgfsStreamDelegate desc, byte[] data, int off, int len) throws IOException { final IgfsStreamControlRequest msg = new IgfsStreamControlRequest(); @@ -374,17 +374,17 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener io.sendPlain(msg); } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e); + throw HadoopIgfsUtils.cast(e); } } /** {@inheritDoc} */ - @Override public void flush(IgfsHadoopStreamDelegate delegate) throws IOException { + @Override public void flush(HadoopIgfsStreamDelegate delegate) throws IOException { // No-op. } /** {@inheritDoc} */ - @Override public void closeStream(IgfsHadoopStreamDelegate desc) throws IOException { + @Override public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException { final IgfsStreamControlRequest msg = new IgfsStreamControlRequest(); msg.command(CLOSE); @@ -394,16 +394,16 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener io.send(msg).chain(BOOL_RES).get(); } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e); + throw HadoopIgfsUtils.cast(e); } } /** {@inheritDoc} */ - @Override public void addEventListener(IgfsHadoopStreamDelegate desc, - IgfsHadoopStreamEventListener lsnr) { + @Override public void addEventListener(HadoopIgfsStreamDelegate desc, + HadoopIgfsStreamEventListener lsnr) { long streamId = desc.target(); - IgfsHadoopStreamEventListener lsnr0 = lsnrs.put(streamId, lsnr); + HadoopIgfsStreamEventListener lsnr0 = lsnrs.put(streamId, lsnr); assert lsnr0 == null || lsnr0 == lsnr; @@ -412,10 +412,10 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener } /** {@inheritDoc} */ - @Override public void removeEventListener(IgfsHadoopStreamDelegate desc) { + @Override public void removeEventListener(HadoopIgfsStreamDelegate desc) { long streamId = desc.target(); - IgfsHadoopStreamEventListener lsnr0 = lsnrs.remove(streamId); + HadoopIgfsStreamEventListener lsnr0 = lsnrs.remove(streamId); if (lsnr0 != null && log.isDebugEnabled()) log.debug("Removed stream event listener [streamId=" + streamId + ']'); @@ -423,7 +423,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener /** {@inheritDoc} */ @Override public void onClose() { - for (IgfsHadoopStreamEventListener lsnr : lsnrs.values()) { + for (HadoopIgfsStreamEventListener lsnr : lsnrs.values()) { try { lsnr.onClose(); } @@ -435,7 +435,7 @@ public class IgfsHadoopOutProc implements IgfsHadoopEx, IgfsHadoopIpcIoListener /** {@inheritDoc} */ @Override public void onError(long streamId, String errMsg) { - IgfsHadoopStreamEventListener lsnr = lsnrs.get(streamId); + HadoopIgfsStreamEventListener lsnr = lsnrs.get(streamId); if (lsnr != null) lsnr.onError(errMsg); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutputStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java index ae5f980..902d710 100644 --- 
a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopOutputStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsOutputStream.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.ignite.*; @@ -27,7 +27,7 @@ import java.io.*; /** * IGFS Hadoop output stream implementation. */ -public class IgfsHadoopOutputStream extends OutputStream implements IgfsHadoopStreamEventListener { +public class HadoopIgfsOutputStream extends OutputStream implements HadoopIgfsStreamEventListener { /** Log instance. */ private Log log; @@ -38,7 +38,7 @@ public class IgfsHadoopOutputStream extends OutputStream implements IgfsHadoopSt private long logStreamId; /** Server stream delegate. */ - private IgfsHadoopStreamDelegate delegate; + private HadoopIgfsStreamDelegate delegate; /** Closed flag. */ private volatile boolean closed; @@ -68,7 +68,7 @@ public class IgfsHadoopOutputStream extends OutputStream implements IgfsHadoopSt * @param log Logger to use. * @param clientLog Client logger. */ - public IgfsHadoopOutputStream(IgfsHadoopStreamDelegate delegate, Log log, + public HadoopIgfsOutputStream(HadoopIgfsStreamDelegate delegate, Log log, IgfsLogger clientLog, long logStreamId) { this.delegate = delegate; this.log = log; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFSProperties.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFSProperties.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java index c9d1322..54e87dd 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopFSProperties.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProperties.java @@ -15,19 +15,19 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.hadoop.fs.permission.*; import org.apache.ignite.*; import java.util.*; -import static org.apache.ignite.IgniteFs.*; +import static org.apache.ignite.internal.processors.igfs.IgfsEx.*; /** * Hadoop file system properties. */ -public class IgfsHadoopFSProperties { +public class HadoopIgfsProperties { /** Username. */ private String usrName; @@ -43,7 +43,7 @@ public class IgfsHadoopFSProperties { * @param props Properties. * @throws IgniteException In case of error. 
*/ - public IgfsHadoopFSProperties(Map props) throws IgniteException { + public HadoopIgfsProperties(Map props) throws IgniteException { usrName = props.get(PROP_USER_NAME); grpName = props.get(PROP_GROUP_NAME); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyInputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java similarity index 97% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyInputStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java index 330537d..4530e64 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyInputStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyInputStream.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.hadoop.fs.*; import org.apache.ignite.internal.igfs.common.*; @@ -25,7 +25,7 @@ import java.io.*; /** * Secondary Hadoop file system input stream wrapper. */ -public class IgfsHadoopProxyInputStream extends InputStream implements Seekable, PositionedReadable { +public class HadoopIgfsProxyInputStream extends InputStream implements Seekable, PositionedReadable { /** Actual input stream to the secondary file system. */ private final FSDataInputStream is; @@ -56,7 +56,7 @@ public class IgfsHadoopProxyInputStream extends InputStream implements Seekable, * @param is Actual input stream to the secondary file system. * @param clientLog Client log. */ - public IgfsHadoopProxyInputStream(FSDataInputStream is, IgfsLogger clientLog, long logStreamId) { + public HadoopIgfsProxyInputStream(FSDataInputStream is, IgfsLogger clientLog, long logStreamId) { assert is != null; assert clientLog != null; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyOutputStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java similarity index 95% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyOutputStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java index 41e80eb..9ab552e 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopProxyOutputStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsProxyOutputStream.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.hadoop.fs.*; import org.apache.ignite.internal.igfs.common.*; @@ -25,7 +25,7 @@ import java.io.*; /** * Secondary Hadoop file system output stream wrapper. */ -public class IgfsHadoopProxyOutputStream extends OutputStream { +public class HadoopIgfsProxyOutputStream extends OutputStream { /** Actual output stream. */ private FSDataOutputStream os; @@ -57,7 +57,7 @@ public class IgfsHadoopProxyOutputStream extends OutputStream { * @param clientLog Client logger. * @param logStreamId Log stream ID. 
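A sketch for orientation (not part of this patch): the proxy stream classes renamed here are plain decorators. They hold the secondary file system's FSDataInputStream or FSDataOutputStream, forward every call, and count the bytes that pass through so the client log can report them. The same idea over java.io, under a hypothetical CountingInputStream name; the real classes additionally implement Hadoop's Seekable and PositionedReadable:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

/** Decorator that forwards reads and tracks how many bytes went through. */
class CountingInputStream extends FilterInputStream {
    private long total;

    CountingInputStream(InputStream in) {
        super(in);
    }

    @Override public int read() throws IOException {
        int b = super.read();

        if (b >= 0)
            total++;

        return b;
    }

    @Override public int read(byte[] buf, int off, int len) throws IOException {
        int n = super.read(buf, off, len);

        if (n > 0)
            total += n;

        return n;
    }

    /** The real proxies report this figure to IgfsLogger when the stream closes. */
    long totalRead() {
        return total;
    }
}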
*/ - public IgfsHadoopProxyOutputStream(FSDataOutputStream os, IgfsLogger clientLog, long logStreamId) { + public HadoopIgfsProxyOutputStream(FSDataOutputStream os, IgfsLogger clientLog, long logStreamId) { assert os != null; assert clientLog != null; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopReader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopReader.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java index 3ab3acc..e921fd3 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopReader.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsSecondaryFileSystemPositionedReadable.java @@ -15,11 +15,11 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; -import org.apache.ignite.igfs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.util.typedef.internal.*; import java.io.*; @@ -30,7 +30,7 @@ import java.io.*; *

    * The class is expected to be used only from synchronized context and therefore is not thread-safe. */ -public class IgfsHadoopReader implements IgfsReader { +public class HadoopIgfsSecondaryFileSystemPositionedReadable implements IgfsSecondaryFileSystemPositionedReadable { /** Secondary file system. */ private final FileSystem fs; @@ -56,7 +56,7 @@ public class IgfsHadoopReader implements IgfsReader { * @param path Path to the file to open. * @param bufSize Buffer size. */ - public IgfsHadoopReader(FileSystem fs, Path path, int bufSize) { + public HadoopIgfsSecondaryFileSystemPositionedReadable(FileSystem fs, Path path, int bufSize) { assert fs != null; assert path != null; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamDelegate.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamDelegate.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java index 9aaab4c..54f7377 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamDelegate.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamDelegate.java @@ -15,16 +15,16 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.internal.util.typedef.internal.*; /** * IGFS Hadoop stream descriptor. */ -public class IgfsHadoopStreamDelegate { +public class HadoopIgfsStreamDelegate { /** RPC handler. */ - private final IgfsHadoopEx hadoop; + private final HadoopIgfsEx hadoop; /** Target. */ private final Object target; @@ -37,7 +37,7 @@ public class IgfsHadoopStreamDelegate { * * @param target Target. */ - public IgfsHadoopStreamDelegate(IgfsHadoopEx hadoop, Object target) { + public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target) { this(hadoop, target, -1); } @@ -47,7 +47,7 @@ public class IgfsHadoopStreamDelegate { * @param target Target. * @param len Optional length. */ - public IgfsHadoopStreamDelegate(IgfsHadoopEx hadoop, Object target, long len) { + public HadoopIgfsStreamDelegate(HadoopIgfsEx hadoop, Object target, long len) { assert hadoop != null; assert target != null; @@ -59,7 +59,7 @@ public class IgfsHadoopStreamDelegate { /** * @return RPC handler.
*/ - public IgfsHadoopEx hadoop() { + public HadoopIgfsEx hadoop() { return hadoop; } @@ -85,12 +85,12 @@ public class IgfsHadoopStreamDelegate { /** {@inheritDoc} */ @Override public boolean equals(Object obj) { - return obj != null && obj instanceof IgfsHadoopStreamDelegate && - target == ((IgfsHadoopStreamDelegate)obj).target; + return obj != null && obj instanceof HadoopIgfsStreamDelegate && + target == ((HadoopIgfsStreamDelegate)obj).target; } /** {@inheritDoc} */ @Override public String toString() { - return S.toString(IgfsHadoopStreamDelegate.class, this); + return S.toString(HadoopIgfsStreamDelegate.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamEventListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamEventListener.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java index 20d7f2a..6b3fa82 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopStreamEventListener.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsStreamEventListener.java @@ -15,14 +15,14 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.ignite.*; /** * IGFS input stream event listener. */ -public interface IgfsHadoopStreamEventListener { +public interface HadoopIgfsStreamEventListener { /** * Callback invoked when the stream is being closed. * diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopUtils.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java similarity index 97% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopUtils.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java index bd96e60..e30a4ec 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopUtils.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsUtils.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; @@ -29,7 +29,7 @@ import java.io.*; /** * Utility constants and methods for IGFS Hadoop file system. */ -public class IgfsHadoopUtils { +public class HadoopIgfsUtils { /** Parameter name for endpoint no embed mode flag. */ public static final String PARAM_IGFS_ENDPOINT_NO_EMBED = "fs.igfs.%s.endpoint.no_embed"; @@ -125,7 +125,7 @@ public class IgfsHadoopUtils { /** * Constructor. */ - private IgfsHadoopUtils() { + private HadoopIgfsUtils() { // No-op. 
} } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java index 5586e72..1dada21 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/igfs/hadoop/IgfsHadoopWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/igfs/HadoopIgfsWrapper.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.internal.igfs.hadoop; +package org.apache.ignite.internal.processors.hadoop.igfs; import org.apache.commons.logging.*; import org.apache.hadoop.conf.*; @@ -30,13 +30,13 @@ import java.io.*; import java.util.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.internal.igfs.hadoop.IgfsHadoopEndpoint.*; -import static org.apache.ignite.internal.igfs.hadoop.IgfsHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsEndpoint.*; +import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.*; /** * Wrapper for IGFS server. */ -public class IgfsHadoopWrapper implements IgfsHadoop { +public class HadoopIgfsWrapper implements HadoopIgfs { /** Delegate. */ private final AtomicReference delegateRef = new AtomicReference<>(); @@ -44,7 +44,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { private final String authority; /** Connection string. */ - private final IgfsHadoopEndpoint endpoint; + private final HadoopIgfsEndpoint endpoint; /** Log directory. */ private final String logDir; @@ -63,10 +63,10 @@ public class IgfsHadoopWrapper implements IgfsHadoop { * @param conf Configuration. * @param log Current logger. 
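To make the configuration convention concrete (sketch only, not in the patch): HadoopIgfsUtils keys its flags by file system authority, as in fs.igfs.%s.endpoint.no_embed above, and HadoopIgfsWrapper reads them through a parameter(...) helper. A self-contained sketch of that lookup, assuming java.util.Properties in place of Hadoop's Configuration:

import java.util.Properties;

final class ParamSketch {
    /** Expands the %s pattern with the authority and reads a boolean flag with a default. */
    static boolean parameter(Properties conf, String pattern, String authority, boolean dflt) {
        String val = conf.getProperty(String.format(pattern, authority == null ? "" : authority));

        return val == null ? dflt : Boolean.parseBoolean(val);
    }
}

So parameter(conf, "fs.igfs.%s.endpoint.no_embed", "igfs@myhost", false) consults the key fs.igfs.igfs@myhost.endpoint.no_embed and falls back to false.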
*/ - public IgfsHadoopWrapper(String authority, String logDir, Configuration conf, Log log) throws IOException { + public HadoopIgfsWrapper(String authority, String logDir, Configuration conf, Log log) throws IOException { try { this.authority = authority; - this.endpoint = new IgfsHadoopEndpoint(authority); + this.endpoint = new HadoopIgfsEndpoint(authority); this.logDir = logDir; this.conf = conf; this.log = log; @@ -79,7 +79,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public IgfsHandshakeResponse handshake(String logDir) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsHandshakeResponse apply(IgfsHadoopEx hadoop, + @Override public IgfsHandshakeResponse apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) { return hndResp; } @@ -97,7 +97,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public IgfsFile info(final IgfsPath path) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsFile apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.info(path); } @@ -107,7 +107,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public IgfsFile update(final IgfsPath path, final Map props) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsFile apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public IgfsFile apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.update(path, props); } @@ -118,7 +118,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { @Override public Boolean setTimes(final IgfsPath path, final long accessTime, final long modificationTime) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public Boolean apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.setTimes(path, accessTime, modificationTime); } @@ -128,7 +128,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public Boolean rename(final IgfsPath src, final IgfsPath dest) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public Boolean apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.rename(src, dest); } @@ -138,7 +138,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public Boolean delete(final IgfsPath path, final boolean recursive) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public Boolean apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.delete(path, recursive); } @@ -149,7 +149,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { @Override public Collection affinity(final IgfsPath path, final long start, final long len) throws IOException { return 
withReconnectHandling(new FileSystemClosure>() { - @Override public Collection apply(IgfsHadoopEx hadoop, + @Override public Collection apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.affinity(path, start, len); } @@ -159,7 +159,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public IgfsPathSummary contentSummary(final IgfsPath path) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsPathSummary apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public IgfsPathSummary apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.contentSummary(path); } @@ -169,7 +169,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public Boolean mkdirs(final IgfsPath path, final Map props) throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public Boolean apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public Boolean apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.mkdirs(path, props); } @@ -179,7 +179,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public Collection listFiles(final IgfsPath path) throws IOException { return withReconnectHandling(new FileSystemClosure>() { - @Override public Collection apply(IgfsHadoopEx hadoop, + @Override public Collection apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.listFiles(path); } @@ -189,7 +189,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public Collection listPaths(final IgfsPath path) throws IOException { return withReconnectHandling(new FileSystemClosure>() { - @Override public Collection apply(IgfsHadoopEx hadoop, + @Override public Collection apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.listPaths(path); } @@ -199,7 +199,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { /** {@inheritDoc} */ @Override public IgfsStatus fsStatus() throws IOException { return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsStatus apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) + @Override public IgfsStatus apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.fsStatus(); } @@ -207,9 +207,9 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(final IgfsPath path) throws IOException { - return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsHadoopStreamDelegate apply(IgfsHadoopEx hadoop, + @Override public HadoopIgfsStreamDelegate open(final IgfsPath path) throws IOException { + return withReconnectHandling(new FileSystemClosure() { + @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.open(path); } @@ -217,10 +217,10 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate open(final IgfsPath path, final int seqReadsBeforePrefetch) + @Override public HadoopIgfsStreamDelegate 
open(final IgfsPath path, final int seqReadsBeforePrefetch) throws IOException { - return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsHadoopStreamDelegate apply(IgfsHadoopEx hadoop, + return withReconnectHandling(new FileSystemClosure() { + @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.open(path, seqReadsBeforePrefetch); } @@ -228,11 +228,11 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate create(final IgfsPath path, final boolean overwrite, + @Override public HadoopIgfsStreamDelegate create(final IgfsPath path, final boolean overwrite, final boolean colocate, final int replication, final long blockSize, @Nullable final Map props) throws IOException { - return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsHadoopStreamDelegate apply(IgfsHadoopEx hadoop, + return withReconnectHandling(new FileSystemClosure() { + @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.create(path, overwrite, colocate, replication, blockSize, props); } @@ -240,10 +240,10 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } /** {@inheritDoc} */ - @Override public IgfsHadoopStreamDelegate append(final IgfsPath path, final boolean create, + @Override public HadoopIgfsStreamDelegate append(final IgfsPath path, final boolean create, @Nullable final Map props) throws IOException { - return withReconnectHandling(new FileSystemClosure() { - @Override public IgfsHadoopStreamDelegate apply(IgfsHadoopEx hadoop, + return withReconnectHandling(new FileSystemClosure() { + @Override public HadoopIgfsStreamDelegate apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException { return hadoop.append(path, create, props); } @@ -288,7 +288,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { return clo.apply(curDelegate.hadoop, curDelegate.hndResp); } - catch (IgfsHadoopCommunicationException e) { + catch (HadoopIgfsCommunicationException e) { if (curDelegate != null && !curDelegate.doomed) { // Try getting rid of faulty delegate ASAP. delegateRef.compareAndSet(curDelegate, null); @@ -303,7 +303,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { err = e; } catch (IgniteCheckedException e) { - throw IgfsHadoopUtils.cast(e, path != null ? path.toString() : null); + throw HadoopIgfsUtils.cast(e, path != null ? path.toString() : null); } finally { if (close) { @@ -322,7 +322,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { * * @return Delegate. */ - private Delegate delegate() throws IgfsHadoopCommunicationException { + private Delegate delegate() throws HadoopIgfsCommunicationException { Exception err = null; // 1. If delegate is set, return it immediately.
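A sketch of the pattern just diffed (editor's addition, not in the patch): withReconnectHandling is the wrapper's core trick. Each file system call is a closure; when it fails with a communication error the cached delegate is dropped and the call is retried against a freshly established connection, while other checked exceptions are cast to IOException and rethrown. A minimal generic rendering of that retry loop, with RemoteOp and CommunicationException as placeholder names:

import java.io.IOException;

/** Placeholder for transport failures that justify reconnecting. */
class CommunicationException extends IOException {
    CommunicationException(String msg) {
        super(msg);
    }
}

interface RemoteOp<T> {
    T apply() throws IOException;
}

final class ReconnectSketch {
    /** Runs an operation, dropping the cached connection and retrying on comm errors. */
    static <T> T withReconnectHandling(RemoteOp<T> op, int attempts) throws IOException {
        assert attempts > 0;

        IOException last = null;

        for (int i = 0; i < attempts; i++) {
            try {
                return op.apply();
            }
            catch (CommunicationException e) {
                last = e; // Invalidate the cached delegate here, then loop to reconnect.
            }
        }

        throw last;
    }
}

The diffed method makes two attempts and additionally tracks a doomed flag on the delegate; the sketch keeps only the retry skeleton.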
@@ -359,15 +359,15 @@ public class IgfsHadoopWrapper implements IgfsHadoop { } if (igfs != null) { - IgfsHadoopEx hadoop = null; + HadoopIgfsEx hadoop = null; try { - hadoop = new IgfsHadoopInProc(igfs, log); + hadoop = new HadoopIgfsInProc(igfs, log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } catch (IOException | IgniteCheckedException e) { - if (e instanceof IgfsHadoopCommunicationException) + if (e instanceof HadoopIgfsCommunicationException) hadoop.close(true); if (log.isDebugEnabled()) @@ -381,15 +381,15 @@ public class IgfsHadoopWrapper implements IgfsHadoop { // 3. Try connecting using shmem. if (!parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority, false)) { if (curDelegate == null && !U.isWindows()) { - IgfsHadoopEx hadoop = null; + HadoopIgfsEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(endpoint.port(), endpoint.grid(), endpoint.igfs(), log); + hadoop = new HadoopIgfsOutProc(endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } catch (IOException | IgniteCheckedException e) { - if (e instanceof IgfsHadoopCommunicationException) + if (e instanceof HadoopIgfsCommunicationException) hadoop.close(true); if (log.isDebugEnabled()) @@ -405,16 +405,16 @@ public class IgfsHadoopWrapper implements IgfsHadoop { if (!skipLocTcp) { if (curDelegate == null) { - IgfsHadoopEx hadoop = null; + HadoopIgfsEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.igfs(), + hadoop = new HadoopIgfsOutProc(LOCALHOST, endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } catch (IOException | IgniteCheckedException e) { - if (e instanceof IgfsHadoopCommunicationException) + if (e instanceof HadoopIgfsCommunicationException) hadoop.close(true); if (log.isDebugEnabled()) @@ -427,15 +427,15 @@ public class IgfsHadoopWrapper implements IgfsHadoop { // 5. Try remote TCP connection. if (curDelegate == null && (skipLocTcp || !F.eq(LOCALHOST, endpoint.host()))) { - IgfsHadoopEx hadoop = null; + HadoopIgfsEx hadoop = null; try { - hadoop = new IgfsHadoopOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.igfs(), log); + hadoop = new HadoopIgfsOutProc(endpoint.host(), endpoint.port(), endpoint.grid(), endpoint.igfs(), log); curDelegate = new Delegate(hadoop, hadoop.handshake(logDir)); } catch (IOException | IgniteCheckedException e) { - if (e instanceof IgfsHadoopCommunicationException) + if (e instanceof HadoopIgfsCommunicationException) hadoop.close(true); if (log.isDebugEnabled()) @@ -452,7 +452,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { return curDelegate; } else - throw new IgfsHadoopCommunicationException("Failed to connect to IGFS: " + endpoint, err); + throw new HadoopIgfsCommunicationException("Failed to connect to IGFS: " + endpoint, err); } /** @@ -468,7 +468,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { * @throws IgniteCheckedException If failed. * @throws IOException If failed. */ - public T apply(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException; + public T apply(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) throws IgniteCheckedException, IOException; } /** @@ -476,7 +476,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { */ private static class Delegate { /** RPC handler. 
*/ - private final IgfsHadoopEx hadoop; + private final HadoopIgfsEx hadoop; /** Handshake request. */ private final IgfsHandshakeResponse hndResp; @@ -493,7 +493,7 @@ public class IgfsHadoopWrapper implements IgfsHadoop { * @param hadoop Hadoop. * @param hndResp Handshake response. */ - private Delegate(IgfsHadoopEx hadoop, IgfsHandshakeResponse hndResp) { + private Delegate(HadoopIgfsEx hadoop, IgfsHandshakeResponse hndResp) { this.hadoop = hadoop; this.hndResp = hndResp; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobMetadata.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java similarity index 76% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobMetadata.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java index b124312..3f574e9 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobMetadata.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobMetadata.java @@ -19,6 +19,7 @@ package org.apache.ignite.internal.processors.hadoop.jobtracker; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.*; import org.apache.ignite.internal.util.tostring.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -26,39 +27,39 @@ import org.apache.ignite.internal.util.typedef.internal.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobPhase.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.*; /** * Hadoop job metadata. Internal object used for distributed job state tracking. */ -public class GridHadoopJobMetadata implements Externalizable { +public class HadoopJobMetadata implements Externalizable { /** */ private static final long serialVersionUID = 0L; /** Job ID. */ - private GridHadoopJobId jobId; + private HadoopJobId jobId; /** Job info. */ - private GridHadoopJobInfo jobInfo; + private HadoopJobInfo jobInfo; /** Node submitted job. */ private UUID submitNodeId; /** Map-reduce plan. */ - private GridHadoopMapReducePlan mrPlan; + private HadoopMapReducePlan mrPlan; /** Pending splits for which mapper should be executed. */ - private Map pendingSplits; + private Map pendingSplits; /** Pending reducers. */ private Collection pendingReducers; /** Reducers addresses. */ @GridToStringInclude - private Map reducersAddrs; + private Map reducersAddrs; /** Job phase. */ - private GridHadoopJobPhase phase = PHASE_SETUP; + private HadoopJobPhase phase = PHASE_SETUP; /** Fail cause. */ @GridToStringExclude @@ -68,12 +69,12 @@ public class GridHadoopJobMetadata implements Externalizable { private long ver; /** Job counters */ - private GridHadoopCounters counters = new GridHadoopCountersImpl(); + private HadoopCounters counters = new HadoopCountersImpl(); /** * Empty constructor required by {@link Externalizable}. */ - public GridHadoopJobMetadata() { + public HadoopJobMetadata() { // No-op. } @@ -84,7 +85,7 @@ public class GridHadoopJobMetadata implements Externalizable { * @param jobId Job ID. * @param jobInfo Job info. 
*/ - public GridHadoopJobMetadata(UUID submitNodeId, GridHadoopJobId jobId, GridHadoopJobInfo jobInfo) { + public HadoopJobMetadata(UUID submitNodeId, HadoopJobId jobId, HadoopJobInfo jobInfo) { this.jobId = jobId; this.jobInfo = jobInfo; this.submitNodeId = submitNodeId; @@ -95,7 +96,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @param src Metadata to copy. */ - public GridHadoopJobMetadata(GridHadoopJobMetadata src) { + public HadoopJobMetadata(HadoopJobMetadata src) { // Make sure to preserve alphabetic order. counters = src.counters; failCause = src.failCause; @@ -120,14 +121,14 @@ public class GridHadoopJobMetadata implements Externalizable { /** * @param phase Job phase. */ - public void phase(GridHadoopJobPhase phase) { + public void phase(HadoopJobPhase phase) { this.phase = phase; } /** * @return Job phase. */ - public GridHadoopJobPhase phase() { + public HadoopJobPhase phase() { return phase; } @@ -136,7 +137,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @return Reducers addresses. */ - public Map reducersAddresses() { + public Map reducersAddresses() { return reducersAddrs; } @@ -145,7 +146,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @param reducersAddrs Map of addresses. */ - public void reducersAddresses(Map reducersAddrs) { + public void reducersAddresses(Map reducersAddrs) { this.reducersAddrs = reducersAddrs; } @@ -154,7 +155,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @param pendingSplits Collection of pending splits. */ - public void pendingSplits(Map pendingSplits) { + public void pendingSplits(Map pendingSplits) { this.pendingSplits = pendingSplits; } @@ -163,7 +164,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @return Collection of pending splits. */ - public Map pendingSplits() { + public Map pendingSplits() { return pendingSplits; } @@ -188,14 +189,14 @@ public class GridHadoopJobMetadata implements Externalizable { /** * @return Job ID. */ - public GridHadoopJobId jobId() { + public HadoopJobId jobId() { return jobId; } /** * @param mrPlan Map-reduce plan. */ - public void mapReducePlan(GridHadoopMapReducePlan mrPlan) { + public void mapReducePlan(HadoopMapReducePlan mrPlan) { assert this.mrPlan == null : "Map-reduce plan can only be initialized once."; this.mrPlan = mrPlan; @@ -204,14 +205,14 @@ public class GridHadoopJobMetadata implements Externalizable { /** * @return Map-reduce plan. */ - public GridHadoopMapReducePlan mapReducePlan() { + public HadoopMapReducePlan mapReducePlan() { return mrPlan; } /** * @return Job info. */ - public GridHadoopJobInfo jobInfo() { + public HadoopJobInfo jobInfo() { return jobInfo; } @@ -220,7 +221,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @return Collection of counters. */ - public GridHadoopCounters counters() { + public HadoopCounters counters() { return counters; } @@ -229,7 +230,7 @@ public class GridHadoopJobMetadata implements Externalizable { * * @param counters Collection of counters. */ - public void counters(GridHadoopCounters counters) { + public void counters(HadoopCounters counters) { this.counters = counters; } @@ -261,7 +262,7 @@ public class GridHadoopJobMetadata implements Externalizable { * @param split Split. * @return Task number. 
*/ - public int taskNumber(GridHadoopInputSplit split) { + public int taskNumber(HadoopInputSplit split) { return pendingSplits.get(split); } @@ -284,21 +285,21 @@ public class GridHadoopJobMetadata implements Externalizable { @SuppressWarnings("unchecked") @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { submitNodeId = U.readUuid(in); - jobId = (GridHadoopJobId)in.readObject(); - jobInfo = (GridHadoopJobInfo)in.readObject(); - mrPlan = (GridHadoopMapReducePlan)in.readObject(); - pendingSplits = (Map)in.readObject(); + jobId = (HadoopJobId)in.readObject(); + jobInfo = (HadoopJobInfo)in.readObject(); + mrPlan = (HadoopMapReducePlan)in.readObject(); + pendingSplits = (Map)in.readObject(); pendingReducers = (Collection)in.readObject(); - phase = (GridHadoopJobPhase)in.readObject(); + phase = (HadoopJobPhase)in.readObject(); failCause = (Throwable)in.readObject(); ver = in.readLong(); - reducersAddrs = (Map)in.readObject(); - counters = (GridHadoopCounters)in.readObject(); + reducersAddrs = (Map)in.readObject(); + counters = (HadoopCounters)in.readObject(); } /** {@inheritDoc} */ public String toString() { - return S.toString(GridHadoopJobMetadata.class, this, "pendingMaps", pendingSplits.size(), + return S.toString(HadoopJobMetadata.class, this, "pendingMaps", pendingSplits.size(), "pendingReduces", pendingReducers.size(), "failCause", failCause == null ? null : failCause.getClass().getName()); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobTracker.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java similarity index 79% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobTracker.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java index 0beaf32..39f42b2 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/GridHadoopJobTracker.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/jobtracker/HadoopJobTracker.java @@ -25,6 +25,7 @@ import org.apache.ignite.internal.managers.eventstorage.*; import org.apache.ignite.internal.processors.cache.*; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.processors.hadoop.taskexecutor.*; import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.*; import org.apache.ignite.internal.util.*; @@ -44,35 +45,35 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.*; import static java.util.concurrent.TimeUnit.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobPhase.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopTaskType.*; -import static org.apache.ignite.internal.processors.hadoop.taskexecutor.GridHadoopTaskState.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.*; +import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.*; /** * Hadoop job tracker. 
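For orientation (sketch only, not part of this patch): HadoopJobMetadata travels through the replicated system cache, so it implements Externalizable, and the readExternal shown above must read fields in exactly the order its writeExternal wrote them. A minimal carrier obeying that contract, with hypothetical fields:

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

/** Minimal Externalizable carrier: field order must match in both methods. */
class MetaSketch implements Externalizable {
    private String jobName;
    private long ver;

    /** Public no-arg constructor required by Externalizable. */
    public MetaSketch() {
        // No-op.
    }

    @Override public void writeExternal(ObjectOutput out) throws IOException {
        out.writeObject(jobName);
        out.writeLong(ver);
    }

    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        jobName = (String)in.readObject(); // Same order as writeExternal.
        ver = in.readLong();
    }
}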
*/ -public class GridHadoopJobTracker extends GridHadoopComponent { +public class HadoopJobTracker extends HadoopComponent { /** */ private final GridMutex mux = new GridMutex(); /** */ - private volatile GridCacheProjectionEx jobMetaPrj; + private volatile GridCacheProjectionEx jobMetaPrj; /** Projection with expiry policy for finished job updates. */ - private volatile GridCacheProjectionEx finishedJobMetaPrj; + private volatile GridCacheProjectionEx finishedJobMetaPrj; /** Map-reduce execution planner. */ @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") - private GridHadoopMapReducePlanner mrPlanner; + private HadoopMapReducePlanner mrPlanner; /** All the known jobs. */ - private final ConcurrentMap> jobs = new ConcurrentHashMap8<>(); + private final ConcurrentMap> jobs = new ConcurrentHashMap8<>(); /** Locally active jobs. */ - private final ConcurrentMap activeJobs = new ConcurrentHashMap8<>(); + private final ConcurrentMap activeJobs = new ConcurrentHashMap8<>(); /** Locally requested finish futures. */ - private final ConcurrentMap> activeFinishFuts = + private final ConcurrentMap> activeFinishFuts = new ConcurrentHashMap8<>(); /** Event processing service. */ @@ -94,7 +95,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { }; /** {@inheritDoc} */ - @Override public void start(GridHadoopContext ctx) throws IgniteCheckedException { + @Override public void start(HadoopContext ctx) throws IgniteCheckedException { super.start(ctx); busyLock = new GridSpinReadWriteLock(); @@ -106,8 +107,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Job meta projection. */ @SuppressWarnings("NonPrivateFieldAccessedInSynchronizedContext") - private GridCacheProjectionEx jobMetaCache() { - GridCacheProjectionEx prj = jobMetaPrj; + private GridCacheProjectionEx jobMetaCache() { + GridCacheProjectionEx prj = jobMetaPrj; if (prj == null) { synchronized (mux) { @@ -128,8 +129,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { throw new IllegalStateException(e); } - jobMetaPrj = prj = (GridCacheProjectionEx) - sysCache.projection(GridHadoopJobId.class, GridHadoopJobMetadata.class); + jobMetaPrj = prj = (GridCacheProjectionEx) + sysCache.projection(HadoopJobId.class, HadoopJobMetadata.class); if (ctx.configuration().getFinishedJobInfoTtl() > 0) { ExpiryPolicy finishedJobPlc = new ModifiedExpiryPolicy( @@ -149,8 +150,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { /** * @return Projection with expiry policy for finished job updates. */ - private GridCacheProjectionEx finishedJobMetaCache() { - GridCacheProjectionEx prj = finishedJobMetaPrj; + private GridCacheProjectionEx finishedJobMetaCache() { + GridCacheProjectionEx prj = finishedJobMetaPrj; if (prj == null) { jobMetaCache(); @@ -169,9 +170,9 @@ public class GridHadoopJobTracker extends GridHadoopComponent { super.onKernalStart(); jobMetaCache().context().continuousQueries().executeInternalQuery( - new CacheEntryUpdatedListener() { - @Override public void onUpdated(final Iterable> evts) { + new CacheEntryUpdatedListener() { + @Override public void onUpdated(final Iterable> evts) { if (!busyLock.tryReadLock()) return; @@ -222,7 +223,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { evtProcSvc.shutdown(); // Fail all pending futures. 
- for (GridFutureAdapter fut : activeFinishFuts.values()) + for (GridFutureAdapter fut : activeFinishFuts.values()) fut.onDone(new IgniteCheckedException("Failed to execute Hadoop map-reduce job (grid is stopping).")); } @@ -234,7 +235,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Job completion future. */ @SuppressWarnings("unchecked") - public IgniteInternalFuture submit(GridHadoopJobId jobId, GridHadoopJobInfo info) { + public IgniteInternalFuture submit(HadoopJobId jobId, HadoopJobInfo info) { if (!busyLock.tryReadLock()) { return new GridFinishedFutureEx<>(new IgniteCheckedException("Failed to execute map-reduce job " + "(grid is stopping): " + info)); @@ -246,20 +247,20 @@ public class GridHadoopJobTracker extends GridHadoopComponent { if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId)) throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId); - GridHadoopJob job = job(jobId, info); + HadoopJob job = job(jobId, info); - GridHadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null); + HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null); - GridHadoopJobMetadata meta = new GridHadoopJobMetadata(ctx.localNodeId(), jobId, info); + HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info); meta.mapReducePlan(mrPlan); meta.pendingSplits(allSplits(mrPlan)); meta.pendingReducers(allReducers(mrPlan)); - GridFutureAdapter completeFut = new GridFutureAdapter<>(); + GridFutureAdapter completeFut = new GridFutureAdapter<>(); - GridFutureAdapter old = activeFinishFuts.put(jobId, completeFut); + GridFutureAdapter old = activeFinishFuts.put(jobId, completeFut); assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']'; @@ -268,7 +269,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { long jobStart = U.currentTimeMillis(); - GridHadoopPerformanceCounter perfCntr = GridHadoopPerformanceCounter.getCounter(meta.counters(), + HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(), ctx.localNodeId()); perfCntr.clientSubmissionEvents(info); @@ -297,10 +298,10 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Status. */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - public static GridHadoopJobStatus status(GridHadoopJobMetadata meta) { - GridHadoopJobInfo jobInfo = meta.jobInfo(); + public static HadoopJobStatus status(HadoopJobMetadata meta) { + HadoopJobInfo jobInfo = meta.jobInfo(); - return new GridHadoopJobStatus( + return new HadoopJobStatus( meta.jobId(), jobInfo.jobName(), jobInfo.user(), @@ -320,12 +321,12 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param jobId Job ID to get status for. * @return Job status for given job ID or {@code null} if job was not found. */ - @Nullable public GridHadoopJobStatus status(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable public HadoopJobStatus status(HadoopJobId jobId) throws IgniteCheckedException { if (!busyLock.tryReadLock()) return null; // Grid is stopping. try { - GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + HadoopJobMetadata meta = jobMetaCache().get(jobId); return meta != null ? status(meta) : null; } @@ -341,12 +342,12 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Finish future or {@code null}. * @throws IgniteCheckedException If failed. 
*/ - @Nullable public IgniteInternalFuture finishFuture(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable public IgniteInternalFuture finishFuture(HadoopJobId jobId) throws IgniteCheckedException { if (!busyLock.tryReadLock()) return null; // Grid is stopping. try { - GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + HadoopJobMetadata meta = jobMetaCache().get(jobId); if (meta == null) return null; @@ -361,8 +362,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { return new GridFinishedFutureEx<>(jobId, meta.failCause()); } - GridFutureAdapter fut = F.addIfAbsent(activeFinishFuts, jobId, - new GridFutureAdapter()); + GridFutureAdapter fut = F.addIfAbsent(activeFinishFuts, jobId, + new GridFutureAdapter()); // Get meta from cache one more time to close the window. meta = jobMetaCache().get(jobId); @@ -395,12 +396,12 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Job plan. * @throws IgniteCheckedException If failed. */ - public GridHadoopMapReducePlan plan(GridHadoopJobId jobId) throws IgniteCheckedException { + public HadoopMapReducePlan plan(HadoopJobId jobId) throws IgniteCheckedException { if (!busyLock.tryReadLock()) return null; try { - GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + HadoopJobMetadata meta = jobMetaCache().get(jobId); if (meta != null) return meta.mapReducePlan(); @@ -419,7 +420,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param status Task status. */ @SuppressWarnings({"ConstantConditions", "ThrowableResultOfMethodCallIgnored"}) - public void onTaskFinished(GridHadoopTaskInfo info, GridHadoopTaskStatus status) { + public void onTaskFinished(HadoopTaskInfo info, HadoopTaskStatus status) { if (!busyLock.tryReadLock()) return; @@ -470,7 +471,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { case COMMIT: case ABORT: { - GridCacheProjectionEx cache = finishedJobMetaCache(); + GridCacheProjectionEx cache = finishedJobMetaCache(); cache.invokeAsync(info.jobId(), new UpdatePhaseProcessor(incrCntrs, PHASE_COMPLETE)). listenAsync(failsLog); @@ -488,7 +489,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param jobId Job id. * @param c Closure of operation. */ - private void transform(GridHadoopJobId jobId, EntryProcessor c) { + private void transform(HadoopJobId jobId, EntryProcessor c) { jobMetaCache().invokeAsync(jobId, c).listenAsync(failsLog); } @@ -499,8 +500,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param reducers Reducers. * @param desc Process descriptor. */ - public void onExternalMappersInitialized(GridHadoopJobId jobId, Collection reducers, - GridHadoopProcessDescriptor desc) { + public void onExternalMappersInitialized(HadoopJobId jobId, Collection reducers, + HadoopProcessDescriptor desc) { transform(jobId, new InitializeReducersProcessor(null, reducers, desc)); } @@ -511,13 +512,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Collection of all input splits that should be processed. 
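A sketch of the guard idiom recurring above (editor's addition, not in the patch): nearly every public method of the tracker first takes a non-blocking read lock on busyLock and returns early if the grid is stopping, while shutdown takes the write lock so no new work can start. The same guard with JDK primitives, since GridSpinReadWriteLock is Ignite-internal:

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;

final class BusyGuard {
    private final ReentrantReadWriteLock busyLock = new ReentrantReadWriteLock();

    /** Runs work unless shutdown already holds the write lock. */
    <T> T guarded(Supplier<T> work, T stoppingRes) {
        if (!busyLock.readLock().tryLock())
            return stoppingRes; // Grid is stopping.

        try {
            return work.get();
        }
        finally {
            busyLock.readLock().unlock();
        }
    }

    /** Called once on stop: afterwards guarded() always returns stoppingRes. */
    void block() {
        busyLock.writeLock().lock();
    }
}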
*/ @SuppressWarnings("ConstantConditions") - private Map allSplits(GridHadoopMapReducePlan plan) { - Map res = new HashMap<>(); + private Map allSplits(HadoopMapReducePlan plan) { + Map res = new HashMap<>(); int taskNum = 0; for (UUID nodeId : plan.mapperNodeIds()) { - for (GridHadoopInputSplit split : plan.mappers(nodeId)) { + for (HadoopInputSplit split : plan.mappers(nodeId)) { if (res.put(split, taskNum++) != null) throw new IllegalStateException("Split duplicate."); } @@ -532,7 +533,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param plan Map-reduce plan. * @return Collection of reducers. */ - private Collection allReducers(GridHadoopMapReducePlan plan) { + private Collection allReducers(HadoopMapReducePlan plan) { Collection res = new HashSet<>(); for (int i = 0; i < plan.reducers(); i++) @@ -557,20 +558,20 @@ public class GridHadoopJobTracker extends GridHadoopComponent { // Iteration over all local entries is correct since system cache is REPLICATED. for (Object metaObj : jobMetaCache().values()) { - GridHadoopJobMetadata meta = (GridHadoopJobMetadata)metaObj; + HadoopJobMetadata meta = (HadoopJobMetadata)metaObj; - GridHadoopJobId jobId = meta.jobId(); + HadoopJobId jobId = meta.jobId(); - GridHadoopMapReducePlan plan = meta.mapReducePlan(); + HadoopMapReducePlan plan = meta.mapReducePlan(); - GridHadoopJobPhase phase = meta.phase(); + HadoopJobPhase phase = meta.phase(); try { if (checkSetup && phase == PHASE_SETUP && !activeJobs.containsKey(jobId)) { // Failover setup task. - GridHadoopJob job = job(jobId, meta.jobInfo()); + HadoopJob job = job(jobId, meta.jobInfo()); - Collection setupTask = setupTask(jobId); + Collection setupTask = setupTask(jobId); assert setupTask != null; @@ -579,12 +580,12 @@ public class GridHadoopJobTracker extends GridHadoopComponent { else if (phase == PHASE_MAP || phase == PHASE_REDUCE) { // Must check all nodes, even that are not event node ID due to // multiple node failure possibility. - Collection cancelSplits = null; + Collection cancelSplits = null; for (UUID nodeId : plan.mapperNodeIds()) { if (ctx.kernalContext().discovery().node(nodeId) == null) { // Node has left the grid. - Collection mappers = plan.mappers(nodeId); + Collection mappers = plan.mappers(nodeId); if (cancelSplits == null) cancelSplits = new HashSet<>(); @@ -626,13 +627,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @throws IgniteCheckedException If failed. */ private void processJobMetadataUpdates( - Iterable> updated) + Iterable> updated) throws IgniteCheckedException { UUID locNodeId = ctx.localNodeId(); - for (CacheEntryEvent entry : updated) { - GridHadoopJobId jobId = entry.getKey(); - GridHadoopJobMetadata meta = entry.getValue(); + for (CacheEntryEvent entry : updated) { + HadoopJobId jobId = entry.getKey(); + HadoopJobMetadata meta = entry.getValue(); if (meta == null || !ctx.isParticipating(meta)) continue; @@ -661,7 +662,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param jobId Job ID. * @param plan Map-reduce plan. */ - private void printPlan(GridHadoopJobId jobId, GridHadoopMapReducePlan plan) { + private void printPlan(HadoopJobId jobId, HadoopMapReducePlan plan) { log.info("Plan for " + jobId); SB b = new SB(); @@ -689,18 +690,18 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param locNodeId Local node ID. * @throws IgniteCheckedException If failed. 
*/ - private void processJobMetaUpdate(GridHadoopJobId jobId, GridHadoopJobMetadata meta, UUID locNodeId) + private void processJobMetaUpdate(HadoopJobId jobId, HadoopJobMetadata meta, UUID locNodeId) throws IgniteCheckedException { JobLocalState state = activeJobs.get(jobId); - GridHadoopJob job = job(jobId, meta.jobInfo()); + HadoopJob job = job(jobId, meta.jobInfo()); - GridHadoopMapReducePlan plan = meta.mapReducePlan(); + HadoopMapReducePlan plan = meta.mapReducePlan(); switch (meta.phase()) { case PHASE_SETUP: { if (ctx.jobUpdateLeader()) { - Collection setupTask = setupTask(jobId); + Collection setupTask = setupTask(jobId); if (setupTask != null) ctx.taskExecutor().run(job, setupTask); @@ -711,7 +712,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { case PHASE_MAP: { // Check if we should initiate new task on local node. - Collection tasks = mapperTasks(plan.mappers(locNodeId), meta); + Collection tasks = mapperTasks(plan.mappers(locNodeId), meta); if (tasks != null) ctx.taskExecutor().run(job, tasks); @@ -721,7 +722,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { case PHASE_REDUCE: { if (meta.pendingReducers().isEmpty() && ctx.jobUpdateLeader()) { - GridHadoopTaskInfo info = new GridHadoopTaskInfo(COMMIT, jobId, 0, 0, null); + HadoopTaskInfo info = new HadoopTaskInfo(COMMIT, jobId, 0, 0, null); if (log.isDebugEnabled()) log.debug("Submitting COMMIT task for execution [locNodeId=" + locNodeId + @@ -732,7 +733,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { break; } - Collection tasks = reducerTasks(plan.reducers(locNodeId), job); + Collection tasks = reducerTasks(plan.reducers(locNodeId), job); if (tasks != null) ctx.taskExecutor().run(job, tasks); @@ -756,7 +757,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { // Prevent running multiple abort tasks. if (state.onAborted()) { - GridHadoopTaskInfo info = new GridHadoopTaskInfo(ABORT, jobId, 0, 0, null); + HadoopTaskInfo info = new HadoopTaskInfo(ABORT, jobId, 0, 0, null); if (log.isDebugEnabled()) log.debug("Submitting ABORT task for execution [locNodeId=" + locNodeId + @@ -770,13 +771,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } else { // Check if there are unscheduled mappers or reducers. 
- Collection cancelMappers = new ArrayList<>(); + Collection cancelMappers = new ArrayList<>(); Collection cancelReducers = new ArrayList<>(); - Collection mappers = plan.mappers(ctx.localNodeId()); + Collection mappers = plan.mappers(ctx.localNodeId()); if (mappers != null) { - for (GridHadoopInputSplit b : mappers) { + for (HadoopInputSplit b : mappers) { if (state == null || !state.mapperScheduled(b)) cancelMappers.add(b); } @@ -811,7 +812,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { ctx.shuffle().jobFinished(jobId); } - GridFutureAdapter finishFut = activeFinishFuts.remove(jobId); + GridFutureAdapter finishFut = activeFinishFuts.remove(jobId); if (finishFut != null) { if (log.isDebugEnabled()) @@ -831,14 +832,14 @@ public class GridHadoopJobTracker extends GridHadoopComponent { ClassLoader ldr = job.getClass().getClassLoader(); try { - String statWriterClsName = job.info().property(GridHadoopUtils.JOB_COUNTER_WRITER_PROPERTY); + String statWriterClsName = job.info().property(HadoopUtils.JOB_COUNTER_WRITER_PROPERTY); if (statWriterClsName != null) { Class cls = ldr.loadClass(statWriterClsName); - GridHadoopCounterWriter writer = (GridHadoopCounterWriter)cls.newInstance(); + HadoopCounterWriter writer = (HadoopCounterWriter)cls.newInstance(); - GridHadoopCounters cntrs = meta.counters(); + HadoopCounters cntrs = meta.counters(); writer.write(job.info(), jobId, cntrs); } @@ -862,13 +863,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param jobId Job ID. * @return Setup task wrapped in collection. */ - @Nullable private Collection setupTask(GridHadoopJobId jobId) { + @Nullable private Collection setupTask(HadoopJobId jobId) { if (activeJobs.containsKey(jobId)) return null; else { initState(jobId); - return Collections.singleton(new GridHadoopTaskInfo(SETUP, jobId, 0, 0, null)); + return Collections.singleton(new HadoopTaskInfo(SETUP, jobId, 0, 0, null)); } } @@ -879,25 +880,25 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param meta Job metadata. * @return Collection of created task infos or {@code null} if no mapper tasks scheduled for local node. */ - private Collection mapperTasks(Iterable mappers, GridHadoopJobMetadata meta) { + private Collection mapperTasks(Iterable mappers, HadoopJobMetadata meta) { UUID locNodeId = ctx.localNodeId(); - GridHadoopJobId jobId = meta.jobId(); + HadoopJobId jobId = meta.jobId(); JobLocalState state = activeJobs.get(jobId); - Collection tasks = null; + Collection tasks = null; if (mappers != null) { if (state == null) state = initState(jobId); - for (GridHadoopInputSplit split : mappers) { + for (HadoopInputSplit split : mappers) { if (state.addMapper(split)) { if (log.isDebugEnabled()) log.debug("Submitting MAP task for execution [locNodeId=" + locNodeId + ", split=" + split + ']'); - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(MAP, jobId, meta.taskNumber(split), 0, split); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(MAP, jobId, meta.taskNumber(split), 0, split); if (tasks == null) tasks = new ArrayList<>(); @@ -917,13 +918,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param job Job instance. * @return Collection of task infos. 
*/ - private Collection reducerTasks(int[] reducers, GridHadoopJob job) { + private Collection reducerTasks(int[] reducers, HadoopJob job) { UUID locNodeId = ctx.localNodeId(); - GridHadoopJobId jobId = job.id(); + HadoopJobId jobId = job.id(); JobLocalState state = activeJobs.get(jobId); - Collection tasks = null; + Collection tasks = null; if (reducers != null) { if (state == null) @@ -935,7 +936,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { log.debug("Submitting REDUCE task for execution [locNodeId=" + locNodeId + ", rdc=" + rdc + ']'); - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(REDUCE, jobId, rdc, 0, null); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(REDUCE, jobId, rdc, 0, null); if (tasks == null) tasks = new ArrayList<>(); @@ -954,7 +955,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param jobId Job ID. * @return Local state. */ - private JobLocalState initState(GridHadoopJobId jobId) { + private JobLocalState initState(HadoopJobId jobId) { return F.addIfAbsent(activeJobs, jobId, new JobLocalState()); } @@ -966,19 +967,19 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Job. * @throws IgniteCheckedException If failed. */ - @Nullable public GridHadoopJob job(GridHadoopJobId jobId, @Nullable GridHadoopJobInfo jobInfo) throws IgniteCheckedException { - GridFutureAdapterEx fut = jobs.get(jobId); + @Nullable public HadoopJob job(HadoopJobId jobId, @Nullable HadoopJobInfo jobInfo) throws IgniteCheckedException { + GridFutureAdapterEx fut = jobs.get(jobId); - if (fut != null || (fut = jobs.putIfAbsent(jobId, new GridFutureAdapterEx())) != null) + if (fut != null || (fut = jobs.putIfAbsent(jobId, new GridFutureAdapterEx())) != null) return fut.get(); fut = jobs.get(jobId); - GridHadoopJob job = null; + HadoopJob job = null; try { if (jobInfo == null) { - GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + HadoopJobMetadata meta = jobMetaCache().get(jobId); if (meta == null) throw new IgniteCheckedException("Failed to find job metadata for ID: " + jobId); @@ -1019,15 +1020,15 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return {@code True} if job was killed. * @throws IgniteCheckedException If failed. */ - public boolean killJob(GridHadoopJobId jobId) throws IgniteCheckedException { + public boolean killJob(HadoopJobId jobId) throws IgniteCheckedException { if (!busyLock.tryReadLock()) return false; // Grid is stopping. try { - GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + HadoopJobMetadata meta = jobMetaCache().get(jobId); if (meta != null && meta.phase() != PHASE_COMPLETE && meta.phase() != PHASE_CANCELLING) { - GridHadoopTaskCancelledException err = new GridHadoopTaskCancelledException("Job cancelled."); + HadoopTaskCancelledException err = new HadoopTaskCancelledException("Job cancelled."); jobMetaCache().invoke(jobId, new CancelJobProcessor(null, err)); } @@ -1043,7 +1044,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { fut.get(); } catch (Throwable e) { - if (e.getCause() instanceof GridHadoopTaskCancelledException) + if (e.getCause() instanceof HadoopTaskCancelledException) return true; } } @@ -1058,12 +1059,12 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @return Job counters or {@code null} if job cannot be found. * @throws IgniteCheckedException If failed. 
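killJob above never mutates the metadata directly: the phase flip is delegated to an EntryProcessor (CancelJobProcessor) executed atomically inside the cache, so it cannot interleave with concurrent updates from other nodes. The same idiom in isolation, with String standing in for HadoopJobMetadata:

    import javax.cache.processor.EntryProcessor;
    import javax.cache.processor.MutableEntry;

    class CancelSketch implements EntryProcessor<String, String, Void> {
        @Override public Void process(MutableEntry<String, String> e, Object... args) {
            // Applied atomically on the entry; every listener sees the flip at once.
            if (e.exists() && !"PHASE_COMPLETE".equals(e.getValue()))
                e.setValue("PHASE_CANCELLING");

            return null;
        }
    }

    // Usage, mirroring jobMetaCache().invoke(jobId, new CancelJobProcessor(...)):
    //     jobMetaCache.invoke(jobId, new CancelSketch());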
*/ - @Nullable public GridHadoopCounters jobCounters(GridHadoopJobId jobId) throws IgniteCheckedException { + @Nullable public HadoopCounters jobCounters(HadoopJobId jobId) throws IgniteCheckedException { if (!busyLock.tryReadLock()) return null; try { - final GridHadoopJobMetadata meta = jobMetaCache().get(jobId); + final HadoopJobMetadata meta = jobMetaCache().get(jobId); return meta != null ? meta.counters() : null; } @@ -1103,7 +1104,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { */ private class JobLocalState { /** Mappers. */ - private final Collection currMappers = new HashSet<>(); + private final Collection currMappers = new HashSet<>(); /** Reducers. */ private final Collection currReducers = new HashSet<>(); @@ -1121,7 +1122,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param mapSplit Map split to add. * @return {@code True} if mapper was added. */ - private boolean addMapper(GridHadoopInputSplit mapSplit) { + private boolean addMapper(HadoopInputSplit mapSplit) { return currMappers.add(mapSplit); } @@ -1139,7 +1140,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param mapSplit Map split to check. * @return {@code True} if mapper was scheduled. */ - public boolean mapperScheduled(GridHadoopInputSplit mapSplit) { + public boolean mapperScheduled(HadoopInputSplit mapSplit) { return currMappers.contains(mapSplit); } @@ -1158,8 +1159,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param status Task status. * @param prev Previous closure. */ - private void onSetupFinished(final GridHadoopTaskInfo taskInfo, GridHadoopTaskStatus status, StackedProcessor prev) { - final GridHadoopJobId jobId = taskInfo.jobId(); + private void onSetupFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) { + final HadoopJobId jobId = taskInfo.jobId(); if (status.state() == FAILED || status.state() == CRASHED) transform(jobId, new CancelJobProcessor(prev, status.failCause())); @@ -1172,9 +1173,9 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param status Task status. * @param prev Previous closure. */ - private void onMapFinished(final GridHadoopTaskInfo taskInfo, GridHadoopTaskStatus status, + private void onMapFinished(final HadoopTaskInfo taskInfo, HadoopTaskStatus status, final StackedProcessor prev) { - final GridHadoopJobId jobId = taskInfo.jobId(); + final HadoopJobId jobId = taskInfo.jobId(); boolean lastMapperFinished = completedMappersCnt.incrementAndGet() == currMappers.size(); @@ -1213,8 +1214,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param status Task status. * @param prev Previous closure. */ - private void onReduceFinished(GridHadoopTaskInfo taskInfo, GridHadoopTaskStatus status, StackedProcessor prev) { - GridHadoopJobId jobId = taskInfo.jobId(); + private void onReduceFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status, StackedProcessor prev) { + HadoopJobId jobId = taskInfo.jobId(); if (status.state() == FAILED || status.state() == CRASHED) // Fail the whole job. transform(jobId, new RemoveReducerProcessor(prev, taskInfo.taskNumber(), status.failCause())); @@ -1227,9 +1228,9 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param status Task status. * @param prev Previous closure. 
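JobLocalState above does two things: it remembers which splits were already scheduled locally, and its lastMapperFinished check lets exactly one finishing task observe that it completed the final mapper. Both in miniature, with String split identifiers as stand-ins:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.atomic.AtomicInteger;

    class LocalStateSketch {
        private final Set<String> currMappers = new HashSet<>();
        private final AtomicInteger completedMappersCnt = new AtomicInteger();

        boolean addMapper(String split) {
            return currMappers.add(split); // false => already scheduled, skip it.
        }

        boolean onMapFinished() {
            // True for exactly one caller: the task completing the final mapper.
            return completedMappersCnt.incrementAndGet() == currMappers.size();
        }
    }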
*/ - private void onCombineFinished(GridHadoopTaskInfo taskInfo, GridHadoopTaskStatus status, + private void onCombineFinished(HadoopTaskInfo taskInfo, HadoopTaskStatus status, final StackedProcessor prev) { - final GridHadoopJobId jobId = taskInfo.jobId(); + final HadoopJobId jobId = taskInfo.jobId(); if (status.state() == FAILED || status.state() == CRASHED) // Fail the whole job. @@ -1289,20 +1290,20 @@ public class GridHadoopJobTracker extends GridHadoopComponent { private static final long serialVersionUID = 0L; /** Phase to update. */ - private final GridHadoopJobPhase phase; + private final HadoopJobPhase phase; /** * @param prev Previous closure. * @param phase Phase to update. */ - private UpdatePhaseProcessor(@Nullable StackedProcessor prev, GridHadoopJobPhase phase) { + private UpdatePhaseProcessor(@Nullable StackedProcessor prev, HadoopJobPhase phase) { super(prev); this.phase = phase; } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { cp.phase(phase); } } @@ -1315,7 +1316,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { private static final long serialVersionUID = 0L; /** Mapper split to remove. */ - private final Collection splits; + private final Collection splits; /** Error. */ private final Throwable err; @@ -1325,7 +1326,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param split Mapper split to remove. * @param err Error. */ - private RemoveMappersProcessor(@Nullable StackedProcessor prev, GridHadoopInputSplit split, Throwable err) { + private RemoveMappersProcessor(@Nullable StackedProcessor prev, HadoopInputSplit split, Throwable err) { this(prev, Collections.singletonList(split), err); } @@ -1334,7 +1335,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param splits Mapper splits to remove. * @param err Error. */ - private RemoveMappersProcessor(@Nullable StackedProcessor prev, Collection splits, + private RemoveMappersProcessor(@Nullable StackedProcessor prev, Collection splits, Throwable err) { super(prev); @@ -1343,10 +1344,10 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { - Map splitsCp = new HashMap<>(cp.pendingSplits()); + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { + Map splitsCp = new HashMap<>(cp.pendingSplits()); - for (GridHadoopInputSplit s : splits) + for (HadoopInputSplit s : splits) splitsCp.remove(s); cp.pendingSplits(splitsCp); @@ -1400,7 +1401,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { Collection rdcCp = new HashSet<>(cp.pendingReducers()); rdcCp.remove(rdc); @@ -1425,7 +1426,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { private final Collection rdc; /** Process descriptor for reducers. */ - private final GridHadoopProcessDescriptor desc; + private final HadoopProcessDescriptor desc; /** * @param prev Previous closure. 
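All of the Update*/Remove*/Cancel* processors in this file share one contract, made explicit by StackedProcessor further below: the chain clones the metadata exactly once at its deepest link, then every processor mutates the clone, never the cached original. A standalone sketch of that copy-on-write chaining, with a one-field POJO in place of HadoopJobMetadata:

    class Meta {
        String phase;

        Meta() {}
        Meta(Meta other) { phase = other.phase; } // Copy ctor, cf. new HadoopJobMetadata(meta).
    }

    abstract class StackedSketch {
        private final StackedSketch prev;

        StackedSketch(StackedSketch prev) { this.prev = prev; }

        final Meta apply(Meta meta) {
            if (meta == null)
                return null;

            // Deepest processor clones; everyone after that mutates the clone.
            Meta cp = prev != null ? prev.apply(meta) : new Meta(meta);

            update(meta, cp);

            return cp;
        }

        /** Mutate the copy only; the cached original stays untouched. */
        protected abstract void update(Meta old, Meta cp);
    }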
@@ -1434,7 +1435,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { */ private InitializeReducersProcessor(@Nullable StackedProcessor prev, Collection rdc, - GridHadoopProcessDescriptor desc) { + HadoopProcessDescriptor desc) { super(prev); assert !F.isEmpty(rdc); @@ -1445,11 +1446,11 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { - Map oldMap = meta.reducersAddresses(); + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { + Map oldMap = meta.reducersAddresses(); - Map rdcMap = oldMap == null ? - new HashMap() : new HashMap<>(oldMap); + Map rdcMap = oldMap == null ? + new HashMap() : new HashMap<>(oldMap); for (Integer r : rdc) rdcMap.put(r, desc); @@ -1466,7 +1467,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { private static final long serialVersionUID = 0L; /** Mapper split to remove. */ - private final Collection splits; + private final Collection splits; /** Reducers to remove. */ private final Collection rdc; @@ -1488,7 +1489,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param rdc Reducers to remove. */ private CancelJobProcessor(@Nullable StackedProcessor prev, - Collection splits, + Collection splits, Collection rdc) { this(prev, null, splits, rdc); } @@ -1501,7 +1502,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { */ private CancelJobProcessor(@Nullable StackedProcessor prev, Throwable err, - Collection splits, + Collection splits, Collection rdc) { super(prev); @@ -1511,7 +1512,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { assert meta.phase() == PHASE_CANCELLING || err != null: "Invalid phase for cancel: " + meta; Collection rdcCp = new HashSet<>(cp.pendingReducers()); @@ -1521,10 +1522,10 @@ public class GridHadoopJobTracker extends GridHadoopComponent { cp.pendingReducers(rdcCp); - Map splitsCp = new HashMap<>(cp.pendingSplits()); + Map splitsCp = new HashMap<>(cp.pendingSplits()); if (splits != null) { - for (GridHadoopInputSplit s : splits) + for (HadoopInputSplit s : splits) splitsCp.remove(s); } @@ -1545,13 +1546,13 @@ public class GridHadoopJobTracker extends GridHadoopComponent { private static final long serialVersionUID = 0L; /** */ - private final GridHadoopCounters counters; + private final HadoopCounters counters; /** * @param prev Previous closure. * @param counters Task counters to add into job counters. */ - private IncrementCountersProcessor(@Nullable StackedProcessor prev, GridHadoopCounters counters) { + private IncrementCountersProcessor(@Nullable StackedProcessor prev, HadoopCounters counters) { super(prev); assert counters != null; @@ -1560,8 +1561,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override protected void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp) { - GridHadoopCounters cntrs = new GridHadoopCountersImpl(cp.counters()); + @Override protected void update(HadoopJobMetadata meta, HadoopJobMetadata cp) { + HadoopCounters cntrs = new HadoopCountersImpl(cp.counters()); cntrs.merge(counters); @@ -1573,7 +1574,7 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * Abstract stacked closure. 
*/ private abstract static class StackedProcessor implements - EntryProcessor, Serializable { + EntryProcessor, Serializable { /** */ private static final long serialVersionUID = 0L; @@ -1588,8 +1589,8 @@ public class GridHadoopJobTracker extends GridHadoopComponent { } /** {@inheritDoc} */ - @Override public Void process(MutableEntry e, Object... args) { - GridHadoopJobMetadata val = apply(e.getValue()); + @Override public Void process(MutableEntry e, Object... args) { + HadoopJobMetadata val = apply(e.getValue()); if (val != null) e.setValue(val); @@ -1603,11 +1604,11 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param meta Old value. * @return New value. */ - private GridHadoopJobMetadata apply(GridHadoopJobMetadata meta) { + private HadoopJobMetadata apply(HadoopJobMetadata meta) { if (meta == null) return null; - GridHadoopJobMetadata cp = prev != null ? prev.apply(meta) : new GridHadoopJobMetadata(meta); + HadoopJobMetadata cp = prev != null ? prev.apply(meta) : new HadoopJobMetadata(meta); update(meta, cp); @@ -1620,6 +1621,6 @@ public class GridHadoopJobTracker extends GridHadoopComponent { * @param meta Initial job metadata. * @param cp Copy. */ - protected abstract void update(GridHadoopJobMetadata meta, GridHadoopJobMetadata cp); + protected abstract void update(HadoopJobMetadata meta, HadoopJobMetadata cp); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/GridHadoopMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/GridHadoopMessage.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java index 1670a8a..cab6138 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/GridHadoopMessage.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/message/HadoopMessage.java @@ -22,6 +22,6 @@ import java.io.*; /** * Marker interface for all hadoop messages. */ -public interface GridHadoopMessage extends Externalizable { +public interface HadoopMessage extends Externalizable { // No-op. } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlan.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java similarity index 87% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlan.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java index 7988403..1413612 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/GridHadoopDefaultMapReducePlan.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/planner/HadoopDefaultMapReducePlan.java @@ -25,12 +25,12 @@ import java.util.*; /** * Map-reduce plan. */ -public class GridHadoopDefaultMapReducePlan implements GridHadoopMapReducePlan { +public class HadoopDefaultMapReducePlan implements HadoopMapReducePlan { /** */ private static final long serialVersionUID = 0L; /** Mappers map. */ - private Map> mappers; + private Map> mappers; /** Reducers map. 
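HadoopDefaultMapReducePlan, whose rename starts above, is little more than per-node lookup tables plus precomputed counts. Its mapper-side bookkeeping in sketch form, with String split descriptors in place of HadoopInputSplit and the reducers table omitted:

    import java.util.Collection;
    import java.util.Map;
    import java.util.UUID;

    class PlanSketch {
        /** Splits grouped by the node that will run them. */
        private final Map<UUID, Collection<String>> mappers;

        private int mappersCnt;

        PlanSketch(Map<UUID, Collection<String>> mappers) {
            this.mappers = mappers;

            // Total mapper count is precomputed once, exactly as in the constructor above.
            if (mappers != null)
                for (Collection<String> splits : mappers.values())
                    mappersCnt += splits.size();
        }

        Collection<String> mappers(UUID nodeId) { return mappers.get(nodeId); }

        int mappersCount() { return mappersCnt; }
    }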
*/ private Map reducers; @@ -45,13 +45,13 @@ public class GridHadoopDefaultMapReducePlan implements GridHadoopMapReducePlan { * @param mappers Mappers map. * @param reducers Reducers map. */ - public GridHadoopDefaultMapReducePlan(Map> mappers, + public HadoopDefaultMapReducePlan(Map> mappers, Map reducers) { this.mappers = mappers; this.reducers = reducers; if (mappers != null) { - for (Collection splits : mappers.values()) + for (Collection splits : mappers.values()) mappersCnt += splits.size(); } @@ -86,7 +86,7 @@ public class GridHadoopDefaultMapReducePlan implements GridHadoopMapReducePlan { } /** {@inheritDoc} */ - @Override @Nullable public Collection mappers(UUID nodeId) { + @Override @Nullable public Collection mappers(UUID nodeId) { return mappers.get(nodeId); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocol.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java similarity index 83% rename from modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocol.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java index bd31951..b454760 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocol.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopClientProtocol.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.ignite.client.hadoop; +package org.apache.ignite.internal.processors.hadoop.proto; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.FileSystem; @@ -31,20 +31,19 @@ import org.apache.hadoop.security.*; import org.apache.hadoop.security.authorize.*; import org.apache.hadoop.security.token.*; import org.apache.ignite.*; -import org.apache.ignite.client.hadoop.counter.*; import org.apache.ignite.internal.client.*; import org.apache.ignite.internal.processors.hadoop.*; -import org.apache.ignite.internal.processors.hadoop.proto.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.util.typedef.internal.*; import java.io.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Hadoop client protocol. */ -public class GridHadoopClientProtocol implements ClientProtocol { +public class HadoopClientProtocol implements ClientProtocol { /** Ignite framework name property. */ public static final String FRAMEWORK_NAME = "ignite"; @@ -64,7 +63,7 @@ public class GridHadoopClientProtocol implements ClientProtocol { private long lastVer = -1; /** Last received status. */ - private GridHadoopJobStatus lastStatus; + private HadoopJobStatus lastStatus; /** * Constructor. @@ -72,7 +71,7 @@ public class GridHadoopClientProtocol implements ClientProtocol { * @param conf Configuration. * @param cli Ignite client. 
*/ - GridHadoopClientProtocol(Configuration conf, GridClient cli) { + public HadoopClientProtocol(Configuration conf, GridClient cli) { assert cli != null; this.conf = conf; @@ -84,7 +83,7 @@ public class GridHadoopClientProtocol implements ClientProtocol { try { conf.setLong(REQ_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis()); - GridHadoopJobId jobID = cli.compute().execute(GridHadoopProtocolNextTaskIdTask.class.getName(), null); + HadoopJobId jobID = cli.compute().execute(HadoopProtocolNextTaskIdTask.class.getName(), null); conf.setLong(RESPONSE_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis()); @@ -101,8 +100,8 @@ public class GridHadoopClientProtocol implements ClientProtocol { try { conf.setLong(JOB_SUBMISSION_START_TS_PROPERTY, U.currentTimeMillis()); - GridHadoopJobStatus status = cli.compute().execute(GridHadoopProtocolSubmitJobTask.class.getName(), - new GridHadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), createJobInfo(conf))); + HadoopJobStatus status = cli.compute().execute(HadoopProtocolSubmitJobTask.class.getName(), + new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), createJobInfo(conf))); if (status == null) throw new IOException("Failed to submit job (null status obtained): " + jobId); @@ -137,8 +136,8 @@ public class GridHadoopClientProtocol implements ClientProtocol { /** {@inheritDoc} */ @Override public void killJob(JobID jobId) throws IOException, InterruptedException { try { - cli.compute().execute(GridHadoopProtocolKillJobTask.class.getName(), - new GridHadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId())); + cli.compute().execute(HadoopProtocolKillJobTask.class.getName(), + new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId())); } catch (GridClientException e) { throw new IOException("Failed to kill job: " + jobId, e); @@ -159,13 +158,13 @@ public class GridHadoopClientProtocol implements ClientProtocol { /** {@inheritDoc} */ @Override public JobStatus getJobStatus(JobID jobId) throws IOException, InterruptedException { try { - Long delay = conf.getLong(GridHadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), -1); + Long delay = conf.getLong(HadoopJobProperty.JOB_STATUS_POLL_DELAY.propertyName(), -1); - GridHadoopProtocolTaskArguments args = delay >= 0 ? - new GridHadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), delay) : - new GridHadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()); + HadoopProtocolTaskArguments args = delay >= 0 ? 
+ new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId(), delay) : + new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId()); - GridHadoopJobStatus status = cli.compute().execute(GridHadoopProtocolJobStatusTask.class.getName(), args); + HadoopJobStatus status = cli.compute().execute(HadoopProtocolJobStatusTask.class.getName(), args); if (status == null) throw new IOException("Job tracker doesn't have any information about the job: " + jobId); @@ -180,13 +179,13 @@ public class GridHadoopClientProtocol implements ClientProtocol { /** {@inheritDoc} */ @Override public Counters getJobCounters(JobID jobId) throws IOException, InterruptedException { try { - final GridHadoopCounters counters = cli.compute().execute(GridHadoopProtocolJobCountersTask.class.getName(), - new GridHadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId())); + final HadoopCounters counters = cli.compute().execute(HadoopProtocolJobCountersTask.class.getName(), + new HadoopProtocolTaskArguments(jobId.getJtIdentifier(), jobId.getId())); if (counters == null) throw new IOException("Job tracker doesn't have any information about the job: " + jobId); - return new GridHadoopClientCounters(counters); + return new HadoopMapReduceCounters(counters); } catch (GridClientException e) { throw new IOException("Failed to get job counters: " + jobId, e); @@ -240,7 +239,7 @@ public class GridHadoopClientProtocol implements ClientProtocol { @Override public String getStagingAreaDir() throws IOException, InterruptedException { String usr = UserGroupInformation.getCurrentUser().getShortUserName(); - return GridHadoopUtils.stagingAreaDir(conf, usr).toString(); + return HadoopUtils.stagingAreaDir(conf, usr).toString(); } /** {@inheritDoc} */ @@ -314,9 +313,9 @@ public class GridHadoopClientProtocol implements ClientProtocol { * @param status Ignite status. * @return Hadoop status. */ - private JobStatus processStatus(GridHadoopJobStatus status) { + private JobStatus processStatus(HadoopJobStatus status) { // IMPORTANT! This method will only work in single-threaded environment. It is valid at the moment because - // GridHadoopClientProtocolProvider creates new instance of this class for every new job and Job class + // IgniteHadoopClientProtocolProvider creates new instance of this class for every new job and Job class // serializes invocations of submitJob() and getJobStatus() methods. However, if any of these conditions will // change in future and either protocol will serve statuses for several jobs or status update will not be // serialized anymore, then we have to fallback to concurrent approach (e.g. using ConcurrentHashMap). 
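The lastVer/lastStatus fields above, together with the hunk just below, implement a small monotonic cache: a polled status is accepted only if its version is newer than the last one seen, so a stale reply can never roll the visible job state backwards. A sketch of that contract, inferred from the visible else-branch and valid, as the comment stresses, only while calls stay single-threaded:

    class StatusCacheSketch {
        private long lastVer = -1;
        private String lastStatus; // Stand-in for HadoopJobStatus.

        String process(long ver, String status) {
            if (ver > lastVer) {
                lastVer = ver;
                lastStatus = status;
            }

            // Stale versions fall through to the cached value.
            return lastStatus;
        }
    }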
@@ -329,6 +328,6 @@ public class GridHadoopClientProtocol implements ClientProtocol { else assert lastStatus != null; - return GridHadoopUtils.status(lastStatus, conf); + return HadoopUtils.status(lastStatus, conf); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobCountersTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java similarity index 77% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobCountersTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java index 37073d9..ebdda9f 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobCountersTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobCountersTask.java @@ -20,19 +20,20 @@ package org.apache.ignite.internal.processors.hadoop.proto; import org.apache.ignite.*; import org.apache.ignite.compute.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import java.util.*; /** * Task to get job counters. */ -public class GridHadoopProtocolJobCountersTask extends GridHadoopProtocolTaskAdapter { +public class HadoopProtocolJobCountersTask extends HadoopProtocolTaskAdapter { /** */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override public GridHadoopCounters run(ComputeJobContext jobCtx, GridHadoop hadoop, - GridHadoopProtocolTaskArguments args) throws IgniteCheckedException { + @Override public HadoopCounters run(ComputeJobContext jobCtx, Hadoop hadoop, + HadoopProtocolTaskArguments args) throws IgniteCheckedException { UUID nodeId = UUID.fromString(args.get(0)); Integer id = args.get(1); @@ -40,6 +41,6 @@ public class GridHadoopProtocolJobCountersTask extends GridHadoopProtocolTaskAda assert nodeId != null; assert id != null; - return hadoop.counters(new GridHadoopJobId(nodeId, id)); + return hadoop.counters(new HadoopJobId(nodeId, id)); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobStatusTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java similarity index 87% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobStatusTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java index de4f89c..1734562 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolJobStatusTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolJobStatusTask.java @@ -29,7 +29,7 @@ import java.util.*; /** * Job status task. 
*/ -public class GridHadoopProtocolJobStatusTask extends GridHadoopProtocolTaskAdapter { +public class HadoopProtocolJobStatusTask extends HadoopProtocolTaskAdapter { /** */ private static final long serialVersionUID = 0L; @@ -40,8 +40,8 @@ public class GridHadoopProtocolJobStatusTask extends GridHadoopProtocolTaskAdapt private static final String ATTR_HELD = "held"; /** {@inheritDoc} */ - @Override public GridHadoopJobStatus run(final ComputeJobContext jobCtx, GridHadoop hadoop, - GridHadoopProtocolTaskArguments args) throws IgniteCheckedException { + @Override public HadoopJobStatus run(final ComputeJobContext jobCtx, Hadoop hadoop, + HadoopProtocolTaskArguments args) throws IgniteCheckedException { UUID nodeId = UUID.fromString(args.get(0)); Integer id = args.get(1); Long pollDelay = args.get(2); @@ -49,7 +49,7 @@ public class GridHadoopProtocolJobStatusTask extends GridHadoopProtocolTaskAdapt assert nodeId != null; assert id != null; - GridHadoopJobId jobId = new GridHadoopJobId(nodeId, id); + HadoopJobId jobId = new HadoopJobId(nodeId, id); if (pollDelay == null) pollDelay = DFLT_POLL_DELAY; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolKillJobTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolKillJobTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java index 384bc23..d173612 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolKillJobTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolKillJobTask.java @@ -26,20 +26,20 @@ import java.util.*; /** * Kill job task. 
*/ -public class GridHadoopProtocolKillJobTask extends GridHadoopProtocolTaskAdapter { +public class HadoopProtocolKillJobTask extends HadoopProtocolTaskAdapter { /** */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override public Boolean run(ComputeJobContext jobCtx, GridHadoop hadoop, - GridHadoopProtocolTaskArguments args) throws IgniteCheckedException { + @Override public Boolean run(ComputeJobContext jobCtx, Hadoop hadoop, + HadoopProtocolTaskArguments args) throws IgniteCheckedException { UUID nodeId = UUID.fromString(args.get(0)); Integer id = args.get(1); assert nodeId != null; assert id != null; - GridHadoopJobId jobId = new GridHadoopJobId(nodeId, id); + HadoopJobId jobId = new HadoopJobId(nodeId, id); return hadoop.kill(jobId); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolNextTaskIdTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolNextTaskIdTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java index f76f3b6..2782530 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolNextTaskIdTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolNextTaskIdTask.java @@ -23,13 +23,13 @@ import org.apache.ignite.internal.processors.hadoop.*; /** * Task to get the next job ID. */ -public class GridHadoopProtocolNextTaskIdTask extends GridHadoopProtocolTaskAdapter { +public class HadoopProtocolNextTaskIdTask extends HadoopProtocolTaskAdapter { /** */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override public GridHadoopJobId run(ComputeJobContext jobCtx, GridHadoop hadoop, - GridHadoopProtocolTaskArguments args) { + @Override public HadoopJobId run(ComputeJobContext jobCtx, Hadoop hadoop, + HadoopProtocolTaskArguments args) { return hadoop.nextJobId(); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolSubmitJobTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java similarity index 68% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolSubmitJobTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java index c734acd..f65d9bb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolSubmitJobTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolSubmitJobTask.java @@ -23,34 +23,34 @@ import org.apache.ignite.internal.processors.hadoop.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobPhase.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobPhase.*; /** * Submit job task. 
*/ -public class GridHadoopProtocolSubmitJobTask extends GridHadoopProtocolTaskAdapter { +public class HadoopProtocolSubmitJobTask extends HadoopProtocolTaskAdapter { /** */ private static final long serialVersionUID = 0L; /** {@inheritDoc} */ - @Override public GridHadoopJobStatus run(ComputeJobContext jobCtx, GridHadoop hadoop, - GridHadoopProtocolTaskArguments args) throws IgniteCheckedException { + @Override public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop, + HadoopProtocolTaskArguments args) throws IgniteCheckedException { UUID nodeId = UUID.fromString(args.get(0)); Integer id = args.get(1); - GridHadoopDefaultJobInfo info = args.get(2); + HadoopDefaultJobInfo info = args.get(2); assert nodeId != null; assert id != null; assert info != null; - GridHadoopJobId jobId = new GridHadoopJobId(nodeId, id); + HadoopJobId jobId = new HadoopJobId(nodeId, id); hadoop.submit(jobId, info); - GridHadoopJobStatus res = hadoop.status(jobId); + HadoopJobStatus res = hadoop.status(jobId); if (res == null) // Submission failed. - res = new GridHadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1); + res = new HadoopJobStatus(jobId, info.jobName(), info.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1); return res; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolTaskAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java similarity index 88% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolTaskAdapter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java index 086545c..f763ccc 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/GridHadoopProtocolTaskAdapter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/proto/HadoopProtocolTaskAdapter.java @@ -32,10 +32,10 @@ import java.util.*; /** * Hadoop protocol task adapter. */ -public abstract class GridHadoopProtocolTaskAdapter implements ComputeTask { +public abstract class HadoopProtocolTaskAdapter implements ComputeTask { /** {@inheritDoc} */ @Nullable @Override public Map map(List subgrid, - @Nullable GridHadoopProtocolTaskArguments arg) { + @Nullable HadoopProtocolTaskArguments arg) { return Collections.singletonMap(new Job(arg), subgrid.get(0)); } @@ -72,14 +72,14 @@ public abstract class GridHadoopProtocolTaskAdapter implements ComputeTask implements ComputeTask> jobs = new ConcurrentHashMap<>(); + private final ConcurrentMap> jobs = new ConcurrentHashMap<>(); /** */ protected final GridUnsafeMemory mem = new GridUnsafeMemory(0); /** {@inheritDoc} */ - @Override public void start(GridHadoopContext ctx) throws IgniteCheckedException { + @Override public void start(HadoopContext ctx) throws IgniteCheckedException { super.start(ctx); ctx.kernalContext().io().addUserMessageListener(GridTopic.TOPIC_HADOOP, new IgniteBiPredicate() { @Override public boolean apply(UUID nodeId, Object msg) { - return onMessageReceived(nodeId, (GridHadoopMessage)msg); + return onMessageReceived(nodeId, (HadoopMessage)msg); } }); } @@ -60,7 +60,7 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param cancel If should cancel all ongoing activities. 
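HadoopProtocolTaskAdapter.map() above pins every protocol operation to exactly one node of the subgrid and hands back that node's result unchanged. The same shape using Ignite's public compute API; a sketch with a String argument, whereas the adapter in the diff also forwards job-context injection and checked exceptions:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    import org.apache.ignite.cluster.ClusterNode;
    import org.apache.ignite.compute.ComputeJob;
    import org.apache.ignite.compute.ComputeJobAdapter;
    import org.apache.ignite.compute.ComputeJobResult;
    import org.apache.ignite.compute.ComputeTaskAdapter;

    abstract class OneNodeTaskSketch extends ComputeTaskAdapter<String, Object> {
        /** Pin the single job to the first node of the subgrid. */
        @Override public Map<? extends ComputeJob, ClusterNode> map(
            List<ClusterNode> subgrid, String arg) {
            ComputeJob job = new ComputeJobAdapter() {
                @Override public Object execute() {
                    return run(arg);
                }
            };

            return Collections.singletonMap(job, subgrid.get(0));
        }

        /** Single job, single result: return it as-is. */
        @Override public Object reduce(List<ComputeJobResult> results) {
            return results.get(0).getData();
        }

        /** The actual protocol operation, cf. the run(...) methods above. */
        protected abstract Object run(String arg);
    }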
*/ @Override public void stop(boolean cancel) { - for (GridHadoopShuffleJob job : jobs.values()) { + for (HadoopShuffleJob job : jobs.values()) { try { job.close(); } @@ -79,10 +79,10 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @return Created shuffle job. * @throws IgniteCheckedException If job creation failed. */ - private GridHadoopShuffleJob newJob(GridHadoopJobId jobId) throws IgniteCheckedException { - GridHadoopMapReducePlan plan = ctx.jobTracker().plan(jobId); + private HadoopShuffleJob newJob(HadoopJobId jobId) throws IgniteCheckedException { + HadoopMapReducePlan plan = ctx.jobTracker().plan(jobId); - GridHadoopShuffleJob job = new GridHadoopShuffleJob<>(ctx.localNodeId(), log, + HadoopShuffleJob job = new HadoopShuffleJob<>(ctx.localNodeId(), log, ctx.jobTracker().job(jobId, null), mem, plan.reducers(), plan.reducers(ctx.localNodeId())); UUID[] rdcAddrs = new UUID[plan.reducers()]; @@ -117,13 +117,13 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param jobId Task info. * @return Shuffle job. */ - private GridHadoopShuffleJob job(GridHadoopJobId jobId) throws IgniteCheckedException { - GridHadoopShuffleJob res = jobs.get(jobId); + private HadoopShuffleJob job(HadoopJobId jobId) throws IgniteCheckedException { + HadoopShuffleJob res = jobs.get(jobId); if (res == null) { res = newJob(jobId); - GridHadoopShuffleJob old = jobs.putIfAbsent(jobId, res); + HadoopShuffleJob old = jobs.putIfAbsent(jobId, res); if (old != null) { res.close(); @@ -142,10 +142,10 @@ public class GridHadoopShuffle extends GridHadoopComponent { * * @param shuffleJob Job to start sending for. */ - private void startSending(GridHadoopShuffleJob shuffleJob) { + private void startSending(HadoopShuffleJob shuffleJob) { shuffleJob.startSending(ctx.kernalContext().gridName(), - new IgniteInClosure2X() { - @Override public void applyx(UUID dest, GridHadoopShuffleMessage msg) throws IgniteCheckedException { + new IgniteInClosure2X() { + @Override public void applyx(UUID dest, HadoopShuffleMessage msg) throws IgniteCheckedException { send0(dest, msg); } } @@ -159,9 +159,9 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param msg Received message. * @return {@code True}. */ - public boolean onMessageReceived(UUID src, GridHadoopMessage msg) { - if (msg instanceof GridHadoopShuffleMessage) { - GridHadoopShuffleMessage m = (GridHadoopShuffleMessage)msg; + public boolean onMessageReceived(UUID src, HadoopMessage msg) { + if (msg instanceof HadoopShuffleMessage) { + HadoopShuffleMessage m = (HadoopShuffleMessage)msg; try { job(m.jobId()).onShuffleMessage(m); @@ -172,14 +172,14 @@ public class GridHadoopShuffle extends GridHadoopComponent { try { // Reply with ack. - send0(src, new GridHadoopShuffleAck(m.id(), m.jobId())); + send0(src, new HadoopShuffleAck(m.id(), m.jobId())); } catch (IgniteCheckedException e) { U.error(log, "Failed to reply back to shuffle message sender [snd=" + src + ", msg=" + msg + ']', e); } } - else if (msg instanceof GridHadoopShuffleAck) { - GridHadoopShuffleAck m = (GridHadoopShuffleAck)msg; + else if (msg instanceof HadoopShuffleAck) { + HadoopShuffleAck m = (HadoopShuffleAck)msg; try { job(m.jobId()).onShuffleAck(m); @@ -199,7 +199,7 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param taskCtx Task info. * @return Output. 
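job(jobId) above resolves the shuffle job lazily and tolerates two threads racing to create it: the instance that loses the putIfAbsent race is closed and the winner's is kept. The idiom on its own, with AutoCloseable in place of HadoopShuffleJob:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    class LazyJobsSketch<K, J extends AutoCloseable> {
        private final ConcurrentMap<K, J> jobs = new ConcurrentHashMap<>();

        J get(K id, Function<K, J> factory) throws Exception {
            J res = jobs.get(id);

            if (res == null) {
                res = factory.apply(id);

                J old = jobs.putIfAbsent(id, res);

                if (old != null) {
                    res.close(); // Lost the race: discard ours, keep the winner's.
                    res = old;
                }
            }

            return res;
        }
    }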
*/ - public GridHadoopTaskOutput output(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException { return job(taskCtx.taskInfo().jobId()).output(taskCtx); } @@ -207,15 +207,15 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param taskCtx Task info. * @return Input. */ - public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException { return job(taskCtx.taskInfo().jobId()).input(taskCtx); } /** * @param jobId Job id. */ - public void jobFinished(GridHadoopJobId jobId) { - GridHadoopShuffleJob job = jobs.remove(jobId); + public void jobFinished(HadoopJobId jobId) { + HadoopShuffleJob job = jobs.remove(jobId); if (job != null) { try { @@ -233,8 +233,8 @@ public class GridHadoopShuffle extends GridHadoopComponent { * @param jobId Job ID. * @return Future. */ - public IgniteInternalFuture flush(GridHadoopJobId jobId) { - GridHadoopShuffleJob job = jobs.get(jobId); + public IgniteInternalFuture flush(HadoopJobId jobId) { + HadoopShuffleJob job = jobs.get(jobId); if (job == null) return new GridFinishedFutureEx<>(); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleAck.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleAck.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java index a8a52a9..49cbd65 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleAck.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleAck.java @@ -27,7 +27,7 @@ import java.io.*; /** * Acknowledgement message. */ -public class GridHadoopShuffleAck implements GridHadoopMessage { +public class HadoopShuffleAck implements HadoopMessage { /** */ private static final long serialVersionUID = 0L; @@ -37,19 +37,19 @@ public class GridHadoopShuffleAck implements GridHadoopMessage { /** */ @GridToStringInclude - private GridHadoopJobId jobId; + private HadoopJobId jobId; /** * */ - public GridHadoopShuffleAck() { + public HadoopShuffleAck() { // No-op. } /** * @param msgId Message ID. */ - public GridHadoopShuffleAck(long msgId, GridHadoopJobId jobId) { + public HadoopShuffleAck(long msgId, HadoopJobId jobId) { assert jobId != null; this.msgId = msgId; @@ -66,7 +66,7 @@ public class GridHadoopShuffleAck implements GridHadoopMessage { /** * @return Job ID. 
*/ - public GridHadoopJobId jobId() { + public HadoopJobId jobId() { return jobId; } @@ -78,7 +78,7 @@ public class GridHadoopShuffleAck implements GridHadoopMessage { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - jobId = new GridHadoopJobId(); + jobId = new HadoopJobId(); jobId.readExternal(in); msgId = in.readLong(); @@ -86,6 +86,6 @@ public class GridHadoopShuffleAck implements GridHadoopMessage { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopShuffleAck.class, this); + return S.toString(HadoopShuffleAck.class, this); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleJob.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java similarity index 79% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleJob.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java index 545c1b8..7ae52df 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleJob.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleJob.java @@ -36,18 +36,18 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobProperty.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.*; import static org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory.*; /** * Shuffle job. */ -public class GridHadoopShuffleJob implements AutoCloseable { +public class HadoopShuffleJob implements AutoCloseable { /** */ private static final int MSG_BUF_SIZE = 128 * 1024; /** */ - private final GridHadoopJob job; + private final HadoopJob job; /** */ private final GridUnsafeMemory mem; @@ -56,7 +56,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { private final boolean needPartitioner; /** Collection of task contexts for each reduce task. */ - private final Map reducersCtx = new HashMap<>(); + private final Map reducersCtx = new HashMap<>(); /** Reducers addresses. */ private T[] reduceAddrs; @@ -65,16 +65,16 @@ public class GridHadoopShuffleJob implements AutoCloseable { private final T locReduceAddr; /** */ - private final GridHadoopShuffleMessage[] msgs; + private final HadoopShuffleMessage[] msgs; /** */ - private final AtomicReferenceArray maps; + private final AtomicReferenceArray maps; /** */ - private volatile IgniteInClosure2X io; + private volatile IgniteInClosure2X io; /** */ - protected ConcurrentMap>> sentMsgs = + protected ConcurrentMap>> sentMsgs = new ConcurrentHashMap<>(); /** */ @@ -98,16 +98,16 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @param locReducers Reducers will work on current node. * @throws IgniteCheckedException If error. 
*/ - public GridHadoopShuffleJob(T locReduceAddr, IgniteLogger log, GridHadoopJob job, GridUnsafeMemory mem, + public HadoopShuffleJob(T locReduceAddr, IgniteLogger log, HadoopJob job, GridUnsafeMemory mem, int totalReducerCnt, int[] locReducers) throws IgniteCheckedException { this.locReduceAddr = locReduceAddr; this.job = job; this.mem = mem; - this.log = log.getLogger(GridHadoopShuffleJob.class); + this.log = log.getLogger(HadoopShuffleJob.class); if (!F.isEmpty(locReducers)) { for (int rdc : locReducers) { - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(GridHadoopTaskType.REDUCE, job.id(), rdc, 0, null); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, job.id(), rdc, 0, null); reducersCtx.put(rdc, job.getTaskContext(taskInfo)); } @@ -116,7 +116,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { needPartitioner = totalReducerCnt > 1; maps = new AtomicReferenceArray<>(totalReducerCnt); - msgs = new GridHadoopShuffleMessage[totalReducerCnt]; + msgs = new HadoopShuffleMessage[totalReducerCnt]; } /** @@ -145,7 +145,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @param io IO Closure for sending messages. */ @SuppressWarnings("BusyWait") - public void startSending(String gridName, IgniteInClosure2X io) { + public void startSending(String gridName, IgniteInClosure2X io) { assert snd == null; assert io != null; @@ -178,13 +178,13 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @param idx Index. * @return Map. */ - private GridHadoopMultimap getOrCreateMap(AtomicReferenceArray maps, int idx) { - GridHadoopMultimap map = maps.get(idx); + private HadoopMultimap getOrCreateMap(AtomicReferenceArray maps, int idx) { + HadoopMultimap map = maps.get(idx); if (map == null) { // Create new map. map = get(job.info(), SHUFFLE_REDUCER_NO_SORTING, false) ? - new GridHadoopConcurrentHashMultimap(job.info(), mem, get(job.info(), PARTITION_HASHMAP_SIZE, 8 * 1024)): - new GridHadoopSkipList(job.info(), mem); + new HadoopConcurrentHashMultimap(job.info(), mem, get(job.info(), PARTITION_HASHMAP_SIZE, 8 * 1024)): + new HadoopSkipList(job.info(), mem); if (!maps.compareAndSet(idx, null, map)) { map.close(); @@ -200,26 +200,26 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @param msg Message. * @throws IgniteCheckedException Exception. */ - public void onShuffleMessage(GridHadoopShuffleMessage msg) throws IgniteCheckedException { + public void onShuffleMessage(HadoopShuffleMessage msg) throws IgniteCheckedException { assert msg.buffer() != null; assert msg.offset() > 0; - GridHadoopTaskContext taskCtx = reducersCtx.get(msg.reducer()); + HadoopTaskContext taskCtx = reducersCtx.get(msg.reducer()); - GridHadoopPerformanceCounter perfCntr = GridHadoopPerformanceCounter.getCounter(taskCtx.counters(), null); + HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(taskCtx.counters(), null); perfCntr.onShuffleMessage(msg.reducer(), U.currentTimeMillis()); - GridHadoopMultimap map = getOrCreateMap(maps, msg.reducer()); + HadoopMultimap map = getOrCreateMap(maps, msg.reducer()); // Add data from message to the map. 
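getOrCreateMap above applies the same race-tolerant creation to one AtomicReferenceArray slot per reducer: build optimistically, install with compareAndSet, and close the duplicate on loss. Extracted as a generic helper, with Supplier and Consumer standing in for the multimap constructor and its close():

    import java.util.concurrent.atomic.AtomicReferenceArray;
    import java.util.function.Consumer;
    import java.util.function.Supplier;

    class SlotSketch {
        static <T> T getOrCreate(AtomicReferenceArray<T> slots, int idx,
            Supplier<T> factory, Consumer<T> closer) {
            T v = slots.get(idx);

            if (v == null) {
                v = factory.get();

                if (!slots.compareAndSet(idx, null, v)) {
                    closer.accept(v); // Another thread installed first: close ours.

                    v = slots.get(idx);
                }
            }

            return v;
        }
    }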
- try (GridHadoopMultimap.Adder adder = map.startAdding(taskCtx)) { + try (HadoopMultimap.Adder adder = map.startAdding(taskCtx)) { final GridUnsafeDataInput dataInput = new GridUnsafeDataInput(); final UnsafeValue val = new UnsafeValue(msg.buffer()); - msg.visit(new GridHadoopShuffleMessage.Visitor() { + msg.visit(new HadoopShuffleMessage.Visitor() { /** */ - private GridHadoopMultimap.Key key; + private HadoopMultimap.Key key; @Override public void onKey(byte[] buf, int off, int len) throws IgniteCheckedException { dataInput.bytes(buf, off, off + len); @@ -241,8 +241,8 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @param ack Shuffle ack. */ @SuppressWarnings("ConstantConditions") - public void onShuffleAck(GridHadoopShuffleAck ack) { - IgniteBiTuple> tup = sentMsgs.get(ack.id()); + public void onShuffleAck(HadoopShuffleAck ack) { + IgniteBiTuple> tup = sentMsgs.get(ack.id()); if (tup != null) tup.get2().onDone(); @@ -253,7 +253,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { /** * Unsafe value. */ - private static class UnsafeValue implements GridHadoopMultimap.Value { + private static class UnsafeValue implements HadoopMultimap.Value { /** */ private final byte[] buf; @@ -288,17 +288,17 @@ public class GridHadoopShuffleJob implements AutoCloseable { */ private void collectUpdatesAndSend(boolean flush) throws IgniteCheckedException { for (int i = 0; i < maps.length(); i++) { - GridHadoopMultimap map = maps.get(i); + HadoopMultimap map = maps.get(i); if (map == null || locReduceAddr.equals(reduceAddrs[i])) continue; // Skip empty map and local node. if (msgs[i] == null) - msgs[i] = new GridHadoopShuffleMessage(job.id(), i, MSG_BUF_SIZE); + msgs[i] = new HadoopShuffleMessage(job.id(), i, MSG_BUF_SIZE); final int idx = i; - map.visit(false, new GridHadoopMultimap.Visitor() { + map.visit(false, new HadoopMultimap.Visitor() { /** */ private long keyPtr; @@ -317,7 +317,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { } private boolean tryAdd(long valPtr, int valSize) { - GridHadoopShuffleMessage msg = msgs[idx]; + HadoopShuffleMessage msg = msgs[idx]; if (!keyAdded) { // Add key and value. int size = keySize + valSize; @@ -367,12 +367,12 @@ public class GridHadoopShuffleJob implements AutoCloseable { private void send(final int idx, int newBufMinSize) { final GridFutureAdapterEx fut = new GridFutureAdapterEx<>(); - GridHadoopShuffleMessage msg = msgs[idx]; + HadoopShuffleMessage msg = msgs[idx]; final long msgId = msg.id(); - IgniteBiTuple> old = sentMsgs.putIfAbsent(msgId, - new IgniteBiTuple>(msg, fut)); + IgniteBiTuple> old = sentMsgs.putIfAbsent(msgId, + new IgniteBiTuple>(msg, fut)); assert old == null; @@ -398,7 +398,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { } }); - msgs[idx] = newBufMinSize == 0 ? null : new GridHadoopShuffleMessage(job.id(), idx, + msgs[idx] = newBufMinSize == 0 ? null : new HadoopShuffleMessage(job.id(), idx, Math.max(MSG_BUF_SIZE, newBufMinSize)); } @@ -421,9 +421,9 @@ public class GridHadoopShuffleJob implements AutoCloseable { /** * @param maps Maps. 
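The sentMsgs map above gives the shuffle its delivery guarantee: each outgoing message id is paired with a future, the remote ack completes it, and flush() later waits on whatever is still outstanding. A simplified model using CompletableFuture; the diff's version additionally stores the message itself in an IgniteBiTuple next to the future:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class AckTrackerSketch {
        private final ConcurrentMap<Long, CompletableFuture<Void>> sent =
            new ConcurrentHashMap<>();

        CompletableFuture<Void> onSend(long msgId) {
            CompletableFuture<Void> fut = new CompletableFuture<>();

            sent.put(msgId, fut);

            return fut;
        }

        void onAck(long msgId) {
            CompletableFuture<Void> fut = sent.remove(msgId);

            if (fut != null)
                fut.complete(null); // Receiver has the data; the message is settled.
        }

        /** Block until every unacknowledged message has been acked. */
        void flush() {
            CompletableFuture.allOf(sent.values().toArray(new CompletableFuture[0])).join();
        }
    }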
*/ - private void close(AtomicReferenceArray maps) { + private void close(AtomicReferenceArray maps) { for (int i = 0; i < maps.length(); i++) { - GridHadoopMultimap map = maps.get(i); + HadoopMultimap map = maps.get(i); if (map != null) map.close(); @@ -471,7 +471,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { GridCompoundFuture fut = new GridCompoundFuture<>(); - for (IgniteBiTuple> tup : sentMsgs.values()) + for (IgniteBiTuple> tup : sentMsgs.values()) fut.add(tup.get2()); fut.markInitialized(); @@ -487,7 +487,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @return Output. * @throws IgniteCheckedException If failed. */ - public GridHadoopTaskOutput output(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + public HadoopTaskOutput output(HadoopTaskContext taskCtx) throws IgniteCheckedException { switch (taskCtx.taskInfo().type()) { case MAP: assert !job.info().hasCombiner() : "The output creation is allowed if combiner has not been defined."; @@ -506,17 +506,17 @@ public class GridHadoopShuffleJob implements AutoCloseable { * @throws IgniteCheckedException If failed. */ @SuppressWarnings("unchecked") - public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException { switch (taskCtx.taskInfo().type()) { case REDUCE: int reducer = taskCtx.taskInfo().taskNumber(); - GridHadoopMultimap m = maps.get(reducer); + HadoopMultimap m = maps.get(reducer); if (m != null) return m.input(taskCtx); - return new GridHadoopTaskInput() { // Empty input. + return new HadoopTaskInput() { // Empty input. @Override public boolean next() { return false; } @@ -542,21 +542,21 @@ public class GridHadoopShuffleJob implements AutoCloseable { /** * Partitioned output. */ - private class PartitionedOutput implements GridHadoopTaskOutput { + private class PartitionedOutput implements HadoopTaskOutput { /** */ - private final GridHadoopTaskOutput[] adders = new GridHadoopTaskOutput[maps.length()]; + private final HadoopTaskOutput[] adders = new HadoopTaskOutput[maps.length()]; /** */ - private GridHadoopPartitioner partitioner; + private HadoopPartitioner partitioner; /** */ - private final GridHadoopTaskContext taskCtx; + private final HadoopTaskContext taskCtx; /** * Constructor. * @param taskCtx Task context. 
*/ - private PartitionedOutput(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + private PartitionedOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException { this.taskCtx = taskCtx; if (needPartitioner) @@ -574,7 +574,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { throw new IgniteCheckedException("Invalid partition: " + part); } - GridHadoopTaskOutput out = adders[part]; + HadoopTaskOutput out = adders[part]; if (out == null) adders[part] = out = getOrCreateMap(maps, part).startAdding(taskCtx); @@ -584,7 +584,7 @@ public class GridHadoopShuffleJob implements AutoCloseable { /** {@inheritDoc} */ @Override public void close() throws IgniteCheckedException { - for (GridHadoopTaskOutput adder : adders) { + for (HadoopTaskOutput adder : adders) { if (adder != null) adder.close(); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleMessage.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java index 24ebc0c..c350552a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/GridHadoopShuffleMessage.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/HadoopShuffleMessage.java @@ -20,7 +20,6 @@ package org.apache.ignite.internal.processors.hadoop.shuffle; import org.apache.ignite.*; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.processors.hadoop.message.*; -import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.tostring.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -32,7 +31,7 @@ import static org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory.*; /** * Shuffle message. */ -public class GridHadoopShuffleMessage implements GridHadoopMessage { +public class HadoopShuffleMessage implements HadoopMessage { /** */ private static final long serialVersionUID = 0L; @@ -51,7 +50,7 @@ public class GridHadoopShuffleMessage implements GridHadoopMessage { /** */ @GridToStringInclude - private GridHadoopJobId jobId; + private HadoopJobId jobId; /** */ @GridToStringInclude @@ -67,14 +66,14 @@ public class GridHadoopShuffleMessage implements GridHadoopMessage { /** * */ - public GridHadoopShuffleMessage() { + public HadoopShuffleMessage() { // No-op. } /** * @param size Size. */ - public GridHadoopShuffleMessage(GridHadoopJobId jobId, int reducer, int size) { + public HadoopShuffleMessage(HadoopJobId jobId, int reducer, int size) { assert jobId != null; buf = new byte[size]; @@ -95,7 +94,7 @@ public class GridHadoopShuffleMessage implements GridHadoopMessage { /** * @return Job ID. 
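Reassembled from the PartitionedOutput fragments above, a hedged sketch of its write path: pick a partition, validate it, then lazily create the per-partition adder. The write(Object, Object) signature and void return are assumptions; the body follows the visible fragments:

@Override public void write(Object key, Object val) throws IgniteCheckedException {
    int part = 0;

    if (partitioner != null) {
        part = partitioner.partition(key, val, adders.length); // Assumed HadoopPartitioner signature.

        if (part < 0 || part >= adders.length)
            throw new IgniteCheckedException("Invalid partition: " + part);
    }

    HadoopTaskOutput out = adders[part];

    if (out == null)
        adders[part] = out = getOrCreateMap(maps, part).startAdding(taskCtx);

    out.write(key, val);
}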
*/ - public GridHadoopJobId jobId() { + public HadoopJobId jobId() { return jobId; } @@ -207,7 +206,7 @@ public class GridHadoopShuffleMessage implements GridHadoopMessage { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - jobId = new GridHadoopJobId(); + jobId = new HadoopJobId(); jobId.readExternal(in); msgId = in.readLong(); @@ -218,7 +217,7 @@ public class GridHadoopShuffleMessage implements GridHadoopMessage { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopShuffleMessage.class, this); + return S.toString(HadoopShuffleMessage.class, this); } /** diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java similarity index 97% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimap.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java index 32db722..65d9268 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimap.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimap.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.*; /** * Multimap for map reduce intermediate results. */ -public class GridHadoopConcurrentHashMultimap extends GridHadoopHashMultimapBase { +public class HadoopConcurrentHashMultimap extends HadoopHashMultimapBase { /** */ private final AtomicReference state = new AtomicReference<>(State.READING_WRITING); @@ -56,7 +56,7 @@ public class GridHadoopConcurrentHashMultimap extends GridHadoopHashMultimapBase * @param mem Memory. * @param cap Initial capacity. */ - public GridHadoopConcurrentHashMultimap(GridHadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) { + public HadoopConcurrentHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) { super(jobInfo, mem); assert U.isPow2(cap); @@ -87,7 +87,7 @@ public class GridHadoopConcurrentHashMultimap extends GridHadoopHashMultimapBase * @return Adder object. * @param ctx Task context. */ - @Override public Adder startAdding(GridHadoopTaskContext ctx) throws IgniteCheckedException { + @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException { if (inputs.get() != 0) throw new IllegalStateException("Active inputs."); @@ -162,7 +162,7 @@ public class GridHadoopConcurrentHashMultimap extends GridHadoopHashMultimapBase } /** {@inheritDoc} */ - @Override public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException { inputs.incrementAndGet(); if (!adders.isEmpty()) @@ -369,7 +369,7 @@ public class GridHadoopConcurrentHashMultimap extends GridHadoopHashMultimapBase * @param ctx Task context. * @throws IgniteCheckedException If failed. 
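Because HadoopShuffleMessage is Externalizable with a public no-arg constructor (readExternal(...) above restores jobId, msgId and the buffer), a plain JDK object stream round-trips it. A minimal sketch with illustrative arguments; the HadoopJobId constructor shown is an assumption:

HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1); // Assumed (nodeId, localId) constructor.

HadoopShuffleMessage msg = new HadoopShuffleMessage(jobId, 0, 1024);

ByteArrayOutputStream bos = new ByteArrayOutputStream();

try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
    out.writeObject(msg);
}

try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
    HadoopShuffleMessage copy = (HadoopShuffleMessage)in.readObject();

    assert copy.id() == msg.id(); // msgId survives the round trip.
}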
*/ - private AdderImpl(GridHadoopTaskContext ctx) throws IgniteCheckedException { + private AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException { super(ctx); keyReader = new Reader(keySer); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimap.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java index 2795b77..f524bdc 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimap.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimap.java @@ -25,7 +25,7 @@ import org.apache.ignite.internal.util.typedef.internal.*; /** * Hash multimap. */ -public class GridHadoopHashMultimap extends GridHadoopHashMultimapBase { +public class HadoopHashMultimap extends HadoopHashMultimapBase { /** */ private long[] tbl; @@ -37,7 +37,7 @@ public class GridHadoopHashMultimap extends GridHadoopHashMultimapBase { * @param mem Memory. * @param cap Initial capacity. */ - public GridHadoopHashMultimap(GridHadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) { + public HadoopHashMultimap(HadoopJobInfo jobInfo, GridUnsafeMemory mem, int cap) { super(jobInfo, mem); assert U.isPow2(cap) : cap; @@ -46,7 +46,7 @@ public class GridHadoopHashMultimap extends GridHadoopHashMultimapBase { } /** {@inheritDoc} */ - @Override public Adder startAdding(GridHadoopTaskContext ctx) throws IgniteCheckedException { + @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException { return new AdderImpl(ctx); } @@ -103,7 +103,7 @@ public class GridHadoopHashMultimap extends GridHadoopHashMultimapBase { * @param ctx Task context. * @throws IgniteCheckedException If failed. */ - protected AdderImpl(GridHadoopTaskContext ctx) throws IgniteCheckedException { + protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException { super(ctx); keyReader = new Reader(keySer); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimapBase.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimapBase.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java index 92854f1..16aa673 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMultimapBase.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMultimapBase.java @@ -26,12 +26,12 @@ import java.util.*; /** * Base class for hash multimaps. */ -public abstract class GridHadoopHashMultimapBase extends GridHadoopMultimapBase { +public abstract class HadoopHashMultimapBase extends HadoopMultimapBase { /** * @param jobInfo Job info. * @param mem Memory. 
*/ - protected GridHadoopHashMultimapBase(GridHadoopJobInfo jobInfo, GridUnsafeMemory mem) { + protected HadoopHashMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) { super(jobInfo, mem); } @@ -41,7 +41,7 @@ public abstract class GridHadoopHashMultimapBase extends GridHadoopMultimapBase } /** {@inheritDoc} */ - @Override public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException { return new Input(taskCtx); } @@ -120,7 +120,7 @@ public abstract class GridHadoopHashMultimapBase extends GridHadoopMultimapBase /** * @param ser Serialization. */ - protected Reader(GridHadoopSerialization ser) { + protected Reader(HadoopSerialization ser) { super(ser); } @@ -143,7 +143,7 @@ public abstract class GridHadoopHashMultimapBase extends GridHadoopMultimapBase /** * Task input. */ - protected class Input implements GridHadoopTaskInput { + protected class Input implements HadoopTaskInput { /** */ private int idx = -1; @@ -163,7 +163,7 @@ public abstract class GridHadoopHashMultimapBase extends GridHadoopMultimapBase * @param taskCtx Task context. * @throws IgniteCheckedException If failed. */ - public Input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + public Input(HadoopTaskContext taskCtx) throws IgniteCheckedException { cap = capacity(); keyReader = new Reader(taskCtx.keySerialization()); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimap.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimap.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java index b8eb12c..5def6d3 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimap.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimap.java @@ -27,7 +27,7 @@ import java.io.*; * Multimap for hadoop intermediate results. */ @SuppressWarnings("PublicInnerClass") -public interface GridHadoopMultimap extends AutoCloseable { +public interface HadoopMultimap extends AutoCloseable { /** * Incrementally visits all the keys and values in the map. * @@ -42,14 +42,14 @@ public interface GridHadoopMultimap extends AutoCloseable { * @return Adder. * @throws IgniteCheckedException If failed. */ - public Adder startAdding(GridHadoopTaskContext ctx) throws IgniteCheckedException; + public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException; /** * @param taskCtx Task context. * @return Task input. * @throws IgniteCheckedException If failed. */ - public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) + public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException; /** {@inheritDoc} */ @@ -58,7 +58,7 @@ public interface GridHadoopMultimap extends AutoCloseable { /** * Adder. */ - public interface Adder extends GridHadoopTaskOutput { + public interface Adder extends HadoopTaskOutput { /** * @param in Data input. * @param reuse Reusable key. 
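Taken together, the HadoopMultimap contract above is a two-phase lifecycle: write through an Adder, then read back through a task input. A hedged usage sketch; the write(...) signature on HadoopTaskOutput and the key()/values() accessors on HadoopTaskInput are assumptions:

try (HadoopMultimap map = new HadoopSkipList(job.info(), mem)) {
    try (HadoopMultimap.Adder adder = map.startAdding(taskCtx)) {
        adder.write(key, val); // Adder extends HadoopTaskOutput.
    }

    HadoopTaskInput in = map.input(taskCtx);

    while (in.next()) {
        Object k = in.key();

        for (Iterator<?> vals = in.values(); vals.hasNext(); )
            use(k, vals.next()); // use(...) is a placeholder consumer.
    }

    in.close();
}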
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimapBase.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimapBase.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java index 2d8660f..7f332aa 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopMultimapBase.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopMultimapBase.java @@ -28,12 +28,12 @@ import java.io.*; import java.util.*; import java.util.concurrent.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobProperty.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.*; /** * Base class for all multimaps. */ -public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { +public abstract class HadoopMultimapBase implements HadoopMultimap { /** */ protected final GridUnsafeMemory mem; @@ -47,7 +47,7 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { * @param jobInfo Job info. * @param mem Memory. */ - protected GridHadoopMultimapBase(GridHadoopJobInfo jobInfo, GridUnsafeMemory mem) { + protected HadoopMultimapBase(HadoopJobInfo jobInfo, GridUnsafeMemory mem) { assert jobInfo != null; assert mem != null; @@ -110,15 +110,15 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { private Object tmp; /** */ - private final GridHadoopSerialization ser; + private final HadoopSerialization ser; /** */ - private final GridHadoopDataInStream in = new GridHadoopDataInStream(mem); + private final HadoopDataInStream in = new HadoopDataInStream(mem); /** * @param ser Serialization. */ - protected ReaderBase(GridHadoopSerialization ser) { + protected ReaderBase(HadoopSerialization ser) { assert ser != null; this.ser = ser; @@ -172,13 +172,13 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { */ protected abstract class AdderBase implements Adder { /** */ - protected final GridHadoopSerialization keySer; + protected final HadoopSerialization keySer; /** */ - protected final GridHadoopSerialization valSer; + protected final HadoopSerialization valSer; /** */ - private final GridHadoopDataOutStream out; + private final HadoopDataOutStream out; /** */ private long writeStart; @@ -190,11 +190,11 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { * @param ctx Task context. * @throws IgniteCheckedException If failed. 
*/ - protected AdderBase(GridHadoopTaskContext ctx) throws IgniteCheckedException { + protected AdderBase(HadoopTaskContext ctx) throws IgniteCheckedException { valSer = ctx.valueSerialization(); keySer = ctx.keySerialization(); - out = new GridHadoopDataOutStream(mem) { + out = new HadoopDataOutStream(mem) { @Override public long move(long size) { long ptr = super.move(size); @@ -221,7 +221,7 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { pages.add(newPageSize); pages.add(newPagePtr); - GridHadoopOffheapBuffer b = out.buffer(); + HadoopOffheapBuffer b = out.buffer(); b.set(newPagePtr, newPageSize); @@ -240,7 +240,7 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { * @return Fixed pointer. */ private long fixAlignment() { - GridHadoopOffheapBuffer b = out.buffer(); + HadoopOffheapBuffer b = out.buffer(); long ptr = b.pointer(); @@ -259,7 +259,7 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { * @return Page pointer. * @throws IgniteCheckedException If failed. */ - protected long write(int off, Object o, GridHadoopSerialization ser) throws IgniteCheckedException { + protected long write(int off, Object o, HadoopSerialization ser) throws IgniteCheckedException { writeStart = fixAlignment(); if (off != 0) @@ -288,7 +288,7 @@ public abstract class GridHadoopMultimapBase implements GridHadoopMultimap { * @param ptr Pointer. */ protected void localDeallocate(long ptr) { - GridHadoopOffheapBuffer b = out.buffer(); + HadoopOffheapBuffer b = out.buffer(); if (b.isInside(ptr)) b.pointer(ptr); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipList.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipList.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java index a2c626c..69aa7a7 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipList.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipList.java @@ -31,7 +31,7 @@ import java.util.concurrent.atomic.*; /** * Skip list. */ -public class GridHadoopSkipList extends GridHadoopMultimapBase { +public class HadoopSkipList extends HadoopMultimapBase { /** */ private static final int HEADS_SIZE = 24 + 33 * 8; // Offset + max level is from 0 to 32 inclusive. @@ -48,7 +48,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { * @param jobInfo Job info. * @param mem Memory. 
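fixAlignment() above produces an 8-byte-aligned write position, but its body is elided by the hunk; the standard rounding idiom it presumably relies on:

/** Rounds ptr up to the next multiple of 8 (a no-op when already aligned). */
static long align8(long ptr) {
    return (ptr + 7L) & ~7L;
}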
*/ - public GridHadoopSkipList(GridHadoopJobInfo jobInfo, GridUnsafeMemory mem) { + public HadoopSkipList(HadoopJobInfo jobInfo, GridUnsafeMemory mem) { super(jobInfo, mem); heads = mem.allocate(HEADS_SIZE, true); @@ -93,12 +93,12 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { } /** {@inheritDoc} */ - @Override public Adder startAdding(GridHadoopTaskContext ctx) throws IgniteCheckedException { + @Override public Adder startAdding(HadoopTaskContext ctx) throws IgniteCheckedException { return new AdderImpl(ctx); } /** {@inheritDoc} */ - @Override public GridHadoopTaskInput input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + @Override public HadoopTaskInput input(HadoopTaskContext taskCtx) throws IgniteCheckedException { Input in = new Input(taskCtx); Comparator grpCmp = taskCtx.groupComparator(); @@ -243,7 +243,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { /** * @param ser Serialization. */ - protected Reader(GridHadoopSerialization ser) { + protected Reader(HadoopSerialization ser) { super(ser); } @@ -285,7 +285,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { * @param ctx Task context. * @throws IgniteCheckedException If failed. */ - protected AdderImpl(GridHadoopTaskContext ctx) throws IgniteCheckedException { + protected AdderImpl(HadoopTaskContext ctx) throws IgniteCheckedException { super(ctx); keyReader = new Reader(keySer); @@ -570,7 +570,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { /** * Task input. */ - private class Input implements GridHadoopTaskInput { + private class Input implements HadoopTaskInput { /** */ private long metaPtr = heads; @@ -584,7 +584,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { * @param taskCtx Task context. * @throws IgniteCheckedException If failed. */ - private Input(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + private Input(HadoopTaskContext taskCtx) throws IgniteCheckedException { keyReader = new Reader(taskCtx.keySerialization()); valReader = new Reader(taskCtx.valueSerialization()); } @@ -616,7 +616,7 @@ public class GridHadoopSkipList extends GridHadoopMultimapBase { /** * Grouped input using grouping comparator. */ - private class GroupedInput implements GridHadoopTaskInput { + private class GroupedInput implements HadoopTaskInput { /** */ private final Comparator grpCmp; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataInStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataInStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java index 8b4f0c4..8a1ee70 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataInStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataInStream.java @@ -25,9 +25,9 @@ import java.nio.charset.*; /** * Data input stream. 
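The skip list above reserves 33 head slots (HEADS_SIZE = 24 + 33 * 8, levels 0 through 32). A hedged sketch of the usual coin-flip level draw such a structure uses; the actual HadoopSkipList policy may differ:

static int randomLevel(java.util.Random rnd) {
    int level = 0;

    while (level < 32 && rnd.nextBoolean())
        level++; // P(level >= n) = 2^-n, matching the 33-slot heads array.

    return level;
}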
*/ -public class GridHadoopDataInStream extends InputStream implements DataInput { +public class HadoopDataInStream extends InputStream implements DataInput { /** */ - private final GridHadoopOffheapBuffer buf = new GridHadoopOffheapBuffer(0, 0); + private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0); /** */ private final GridUnsafeMemory mem; @@ -35,7 +35,7 @@ public class GridHadoopDataInStream extends InputStream implements DataInput { /** * @param mem Memory. */ - public GridHadoopDataInStream(GridUnsafeMemory mem) { + public HadoopDataInStream(GridUnsafeMemory mem) { assert mem != null; this.mem = mem; @@ -44,7 +44,7 @@ public class GridHadoopDataInStream extends InputStream implements DataInput { /** * @return Buffer. */ - public GridHadoopOffheapBuffer buffer() { + public HadoopOffheapBuffer buffer() { return buf; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataOutStream.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataOutStream.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java index 8b837c8..51bddf9 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataOutStream.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataOutStream.java @@ -27,9 +27,9 @@ import static org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory.*; /** * Data output stream. */ -public class GridHadoopDataOutStream extends OutputStream implements DataOutput { +public class HadoopDataOutStream extends OutputStream implements DataOutput { /** */ - private final GridHadoopOffheapBuffer buf = new GridHadoopOffheapBuffer(0, 0); + private final HadoopOffheapBuffer buf = new HadoopOffheapBuffer(0, 0); /** */ private final GridUnsafeMemory mem; @@ -37,14 +37,14 @@ public class GridHadoopDataOutStream extends OutputStream implements DataOutput /** * @param mem Memory. */ - public GridHadoopDataOutStream(GridUnsafeMemory mem) { + public HadoopDataOutStream(GridUnsafeMemory mem) { this.mem = mem; } /** * @return Buffer. */ - public GridHadoopOffheapBuffer buffer() { + public HadoopOffheapBuffer buffer() { return buf; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopOffheapBuffer.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopOffheapBuffer.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java index f9f0e1d..a8e7a33 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopOffheapBuffer.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopOffheapBuffer.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.hadoop.shuffle.streams; /** * Offheap buffer. */ -public class GridHadoopOffheapBuffer { +public class HadoopOffheapBuffer { /** Buffer begin address. 
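The renamed streams above are thin DataInput/DataOutput views over offheap memory addressed through a HadoopOffheapBuffer. A minimal wiring sketch: the GridUnsafeMemory constructor argument and allocation flag are assumptions, while buffer().set(...) mirrors the page setup in AdderBase above:

GridUnsafeMemory mem = new GridUnsafeMemory(0); // 0 = no size limit (assumed).

long page = mem.allocate(1024, true); // Two-arg allocate as used for the skip-list heads.

HadoopDataOutStream out = new HadoopDataOutStream(mem);

out.buffer().set(page, 1024); // Point the stream at the page.

out.writeLong(42); // DataOutput calls write straight into the page.

HadoopDataInStream in = new HadoopDataInStream(mem);

in.buffer().set(page, 1024);

assert in.readLong() == 42;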
*/ private long bufPtr; @@ -34,7 +34,7 @@ public class GridHadoopOffheapBuffer { * @param bufPtr Pointer to buffer begin. * @param bufSize Size of the buffer. */ - public GridHadoopOffheapBuffer(long bufPtr, long bufSize) { + public HadoopOffheapBuffer(long bufPtr, long bufSize) { set(bufPtr, bufSize); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopEmbeddedTaskExecutor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java similarity index 64% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopEmbeddedTaskExecutor.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java index fde5400..a3c20d8 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopEmbeddedTaskExecutor.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopEmbeddedTaskExecutor.java @@ -30,15 +30,15 @@ import java.util.concurrent.*; /** * Task executor. */ -public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapter { +public class HadoopEmbeddedTaskExecutor extends HadoopTaskExecutorAdapter { /** Job tracker. */ - private GridHadoopJobTracker jobTracker; + private HadoopJobTracker jobTracker; /** */ - private final ConcurrentMap> jobs = new ConcurrentHashMap<>(); + private final ConcurrentMap> jobs = new ConcurrentHashMap<>(); /** Executor service to run tasks. */ - private GridHadoopExecutorService exec; + private HadoopExecutorService exec; /** {@inheritDoc} */ @Override public void onKernalStart() throws IgniteCheckedException { @@ -46,7 +46,7 @@ public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapte jobTracker = ctx.jobTracker(); - exec = new GridHadoopExecutorService(log, ctx.kernalContext().gridName(), + exec = new HadoopExecutorService(log, ctx.kernalContext().gridName(), ctx.configuration().getMaxParallelTasks(), ctx.configuration().getMaxTaskQueueSize()); } @@ -56,7 +56,7 @@ public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapte exec.shutdown(3000); if (cancel) { - for (GridHadoopJobId jobId : jobs.keySet()) + for (HadoopJobId jobId : jobs.keySet()) cancelTasks(jobId); } } @@ -69,29 +69,29 @@ public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapte } /** {@inheritDoc} */ - @Override public void run(final GridHadoopJob job, Collection tasks) throws IgniteCheckedException { + @Override public void run(final HadoopJob job, Collection tasks) throws IgniteCheckedException { if (log.isDebugEnabled()) log.debug("Submitting tasks for local execution [locNodeId=" + ctx.localNodeId() + ", tasksCnt=" + tasks.size() + ']'); - Collection executedTasks = jobs.get(job.id()); + Collection executedTasks = jobs.get(job.id()); if (executedTasks == null) { executedTasks = new GridConcurrentHashSet<>(); - Collection extractedCol = jobs.put(job.id(), executedTasks); + Collection extractedCol = jobs.put(job.id(), executedTasks); assert extractedCol == null; } - final Collection finalExecutedTasks = executedTasks; + final Collection finalExecutedTasks = executedTasks; - for (final GridHadoopTaskInfo info : tasks) { + for (final HadoopTaskInfo info : tasks) { assert info != null; - GridHadoopRunnableTask task = new GridHadoopRunnableTask(log, job, ctx.shuffle().memory(), info, 
+ HadoopRunnableTask task = new HadoopRunnableTask(log, job, ctx.shuffle().memory(), info, ctx.localNodeId()) { - @Override protected void onTaskFinished(GridHadoopTaskStatus status) { + @Override protected void onTaskFinished(HadoopTaskStatus status) { if (log.isDebugEnabled()) log.debug("Finished task execution [jobId=" + job.id() + ", taskInfo=" + info + ", " + "waitTime=" + waitTime() + ", execTime=" + executionTime() + ']'); @@ -101,11 +101,11 @@ public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapte jobTracker.onTaskFinished(info, status); } - @Override protected GridHadoopTaskInput createInput(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + @Override protected HadoopTaskInput createInput(HadoopTaskContext taskCtx) throws IgniteCheckedException { return ctx.shuffle().input(taskCtx); } - @Override protected GridHadoopTaskOutput createOutput(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { + @Override protected HadoopTaskOutput createOutput(HadoopTaskContext taskCtx) throws IgniteCheckedException { return ctx.shuffle().output(taskCtx); } }; @@ -121,24 +121,24 @@ public class GridHadoopEmbeddedTaskExecutor extends GridHadoopTaskExecutorAdapte * for this job ID. *

    * It is guaranteed that this method will not be called concurrently with - * {@link #run(GridHadoopJob, Collection)} method. No more job submissions will be performed via - * {@link #run(GridHadoopJob, Collection)} method for given job ID after this method is called. + * the {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method. No more job submissions will be performed via + * the {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method for the given job ID after this method is called. * * @param jobId Job ID to cancel. */ - @Override public void cancelTasks(GridHadoopJobId jobId) { - Collection<GridHadoopRunnableTask> executedTasks = jobs.get(jobId); + @Override public void cancelTasks(HadoopJobId jobId) { + Collection<HadoopRunnableTask> executedTasks = jobs.get(jobId); if (executedTasks != null) { - for (GridHadoopRunnableTask task : executedTasks) + for (HadoopRunnableTask task : executedTasks) task.cancel(); } } /** {@inheritDoc} */ - @Override public void onJobStateChanged(GridHadoopJobMetadata meta) throws IgniteCheckedException { - if (meta.phase() == GridHadoopJobPhase.PHASE_COMPLETE) { - Collection<GridHadoopRunnableTask> executedTasks = jobs.remove(meta.jobId()); + @Override public void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException { + if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) { + Collection<HadoopRunnableTask> executedTasks = jobs.remove(meta.jobId()); assert executedTasks == null || executedTasks.isEmpty(); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorService.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorService.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java index 9ec637b..1c318e9 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorService.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorService.java @@ -20,7 +20,6 @@ package org.apache.ignite.internal.processors.hadoop.taskexecutor; import org.apache.ignite.*; import org.apache.ignite.internal.processors.hadoop.*; -import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.worker.*; import org.apache.ignite.thread.*; import org.jdk8.backport.*; @@ -34,7 +33,7 @@ import static java.util.Collections.*; /** * Executor service without thread pooling. */ -public class GridHadoopExecutorService { +public class HadoopExecutorService { /** */ private final LinkedBlockingQueue<Callable<?>> queue; @@ -86,14 +85,14 @@ public class GridHadoopExecutorService { * @param maxTasks Max number of tasks. * @param maxQueue Max queue length. 
*/ - public GridHadoopExecutorService(IgniteLogger log, String gridName, int maxTasks, int maxQueue) { + public HadoopExecutorService(IgniteLogger log, String gridName, int maxTasks, int maxQueue) { assert maxTasks > 0 : maxTasks; assert maxQueue > 0 : maxQueue; this.maxTasks = maxTasks; this.queue = new LinkedBlockingQueue<>(maxQueue); this.gridName = gridName; - this.log = log.getLogger(GridHadoopExecutorService.class); + this.log = log.getLogger(HadoopExecutorService.class); } /** @@ -170,8 +169,8 @@ public class GridHadoopExecutorService { private void startThread(final Callable task) { String workerName; - if (task instanceof GridHadoopRunnableTask) { - final GridHadoopTaskInfo i = ((GridHadoopRunnableTask)task).taskInfo(); + if (task instanceof HadoopRunnableTask) { + final HadoopTaskInfo i = ((HadoopRunnableTask)task).taskInfo(); workerName = "Hadoop-task-" + i.jobId() + "-" + i.type() + "-" + i.taskNumber() + "-" + i.attempt(); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopRunnableTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java similarity index 70% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopRunnableTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java index fd4a030..2b36267 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopRunnableTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopRunnableTask.java @@ -27,13 +27,13 @@ import org.apache.ignite.internal.util.typedef.internal.*; import java.util.*; import java.util.concurrent.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopJobProperty.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopTaskType.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.*; /** * Runnable task. */ -public abstract class GridHadoopRunnableTask implements Callable { +public abstract class HadoopRunnableTask implements Callable { /** */ private final GridUnsafeMemory mem; @@ -41,10 +41,10 @@ public abstract class GridHadoopRunnableTask implements Callable { private final IgniteLogger log; /** */ - private final GridHadoopJob job; + private final HadoopJob job; /** Task to run. */ - private final GridHadoopTaskInfo info; + private final HadoopTaskInfo info; /** Submit time. */ private final long submitTs = U.currentTimeMillis(); @@ -56,10 +56,10 @@ public abstract class GridHadoopRunnableTask implements Callable { private long execEndTs; /** */ - private GridHadoopMultimap combinerInput; + private HadoopMultimap combinerInput; /** */ - private volatile GridHadoopTaskContext ctx; + private volatile HadoopTaskContext ctx; /** Set if task is to cancelling. */ private volatile boolean cancelled; @@ -74,10 +74,10 @@ public abstract class GridHadoopRunnableTask implements Callable { * @param info Task info. * @param nodeId Node id. 
*/ - protected GridHadoopRunnableTask(IgniteLogger log, GridHadoopJob job, GridUnsafeMemory mem, GridHadoopTaskInfo info, + protected HadoopRunnableTask(IgniteLogger log, HadoopJob job, GridUnsafeMemory mem, HadoopTaskInfo info, UUID nodeId) { this.nodeId = nodeId; - this.log = log.getLogger(GridHadoopRunnableTask.class); + this.log = log.getLogger(HadoopRunnableTask.class); this.job = job; this.mem = mem; this.info = info; @@ -103,14 +103,14 @@ public abstract class GridHadoopRunnableTask implements Callable { Throwable err = null; - GridHadoopTaskState state = GridHadoopTaskState.COMPLETED; + HadoopTaskState state = HadoopTaskState.COMPLETED; - GridHadoopPerformanceCounter perfCntr = null; + HadoopPerformanceCounter perfCntr = null; try { ctx = job.getTaskContext(info); - perfCntr = GridHadoopPerformanceCounter.getCounter(ctx.counters(), nodeId); + perfCntr = HadoopPerformanceCounter.getCounter(ctx.counters(), nodeId); perfCntr.onTaskSubmit(info, submitTs); perfCntr.onTaskPrepare(info, execStartTs); @@ -120,7 +120,7 @@ public abstract class GridHadoopRunnableTask implements Callable { runTask(perfCntr); if (info.type() == MAP && job.info().hasCombiner()) { - ctx.taskInfo(new GridHadoopTaskInfo(COMBINE, info.jobId(), info.taskNumber(), info.attempt(), null)); + ctx.taskInfo(new HadoopTaskInfo(COMBINE, info.jobId(), info.taskNumber(), info.attempt(), null)); try { runTask(perfCntr); @@ -130,11 +130,11 @@ public abstract class GridHadoopRunnableTask implements Callable { } } } - catch (GridHadoopTaskCancelledException ignored) { - state = GridHadoopTaskState.CANCELED; + catch (HadoopTaskCancelledException ignored) { + state = HadoopTaskState.CANCELED; } catch (Throwable e) { - state = GridHadoopTaskState.FAILED; + state = HadoopTaskState.FAILED; err = e; U.error(log, "Task execution failed.", e); @@ -145,7 +145,7 @@ public abstract class GridHadoopRunnableTask implements Callable { if (perfCntr != null) perfCntr.onTaskFinish(info, execEndTs); - onTaskFinished(new GridHadoopTaskStatus(state, err, ctx==null ? null : ctx.counters())); + onTaskFinished(new HadoopTaskStatus(state, err, ctx==null ? null : ctx.counters())); if (combinerInput != null) combinerInput.close(); @@ -161,12 +161,12 @@ public abstract class GridHadoopRunnableTask implements Callable { * @param perfCntr Performance counter. * @throws IgniteCheckedException If failed. */ - private void runTask(GridHadoopPerformanceCounter perfCntr) throws IgniteCheckedException { + private void runTask(HadoopPerformanceCounter perfCntr) throws IgniteCheckedException { if (cancelled) - throw new GridHadoopTaskCancelledException("Task cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); - try (GridHadoopTaskOutput out = createOutputInternal(ctx); - GridHadoopTaskInput in = createInputInternal(ctx)) { + try (HadoopTaskOutput out = createOutputInternal(ctx); + HadoopTaskInput in = createInputInternal(ctx)) { ctx.input(in); ctx.output(out); @@ -190,7 +190,7 @@ public abstract class GridHadoopRunnableTask implements Callable { /** * @param status Task status. */ - protected abstract void onTaskFinished(GridHadoopTaskStatus status); + protected abstract void onTaskFinished(HadoopTaskStatus status); /** * @param ctx Task context. @@ -198,7 +198,7 @@ public abstract class GridHadoopRunnableTask implements Callable { * @throws IgniteCheckedException If failed. 
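Condensed from call() above, the status mapping that HadoopRunnableTask reports through onTaskFinished(...): completion by default, CANCELED on the dedicated exception, FAILED with the recorded cause otherwise:

HadoopTaskState state = HadoopTaskState.COMPLETED;

Throwable err = null;

try {
    runTask(perfCntr); // Plus the extra COMBINE pass for MAP tasks with a combiner.
}
catch (HadoopTaskCancelledException ignored) {
    state = HadoopTaskState.CANCELED;
}
catch (Throwable e) {
    state = HadoopTaskState.FAILED;

    err = e;
}

onTaskFinished(new HadoopTaskStatus(state, err, ctx == null ? null : ctx.counters()));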
*/ @SuppressWarnings("unchecked") - private GridHadoopTaskInput createInputInternal(GridHadoopTaskContext ctx) throws IgniteCheckedException { + private HadoopTaskInput createInputInternal(HadoopTaskContext ctx) throws IgniteCheckedException { switch (ctx.taskInfo().type()) { case SETUP: case MAP: @@ -221,21 +221,21 @@ public abstract class GridHadoopRunnableTask implements Callable { * @return Input. * @throws IgniteCheckedException If failed. */ - protected abstract GridHadoopTaskInput createInput(GridHadoopTaskContext ctx) throws IgniteCheckedException; + protected abstract HadoopTaskInput createInput(HadoopTaskContext ctx) throws IgniteCheckedException; /** * @param ctx Task info. * @return Output. * @throws IgniteCheckedException If failed. */ - protected abstract GridHadoopTaskOutput createOutput(GridHadoopTaskContext ctx) throws IgniteCheckedException; + protected abstract HadoopTaskOutput createOutput(HadoopTaskContext ctx) throws IgniteCheckedException; /** * @param ctx Task info. * @return Task output. * @throws IgniteCheckedException If failed. */ - private GridHadoopTaskOutput createOutputInternal(GridHadoopTaskContext ctx) throws IgniteCheckedException { + private HadoopTaskOutput createOutputInternal(HadoopTaskContext ctx) throws IgniteCheckedException { switch (ctx.taskInfo().type()) { case SETUP: case REDUCE: @@ -248,8 +248,8 @@ public abstract class GridHadoopRunnableTask implements Callable { assert combinerInput == null; combinerInput = get(job.info(), SHUFFLE_COMBINER_NO_SORTING, false) ? - new GridHadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024)): - new GridHadoopSkipList(job.info(), mem); // TODO replace with red-black tree + new HadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024)): + new HadoopSkipList(job.info(), mem); // TODO replace with red-black tree return combinerInput.startAdding(ctx); } @@ -262,7 +262,7 @@ public abstract class GridHadoopRunnableTask implements Callable { /** * @return Task info. */ - public GridHadoopTaskInfo taskInfo() { + public HadoopTaskInfo taskInfo() { return info; } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskExecutorAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java similarity index 70% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskExecutorAdapter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java index 8f66190..39b4935 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskExecutorAdapter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskExecutorAdapter.java @@ -26,7 +26,7 @@ import java.util.*; /** * Common superclass for task executor. */ -public abstract class GridHadoopTaskExecutorAdapter extends GridHadoopComponent { +public abstract class HadoopTaskExecutorAdapter extends HadoopComponent { /** * Runs tasks. * @@ -34,24 +34,24 @@ public abstract class GridHadoopTaskExecutorAdapter extends GridHadoopComponent * @param tasks Tasks. * @throws IgniteCheckedException If failed. 
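The combiner buffer choice in createOutputInternal(...) above, extracted for readability: a plain hash multimap when sorting is disabled through job properties, a skip list otherwise (get(...) is the statically imported HadoopJobProperty helper):

HadoopMultimap combinerInput = get(job.info(), SHUFFLE_COMBINER_NO_SORTING, false)
    ? new HadoopHashMultimap(job.info(), mem, get(job.info(), COMBINER_HASHMAP_SIZE, 8 * 1024))
    : new HadoopSkipList(job.info(), mem);

return combinerInput.startAdding(ctx);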
    */ - public abstract void run(final GridHadoopJob job, Collection<GridHadoopTaskInfo> tasks) throws IgniteCheckedException; + public abstract void run(final HadoopJob job, Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException; /** * Cancels all currently running tasks for given job ID and cancels scheduled execution of tasks * for this job ID. *

    * It is guaranteed that this method will not be called concurrently with - * {@link #run(GridHadoopJob, Collection)} method. No more job submissions will be performed via - * {@link #run(GridHadoopJob, Collection)} method for given job ID after this method is called. + * the {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method. No more job submissions will be performed via + * the {@link #run(org.apache.ignite.internal.processors.hadoop.HadoopJob, Collection)} method for the given job ID after this method is called. * * @param jobId Job ID to cancel. */ - public abstract void cancelTasks(GridHadoopJobId jobId) throws IgniteCheckedException; + public abstract void cancelTasks(HadoopJobId jobId) throws IgniteCheckedException; /** * On job state change callback; * * @param meta Job metadata. */ - public abstract void onJobStateChanged(GridHadoopJobMetadata meta) throws IgniteCheckedException; + public abstract void onJobStateChanged(HadoopJobMetadata meta) throws IgniteCheckedException; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskState.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java similarity index 97% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskState.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java index d1eaa66..cf2a28e 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskState.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskState.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.hadoop.taskexecutor; /** * State of the task. */ -public enum GridHadoopTaskState { +public enum HadoopTaskState { /** Running task. */ RUNNING, diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskStatus.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java similarity index 77% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskStatus.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java index 89ef8c1..c5ee16c 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopTaskStatus.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopTaskStatus.java @@ -17,7 +17,7 @@ package org.apache.ignite.internal.processors.hadoop.taskexecutor; -import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; import org.apache.ignite.internal.util.typedef.internal.*; import org.jetbrains.annotations.*; @@ -26,23 +26,23 @@ import java.io.*; /** * Task status. */ -public class GridHadoopTaskStatus implements Externalizable { +public class HadoopTaskStatus implements Externalizable { /** */ private static final long serialVersionUID = 0L; /** */ - private GridHadoopTaskState state; + private HadoopTaskState state; /** */ private Throwable failCause; /** */ - private GridHadoopCounters cntrs; + private HadoopCounters cntrs; /** * Default constructor required by {@link Externalizable}. 
*/ - public GridHadoopTaskStatus() { + public HadoopTaskStatus() { // No-op. } @@ -52,7 +52,7 @@ public class GridHadoopTaskStatus implements Externalizable { * @param state Task state. * @param failCause Failure cause (if any). */ - public GridHadoopTaskStatus(GridHadoopTaskState state, @Nullable Throwable failCause) { + public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause) { this(state, failCause, null); } @@ -63,8 +63,8 @@ public class GridHadoopTaskStatus implements Externalizable { * @param failCause Failure cause (if any). * @param cntrs Task counters. */ - public GridHadoopTaskStatus(GridHadoopTaskState state, @Nullable Throwable failCause, - @Nullable GridHadoopCounters cntrs) { + public HadoopTaskStatus(HadoopTaskState state, @Nullable Throwable failCause, + @Nullable HadoopCounters cntrs) { assert state != null; this.state = state; @@ -75,7 +75,7 @@ public class GridHadoopTaskStatus implements Externalizable { /** * @return State. */ - public GridHadoopTaskState state() { + public HadoopTaskState state() { return state; } @@ -89,13 +89,13 @@ public class GridHadoopTaskStatus implements Externalizable { /** * @return Counters. */ - @Nullable public GridHadoopCounters counters() { + @Nullable public HadoopCounters counters() { return cntrs; } /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopTaskStatus.class, this); + return S.toString(HadoopTaskStatus.class, this); } /** {@inheritDoc} */ @@ -107,8 +107,8 @@ public class GridHadoopTaskStatus implements Externalizable { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - state = (GridHadoopTaskState)in.readObject(); + state = (HadoopTaskState)in.readObject(); failCause = (Throwable)in.readObject(); - cntrs = (GridHadoopCounters)in.readObject(); + cntrs = (HadoopCounters)in.readObject(); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutor.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java index 72185c0..04a96de 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutor.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutor.java @@ -39,14 +39,14 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.locks.*; -import static org.apache.ignite.internal.processors.hadoop.taskexecutor.GridHadoopTaskState.*; +import static org.apache.ignite.internal.processors.hadoop.taskexecutor.HadoopTaskState.*; /** * External process registry. Handles external process lifecycle. */ -public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapter { +public class HadoopExternalTaskExecutor extends HadoopTaskExecutorAdapter { /** Hadoop context. 
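The hunk above shows readExternal(...) for HadoopTaskStatus but elides its writeExternal(...) counterpart, which must emit the same three objects in the same order; a hedged reconstruction:

/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    out.writeObject(state);
    out.writeObject(failCause);
    out.writeObject(cntrs);
}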
*/ - private GridHadoopContext ctx; + private HadoopContext ctx; /** */ private String javaCmd; @@ -55,7 +55,7 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte private IgniteLogger log; /** Node process descriptor. */ - private GridHadoopProcessDescriptor nodeDesc; + private HadoopProcessDescriptor nodeDesc; /** Output base. */ private File outputBase; @@ -64,25 +64,25 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte private String pathSep; /** Hadoop external communication. */ - private GridHadoopExternalCommunication comm; + private HadoopExternalCommunication comm; /** Starting processes. */ private final ConcurrentMap runningProcsByProcId = new ConcurrentHashMap8<>(); /** Starting processes. */ - private final ConcurrentMap runningProcsByJobId = new ConcurrentHashMap8<>(); + private final ConcurrentMap runningProcsByJobId = new ConcurrentHashMap8<>(); /** Busy lock. */ private final GridSpinReadWriteLock busyLock = new GridSpinReadWriteLock(); /** Job tracker. */ - private GridHadoopJobTracker jobTracker; + private HadoopJobTracker jobTracker; /** {@inheritDoc} */ - @Override public void start(GridHadoopContext ctx) throws IgniteCheckedException { + @Override public void start(HadoopContext ctx) throws IgniteCheckedException { this.ctx = ctx; - log = ctx.kernalContext().log(GridHadoopExternalTaskExecutor.class); + log = ctx.kernalContext().log(HadoopExternalTaskExecutor.class); outputBase = U.resolveWorkDirectory("hadoop", false); @@ -90,7 +90,7 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte initJavaCommand(); - comm = new GridHadoopExternalCommunication( + comm = new HadoopExternalCommunication( ctx.localNodeId(), UUID.randomUUID(), ctx.kernalContext().config().getMarshaller(), @@ -105,11 +105,11 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte nodeDesc = comm.localProcessDescriptor(); ctx.kernalContext().ports().registerPort(nodeDesc.tcpPort(), IgnitePortProtocol.TCP, - GridHadoopExternalTaskExecutor.class); + HadoopExternalTaskExecutor.class); if (nodeDesc.sharedMemoryPort() != -1) ctx.kernalContext().ports().registerPort(nodeDesc.sharedMemoryPort(), IgnitePortProtocol.TCP, - GridHadoopExternalTaskExecutor.class); + HadoopExternalTaskExecutor.class); jobTracker = ctx.jobTracker(); } @@ -127,7 +127,7 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte } /** {@inheritDoc} */ - @Override public void onJobStateChanged(final GridHadoopJobMetadata meta) { + @Override public void onJobStateChanged(final HadoopJobMetadata meta) { final HadoopProcess proc = runningProcsByJobId.get(meta.jobId()); // If we have a local process for this job. 
@@ -135,7 +135,7 @@ public class GridHadoopExternalTaskExecutor extends GridHadoopTaskExecutorAdapte
         if (log.isDebugEnabled())
             log.debug("Updating job information for remote task process [proc=" + proc + ", meta=" + meta + ']');

-        if (meta.phase() == GridHadoopJobPhase.PHASE_COMPLETE) {
+        if (meta.phase() == HadoopJobPhase.PHASE_COMPLETE) {
             if (log.isDebugEnabled())
                 log.debug("Completed job execution, will terminate child process [jobId=" + meta.jobId() +
                     ", proc=" + proc + ']');
@@ -156,8 +156,8 @@
                     "[jobId=" + meta.jobId() + ", meta=" + meta + ']');
             }
             else {
-                proc.initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>>>() {
-                    @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>> f) {
+                proc.initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+                    @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
                         try {
                             f.get();
@@ -174,7 +174,7 @@
             }
         }
         else if (ctx.isParticipating(meta)) {
-            GridHadoopJob job;
+            HadoopJob job;

             try {
                 job = jobTracker.job(meta.jobId(), meta.jobInfo());
@@ -191,7 +191,7 @@

     /** {@inheritDoc} */
     @SuppressWarnings("ConstantConditions")
-    @Override public void run(final GridHadoopJob job, final Collection<GridHadoopTaskInfo> tasks) throws IgniteCheckedException {
+    @Override public void run(final HadoopJob job, final Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
         if (!busyLock.tryReadLock()) {
             if (log.isDebugEnabled())
                 log.debug("Failed to start hadoop tasks (grid is stopping, will ignore).");
@@ -202,10 +202,10 @@
         try {
             HadoopProcess proc = runningProcsByJobId.get(job.id());

-            GridHadoopTaskType taskType = F.first(tasks).type();
+            HadoopTaskType taskType = F.first(tasks).type();

-            if (taskType == GridHadoopTaskType.SETUP || taskType == GridHadoopTaskType.ABORT ||
-                taskType == GridHadoopTaskType.COMMIT) {
+            if (taskType == HadoopTaskType.SETUP || taskType == HadoopTaskType.ABORT ||
+                taskType == HadoopTaskType.COMMIT) {
                 if (proc == null || proc.terminated()) {
                     runningProcsByJobId.remove(job.id(), proc);
@@ -223,9 +223,9 @@

                 final HadoopProcess proc0 = proc;

-                proc.initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>>>() {
+                proc.initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
                     @Override public void apply(
-                        IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>> f) {
+                        IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
                         if (!busyLock.tryReadLock())
                             return;
@@ -255,7 +255,7 @@
     }

     /** {@inheritDoc} */
-    @Override public void cancelTasks(GridHadoopJobId jobId) {
+    @Override public void cancelTasks(HadoopJobId jobId) {
         HadoopProcess proc = runningProcsByJobId.get(jobId);

         if (proc != null)
@@ -269,7 +269,7 @@
      * @param job Job instance.
      * @param tasks Collection of tasks to execute in started process.
      */
-    private void sendExecutionRequest(HadoopProcess proc, GridHadoopJob job, Collection<GridHadoopTaskInfo> tasks)
+    private void sendExecutionRequest(HadoopProcess proc, HadoopJob job, Collection<HadoopTaskInfo> tasks)
         throws IgniteCheckedException {
         // Must synchronize since concurrent process crash may happen and will receive onConnectionLost().
         proc.lock();
@@ -281,7 +281,7 @@
                 return;
             }

-            GridHadoopTaskExecutionRequest req = new GridHadoopTaskExecutionRequest();
+            HadoopTaskExecutionRequest req = new HadoopTaskExecutionRequest();

             req.jobId(job.id());
             req.jobInfo(job.info());
@@ -297,8 +297,8 @@
     /**
      * @return External task metadata.
      */
-    private GridHadoopExternalTaskMetadata buildTaskMeta() {
-        GridHadoopExternalTaskMetadata meta = new GridHadoopExternalTaskMetadata();
+    private HadoopExternalTaskMetadata buildTaskMeta() {
+        HadoopExternalTaskMetadata meta = new HadoopExternalTaskMetadata();

         meta.classpath(Arrays.asList(System.getProperty("java.class.path").split(File.pathSeparator)));
         meta.jvmOptions(Arrays.asList("-Xmx1g", "-ea", "-XX:+UseConcMarkSweepGC", "-XX:+CMSClassUnloadingEnabled",
@@ -312,10 +312,10 @@
      * @param state Fail state.
      * @param e Optional error.
      */
-    private void notifyTasksFailed(Iterable<GridHadoopTaskInfo> tasks, GridHadoopTaskState state, Throwable e) {
-        GridHadoopTaskStatus fail = new GridHadoopTaskStatus(state, e);
+    private void notifyTasksFailed(Iterable<HadoopTaskInfo> tasks, HadoopTaskState state, Throwable e) {
+        HadoopTaskStatus fail = new HadoopTaskStatus(state, e);

-        for (GridHadoopTaskInfo task : tasks)
+        for (HadoopTaskInfo task : tasks)
             jobTracker.onTaskFinished(task, fail);
     }

@@ -325,12 +325,12 @@
      * @param job Job instance.
      * @param plan Map reduce plan.
      */
-    private HadoopProcess startProcess(final GridHadoopJob job, final GridHadoopMapReducePlan plan) {
+    private HadoopProcess startProcess(final HadoopJob job, final HadoopMapReducePlan plan) {
         final UUID childProcId = UUID.randomUUID();

-        GridHadoopJobId jobId = job.id();
+        HadoopJobId jobId = job.id();

-        final GridHadoopProcessFuture fut = new GridHadoopProcessFuture(childProcId, jobId, ctx.kernalContext());
+        final HadoopProcessFuture fut = new HadoopProcessFuture(childProcId, jobId, ctx.kernalContext());

         final HadoopProcess proc = new HadoopProcess(jobId, fut, plan.reducers(ctx.localNodeId()));
@@ -351,7 +351,7 @@
                 }

                 try {
-                    GridHadoopExternalTaskMetadata startMeta = buildTaskMeta();
+                    HadoopExternalTaskMetadata startMeta = buildTaskMeta();

                     if (log.isDebugEnabled())
                         log.debug("Created hadoop child process metadata for job [job=" + job +
@@ -404,8 +404,8 @@
             }
         }, true);

-        fut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>>>() {
-            @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>> f) {
+        fut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
+            @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
                 try {
                     // Make sure there were no exceptions.
                     f.get();
@@ -493,8 +493,8 @@
      * @param job Job.
      * @return Started process.
      */
-    private Process startJavaProcess(UUID childProcId, GridHadoopExternalTaskMetadata startMeta,
-        GridHadoopJob job) throws Exception {
+    private Process startJavaProcess(UUID childProcId, HadoopExternalTaskMetadata startMeta,
+        HadoopJob job) throws Exception {
         String outFldr = jobWorkFolder(job.id()) + File.separator + childProcId;

         if (log.isDebugEnabled())
@@ -508,7 +508,7 @@
         cmd.addAll(startMeta.jvmOptions());
         cmd.add("-cp");
         cmd.add(buildClasspath(startMeta.classpath()));
-        cmd.add(GridHadoopExternalProcessStarter.class.getName());
+        cmd.add(HadoopExternalProcessStarter.class.getName());
         cmd.add("-cpid");
         cmd.add(String.valueOf(childProcId));
         cmd.add("-ppid");
@@ -538,7 +538,7 @@
      * @param jobId Job ID.
      * @return Job work folder.
      */
-    private String jobWorkFolder(GridHadoopJobId jobId) {
+    private String jobWorkFolder(HadoopJobId jobId) {
         return outputBase + File.separator + "Job_" + jobId;
     }

@@ -565,18 +565,18 @@
      * @param proc Process to send request to.
      * @param meta Job metadata.
      */
-    private void sendJobInfoUpdate(HadoopProcess proc, GridHadoopJobMetadata meta) {
-        Map<Integer, GridHadoopProcessDescriptor> rdcAddrs = meta.reducersAddresses();
+    private void sendJobInfoUpdate(HadoopProcess proc, HadoopJobMetadata meta) {
+        Map<Integer, HadoopProcessDescriptor> rdcAddrs = meta.reducersAddresses();

         int rdcNum = meta.mapReducePlan().reducers();

-        GridHadoopProcessDescriptor[] addrs = null;
+        HadoopProcessDescriptor[] addrs = null;

         if (rdcAddrs != null && rdcAddrs.size() == rdcNum) {
-            addrs = new GridHadoopProcessDescriptor[rdcNum];
+            addrs = new HadoopProcessDescriptor[rdcNum];

             for (int i = 0; i < rdcNum; i++) {
-                GridHadoopProcessDescriptor desc = rdcAddrs.get(i);
+                HadoopProcessDescriptor desc = rdcAddrs.get(i);

                 assert desc != null : "Missing reducing address [meta=" + meta + ", rdc=" + i + ']';
@@ -585,7 +585,7 @@
         }

         try {
-            comm.sendMessage(proc.descriptor(), new GridHadoopJobInfoUpdateRequest(proc.jobId, meta.phase(), addrs));
+            comm.sendMessage(proc.descriptor(), new HadoopJobInfoUpdateRequest(proc.jobId, meta.phase(), addrs));
         }
         catch (IgniteCheckedException e) {
             if (!proc.terminated()) {
@@ -604,9 +604,9 @@
      * @param job Job.
      * @param plan Map reduce plan.
      */
-    private void prepareForJob(HadoopProcess proc, GridHadoopJob job, GridHadoopMapReducePlan plan) {
+    private void prepareForJob(HadoopProcess proc, HadoopJob job, HadoopMapReducePlan plan) {
         try {
-            comm.sendMessage(proc.descriptor(), new GridHadoopPrepareForJobRequest(job.id(), job.info(),
+            comm.sendMessage(proc.descriptor(), new HadoopPrepareForJobRequest(job.id(), job.info(),
                 plan.reducers(), plan.reducers(ctx.localNodeId())));
         }
         catch (IgniteCheckedException e) {
@@ -623,7 +623,7 @@
      * @param desc Remote process descriptor.
      * @param taskMsg Task finished message.
      */
-    private void processTaskFinishedMessage(GridHadoopProcessDescriptor desc, GridHadoopTaskFinishedMessage taskMsg) {
+    private void processTaskFinishedMessage(HadoopProcessDescriptor desc, HadoopTaskFinishedMessage taskMsg) {
         HadoopProcess proc = runningProcsByProcId.get(desc.processId());

         if (proc != null)
@@ -635,19 +635,19 @@
     /**
      *
      */
-    private class MessageListener implements GridHadoopMessageListener {
+    private class MessageListener implements HadoopMessageListener {
         /** {@inheritDoc} */
-        @Override public void onMessageReceived(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) {
+        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
             if (!busyLock.tryReadLock())
                 return;

             try {
-                if (msg instanceof GridHadoopProcessStartedAck) {
+                if (msg instanceof HadoopProcessStartedAck) {
                     HadoopProcess proc = runningProcsByProcId.get(desc.processId());

                     assert proc != null : "Missing child process for processId: " + desc;

-                    GridHadoopProcessFuture fut = proc.initFut;
+                    HadoopProcessFuture fut = proc.initFut;

                     if (fut != null)
                         fut.onReplyReceived(desc);
@@ -655,8 +655,8 @@
                     else
                         log.warning("Failed to find process start future (will ignore): " + desc);
                 }
-                else if (msg instanceof GridHadoopTaskFinishedMessage) {
-                    GridHadoopTaskFinishedMessage taskMsg = (GridHadoopTaskFinishedMessage)msg;
+                else if (msg instanceof HadoopTaskFinishedMessage) {
+                    HadoopTaskFinishedMessage taskMsg = (HadoopTaskFinishedMessage)msg;

                     processTaskFinishedMessage(desc, taskMsg);
                 }
@@ -669,7 +669,7 @@
         }

         /** {@inheritDoc} */
-        @Override public void onConnectionLost(GridHadoopProcessDescriptor desc) {
+        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
             if (!busyLock.tryReadLock())
                 return;
@@ -684,15 +684,15 @@
                 HadoopProcess proc = runningProcsByProcId.get(desc.processId());

                 if (proc != null) {
-                    Collection<GridHadoopTaskInfo> tasks = proc.tasks();
+                    Collection<HadoopTaskInfo> tasks = proc.tasks();

                     if (!F.isEmpty(tasks)) {
                         log.warning("Lost connection with alive process (will terminate): " + desc);

-                        GridHadoopTaskStatus status = new GridHadoopTaskStatus(CRASHED,
+                        HadoopTaskStatus status = new HadoopTaskStatus(CRASHED,
                             new IgniteCheckedException("Failed to run tasks (external process finished unexpectedly): " + desc));

-                        for (GridHadoopTaskInfo info : tasks)
+                        for (HadoopTaskInfo info : tasks)
                             jobTracker.onTaskFinished(info, status);

                         runningProcsByJobId.remove(proc.jobId(), proc);
@@ -716,22 +716,22 @@
         private static final long serialVersionUID = 0L;

         /** Job ID. */
-        private final GridHadoopJobId jobId;
+        private final HadoopJobId jobId;

         /** Process. */
         private Process proc;

         /** Init future. Completes when process is ready to receive messages. */
-        private final GridHadoopProcessFuture initFut;
+        private final HadoopProcessFuture initFut;

         /** Process descriptor. */
-        private GridHadoopProcessDescriptor procDesc;
+        private HadoopProcessDescriptor procDesc;

         /** Reducers planned for this process. */
         private Collection<Integer> reducers;

         /** Tasks. */
-        private final Collection<GridHadoopTaskInfo> tasks = new ConcurrentLinkedDeque8<>();
+        private final Collection<HadoopTaskInfo> tasks = new ConcurrentLinkedDeque8<>();

         /** Terminated flag. */
         private volatile boolean terminated;
@@ -740,7 +740,7 @@
         * @param jobId Job ID.
         * @param initFut Init future.
         */
-        private HadoopProcess(GridHadoopJobId jobId, GridHadoopProcessFuture initFut,
+        private HadoopProcess(HadoopJobId jobId, HadoopProcessFuture initFut,
            int[] reducers) {
            this.jobId = jobId;
            this.initFut = initFut;
@@ -756,14 +756,14 @@
        /**
         * @return Communication process descriptor.
         */
-        private GridHadoopProcessDescriptor descriptor() {
+        private HadoopProcessDescriptor descriptor() {
            return procDesc;
        }

        /**
         * @return Job ID.
         */
-        public GridHadoopJobId jobId() {
+        public HadoopJobId jobId() {
            return jobId;
        }
@@ -773,7 +773,7 @@
         * @param proc Java process representation.
         * @param procDesc Process descriptor.
         */
-        private void onInitialized(Process proc, GridHadoopProcessDescriptor procDesc) {
+        private void onInitialized(Process proc, HadoopProcessDescriptor procDesc) {
            this.proc = proc;
            this.procDesc = procDesc;
        }
@@ -789,9 +789,9 @@
            terminated = true;

            if (!initFut.isDone())
-                initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>>>() {
+                initFut.listenAsync(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
                    @Override public void apply(
-                        IgniteInternalFuture<IgniteBiTuple<Process, GridHadoopProcessDescriptor>> f) {
+                        IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
                        proc.destroy();
                    }
                });
@@ -815,7 +815,7 @@
         *
         * @param tasks Tasks to set.
         */
-        private void addTasks(Collection<GridHadoopTaskInfo> tasks) {
+        private void addTasks(Collection<HadoopTaskInfo> tasks) {
            this.tasks.addAll(tasks);
        }
@@ -824,7 +824,7 @@
         *
         * @param task Task to remove.
         */
-        private void removeTask(GridHadoopTaskInfo task) {
+        private void removeTask(HadoopTaskInfo task) {
            if (tasks != null)
                tasks.remove(task);
        }
@@ -832,7 +832,7 @@
        /**
         * @return Collection of tasks.
         */
-        private Collection<GridHadoopTaskInfo> tasks() {
+        private Collection<HadoopTaskInfo> tasks() {
            return tasks;
        }
@@ -852,7 +852,7 @@
    /**
     *
     */
-    private class GridHadoopProcessFuture extends GridFutureAdapter<IgniteBiTuple<Process, GridHadoopProcessDescriptor>> {
+    private class HadoopProcessFuture extends GridFutureAdapter<IgniteBiTuple<Process, HadoopProcessDescriptor>> {
        /** */
        private static final long serialVersionUID = 0L;
@@ -860,10 +860,10 @@
        private UUID childProcId;

        /** Job ID. */
-        private GridHadoopJobId jobId;
+        private HadoopJobId jobId;

        /** Process descriptor. */
-        private GridHadoopProcessDescriptor desc;
+        private HadoopProcessDescriptor desc;

        /** Running process. */
        private Process proc;
@@ -875,19 +875,19 @@
        private volatile boolean replyReceived;

        /** Logger. */
-        private final IgniteLogger log = GridHadoopExternalTaskExecutor.this.log;
+        private final IgniteLogger log = HadoopExternalTaskExecutor.this.log;

        /**
         * Empty constructor.
         */
-        public GridHadoopProcessFuture() {
+        public HadoopProcessFuture() {
            // No-op.
        }

        /**
         * @param ctx Kernal context.
         */
-        private GridHadoopProcessFuture(UUID childProcId, GridHadoopJobId jobId, GridKernalContext ctx) {
+        private HadoopProcessFuture(UUID childProcId, HadoopJobId jobId, GridKernalContext ctx) {
            super(ctx);

            this.childProcId = childProcId;
@@ -909,7 +909,7 @@
        /**
         * Reply received callback.
         */
-        public void onReplyReceived(GridHadoopProcessDescriptor desc) {
+        public void onReplyReceived(HadoopProcessDescriptor desc) {
            assert childProcId.equals(desc.processId());

            this.desc = desc;
@@ -921,7 +921,7 @@
        }

        /** {@inheritDoc} */
-        @Override public boolean onDone(@Nullable IgniteBiTuple<Process, GridHadoopProcessDescriptor> res,
+        @Override public boolean onDone(@Nullable IgniteBiTuple<Process, HadoopProcessDescriptor> res,
            @Nullable Throwable err) {
            if (err == null) {
                HadoopProcess proc = runningProcsByProcId.get(childProcId);
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskMetadata.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
similarity index 94%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskMetadata.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
index 39606bc..f0acc9f 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskMetadata.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskMetadata.java
@@ -25,7 +25,7 @@ import java.util.*;
 /**
  * External task metadata (classpath, JVM options) needed to start external process execution.
  */
-public class GridHadoopExternalTaskMetadata {
+public class HadoopExternalTaskMetadata {
     /** Process classpath. */
     private Collection<String> classpath;
@@ -63,6 +63,6 @@
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopExternalTaskMetadata.class, this);
+        return S.toString(HadoopExternalTaskMetadata.class, this);
     }
 }
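For downstream callers the only change in the file above is the class name; the classpath/JVM-options contract is unchanged. A minimal sketch of how a caller might populate it, assuming the renamed class is on the classpath (illustrative demo, not part of the patch):

    import java.io.File;
    import java.util.Arrays;

    // Illustrative only: mirrors how buildTaskMeta() (earlier in this patch)
    // populates HadoopExternalTaskMetadata before a child JVM is spawned.
    public class TaskMetadataDemo {
        public static void main(String[] args) {
            HadoopExternalTaskMetadata meta = new HadoopExternalTaskMetadata();

            // The child process inherits the parent's classpath.
            meta.classpath(Arrays.asList(System.getProperty("java.class.path").split(File.pathSeparator)));

            // JVM options as used by the patch; tune per workload.
            meta.jvmOptions(Arrays.asList("-Xmx1g", "-ea", "-XX:+UseConcMarkSweepGC", "-XX:+CMSClassUnloadingEnabled"));

            System.out.println(meta);
        }
    }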
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopJobInfoUpdateRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
similarity index 75%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopJobInfoUpdateRequest.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
index 2a7c7a8..25c9408 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopJobInfoUpdateRequest.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopJobInfoUpdateRequest.java
@@ -27,26 +27,26 @@ import java.io.*;
 /**
  * Job info update request.
  */
-public class GridHadoopJobInfoUpdateRequest implements GridHadoopMessage {
+public class HadoopJobInfoUpdateRequest implements HadoopMessage {
     /** */
     private static final long serialVersionUID = 0L;

     /** Job ID. */
     @GridToStringInclude
-    private GridHadoopJobId jobId;
+    private HadoopJobId jobId;

     /** Job phase. */
     @GridToStringInclude
-    private GridHadoopJobPhase jobPhase;
+    private HadoopJobPhase jobPhase;

     /** Reducers addresses. */
     @GridToStringInclude
-    private GridHadoopProcessDescriptor[] reducersAddrs;
+    private HadoopProcessDescriptor[] reducersAddrs;

     /**
      * Constructor required by {@link Externalizable}.
      */
-    public GridHadoopJobInfoUpdateRequest() {
+    public HadoopJobInfoUpdateRequest() {
         // No-op.
     }
@@ -55,8 +55,8 @@
      * @param jobPhase Job phase.
      * @param reducersAddrs Reducers addresses.
      */
-    public GridHadoopJobInfoUpdateRequest(GridHadoopJobId jobId, GridHadoopJobPhase jobPhase,
-        GridHadoopProcessDescriptor[] reducersAddrs) {
+    public HadoopJobInfoUpdateRequest(HadoopJobId jobId, HadoopJobPhase jobPhase,
+        HadoopProcessDescriptor[] reducersAddrs) {
         assert jobId != null;

         this.jobId = jobId;
@@ -67,21 +67,21 @@
     /**
      * @return Job ID.
      */
-    public GridHadoopJobId jobId() {
+    public HadoopJobId jobId() {
         return jobId;
     }

     /**
      * @return Job phase.
      */
-    public GridHadoopJobPhase jobPhase() {
+    public HadoopJobPhase jobPhase() {
         return jobPhase;
     }

     /**
      * @return Reducers addresses.
      */
-    public GridHadoopProcessDescriptor[] reducersAddresses() {
+    public HadoopProcessDescriptor[] reducersAddresses() {
         return reducersAddrs;
     }
@@ -95,15 +95,15 @@
     /** {@inheritDoc} */
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new GridHadoopJobId();
+        jobId = new HadoopJobId();

         jobId.readExternal(in);

-        jobPhase = (GridHadoopJobPhase)in.readObject();
-        reducersAddrs = (GridHadoopProcessDescriptor[])U.readArray(in);
+        jobPhase = (HadoopJobPhase)in.readObject();
+        reducersAddrs = (HadoopProcessDescriptor[])U.readArray(in);
     }

     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopJobInfoUpdateRequest.class, this);
+        return S.toString(HadoopJobInfoUpdateRequest.class, this);
     }
 }
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopPrepareForJobRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
similarity index 84%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopPrepareForJobRequest.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
index 3a55d19..df44dd7 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopPrepareForJobRequest.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopPrepareForJobRequest.java
@@ -27,17 +27,17 @@ import java.io.*;
 /**
  * Child process initialization request.
  */
-public class GridHadoopPrepareForJobRequest implements GridHadoopMessage {
+public class HadoopPrepareForJobRequest implements HadoopMessage {
     /** */
     private static final long serialVersionUID = 0L;

     /** Job ID. */
     @GridToStringInclude
-    private GridHadoopJobId jobId;
+    private HadoopJobId jobId;

     /** Job info. */
     @GridToStringInclude
-    private GridHadoopJobInfo jobInfo;
+    private HadoopJobInfo jobInfo;

     /** Total amount of reducers in the job. */
     @GridToStringInclude
@@ -50,7 +50,7 @@
     /**
      * Constructor required by {@link Externalizable}.
      */
-    public GridHadoopPrepareForJobRequest() {
+    public HadoopPrepareForJobRequest() {
         // No-op.
     }
@@ -60,7 +60,7 @@
      * @param totalReducersCnt Number of reducers in the job.
      * @param locReducers Reducers to be executed on current node.
      */
-    public GridHadoopPrepareForJobRequest(GridHadoopJobId jobId, GridHadoopJobInfo jobInfo, int totalReducersCnt,
+    public HadoopPrepareForJobRequest(HadoopJobId jobId, HadoopJobInfo jobInfo, int totalReducersCnt,
         int[] locReducers) {
         assert jobId != null;
@@ -73,14 +73,14 @@
     /**
      * @return Job info.
      */
-    public GridHadoopJobInfo jobInfo() {
+    public HadoopJobInfo jobInfo() {
         return jobInfo;
     }

     /**
      * @return Job ID.
      */
-    public GridHadoopJobId jobId() {
+    public HadoopJobId jobId() {
         return jobId;
     }
@@ -110,10 +110,10 @@
     /** {@inheritDoc} */
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new GridHadoopJobId();
+        jobId = new HadoopJobId();

         jobId.readExternal(in);

-        jobInfo = (GridHadoopJobInfo)in.readObject();
+        jobInfo = (HadoopJobInfo)in.readObject();

         totalReducersCnt = in.readInt();

         locReducers = U.readIntArray(in);
@@ -121,6 +121,6 @@
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopPrepareForJobRequest.class, this);
+        return S.toString(HadoopPrepareForJobRequest.class, this);
     }
 }
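Both request classes above rely on the same hand-rolled Externalizable pattern: readExternal() recreates the HadoopJobId and delegates to it instead of using default serialization. A self-contained sketch of that round trip with a toy message type (demo code, not from this patch):

    import java.io.*;

    // Standalone illustration of the readExternal()/writeExternal() pattern used
    // by HadoopJobInfoUpdateRequest and HadoopPrepareForJobRequest above.
    class DemoMessage implements Externalizable {
        private String payload;

        public DemoMessage() { /* Required by Externalizable. */ }

        public DemoMessage(String payload) { this.payload = payload; }

        @Override public void writeExternal(ObjectOutput out) throws IOException {
            out.writeUTF(payload);
        }

        @Override public void readExternal(ObjectInput in) throws IOException {
            payload = in.readUTF();
        }

        // Serialize and deserialize manually, the way the communication layer does.
        static DemoMessage roundTrip(DemoMessage msg) throws Exception {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();

            try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
                msg.writeExternal(out);
            }

            DemoMessage res = new DemoMessage();

            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
                res.readExternal(in);
            }

            return res;
        }
    }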
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessDescriptor.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
similarity index 90%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessDescriptor.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
index 7fc8858..dea73c3 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessDescriptor.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessDescriptor.java
@@ -25,7 +25,7 @@ import java.util.*;
 /**
  * Process descriptor used to identify process for which task is running.
  */
-public class GridHadoopProcessDescriptor implements Serializable {
+public class HadoopProcessDescriptor implements Serializable {
     /** */
     private static final long serialVersionUID = 0L;
@@ -48,7 +48,7 @@
      * @param parentNodeId Parent node ID.
      * @param procId Process ID.
      */
-    public GridHadoopProcessDescriptor(UUID parentNodeId, UUID procId) {
+    public HadoopProcessDescriptor(UUID parentNodeId, UUID procId) {
         this.parentNodeId = parentNodeId;
         this.procId = procId;
     }
@@ -126,10 +126,10 @@
         if (this == o)
             return true;

-        if (!(o instanceof GridHadoopProcessDescriptor))
+        if (!(o instanceof HadoopProcessDescriptor))
             return false;

-        GridHadoopProcessDescriptor that = (GridHadoopProcessDescriptor)o;
+        HadoopProcessDescriptor that = (HadoopProcessDescriptor)o;

         return parentNodeId.equals(that.parentNodeId) && procId.equals(that.procId);
     }
@@ -145,6 +145,6 @@
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopProcessDescriptor.class, this);
+        return S.toString(HadoopProcessDescriptor.class, this);
     }
 }
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessStartedAck.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
similarity index 91%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessStartedAck.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
index 679da6c..49ff4bf 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopProcessStartedAck.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopProcessStartedAck.java
@@ -25,7 +25,7 @@ import java.io.*;
 /**
  * Process started message.
  */
-public class GridHadoopProcessStartedAck implements GridHadoopMessage {
+public class HadoopProcessStartedAck implements HadoopMessage {
     /** */
     private static final long serialVersionUID = 0L;
@@ -41,6 +41,6 @@
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopProcessStartedAck.class, this);
+        return S.toString(HadoopProcessStartedAck.class, this);
     }
 }
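HadoopProcessDescriptor above derives equals() and hashCode() from parentNodeId and procId only, so descriptors rebuilt from the same UUIDs are interchangeable, for example as map keys. A small sketch of that contract, assuming the renamed class is on the classpath (demo only):

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Demo: two descriptors built from the same UUIDs satisfy the
    // equals()/hashCode() contract shown in the hunk above.
    public class DescriptorIdentityDemo {
        public static void main(String[] args) {
            UUID parent = UUID.randomUUID();
            UUID proc = UUID.randomUUID();

            HadoopProcessDescriptor a = new HadoopProcessDescriptor(parent, proc);
            HadoopProcessDescriptor b = new HadoopProcessDescriptor(parent, proc);

            assert a.equals(b) && a.hashCode() == b.hashCode();

            ConcurrentMap<HadoopProcessDescriptor, String> m = new ConcurrentHashMap<>();

            m.put(a, "child-jvm");

            System.out.println(m.get(b)); // Prints "child-jvm".
        }
    }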
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskExecutionRequest.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
similarity index 78%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskExecutionRequest.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
index 9f11e0e..05e12ef 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskExecutionRequest.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskExecutionRequest.java
@@ -28,67 +28,67 @@ import java.util.*;
 /**
  * Message sent from node to child process to start task(s) execution.
  */
-public class GridHadoopTaskExecutionRequest implements GridHadoopMessage {
+public class HadoopTaskExecutionRequest implements HadoopMessage {
     /** */
     private static final long serialVersionUID = 0L;

     /** Job ID. */
     @GridToStringInclude
-    private GridHadoopJobId jobId;
+    private HadoopJobId jobId;

     /** Job info. */
     @GridToStringInclude
-    private GridHadoopJobInfo jobInfo;
+    private HadoopJobInfo jobInfo;

     /** Mappers. */
     @GridToStringInclude
-    private Collection<GridHadoopTaskInfo> tasks;
+    private Collection<HadoopTaskInfo> tasks;

     /**
      * @return Job ID.
      */
-    public GridHadoopJobId jobId() {
+    public HadoopJobId jobId() {
         return jobId;
     }

     /**
      * @param jobId Job ID.
      */
-    public void jobId(GridHadoopJobId jobId) {
+    public void jobId(HadoopJobId jobId) {
         this.jobId = jobId;
     }

     /**
      * @return Job info.
      */
-    public GridHadoopJobInfo jobInfo() {
+    public HadoopJobInfo jobInfo() {
         return jobInfo;
     }

     /**
      * @param jobInfo Job info.
      */
-    public void jobInfo(GridHadoopJobInfo jobInfo) {
+    public void jobInfo(HadoopJobInfo jobInfo) {
         this.jobInfo = jobInfo;
     }

     /**
      * @return Tasks.
      */
-    public Collection<GridHadoopTaskInfo> tasks() {
+    public Collection<HadoopTaskInfo> tasks() {
         return tasks;
     }

     /**
      * @param tasks Tasks.
      */
-    public void tasks(Collection<GridHadoopTaskInfo> tasks) {
+    public void tasks(Collection<HadoopTaskInfo> tasks) {
         this.tasks = tasks;
     }

     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopTaskExecutionRequest.class, this);
+        return S.toString(HadoopTaskExecutionRequest.class, this);
     }

     /** {@inheritDoc} */
@@ -101,10 +101,10 @@
     /** {@inheritDoc} */
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        jobId = new GridHadoopJobId();
+        jobId = new HadoopJobId();

         jobId.readExternal(in);

-        jobInfo = (GridHadoopJobInfo)in.readObject();
+        jobInfo = (HadoopJobInfo)in.readObject();
         tasks = U.readCollection(in);
     }
 }
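Requests like the one above reach the child JVM only after the parent assembles the child's command line (see startJavaProcess() earlier in this patch: JVM options, -cp, the starter main class, then -cpid/-ppid flags). A simplified sketch of that assembly; java binary resolution and work-folder wiring are elided, and the parent ID below is a placeholder:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.UUID;

    // Simplified sketch of how startJavaProcess() turns task metadata into a
    // child-process command line.
    public class ChildCommandDemo {
        static List<String> command(UUID childProcId, UUID parentProcId, List<String> jvmOpts, String classpath) {
            List<String> cmd = new ArrayList<>();

            cmd.add("java");
            cmd.addAll(jvmOpts);
            cmd.add("-cp");
            cmd.add(classpath);

            // Main class of the child process, as renamed in this patch.
            cmd.add("org.apache.ignite.internal.processors.hadoop.taskexecutor.external.child.HadoopExternalProcessStarter");

            // Child and parent process IDs travel as CLI flags.
            cmd.add("-cpid");
            cmd.add(String.valueOf(childProcId));
            cmd.add("-ppid");
            cmd.add(String.valueOf(parentProcId));

            return cmd;
        }
    }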
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskFinishedMessage.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
similarity index 80%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskFinishedMessage.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
index f69abaf..d3639c7 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopTaskFinishedMessage.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopTaskFinishedMessage.java
@@ -27,20 +27,20 @@ import java.io.*;
 /**
  * Task finished message. Sent when local task finishes execution.
  */
-public class GridHadoopTaskFinishedMessage implements GridHadoopMessage {
+public class HadoopTaskFinishedMessage implements HadoopMessage {
     /** */
     private static final long serialVersionUID = 0L;

     /** Finished task info. */
-    private GridHadoopTaskInfo taskInfo;
+    private HadoopTaskInfo taskInfo;

     /** Task finish status. */
-    private GridHadoopTaskStatus status;
+    private HadoopTaskStatus status;

     /**
      * Constructor required by {@link Externalizable}.
      */
-    public GridHadoopTaskFinishedMessage() {
+    public HadoopTaskFinishedMessage() {
         // No-op.
     }
@@ -48,7 +48,7 @@
      * @param taskInfo Finished task info.
      * @param status Task finish status.
      */
-    public GridHadoopTaskFinishedMessage(GridHadoopTaskInfo taskInfo, GridHadoopTaskStatus status) {
+    public HadoopTaskFinishedMessage(HadoopTaskInfo taskInfo, HadoopTaskStatus status) {
         assert taskInfo != null;
         assert status != null;
@@ -59,20 +59,20 @@
     /**
      * @return Finished task info.
      */
-    public GridHadoopTaskInfo taskInfo() {
+    public HadoopTaskInfo taskInfo() {
         return taskInfo;
     }

     /**
      * @return Task finish status.
      */
-    public GridHadoopTaskStatus status() {
+    public HadoopTaskStatus status() {
         return status;
     }

     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopTaskFinishedMessage.class, this);
+        return S.toString(HadoopTaskFinishedMessage.class, this);
     }

     /** {@inheritDoc} */
@@ -83,10 +83,10 @@
     /** {@inheritDoc} */
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-        taskInfo = new GridHadoopTaskInfo();
+        taskInfo = new HadoopTaskInfo();
         taskInfo.readExternal(in);

-        status = new GridHadoopTaskStatus();
+        status = new HadoopTaskStatus();
         status.readExternal(in);
     }
 }
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopChildProcessRunner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
similarity index 77%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopChildProcessRunner.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
index 2d00222..e95b8cb 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopChildProcessRunner.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopChildProcessRunner.java
@@ -34,27 +34,27 @@ import org.apache.ignite.internal.util.typedef.internal.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.*;

-import static org.apache.ignite.internal.processors.hadoop.GridHadoopTaskType.*;
+import static org.apache.ignite.internal.processors.hadoop.HadoopTaskType.*;

 /**
  * Hadoop process base.
  */
 @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-public class GridHadoopChildProcessRunner {
+public class HadoopChildProcessRunner {
     /** Node process descriptor. */
-    private GridHadoopProcessDescriptor nodeDesc;
+    private HadoopProcessDescriptor nodeDesc;

     /** Message processing executor service. */
     private ExecutorService msgExecSvc;

     /** Task executor service. */
-    private GridHadoopExecutorService execSvc;
+    private HadoopExecutorService execSvc;

     /** */
     protected GridUnsafeMemory mem = new GridUnsafeMemory(0);

     /** External communication. */
-    private GridHadoopExternalCommunication comm;
+    private HadoopExternalCommunication comm;

     /** Logger. */
     private IgniteLogger log;
@@ -69,13 +69,13 @@
     private final GridFutureAdapterEx<?> initFut = new GridFutureAdapterEx<>();

     /** Job instance. */
-    private GridHadoopJob job;
+    private HadoopJob job;

     /** Number of uncompleted tasks. */
     private final AtomicInteger pendingTasks = new AtomicInteger();

     /** Shuffle job. */
-    private GridHadoopShuffleJob<GridHadoopProcessDescriptor> shuffleJob;
+    private HadoopShuffleJob<HadoopProcessDescriptor> shuffleJob;

     /** Concurrent mappers. */
     private int concMappers;
@@ -86,7 +86,7 @@
     /**
      * Starts child process runner.
      */
-    public void start(GridHadoopExternalCommunication comm, GridHadoopProcessDescriptor nodeDesc,
+    public void start(HadoopExternalCommunication comm, HadoopProcessDescriptor nodeDesc,
         ExecutorService msgExecSvc, IgniteLogger parentLog)
         throws IgniteCheckedException {
         this.comm = comm;
@@ -94,12 +94,12 @@
         this.msgExecSvc = msgExecSvc;

         comm.setListener(new MessageListener());
-        log = parentLog.getLogger(GridHadoopChildProcessRunner.class);
+        log = parentLog.getLogger(HadoopChildProcessRunner.class);

         startTime = U.currentTimeMillis();

         // At this point node knows that this process has started.
-        comm.sendMessage(this.nodeDesc, new GridHadoopProcessStartedAck());
+        comm.sendMessage(this.nodeDesc, new HadoopProcessStartedAck());
     }

     /**
@@ -107,7 +107,7 @@
      *
      * @param req Initialization request.
      */
-    private void prepareProcess(GridHadoopPrepareForJobRequest req) {
+    private void prepareProcess(HadoopPrepareForJobRequest req) {
         if (initGuard.compareAndSet(false, true)) {
             try {
                 if (log.isDebugEnabled())
@@ -119,7 +119,7 @@
                 job.initialize(true, nodeDesc.processId());

-                shuffleJob = new GridHadoopShuffleJob<>(comm.localProcessDescriptor(), log, job, mem,
+                shuffleJob = new HadoopShuffleJob<>(comm.localProcessDescriptor(), log, job, mem,
                     req.totalReducerCount(), req.localReducers());

                 initializeExecutors(req);
@@ -143,7 +143,7 @@
     /**
      * @param req Task execution request.
      */
-    private void runTasks(final GridHadoopTaskExecutionRequest req) {
+    private void runTasks(final HadoopTaskExecutionRequest req) {
         if (!initFut.isDone() && log.isDebugEnabled())
             log.debug("Will wait for process initialization future completion: " + req);
@@ -157,7 +157,7 @@
                     assert set;

-                    GridHadoopTaskInfo info = F.first(req.tasks());
+                    HadoopTaskInfo info = F.first(req.tasks());

                     assert info != null;
@@ -170,21 +170,21 @@
                         log.debug("Set executor service size for task type [type=" + info.type() +
                             ", size=" + size + ']');

-                    for (GridHadoopTaskInfo taskInfo : req.tasks()) {
+                    for (HadoopTaskInfo taskInfo : req.tasks()) {
                         if (log.isDebugEnabled())
                             log.debug("Submitted task for external execution: " + taskInfo);

-                        execSvc.submit(new GridHadoopRunnableTask(log, job, mem, taskInfo, nodeDesc.parentNodeId()) {
-                            @Override protected void onTaskFinished(GridHadoopTaskStatus status) {
+                        execSvc.submit(new HadoopRunnableTask(log, job, mem, taskInfo, nodeDesc.parentNodeId()) {
+                            @Override protected void onTaskFinished(HadoopTaskStatus status) {
                                 onTaskFinished0(this, status);
                             }

-                            @Override protected GridHadoopTaskInput createInput(GridHadoopTaskContext ctx)
+                            @Override protected HadoopTaskInput createInput(HadoopTaskContext ctx)
                                 throws IgniteCheckedException {
                                 return shuffleJob.input(ctx);
                             }

-                            @Override protected GridHadoopTaskOutput createOutput(GridHadoopTaskContext ctx)
+                            @Override protected HadoopTaskOutput createOutput(HadoopTaskContext ctx)
                                 throws IgniteCheckedException {
                                 return shuffleJob.output(ctx);
                             }
@@ -192,8 +192,8 @@
                     }
                 }
                 catch (IgniteCheckedException e) {
-                    for (GridHadoopTaskInfo info : req.tasks())
-                        notifyTaskFinished(info, new GridHadoopTaskStatus(GridHadoopTaskState.FAILED, e), false);
+                    for (HadoopTaskInfo info : req.tasks())
+                        notifyTaskFinished(info, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
                 }
             }
         });
@@ -204,13 +204,13 @@
      *
      * @param req Init child process request.
      */
-    private void initializeExecutors(GridHadoopPrepareForJobRequest req) {
+    private void initializeExecutors(HadoopPrepareForJobRequest req) {
         int cpus = Runtime.getRuntime().availableProcessors();
//
//        concMappers = get(req.jobInfo(), EXTERNAL_CONCURRENT_MAPPERS, cpus);
//        concReducers = get(req.jobInfo(), EXTERNAL_CONCURRENT_REDUCERS, cpus);

-        execSvc = new GridHadoopExecutorService(log, "", cpus * 2, 1024);
+        execSvc = new HadoopExecutorService(log, "", cpus * 2, 1024);
     }

     /**
@@ -218,7 +218,7 @@
      *
      * @param req Update request.
      */
-    private void updateTasks(final GridHadoopJobInfoUpdateRequest req) {
+    private void updateTasks(final HadoopJobInfoUpdateRequest req) {
         initFut.listenAsync(new CI1<IgniteInternalFuture<?>>() {
             @Override public void apply(IgniteInternalFuture<?> gridFut) {
                 assert initGuard.get();
@@ -228,9 +228,9 @@
                 if (req.reducersAddresses() != null) {
                     if (shuffleJob.initializeReduceAddresses(req.reducersAddresses())) {
                         shuffleJob.startSending("external",
-                            new IgniteInClosure2X<GridHadoopProcessDescriptor, GridHadoopShuffleMessage>() {
-                                @Override public void applyx(GridHadoopProcessDescriptor dest,
-                                    GridHadoopShuffleMessage msg) throws IgniteCheckedException {
+                            new IgniteInClosure2X<HadoopProcessDescriptor, HadoopShuffleMessage>() {
+                                @Override public void applyx(HadoopProcessDescriptor dest,
+                                    HadoopShuffleMessage msg) throws IgniteCheckedException {
                                     comm.sendMessage(dest, msg);
                                 }
                             });
@@ -264,8 +264,8 @@
      * @param run Finished task runnable.
      * @param status Task status.
      */
-    private void onTaskFinished0(GridHadoopRunnableTask run, GridHadoopTaskStatus status) {
-        GridHadoopTaskInfo info = run.taskInfo();
+    private void onTaskFinished0(HadoopRunnableTask run, HadoopTaskStatus status) {
+        HadoopTaskInfo info = run.taskInfo();

        int pendingTasks0 = pendingTasks.decrementAndGet();
@@ -286,10 +286,10 @@
      * @param taskInfo Finished task info.
      * @param status Task status.
      */
-    private void notifyTaskFinished(final GridHadoopTaskInfo taskInfo, final GridHadoopTaskStatus status,
+    private void notifyTaskFinished(final HadoopTaskInfo taskInfo, final HadoopTaskStatus status,
         boolean flush) {

-        final GridHadoopTaskState state = status.state();
+        final HadoopTaskState state = status.state();
         final Throwable err = status.failCause();

         if (!flush) {
@@ -298,7 +298,7 @@
                     log.debug("Sending notification to parent node [taskInfo=" + taskInfo + ", state=" + state +
                         ", err=" + err + ']');

-                comm.sendMessage(nodeDesc, new GridHadoopTaskFinishedMessage(taskInfo, status));
+                comm.sendMessage(nodeDesc, new HadoopTaskFinishedMessage(taskInfo, status));
             }
             catch (IgniteCheckedException e) {
                 log.error("Failed to send message to parent node (will terminate child process).", e);
@@ -335,7 +335,7 @@
                                     ", state=" + state + ", err=" + err + ']', e);

                                 notifyTaskFinished(taskInfo,
-                                    new GridHadoopTaskStatus(GridHadoopTaskState.FAILED, e), false);
+                                    new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
                             }
                         }
                     });
@@ -344,7 +344,7 @@
                 log.error("Failed to flush shuffle messages (will fail the task) [taskInfo=" + taskInfo +
                     ", state=" + state + ", err=" + err + ']', e);

-                notifyTaskFinished(taskInfo, new GridHadoopTaskStatus(GridHadoopTaskState.FAILED, e), false);
+                notifyTaskFinished(taskInfo, new HadoopTaskStatus(HadoopTaskState.FAILED, e), false);
             }
         }
     }
@@ -356,7 +356,7 @@
      * @param msg Received message.
      * @return {@code True} if received from parent node.
      */
-    private boolean validateNodeMessage(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) {
+    private boolean validateNodeMessage(HadoopProcessDescriptor desc, HadoopMessage msg) {
         if (!nodeDesc.processId().equals(desc.processId())) {
             log.warning("Received process control request from unknown process (will ignore) [desc=" + desc +
                 ", msg=" + msg + ']');
@@ -377,33 +377,33 @@
     /**
      * Message listener.
      */
-    private class MessageListener implements GridHadoopMessageListener {
+    private class MessageListener implements HadoopMessageListener {
         /** {@inheritDoc} */
-        @Override public void onMessageReceived(final GridHadoopProcessDescriptor desc, final GridHadoopMessage msg) {
-            if (msg instanceof GridHadoopTaskExecutionRequest) {
+        @Override public void onMessageReceived(final HadoopProcessDescriptor desc, final HadoopMessage msg) {
+            if (msg instanceof HadoopTaskExecutionRequest) {
                 if (validateNodeMessage(desc, msg))
-                    runTasks((GridHadoopTaskExecutionRequest)msg);
+                    runTasks((HadoopTaskExecutionRequest)msg);
             }
-            else if (msg instanceof GridHadoopJobInfoUpdateRequest) {
+            else if (msg instanceof HadoopJobInfoUpdateRequest) {
                 if (validateNodeMessage(desc, msg))
-                    updateTasks((GridHadoopJobInfoUpdateRequest)msg);
+                    updateTasks((HadoopJobInfoUpdateRequest)msg);
             }
-            else if (msg instanceof GridHadoopPrepareForJobRequest) {
+            else if (msg instanceof HadoopPrepareForJobRequest) {
                 if (validateNodeMessage(desc, msg))
-                    prepareProcess((GridHadoopPrepareForJobRequest)msg);
+                    prepareProcess((HadoopPrepareForJobRequest)msg);
             }
-            else if (msg instanceof GridHadoopShuffleMessage) {
+            else if (msg instanceof HadoopShuffleMessage) {
                 if (log.isTraceEnabled())
                     log.trace("Received shuffle message [desc=" + desc + ", msg=" + msg + ']');

                 initFut.listenAsync(new CI1<IgniteInternalFuture<?>>() {
                     @Override public void apply(IgniteInternalFuture<?> f) {
                         try {
-                            GridHadoopShuffleMessage m = (GridHadoopShuffleMessage)msg;
+                            HadoopShuffleMessage m = (HadoopShuffleMessage)msg;

                             shuffleJob.onShuffleMessage(m);

-                            comm.sendMessage(desc, new GridHadoopShuffleAck(m.id(), m.jobId()));
+                            comm.sendMessage(desc, new HadoopShuffleAck(m.id(), m.jobId()));
                         }
                         catch (IgniteCheckedException e) {
                             U.error(log, "Failed to process hadoop shuffle message [desc=" + desc + ", msg=" + msg + ']', e);
@@ -411,18 +411,18 @@
                     }
                 });
             }
-            else if (msg instanceof GridHadoopShuffleAck) {
+            else if (msg instanceof HadoopShuffleAck) {
                 if (log.isTraceEnabled())
                     log.trace("Received shuffle ack [desc=" + desc + ", msg=" + msg + ']');

-                shuffleJob.onShuffleAck((GridHadoopShuffleAck)msg);
+                shuffleJob.onShuffleAck((HadoopShuffleAck)msg);
             }
             else
                 log.warning("Unknown message received (will ignore) [desc=" + desc + ", msg=" + msg + ']');
         }

         /** {@inheritDoc} */
-        @Override public void onConnectionLost(GridHadoopProcessDescriptor desc) {
+        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
             if (log.isDebugEnabled())
                 log.debug("Lost connection with remote process: " + desc);
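Message dispatch on both sides of the link is a plain instanceof chain over HadoopMessage subtypes, as the listener above shows. A compact standalone sketch of the same pattern with toy types (not the real HadoopMessage hierarchy):

    // Demo of the instanceof dispatch used by both MessageListener
    // implementations in this patch.
    interface Message { }

    final class Ack implements Message { }

    final class Finished implements Message {
        final String task;

        Finished(String task) { this.task = task; }
    }

    class Dispatcher {
        void onMessage(Message msg) {
            if (msg instanceof Ack)
                System.out.println("handshake acknowledged");
            else if (msg instanceof Finished)
                System.out.println("task done: " + ((Finished)msg).task);
            else
                System.out.println("unknown message (ignored): " + msg);
        }
    }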
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopExternalProcessStarter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
similarity index 94%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopExternalProcessStarter.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
index 5aeeeee..3a94d43 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/GridHadoopExternalProcessStarter.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/child/HadoopExternalProcessStarter.java
@@ -33,7 +33,7 @@ import java.util.concurrent.*;
 /**
  * Hadoop external process base class.
  */
-public class GridHadoopExternalProcessStarter {
+public class HadoopExternalProcessStarter {
     /** Path to Log4j configuration file. */
     public static final String DFLT_LOG4J_CONFIG = "config/ignite-log4j.xml";
@@ -49,7 +49,7 @@
     /**
      * @param args Parsed arguments.
      */
-    public GridHadoopExternalProcessStarter(Args args) {
+    public HadoopExternalProcessStarter(Args args) {
         this.args = args;
     }
@@ -60,7 +60,7 @@
         try {
             Args args = arguments(cmdArgs);

-            new GridHadoopExternalProcessStarter(args).run();
+            new HadoopExternalProcessStarter(args).run();
         }
         catch (Exception e) {
             System.err.println("Failed");
@@ -87,7 +87,7 @@
         IgniteLogger log = logger(outputDir);

-        GridHadoopExternalCommunication comm = new GridHadoopExternalCommunication(
+        HadoopExternalCommunication comm = new HadoopExternalCommunication(
             args.nodeId,
             args.childProcId,
             new OptimizedMarshaller(),
@@ -98,12 +98,12 @@
         comm.start();

-        GridHadoopProcessDescriptor nodeDesc = new GridHadoopProcessDescriptor(args.nodeId, args.parentProcId);
+        HadoopProcessDescriptor nodeDesc = new HadoopProcessDescriptor(args.nodeId, args.parentProcId);
         nodeDesc.address(args.addr);
         nodeDesc.tcpPort(args.tcpPort);
         nodeDesc.sharedMemoryPort(args.shmemPort);

-        GridHadoopChildProcessRunner runner = new GridHadoopChildProcessRunner();
+        HadoopChildProcessRunner runner = new HadoopChildProcessRunner();

         runner.start(comm, nodeDesc, msgExecSvc, log);
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopAbstractCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
similarity index 93%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopAbstractCommunicationClient.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
index 5dee79b..3a5d84a 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopAbstractCommunicationClient.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopAbstractCommunicationClient.java
@@ -24,7 +24,7 @@ import java.util.concurrent.atomic.*;
 /**
  * Implements basic lifecycle for communication clients.
  */
-public abstract class GridHadoopAbstractCommunicationClient implements GridHadoopCommunicationClient {
+public abstract class HadoopAbstractCommunicationClient implements HadoopCommunicationClient {
     /** Time when this client was last used. */
     private volatile long lastUsed = U.currentTimeMillis();
@@ -91,6 +91,6 @@ public abstract class GridHadoopAbstractCommunicationClient implements GridHadoo
     /** {@inheritDoc} */
     @Override public String toString() {
-        return S.toString(GridHadoopAbstractCommunicationClient.class, this);
+        return S.toString(HadoopAbstractCommunicationClient.class, this);
     }
 }
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
similarity index 92%
rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopCommunicationClient.java
rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
index b375b55..ce42e9a 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopCommunicationClient.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopCommunicationClient.java
@@ -24,7 +24,7 @@ import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.*;
 /**
  *
  */
-public interface GridHadoopCommunicationClient {
+public interface HadoopCommunicationClient {
     /**
      * @return {@code True} if client has been closed by this call,
      *      {@code false} if failed to close client (due to concurrent reservation or concurrent close).
@@ -68,5 +68,5 @@
      * @param msg Message to send.
      * @throws IgniteCheckedException If failed.
      */
-    public void sendMessage(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) throws IgniteCheckedException;
+    public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException;
 }
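The interface above is backed by a reserve-or-create loop over a concurrent map (reserveClient() in HadoopExternalCommunication below). A minimal standalone sketch of that idiom with a toy client type; the real implementation reserves via a usage counter rather than a boolean:

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Toy version of the reserve-or-create loop: retry until a client is both
    // present in the map and successfully reserved.
    class ClientPool {
        static final class Client {
            private volatile boolean closed;

            boolean reserve() { return !closed; }

            void close() { closed = true; }
        }

        private final ConcurrentMap<UUID, Client> clients = new ConcurrentHashMap<>();

        Client reserveClient(UUID procId) {
            while (true) {
                Client client = clients.computeIfAbsent(procId, id -> new Client());

                if (client.reserve())
                    return client;

                // Lost the race with a concurrent close: drop the entry and retry.
                clients.remove(procId, client);
            }
        }
    }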
*/ - private final GridNioServerListener srvLsnr = - new GridNioServerListenerAdapter() { + private final GridNioServerListener srvLsnr = + new GridNioServerListenerAdapter() { @Override public void onConnected(GridNioSession ses) { - GridHadoopProcessDescriptor desc = ses.meta(PROCESS_META); + HadoopProcessDescriptor desc = ses.meta(PROCESS_META); assert desc != null : "Received connected notification without finished handshake: " + ses; } @@ -103,16 +103,16 @@ public class GridHadoopExternalCommunication { if (e != null) U.error(log, "Session disconnected due to exception: " + ses, e); - GridHadoopProcessDescriptor desc = ses.meta(PROCESS_META); + HadoopProcessDescriptor desc = ses.meta(PROCESS_META); if (desc != null) { - GridHadoopCommunicationClient rmv = clients.remove(desc.processId()); + HadoopCommunicationClient rmv = clients.remove(desc.processId()); if (rmv != null) rmv.forceClose(); } - GridHadoopMessageListener lsnr0 = lsnr; + HadoopMessageListener lsnr0 = lsnr; if (lsnr0 != null) // Notify listener about connection close. @@ -120,8 +120,8 @@ public class GridHadoopExternalCommunication { } /** {@inheritDoc} */ - @Override public void onMessage(GridNioSession ses, GridHadoopMessage msg) { - notifyListener(ses.meta(PROCESS_META), msg); + @Override public void onMessage(GridNioSession ses, HadoopMessage msg) { + notifyListener(ses.meta(PROCESS_META), msg); if (msgQueueLimit > 0) { GridNioMessageTracker tracker = ses.meta(TRACKER_META); @@ -137,7 +137,7 @@ public class GridHadoopExternalCommunication { private IgniteLogger log; /** Local process descriptor. */ - private GridHadoopProcessDescriptor locProcDesc; + private HadoopProcessDescriptor locProcDesc; /** Marshaller. */ private Marshaller marsh; @@ -183,7 +183,7 @@ public class GridHadoopExternalCommunication { private int msgQueueLimit = DFLT_MSG_QUEUE_LIMIT; /** NIO server. */ - private GridNioServer nioSrvr; + private GridNioServer nioSrvr; /** Shared memory server. */ private IpcSharedMemoryServerEndpoint shmemSrv; @@ -198,10 +198,10 @@ public class GridHadoopExternalCommunication { private final Collection shmemWorkers = new ConcurrentLinkedDeque8<>(); /** Clients. */ - private final ConcurrentMap clients = GridConcurrentFactory.newMap(); + private final ConcurrentMap clients = GridConcurrentFactory.newMap(); /** Message listener. */ - private volatile GridHadoopMessageListener lsnr; + private volatile HadoopMessageListener lsnr; /** Bound port. */ private int boundTcpPort = -1; @@ -226,7 +226,7 @@ public class GridHadoopExternalCommunication { * @param execSvc Executor service for message notification. * @param gridName Grid name. */ - public GridHadoopExternalCommunication( + public HadoopExternalCommunication( UUID parentNodeId, UUID procId, Marshaller marsh, @@ -234,10 +234,10 @@ public class GridHadoopExternalCommunication { ExecutorService execSvc, String gridName ) { - locProcDesc = new GridHadoopProcessDescriptor(parentNodeId, procId); + locProcDesc = new HadoopProcessDescriptor(parentNodeId, procId); this.marsh = marsh; - this.log = log.getLogger(GridHadoopExternalCommunication.class); + this.log = log.getLogger(HadoopExternalCommunication.class); this.execSvc = execSvc; this.gridName = gridName; } @@ -503,7 +503,7 @@ public class GridHadoopExternalCommunication { * * @param lsnr Message listener. 
*/ - public void setListener(GridHadoopMessageListener lsnr) { + public void setListener(HadoopMessageListener lsnr) { this.lsnr = lsnr; } @@ -563,7 +563,7 @@ public class GridHadoopExternalCommunication { * * @return Local process descriptor. */ - public GridHadoopProcessDescriptor localProcessDescriptor() { + public HadoopProcessDescriptor localProcessDescriptor() { return locProcDesc; } @@ -576,7 +576,7 @@ public class GridHadoopExternalCommunication { return new GridNioFilter[] { new GridNioAsyncNotifyFilter(gridName, execSvc, log), new HandshakeAndBackpressureFilter(), - new GridHadoopMarshallerFilter(marsh), + new HadoopMarshallerFilter(marsh), new GridNioCodecFilter(new GridBufferedParser(directBuf, ByteOrder.nativeOrder()), log, false) }; } @@ -587,7 +587,7 @@ public class GridHadoopExternalCommunication { * @return Server instance. * @throws IgniteCheckedException Thrown if it's not possible to create server. */ - private GridNioServer resetNioServer() throws IgniteCheckedException { + private GridNioServer resetNioServer() throws IgniteCheckedException { if (boundTcpPort >= 0) throw new IgniteCheckedException("Tcp NIO server was already created on port " + boundTcpPort); @@ -596,8 +596,8 @@ public class GridHadoopExternalCommunication { // If configured TCP port is busy, find first available in range. for (int port = locPort; port < locPort + locPortRange; port++) { try { - GridNioServer srvr = - GridNioServer.builder() + GridNioServer srvr = + GridNioServer.builder() .address(locHost) .port(port) .listener(srvLsnr) @@ -706,7 +706,7 @@ public class GridHadoopExternalCommunication { shmemWorkers.clear(); // Force closing on stop (safety). - for (GridHadoopCommunicationClient client : clients.values()) + for (HadoopCommunicationClient client : clients.values()) client.forceClose(); // Clear resources. @@ -722,7 +722,7 @@ public class GridHadoopExternalCommunication { * @param msg * @throws IgniteCheckedException */ - public void sendMessage(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) throws + public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException { assert desc != null; assert msg != null; @@ -730,7 +730,7 @@ public class GridHadoopExternalCommunication { if (log.isTraceEnabled()) log.trace("Sending message to Hadoop process [desc=" + desc + ", msg=" + msg + ']'); - GridHadoopCommunicationClient client = null; + HadoopCommunicationClient client = null; boolean closeOnRelease = true; @@ -761,13 +761,13 @@ public class GridHadoopExternalCommunication { * @return The existing or just created client. * @throws IgniteCheckedException Thrown if any exception occurs. */ - private GridHadoopCommunicationClient reserveClient(GridHadoopProcessDescriptor desc) throws IgniteCheckedException { + private HadoopCommunicationClient reserveClient(HadoopProcessDescriptor desc) throws IgniteCheckedException { assert desc != null; UUID procId = desc.processId(); while (true) { - GridHadoopCommunicationClient client = clients.get(procId); + HadoopCommunicationClient client = clients.get(procId); if (client == null) { if (log.isDebugEnabled()) @@ -781,7 +781,7 @@ public class GridHadoopExternalCommunication { client = clients.get(procId); if (client == null) { - GridHadoopCommunicationClient old = clients.put(procId, client = createNioClient(desc)); + HadoopCommunicationClient old = clients.put(procId, client = createNioClient(desc)); assert old == null; } @@ -806,7 +806,7 @@ public class GridHadoopExternalCommunication { * @return Client. 
* @throws IgniteCheckedException If failed. */ - @Nullable protected GridHadoopCommunicationClient createNioClient(GridHadoopProcessDescriptor desc) + @Nullable protected HadoopCommunicationClient createNioClient(HadoopProcessDescriptor desc) throws IgniteCheckedException { assert desc != null; @@ -837,7 +837,7 @@ public class GridHadoopExternalCommunication { * @return Client. * @throws IgniteCheckedException If failed. */ - @Nullable protected GridHadoopCommunicationClient createShmemClient(GridHadoopProcessDescriptor desc, int port) + @Nullable protected HadoopCommunicationClient createShmemClient(HadoopProcessDescriptor desc, int port) throws IgniteCheckedException { int attempt = 1; @@ -862,7 +862,7 @@ public class GridHadoopExternalCommunication { throw e; } - GridHadoopCommunicationClient client = null; + HadoopCommunicationClient client = null; try { ShmemWorker worker = new ShmemWorker(clientEndpoint, false); @@ -876,13 +876,13 @@ public class GridHadoopExternalCommunication { // We are in lock, it is safe to get session and attach ses.addMeta(HANDSHAKE_FINISH_META, fin); - client = new GridHadoopTcpNioCommunicationClient(ses); + client = new HadoopTcpNioCommunicationClient(ses); new IgniteThread(worker).start(); fin.await(connTimeout0); } - catch (GridHadoopHandshakeTimeoutException e) { + catch (HadoopHandshakeTimeoutException e) { if (log.isDebugEnabled()) log.debug("Handshake timed out (will retry with increased timeout) [timeout=" + connTimeout0 + ", err=" + e.getMessage() + ", client=" + client + ']'); @@ -929,7 +929,7 @@ public class GridHadoopExternalCommunication { * @return Client. * @throws IgniteCheckedException If failed. */ - protected GridHadoopCommunicationClient createTcpClient(GridHadoopProcessDescriptor desc) throws IgniteCheckedException { + protected HadoopCommunicationClient createTcpClient(HadoopProcessDescriptor desc) throws IgniteCheckedException { String addr = desc.address(); int port = desc.tcpPort(); @@ -938,7 +938,7 @@ public class GridHadoopExternalCommunication { log.debug("Trying to connect to remote process [locProcDesc=" + locProcDesc + ", desc=" + desc + ']'); boolean conn = false; - GridHadoopTcpNioCommunicationClient client = null; + HadoopTcpNioCommunicationClient client = null; IgniteCheckedException errs = null; int connectAttempts = 1; @@ -968,7 +968,7 @@ public class GridHadoopExternalCommunication { GridNioSession ses = nioSrvr.createSession(ch, F.asMap(HANDSHAKE_FINISH_META, fin)).get(); - client = new GridHadoopTcpNioCommunicationClient(ses); + client = new HadoopTcpNioCommunicationClient(ses); if (log.isDebugEnabled()) log.debug("Waiting for handshake finish for client: " + client); @@ -977,7 +977,7 @@ public class GridHadoopExternalCommunication { conn = true; } - catch (GridHadoopHandshakeTimeoutException e) { + catch (HadoopHandshakeTimeoutException e) { if (client != null) { client.forceClose(); @@ -1066,8 +1066,8 @@ public class GridHadoopExternalCommunication { * @param desc Sender process descriptor. * @param msg Communication message. */ - protected void notifyListener(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) { - GridHadoopMessageListener lsnr = this.lsnr; + protected void notifyListener(HadoopProcessDescriptor desc, HadoopMessage msg) { + HadoopMessageListener lsnr = this.lsnr; if (lsnr != null) // Notify listener of a new message. 
@@ -1079,7 +1079,7 @@ public class GridHadoopExternalCommunication { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopExternalCommunication.class, this); + return S.toString(HadoopExternalCommunication.class, this); } /** @@ -1135,7 +1135,7 @@ public class GridHadoopExternalCommunication { private final IpcEndpoint endpoint; /** Adapter. */ - private GridHadoopIpcToNioAdapter adapter; + private HadoopIpcToNioAdapter adapter; /** * @param endpoint Endpoint. @@ -1145,8 +1145,8 @@ public class GridHadoopExternalCommunication { this.endpoint = endpoint; - adapter = new GridHadoopIpcToNioAdapter<>( - GridHadoopExternalCommunication.this.log, + adapter = new HadoopIpcToNioAdapter<>( + HadoopExternalCommunication.this.log, endpoint, accepted, srvLsnr, @@ -1208,18 +1208,18 @@ public class GridHadoopExternalCommunication { /** * @param time Time to wait. - * @throws GridHadoopHandshakeTimeoutException If failed to wait. + * @throws HadoopHandshakeTimeoutException If failed to wait. */ - public void await(long time) throws GridHadoopHandshakeTimeoutException { + public void await(long time) throws HadoopHandshakeTimeoutException { try { if (!latch.await(time, TimeUnit.MILLISECONDS)) - throw new GridHadoopHandshakeTimeoutException("Failed to wait for handshake to finish [timeout=" + + throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish [timeout=" + time + ']'); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new GridHadoopHandshakeTimeoutException("Failed to wait for handshake to finish (thread was " + + throw new HadoopHandshakeTimeoutException("Failed to wait for handshake to finish (thread was " + "interrupted) [timeout=" + time + ']', e); } } @@ -1279,7 +1279,7 @@ public class GridHadoopExternalCommunication { /** {@inheritDoc} */ @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException { - GridHadoopProcessDescriptor desc = ses.meta(PROCESS_META); + HadoopProcessDescriptor desc = ses.meta(PROCESS_META); UUID rmtProcId = desc == null ? null : desc.processId(); @@ -1319,7 +1319,7 @@ public class GridHadoopExternalCommunication { log.debug("Will reuse session for descriptor: " + rmtProcId); // Handshake finished flag is true. - clients.put(rmtProcId, new GridHadoopTcpNioCommunicationClient(ses)); + clients.put(rmtProcId, new HadoopTcpNioCommunicationClient(ses)); } else { if (log.isDebugEnabled()) @@ -1387,12 +1387,12 @@ public class GridHadoopExternalCommunication { * Process ID message. */ @SuppressWarnings("PublicInnerClass") - public static class ProcessHandshakeMessage implements GridHadoopMessage { + public static class ProcessHandshakeMessage implements HadoopMessage { /** */ private static final long serialVersionUID = 0L; /** Node ID. */ - private GridHadoopProcessDescriptor procDesc; + private HadoopProcessDescriptor procDesc; /** */ public ProcessHandshakeMessage() { @@ -1402,14 +1402,14 @@ public class GridHadoopExternalCommunication { /** * @param procDesc Process descriptor. */ - private ProcessHandshakeMessage(GridHadoopProcessDescriptor procDesc) { + private ProcessHandshakeMessage(HadoopProcessDescriptor procDesc) { this.procDesc = procDesc; } /** * @return Process ID. 
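
The HandshakeFinish helper renamed in the hunks below is a thin wrapper over a CountDownLatch: the NIO filter counts the latch down when the handshake message arrives, and await() converts a missed deadline or an interrupt into the dedicated timeout exception. A standalone sketch, with java.util.concurrent.TimeoutException standing in for HadoopHandshakeTimeoutException:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    /** Standalone sketch of HandshakeFinish; TimeoutException stands in for the internal exception. */
    public class HandshakeFinish {
        private final CountDownLatch latch = new CountDownLatch(1);

        /** Called by the NIO filter once the handshake message arrives. */
        public void finish() {
            latch.countDown();
        }

        /** Blocks until the handshake finishes or the timeout elapses. */
        public void await(long timeMs) throws TimeoutException {
            try {
                if (!latch.await(timeMs, TimeUnit.MILLISECONDS))
                    throw new TimeoutException("Failed to wait for handshake to finish [timeout=" + timeMs + ']');
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // Preserve the interrupt flag for the caller.

                throw new TimeoutException("Interrupted while waiting for handshake [timeout=" + timeMs + ']');
            }
        }
    }
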
*/ - public GridHadoopProcessDescriptor processDescriptor() { + public HadoopProcessDescriptor processDescriptor() { return procDesc; } @@ -1420,7 +1420,7 @@ public class GridHadoopExternalCommunication { /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { - procDesc = (GridHadoopProcessDescriptor)in.readObject(); + procDesc = (HadoopProcessDescriptor)in.readObject(); } /** {@inheritDoc} */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopHandshakeTimeoutException.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java similarity index 85% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopHandshakeTimeoutException.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java index e001dc9..36cefcb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopHandshakeTimeoutException.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopHandshakeTimeoutException.java @@ -21,14 +21,14 @@ import org.apache.ignite.*; import org.jetbrains.annotations.*; /** Internal exception class for proper timeout handling. */ -class GridHadoopHandshakeTimeoutException extends IgniteCheckedException { +class HadoopHandshakeTimeoutException extends IgniteCheckedException { /** */ private static final long serialVersionUID = 0L; /** * @param msg Message. */ - GridHadoopHandshakeTimeoutException(String msg) { + HadoopHandshakeTimeoutException(String msg) { super(msg); } @@ -36,7 +36,7 @@ class GridHadoopHandshakeTimeoutException extends IgniteCheckedException { * @param msg Message. * @param cause Cause. */ - GridHadoopHandshakeTimeoutException(String msg, @Nullable Throwable cause) { + HadoopHandshakeTimeoutException(String msg, @Nullable Throwable cause) { super(msg, cause); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopIpcToNioAdapter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopIpcToNioAdapter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java index a39451d..8dbc96b 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopIpcToNioAdapter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopIpcToNioAdapter.java @@ -33,7 +33,7 @@ import java.util.concurrent.atomic.*; * Note that this class consumes an entire thread inside {@link #serve()} method * in order to serve one {@link org.apache.ignite.internal.util.ipc.IpcEndpoint}. 
*/ -public class GridHadoopIpcToNioAdapter { +public class HadoopIpcToNioAdapter { /** */ private final IpcEndpoint endp; @@ -55,7 +55,7 @@ public class GridHadoopIpcToNioAdapter { * @param lsnr Listener. * @param filters Filters. */ - public GridHadoopIpcToNioAdapter(IgniteLogger log, IpcEndpoint endp, boolean accepted, + public HadoopIpcToNioAdapter(IgniteLogger log, IpcEndpoint endp, boolean accepted, GridNioServerListener lsnr, GridNioFilter... filters) { this.endp = endp; @@ -182,8 +182,8 @@ public class GridHadoopIpcToNioAdapter { /** {@inheritDoc} */ @Override public GridNioFuture onSessionWrite(GridNioSession ses, Object msg) { - assert ses == GridHadoopIpcToNioAdapter.this.ses : "ses=" + ses + - ", this.ses=" + GridHadoopIpcToNioAdapter.this.ses; + assert ses == HadoopIpcToNioAdapter.this.ses : "ses=" + ses + + ", this.ses=" + HadoopIpcToNioAdapter.this.ses; return send((ByteBuffer)msg); } @@ -216,9 +216,9 @@ public class GridHadoopIpcToNioAdapter { /** {@inheritDoc} */ @Override public GridNioFuture onSessionClose(GridNioSession ses) { - assert ses == GridHadoopIpcToNioAdapter.this.ses; + assert ses == HadoopIpcToNioAdapter.this.ses; - boolean closed = GridHadoopIpcToNioAdapter.this.ses.setClosed(); + boolean closed = HadoopIpcToNioAdapter.this.ses.setClosed(); if (closed) endp.close(); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMarshallerFilter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMarshallerFilter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java index 2a25357..4cba117 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMarshallerFilter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMarshallerFilter.java @@ -25,15 +25,15 @@ import org.apache.ignite.marshaller.*; /** * Serialization filter. */ -public class GridHadoopMarshallerFilter extends GridNioFilterAdapter { +public class HadoopMarshallerFilter extends GridNioFilterAdapter { /** Marshaller. */ private Marshaller marshaller; /** * @param marshaller Marshaller to use. 
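
Per its Javadoc, HadoopIpcToNioAdapter consumes an entire thread inside serve() to pump one IpcEndpoint into the buffer-oriented NIO filter chain. A rough sketch of that read loop under simplified assumptions (a plain InputStream and a single buffer listener instead of the real endpoint and filter stack):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.ByteBuffer;

    /** Sketch of the serve() read loop: pump a blocking IPC stream into buffer-oriented handlers. */
    public class IpcPump {
        interface BufferListener { void onBuffer(ByteBuffer buf); }

        /** Consumes the calling thread, as the adapter's serve() consumes one thread per endpoint. */
        static void serve(InputStream in, BufferListener lsnr) throws IOException {
            byte[] arr = new byte[32 * 1024];

            for (int read; (read = in.read(arr)) != -1; ) {
                ByteBuffer buf = ByteBuffer.wrap(arr, 0, read);

                lsnr.onBuffer(buf); // Hand the chunk to the filter chain (parser, marshaller, ...).
            }
        }
    }
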
*/ - public GridHadoopMarshallerFilter(Marshaller marshaller) { - super("GridHadoopMarshallerFilter"); + public HadoopMarshallerFilter(Marshaller marshaller) { + super("HadoopMarshallerFilter"); this.marshaller = marshaller; } @@ -55,7 +55,7 @@ public class GridHadoopMarshallerFilter extends GridNioFilterAdapter { /** {@inheritDoc} */ @Override public GridNioFuture onSessionWrite(GridNioSession ses, Object msg) throws IgniteCheckedException { - assert msg instanceof GridHadoopMessage : "Invalid message type: " + msg; + assert msg instanceof HadoopMessage : "Invalid message type: " + msg; return proceedSessionWrite(ses, marshaller.marshal(msg)); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMessageListener.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMessageListener.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java index 219f4db..c21e494 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopMessageListener.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopMessageListener.java @@ -23,17 +23,17 @@ import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.*; /** * Hadoop communication message listener. */ -public interface GridHadoopMessageListener { +public interface HadoopMessageListener { /** * @param desc Process descriptor. * @param msg Hadoop message. */ - public void onMessageReceived(GridHadoopProcessDescriptor desc, GridHadoopMessage msg); + public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg); /** * Called when connection to remote process was lost. * * @param desc Process descriptor. */ - public void onConnectionLost(GridHadoopProcessDescriptor desc); + public void onConnectionLost(HadoopProcessDescriptor desc); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopTcpNioCommunicationClient.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopTcpNioCommunicationClient.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java index 2be16cb..c4d1c54 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopTcpNioCommunicationClient.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopTcpNioCommunicationClient.java @@ -28,21 +28,21 @@ import java.io.*; /** * Grid client for NIO server. 
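
The listener contract renamed below is small enough to implement directly. A minimal logging implementation, assuming the ignite-hadoop module on the classpath; the HadoopMessage import path is inferred from the module layout and may differ:

    import org.apache.ignite.internal.processors.hadoop.message.HadoopMessage; // Package assumed.
    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.HadoopProcessDescriptor;
    import org.apache.ignite.internal.processors.hadoop.taskexecutor.external.communication.HadoopMessageListener;

    /** Minimal listener wiring against the renamed interface. */
    public class LoggingMessageListener implements HadoopMessageListener {
        @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) {
            System.out.println("Received from " + desc.processId() + ": " + msg);
        }

        @Override public void onConnectionLost(HadoopProcessDescriptor desc) {
            System.out.println("Connection lost: " + desc.processId());
        }
    }
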
*/ -public class GridHadoopTcpNioCommunicationClient extends GridHadoopAbstractCommunicationClient { +public class HadoopTcpNioCommunicationClient extends HadoopAbstractCommunicationClient { /** Socket. */ private final GridNioSession ses; /** * Constructor for test purposes only. */ - public GridHadoopTcpNioCommunicationClient() { + public HadoopTcpNioCommunicationClient() { ses = null; } /** * @param ses Session. */ - public GridHadoopTcpNioCommunicationClient(GridNioSession ses) { + public HadoopTcpNioCommunicationClient(GridNioSession ses) { assert ses != null; this.ses = ses; @@ -66,7 +66,7 @@ public class GridHadoopTcpNioCommunicationClient extends GridHadoopAbstractCommu } /** {@inheritDoc} */ - @Override public void sendMessage(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) + @Override public void sendMessage(HadoopProcessDescriptor desc, HadoopMessage msg) throws IgniteCheckedException { if (closed()) throw new IgniteCheckedException("Client was closed: " + this); @@ -94,6 +94,6 @@ public class GridHadoopTcpNioCommunicationClient extends GridHadoopAbstractCommu /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridHadoopTcpNioCommunicationClient.class, this, super.toString()); + return S.toString(HadoopTcpNioCommunicationClient.class, this, super.toString()); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1CleanupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java similarity index 84% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1CleanupTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java index 99ee9b77..fa570ea 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1CleanupTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1CleanupTask.java @@ -27,7 +27,7 @@ import java.io.*; /** * Hadoop cleanup task implementation for v1 API. */ -public class GridHadoopV1CleanupTask extends GridHadoopV1Task { +public class HadoopV1CleanupTask extends HadoopV1Task { /** Abort flag. */ private final boolean abort; @@ -35,15 +35,15 @@ public class GridHadoopV1CleanupTask extends GridHadoopV1Task { * @param taskInfo Task info. * @param abort Abort flag. 
*/ - public GridHadoopV1CleanupTask(GridHadoopTaskInfo taskInfo, boolean abort) { + public HadoopV1CleanupTask(HadoopTaskInfo taskInfo, boolean abort) { super(taskInfo); this.abort = abort; } /** {@inheritDoc} */ - @Override public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { - GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext)taskCtx; + @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException { + HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx; JobContext jobCtx = ctx.jobContext(); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Counter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java similarity index 92% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Counter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java index b986d3e..609297b 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Counter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Counter.java @@ -29,16 +29,16 @@ import static org.apache.hadoop.mapreduce.util.CountersStrings.*; /** * Hadoop counter implementation for v1 API. */ -public class GridHadoopV1Counter extends Counters.Counter { +public class HadoopV1Counter extends Counters.Counter { /** Delegate. */ - private final GridHadoopLongCounter cntr; + private final HadoopLongCounter cntr; /** * Creates new instance. * * @param cntr Delegate counter. */ - public GridHadoopV1Counter(GridHadoopLongCounter cntr) { + public HadoopV1Counter(HadoopLongCounter cntr) { this.cntr = cntr; } @@ -84,7 +84,7 @@ public class GridHadoopV1Counter extends Counters.Counter { /** {@inheritDoc} */ @Override public String makeEscapedCompactString() { - return toEscapedCompactString(new GridHadoopV2Counter(cntr)); + return toEscapedCompactString(new HadoopV2Counter(cntr)); } /** {@inheritDoc} */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1MapTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1MapTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java index 878b61b..ad7b058 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1MapTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1MapTask.java @@ -27,7 +27,7 @@ import org.apache.ignite.internal.processors.hadoop.v2.*; /** * Hadoop map task implementation for v1 API. 
*/ -public class GridHadoopV1MapTask extends GridHadoopV1Task { +public class HadoopV1MapTask extends HadoopV1Task { /** */ private static final String[] EMPTY_HOSTS = new String[0]; @@ -36,27 +36,27 @@ public class GridHadoopV1MapTask extends GridHadoopV1Task { * * @param taskInfo */ - public GridHadoopV1MapTask(GridHadoopTaskInfo taskInfo) { + public HadoopV1MapTask(HadoopTaskInfo taskInfo) { super(taskInfo); } /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { - GridHadoopJob job = taskCtx.job(); + @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException { + HadoopJob job = taskCtx.job(); - GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext)taskCtx; + HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx; JobConf jobConf = ctx.jobConf(); InputFormat inFormat = jobConf.getInputFormat(); - GridHadoopInputSplit split = info().inputSplit(); + HadoopInputSplit split = info().inputSplit(); InputSplit nativeSplit; - if (split instanceof GridHadoopFileBlock) { - GridHadoopFileBlock block = (GridHadoopFileBlock)split; + if (split instanceof HadoopFileBlock) { + HadoopFileBlock block = (HadoopFileBlock)split; nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), EMPTY_HOSTS); } @@ -65,9 +65,9 @@ public class GridHadoopV1MapTask extends GridHadoopV1Task { assert nativeSplit != null; - Reporter reporter = new GridHadoopV1Reporter(taskCtx); + Reporter reporter = new HadoopV1Reporter(taskCtx); - GridHadoopV1OutputCollector collector = null; + HadoopV1OutputCollector collector = null; try { collector = collector(jobConf, ctx, !job.info().hasCombiner() && !job.info().hasReducer(), @@ -86,7 +86,7 @@ public class GridHadoopV1MapTask extends GridHadoopV1Task { try { while (reader.next(key, val)) { if (isCancelled()) - throw new GridHadoopTaskCancelledException("Map task cancelled."); + throw new HadoopTaskCancelledException("Map task cancelled."); mapper.map(key, val, collector, reporter); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1OutputCollector.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java similarity index 94% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1OutputCollector.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java index 2a38684..348274d 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1OutputCollector.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1OutputCollector.java @@ -27,12 +27,12 @@ import java.io.*; /** * Hadoop output collector. */ -public class GridHadoopV1OutputCollector implements OutputCollector { +public class HadoopV1OutputCollector implements OutputCollector { /** Job configuration. */ private final JobConf jobConf; /** Task context. */ - private final GridHadoopTaskContext taskCtx; + private final HadoopTaskContext taskCtx; /** Optional direct writer. */ private final RecordWriter writer; @@ -47,7 +47,7 @@ public class GridHadoopV1OutputCollector implements OutputCollector { * @param fileName File name. * @throws IOException In case of IO exception. 
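
HadoopV1MapTask.run() above is essentially the classic Hadoop v1 mapper drive loop. The same loop against the stock org.apache.hadoop.mapred API, without the cancellation check and Ignite collector plumbing:

    import java.io.IOException;

    import org.apache.hadoop.mapred.InputFormat;
    import org.apache.hadoop.mapred.InputSplit;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.RecordReader;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.util.ReflectionUtils;

    /** The v1 mapper drive loop that HadoopV1MapTask.run() reproduces (simplified). */
    public class V1MapLoop {
        @SuppressWarnings("unchecked")
        static void runMap(JobConf jobConf, InputSplit split, OutputCollector collector, Reporter reporter)
            throws IOException {
            InputFormat inFormat = jobConf.getInputFormat();

            RecordReader reader = inFormat.getRecordReader(split, jobConf, reporter);

            Mapper mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);

            try {
                Object key = reader.createKey();
                Object val = reader.createValue();

                while (reader.next(key, val)) // v1 readers reuse the key/value instances.
                    mapper.map(key, val, collector, reporter);
            }
            finally {
                mapper.close();
                reader.close();
            }
        }
    }
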
*/ - GridHadoopV1OutputCollector(JobConf jobConf, GridHadoopTaskContext taskCtx, boolean directWrite, + HadoopV1OutputCollector(JobConf jobConf, HadoopTaskContext taskCtx, boolean directWrite, @Nullable String fileName, TaskAttemptID attempt) throws IOException { this.jobConf = jobConf; this.taskCtx = taskCtx; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Partitioner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Partitioner.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java index 688ccef..e45f92b 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Partitioner.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Partitioner.java @@ -25,7 +25,7 @@ import org.apache.ignite.internal.processors.hadoop.*; /** * Hadoop partitioner adapter for v1 API. */ -public class GridHadoopV1Partitioner implements GridHadoopPartitioner { +public class HadoopV1Partitioner implements HadoopPartitioner { /** Partitioner instance. */ private Partitioner part; @@ -33,7 +33,7 @@ public class GridHadoopV1Partitioner implements GridHadoopPartitioner { * @param cls Hadoop partitioner class. * @param conf Job configuration. */ - public GridHadoopV1Partitioner(Class cls, Configuration conf) { + public HadoopV1Partitioner(Class cls, Configuration conf) { part = (Partitioner) ReflectionUtils.newInstance(cls, conf); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1ReduceTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java similarity index 81% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1ReduceTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java index 7deea90..18ee09d 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1ReduceTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1ReduceTask.java @@ -26,7 +26,7 @@ import org.apache.ignite.internal.processors.hadoop.v2.*; /** * Hadoop reduce task implementation for v1 API. */ -public class GridHadoopV1ReduceTask extends GridHadoopV1Task { +public class HadoopV1ReduceTask extends HadoopV1Task { /** {@code True} if reduce, {@code false} if combine. */ private final boolean reduce; @@ -36,7 +36,7 @@ public class GridHadoopV1ReduceTask extends GridHadoopV1Task { * @param taskInfo Task info. * @param reduce {@code True} if reduce, {@code false} if combine. 
*/ - public GridHadoopV1ReduceTask(GridHadoopTaskInfo taskInfo, boolean reduce) { + public HadoopV1ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) { super(taskInfo); this.reduce = reduce; @@ -44,16 +44,16 @@ public class GridHadoopV1ReduceTask extends GridHadoopV1Task { /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { - GridHadoopJob job = taskCtx.job(); + @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException { + HadoopJob job = taskCtx.job(); - GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext)taskCtx; + HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx; JobConf jobConf = ctx.jobConf(); - GridHadoopTaskInput input = taskCtx.input(); + HadoopTaskInput input = taskCtx.input(); - GridHadoopV1OutputCollector collector = null; + HadoopV1OutputCollector collector = null; try { collector = collector(jobConf, ctx, reduce || !job.info().hasReducer(), fileName(), ctx.attemptId()); @@ -67,7 +67,7 @@ public class GridHadoopV1ReduceTask extends GridHadoopV1Task { try { while (input.next()) { if (isCancelled()) - throw new GridHadoopTaskCancelledException("Reduce task cancelled."); + throw new HadoopTaskCancelledException("Reduce task cancelled."); reducer.reduce(input.key(), input.values(), collector, Reporter.NULL); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Reporter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Reporter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java index 1abb2c0..d799373 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Reporter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Reporter.java @@ -24,16 +24,16 @@ import org.apache.ignite.internal.processors.hadoop.counter.*; /** * Hadoop reporter implementation for v1 API. */ -public class GridHadoopV1Reporter implements Reporter { +public class HadoopV1Reporter implements Reporter { /** Context. */ - private final GridHadoopTaskContext ctx; + private final HadoopTaskContext ctx; /** * Creates new instance. * * @param ctx Context. 
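
HadoopV1ReduceTask.run() above follows the same shape on the reduce side: pull key groups from the task input and feed them to a v1 Reducer. A sketch with a hypothetical KeyedInput mirroring the renamed HadoopTaskInput:

    import java.io.IOException;
    import java.util.Iterator;

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.util.ReflectionUtils;

    /** Sketch of the v1 reduce drive loop; KeyedInput is a stand-in for HadoopTaskInput. */
    public class V1ReduceLoop {
        interface KeyedInput {
            boolean next() throws IOException;
            Object key();
            Iterator<?> values();
        }

        @SuppressWarnings("unchecked")
        static void runReduce(JobConf jobConf, KeyedInput input, OutputCollector collector) throws IOException {
            Reducer reducer = ReflectionUtils.newInstance(jobConf.getReducerClass(), jobConf);

            try {
                while (input.next())
                    reducer.reduce(input.key(), (Iterator)input.values(), collector, Reporter.NULL);
            }
            finally {
                reducer.close();
            }
        }
    }
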
*/ - public GridHadoopV1Reporter(GridHadoopTaskContext ctx) { + public HadoopV1Reporter(HadoopTaskContext ctx) { this.ctx = ctx; } @@ -49,7 +49,7 @@ public class GridHadoopV1Reporter implements Reporter { /** {@inheritDoc} */ @Override public Counters.Counter getCounter(String grp, String name) { - return new GridHadoopV1Counter(ctx.counter(grp, name, GridHadoopLongCounter.class)); + return new HadoopV1Counter(ctx.counter(grp, name, HadoopLongCounter.class)); } /** {@inheritDoc} */ diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1SetupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java similarity index 84% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1SetupTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java index c7dc3fd..a758f1d 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1SetupTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1SetupTask.java @@ -27,19 +27,19 @@ import java.io.*; /** * Hadoop setup task implementation for v1 API. */ -public class GridHadoopV1SetupTask extends GridHadoopV1Task { +public class HadoopV1SetupTask extends HadoopV1Task { /** * Constructor. * * @param taskInfo Task info. */ - public GridHadoopV1SetupTask(GridHadoopTaskInfo taskInfo) { + public HadoopV1SetupTask(HadoopTaskInfo taskInfo) { super(taskInfo); } /** {@inheritDoc} */ - @Override public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { - GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext)taskCtx; + @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException { + HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx; try { ctx.jobConf().getOutputFormat().checkOutputSpecs(null, ctx.jobConf()); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Splitter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java similarity index 80% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Splitter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java index 257f4ea..9eebbb8 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Splitter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Splitter.java @@ -30,7 +30,7 @@ import java.util.*; /** * Hadoop API v1 splitter. */ -public class GridHadoopV1Splitter { +public class HadoopV1Splitter { /** */ private static final String[] EMPTY_HOSTS = {}; @@ -39,7 +39,7 @@ public class GridHadoopV1Splitter { * @return Collection of mapped splits. * @throws IgniteCheckedException If mapping failed. 
*/ - public static Collection splitJob(JobConf jobConf) throws IgniteCheckedException { + public static Collection splitJob(JobConf jobConf) throws IgniteCheckedException { try { InputFormat format = jobConf.getInputFormat(); @@ -47,7 +47,7 @@ public class GridHadoopV1Splitter { InputSplit[] splits = format.getSplits(jobConf, 0); - Collection res = new ArrayList<>(splits.length); + Collection res = new ArrayList<>(splits.length); for (int i = 0; i < splits.length; i++) { InputSplit nativeSplit = splits[i]; @@ -55,10 +55,10 @@ public class GridHadoopV1Splitter { if (nativeSplit instanceof FileSplit) { FileSplit s = (FileSplit)nativeSplit; - res.add(new GridHadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength())); + res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength())); } else - res.add(GridHadoopUtils.wrapSplit(i, nativeSplit, nativeSplit.getLocations())); + res.add(HadoopUtils.wrapSplit(i, nativeSplit, nativeSplit.getLocations())); } return res; @@ -75,7 +75,7 @@ public class GridHadoopV1Splitter { * @return File block or {@code null} if it is not a {@link FileSplit} instance. * @throws IgniteCheckedException If failed. */ - @Nullable public static GridHadoopFileBlock readFileBlock(String clsName, FSDataInputStream in, + @Nullable public static HadoopFileBlock readFileBlock(String clsName, FSDataInputStream in, @Nullable String[] hosts) throws IgniteCheckedException { if (!FileSplit.class.getName().equals(clsName)) return null; @@ -92,6 +92,6 @@ public class GridHadoopV1Splitter { if (hosts == null) hosts = EMPTY_HOSTS; - return new GridHadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength()); + return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength()); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Task.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java similarity index 86% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Task.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java index 86a7264..b7da700 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/GridHadoopV1Task.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v1/HadoopV1Task.java @@ -28,7 +28,7 @@ import java.text.*; /** * Extended Hadoop v1 task. */ -public abstract class GridHadoopV1Task extends GridHadoopTask { +public abstract class HadoopV1Task extends HadoopTask { /** Indicates that this task is to be cancelled. */ private volatile boolean cancelled; @@ -37,7 +37,7 @@ public abstract class GridHadoopV1Task extends GridHadoopTask { * * @param taskInfo Task info. */ - protected GridHadoopV1Task(GridHadoopTaskInfo taskInfo) { + protected HadoopV1Task(HadoopTaskInfo taskInfo) { super(taskInfo); } @@ -65,14 +65,14 @@ public abstract class GridHadoopV1Task extends GridHadoopTask { * @return Collector. * @throws IOException In case of IO exception. 
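
HadoopV1Splitter.splitJob() above maps native v1 splits onto Ignite blocks: a FileSplit collapses to (file URI, start, length) — exactly what a HadoopFileBlock carries — and anything else gets wrapped and serialized. The Hadoop side of that, runnable against the stock mapred API:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collection;

    import org.apache.hadoop.mapred.FileSplit;
    import org.apache.hadoop.mapred.InputFormat;
    import org.apache.hadoop.mapred.InputSplit;
    import org.apache.hadoop.mapred.JobConf;

    /** What splitJob() boils down to: ask the v1 InputFormat for splits, record file coordinates. */
    public class V1SplitProbe {
        static Collection<String> describeSplits(JobConf jobConf) throws IOException {
            InputFormat<?, ?> format = jobConf.getInputFormat();

            InputSplit[] splits = format.getSplits(jobConf, 0);

            Collection<String> res = new ArrayList<>(splits.length);

            for (InputSplit s : splits) {
                if (s instanceof FileSplit) {
                    FileSplit fs = (FileSplit)s;

                    // These three values are what a HadoopFileBlock carries.
                    res.add(fs.getPath().toUri() + " @" + fs.getStart() + "+" + fs.getLength());
                }
                else
                    res.add(s.getClass().getName()); // Non-file splits get wrapped instead.
            }

            return res;
        }
    }
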
*/ - protected GridHadoopV1OutputCollector collector(JobConf jobConf, GridHadoopV2TaskContext taskCtx, + protected HadoopV1OutputCollector collector(JobConf jobConf, HadoopV2TaskContext taskCtx, boolean directWrite, @Nullable String fileName, TaskAttemptID attempt) throws IOException { - GridHadoopV1OutputCollector collector = new GridHadoopV1OutputCollector(jobConf, taskCtx, directWrite, + HadoopV1OutputCollector collector = new HadoopV1OutputCollector(jobConf, taskCtx, directWrite, fileName, attempt) { /** {@inheritDoc} */ @Override public void collect(Object key, Object val) throws IOException { if (cancelled) - throw new GridHadoopTaskCancelledException("Task cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); super.collect(key, val); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopExternalSplit.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopExternalSplit.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java index 36b40a2..496a710 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopExternalSplit.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopExternalSplit.java @@ -24,7 +24,7 @@ import java.io.*; /** * Split serialized in external file. */ -public class GridHadoopExternalSplit extends GridHadoopInputSplit { +public class HadoopExternalSplit extends HadoopInputSplit { /** */ private static final long serialVersionUID = 0L; @@ -34,7 +34,7 @@ public class GridHadoopExternalSplit extends GridHadoopInputSplit { /** * For {@link Externalizable}. */ - public GridHadoopExternalSplit() { + public HadoopExternalSplit() { // No-op. } @@ -42,7 +42,7 @@ public class GridHadoopExternalSplit extends GridHadoopInputSplit { * @param hosts Hosts. * @param off Offset of this split in external file. 
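
The collector() hunk above wires cancellation into the v1 OutputCollector by overriding collect() in an anonymous subclass. The same veto as a standalone wrapper; the real code throws HadoopTaskCancelledException, an IOException is used here to stay self-contained:

    import java.io.IOException;

    import org.apache.hadoop.mapred.OutputCollector;

    /** The cancellation hook from HadoopV1Task.collector(): veto writes once the task is cancelled. */
    public class CancellableCollector<K, V> implements OutputCollector<K, V> {
        private final OutputCollector<K, V> delegate;

        private volatile boolean cancelled;

        CancellableCollector(OutputCollector<K, V> delegate) {
            this.delegate = delegate;
        }

        void cancel() {
            cancelled = true;
        }

        @Override public void collect(K key, V val) throws IOException {
            if (cancelled)
                throw new IOException("Task cancelled."); // Real code: HadoopTaskCancelledException.

            delegate.collect(key, val);
        }
    }
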
*/ - public GridHadoopExternalSplit(String[] hosts, long off) { + public HadoopExternalSplit(String[] hosts, long off) { assert off >= 0 : off; assert hosts != null; @@ -75,7 +75,7 @@ public class GridHadoopExternalSplit extends GridHadoopInputSplit { if (o == null || getClass() != o.getClass()) return false; - GridHadoopExternalSplit that = (GridHadoopExternalSplit) o; + HadoopExternalSplit that = (HadoopExternalSplit) o; return off == that.off; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopNativeCodeLoader.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java similarity index 98% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopNativeCodeLoader.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java index 5ef4759..081182d 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopNativeCodeLoader.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopNativeCodeLoader.java @@ -25,7 +25,7 @@ import org.apache.hadoop.conf.*; */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class GridHadoopNativeCodeLoader { +public class HadoopNativeCodeLoader { /** * Check if native-hadoop code is loaded for this platform. * diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSerializationWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java similarity index 95% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSerializationWrapper.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java index 0f38548..bb9cb68 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSerializationWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSerializationWrapper.java @@ -27,7 +27,7 @@ import java.io.*; /** * The wrapper around external serializer. */ -public class GridHadoopSerializationWrapper implements GridHadoopSerialization { +public class HadoopSerializationWrapper implements HadoopSerialization { /** External serializer - writer. */ private final Serializer serializer; @@ -70,7 +70,7 @@ public class GridHadoopSerializationWrapper implements GridHadoopSerializatio * @param serialization External serializer to wrap. * @param cls The class to serialize. 
*/ - public GridHadoopSerializationWrapper(Serialization serialization, Class cls) throws IgniteCheckedException { + public HadoopSerializationWrapper(Serialization serialization, Class cls) throws IgniteCheckedException { assert cls != null; serializer = serialization.getSerializer(cls); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopShutdownHookManager.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopShutdownHookManager.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java index 48558fc..454f90a 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopShutdownHookManager.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopShutdownHookManager.java @@ -23,16 +23,16 @@ import java.util.concurrent.atomic.*; /** * Fake manager for shutdown hooks. */ -public class GridHadoopShutdownHookManager { +public class HadoopShutdownHookManager { /** */ - private static final GridHadoopShutdownHookManager MGR = new GridHadoopShutdownHookManager(); + private static final HadoopShutdownHookManager MGR = new HadoopShutdownHookManager(); /** * Return ShutdownHookManager singleton. * * @return ShutdownHookManager singleton. */ - public static GridHadoopShutdownHookManager get() { + public static HadoopShutdownHookManager get() { return MGR; } @@ -45,7 +45,7 @@ public class GridHadoopShutdownHookManager { /** * Singleton. */ - private GridHadoopShutdownHookManager() { + private HadoopShutdownHookManager() { // No-op. } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSplitWrapper.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSplitWrapper.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java index 57edfa9..bc7ded3 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopSplitWrapper.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopSplitWrapper.java @@ -27,7 +27,7 @@ import java.io.*; * * Warning!! This class must not depend on any Hadoop classes directly or indirectly. */ -public class GridHadoopSplitWrapper extends GridHadoopInputSplit { +public class HadoopSplitWrapper extends HadoopInputSplit { /** */ private static final long serialVersionUID = 0L; @@ -43,7 +43,7 @@ public class GridHadoopSplitWrapper extends GridHadoopInputSplit { /** * Creates new split wrapper. */ - public GridHadoopSplitWrapper() { + public HadoopSplitWrapper() { // No-op. } @@ -55,7 +55,7 @@ public class GridHadoopSplitWrapper extends GridHadoopInputSplit { * @param bytes Serialized class. * @param hosts Hosts where split is located. 
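
HadoopSerializationWrapper above adapts Hadoop's pluggable serialization API, resolving a Serializer per class in its constructor. The underlying Hadoop calls, shown end to end for an IntWritable (which the default WritableSerialization handles):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.serializer.SerializationFactory;
    import org.apache.hadoop.io.serializer.Serializer;

    /** The pluggable-serialization API that HadoopSerializationWrapper adapts. */
    public class SerializationProbe {
        static byte[] writableToBytes(IntWritable val) throws IOException {
            SerializationFactory factory = new SerializationFactory(new Configuration());

            Serializer<IntWritable> ser = factory.getSerializer(IntWritable.class);

            ByteArrayOutputStream out = new ByteArrayOutputStream();

            ser.open(out);

            try {
                ser.serialize(val);
            }
            finally {
                ser.close();
            }

            return out.toByteArray();
        }
    }
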
*/ - public GridHadoopSplitWrapper(int id, String clsName, byte[] bytes, String[] hosts) { + public HadoopSplitWrapper(int id, String clsName, byte[] bytes, String[] hosts) { assert hosts != null; assert clsName != null; assert bytes != null; @@ -106,7 +106,7 @@ public class GridHadoopSplitWrapper extends GridHadoopInputSplit { if (o == null || getClass() != o.getClass()) return false; - GridHadoopSplitWrapper that = (GridHadoopSplitWrapper)o; + HadoopSplitWrapper that = (HadoopSplitWrapper)o; return id == that.id; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2CleanupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2CleanupTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java index 38be3da..534033b 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2CleanupTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2CleanupTask.java @@ -30,7 +30,7 @@ import java.io.*; /** * Hadoop cleanup task (commits or aborts job). */ -public class GridHadoopV2CleanupTask extends GridHadoopV2Task { +public class HadoopV2CleanupTask extends HadoopV2Task { /** Abort flag. */ private final boolean abort; @@ -38,7 +38,7 @@ public class GridHadoopV2CleanupTask extends GridHadoopV2Task { * @param taskInfo Task info. * @param abort Abort flag. */ - public GridHadoopV2CleanupTask(GridHadoopTaskInfo taskInfo, boolean abort) { + public HadoopV2CleanupTask(HadoopTaskInfo taskInfo, boolean abort) { super(taskInfo); this.abort = abort; @@ -46,7 +46,7 @@ public class GridHadoopV2CleanupTask extends GridHadoopV2Task { /** {@inheritDoc} */ @SuppressWarnings("ConstantConditions") - @Override public void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException { + @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { JobContextImpl jobCtx = taskCtx.jobContext(); try { diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Context.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java similarity index 82% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Context.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java index 287b10f..3f8e2b6 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Context.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Context.java @@ -31,18 +31,18 @@ import java.util.*; /** * Hadoop context implementation for v2 API. It provides IO operations for hadoop tasks. */ -public class GridHadoopV2Context extends JobContextImpl implements MapContext, ReduceContext { - /** Input reader to overriding of GridHadoopTaskContext input. */ +public class HadoopV2Context extends JobContextImpl implements MapContext, ReduceContext { + /** Input reader to overriding of HadoopTaskContext input. */ private RecordReader reader; - /** Output writer to overriding of GridHadoopTaskContext output. */ + /** Output writer to overriding of HadoopTaskContext output. 
*/ private RecordWriter writer; /** Output is provided by executor environment. */ - private final GridHadoopTaskOutput output; + private final HadoopTaskOutput output; /** Input is provided by executor environment. */ - private final GridHadoopTaskInput input; + private final HadoopTaskInput input; /** Unique identifier for a task attempt. */ private final TaskAttemptID taskAttemptID; @@ -54,7 +54,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R private InputSplit inputSplit; /** */ - private final GridHadoopTaskContext ctx; + private final HadoopTaskContext ctx; /** */ private String status; @@ -62,7 +62,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R /** * @param ctx Context for IO operations. */ - public GridHadoopV2Context(GridHadoopV2TaskContext ctx) { + public HadoopV2Context(HadoopV2TaskContext ctx) { super(ctx.jobConf(), ctx.jobContext().getJobID()); taskAttemptID = ctx.attemptId(); @@ -79,20 +79,20 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R /** {@inheritDoc} */ @Override public InputSplit getInputSplit() { if (inputSplit == null) { - GridHadoopInputSplit split = ctx.taskInfo().inputSplit(); + HadoopInputSplit split = ctx.taskInfo().inputSplit(); if (split == null) return null; - if (split instanceof GridHadoopFileBlock) { - GridHadoopFileBlock fileBlock = (GridHadoopFileBlock)split; + if (split instanceof HadoopFileBlock) { + HadoopFileBlock fileBlock = (HadoopFileBlock)split; inputSplit = new FileSplit(new Path(fileBlock.file()), fileBlock.start(), fileBlock.length(), null); } - else if (split instanceof GridHadoopExternalSplit) + else if (split instanceof HadoopExternalSplit) throw new UnsupportedOperationException(); // TODO - else if (split instanceof GridHadoopSplitWrapper) - inputSplit = (InputSplit)GridHadoopUtils.unwrapSplit((GridHadoopSplitWrapper)split); + else if (split instanceof HadoopSplitWrapper) + inputSplit = (InputSplit) HadoopUtils.unwrapSplit((HadoopSplitWrapper) split); else throw new IllegalStateException(); } @@ -103,7 +103,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R /** {@inheritDoc} */ @Override public boolean nextKeyValue() throws IOException, InterruptedException { if (cancelled) - throw new GridHadoopTaskCancelledException("Task cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); return reader.nextKeyValue(); } @@ -125,7 +125,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R @SuppressWarnings("unchecked") @Override public void write(Object key, Object val) throws IOException, InterruptedException { if (cancelled) - throw new GridHadoopTaskCancelledException("Task cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); if (writer != null) writer.write(key, val); @@ -171,7 +171,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R /** {@inheritDoc} */ @Override public Counter getCounter(String grpName, String cntrName) { - return new GridHadoopV2Counter(ctx.counter(grpName, cntrName, GridHadoopLongCounter.class)); + return new HadoopV2Counter(ctx.counter(grpName, cntrName, HadoopLongCounter.class)); } /** {@inheritDoc} */ @@ -191,7 +191,7 @@ public class GridHadoopV2Context extends JobContextImpl implements MapContext, R /** {@inheritDoc} */ @Override public boolean nextKey() throws IOException, InterruptedException { if (cancelled) - throw new GridHadoopTaskCancelledException("Task 
cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); return input.next(); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Counter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Counter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java index 6bf8a44..96ede0d 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Counter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Counter.java @@ -25,16 +25,16 @@ import java.io.*; /** * Adapter from own counter implementation into Hadoop API Counter od version 2.0. */ -public class GridHadoopV2Counter implements Counter { +public class HadoopV2Counter implements Counter { /** Delegate. */ - private final GridHadoopLongCounter cntr; + private final HadoopLongCounter cntr; /** * Creates new instance with given delegate. * * @param cntr Internal counter. */ - public GridHadoopV2Counter(GridHadoopLongCounter cntr) { + public HadoopV2Counter(HadoopLongCounter cntr) { assert cntr != null : "counter must be non-null"; this.cntr = cntr; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Job.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java similarity index 72% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Job.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java index 7c36948..f2f0cab 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Job.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Job.java @@ -39,12 +39,12 @@ import java.util.*; import java.util.Queue; import java.util.concurrent.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Hadoop job implementation for v2 API. */ -public class GridHadoopV2Job implements GridHadoopJob { +public class HadoopV2Job implements HadoopJob { /** */ private final JobConf jobConf; @@ -52,19 +52,19 @@ public class GridHadoopV2Job implements GridHadoopJob { private final JobContextImpl jobCtx; /** Hadoop job ID. */ - private final GridHadoopJobId jobId; + private final HadoopJobId jobId; /** Job info. */ - protected GridHadoopJobInfo jobInfo; + protected HadoopJobInfo jobInfo; /** */ private final JobID hadoopJobID; /** */ - private final GridHadoopV2JobResourceManager rsrcMgr; + private final HadoopV2JobResourceManager rsrcMgr; /** */ - private final ConcurrentMap, GridFutureAdapter> ctxs = + private final ConcurrentMap, GridFutureAdapter> ctxs = new ConcurrentHashMap8<>(); /** Pooling task context class and thus class loading environment. */ @@ -81,7 +81,7 @@ public class GridHadoopV2Job implements GridHadoopJob { * @param jobInfo Job info. * @param log Logger. 
*/ - public GridHadoopV2Job(GridHadoopJobId jobId, final GridHadoopDefaultJobInfo jobInfo, IgniteLogger log) { + public HadoopV2Job(HadoopJobId jobId, final HadoopDefaultJobInfo jobInfo, IgniteLogger log) { assert jobId != null; assert jobInfo != null; @@ -90,14 +90,14 @@ public class GridHadoopV2Job implements GridHadoopJob { hadoopJobID = new JobID(jobId.globalId().toString(), jobId.localId()); - GridHadoopClassLoader clsLdr = (GridHadoopClassLoader)getClass().getClassLoader(); + HadoopClassLoader clsLdr = (HadoopClassLoader)getClass().getClassLoader(); // Before create JobConf instance we should set new context class loader. Thread.currentThread().setContextClassLoader(clsLdr); jobConf = new JobConf(); - GridHadoopFileSystemsUtils.setupFileSystems(jobConf); + HadoopFileSystemsUtils.setupFileSystems(jobConf); Thread.currentThread().setContextClassLoader(null); @@ -106,21 +106,21 @@ public class GridHadoopV2Job implements GridHadoopJob { jobCtx = new JobContextImpl(jobConf, hadoopJobID); - rsrcMgr = new GridHadoopV2JobResourceManager(jobId, jobCtx, log); + rsrcMgr = new HadoopV2JobResourceManager(jobId, jobCtx, log); } /** {@inheritDoc} */ - @Override public GridHadoopJobId id() { + @Override public HadoopJobId id() { return jobId; } /** {@inheritDoc} */ - @Override public GridHadoopJobInfo info() { + @Override public HadoopJobInfo info() { return jobInfo; } /** {@inheritDoc} */ - @Override public Collection input() throws IgniteCheckedException { + @Override public Collection input() throws IgniteCheckedException { Thread.currentThread().setContextClassLoader(jobConf.getClassLoader()); try { @@ -129,9 +129,9 @@ public class GridHadoopV2Job implements GridHadoopJob { if (jobDirPath == null) { // Probably job was submitted not by hadoop client. // Assume that we have needed classes and try to generate input splits ourself. if (jobConf.getUseNewMapper()) - return GridHadoopV2Splitter.splitJob(jobCtx); + return HadoopV2Splitter.splitJob(jobCtx); else - return GridHadoopV1Splitter.splitJob(jobConf); + return HadoopV1Splitter.splitJob(jobConf); } Path jobDir = new Path(jobDirPath); @@ -146,7 +146,7 @@ public class GridHadoopV2Job implements GridHadoopJob { Path splitsFile = JobSubmissionFiles.getJobSplitFile(jobDir); try (FSDataInputStream in = fs.open(splitsFile)) { - Collection res = new ArrayList<>(metaInfos.length); + Collection res = new ArrayList<>(metaInfos.length); for (JobSplit.TaskSplitMetaInfo metaInfo : metaInfos) { long off = metaInfo.getStartOffset(); @@ -157,12 +157,12 @@ public class GridHadoopV2Job implements GridHadoopJob { String clsName = Text.readString(in); - GridHadoopFileBlock block = GridHadoopV1Splitter.readFileBlock(clsName, in, hosts); + HadoopFileBlock block = HadoopV1Splitter.readFileBlock(clsName, in, hosts); if (block == null) - block = GridHadoopV2Splitter.readFileBlock(clsName, in, hosts); + block = HadoopV2Splitter.readFileBlock(clsName, in, hosts); - res.add(block != null ? block : new GridHadoopExternalSplit(hosts, off)); + res.add(block != null ? 
block : new HadoopExternalSplit(hosts, off)); } return res; @@ -178,15 +178,15 @@ public class GridHadoopV2Job implements GridHadoopJob { } /** {@inheritDoc} */ - @Override public GridHadoopTaskContext getTaskContext(GridHadoopTaskInfo info) throws IgniteCheckedException { - T2 locTaskId = new T2<>(info.type(), info.taskNumber()); + @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException { + T2 locTaskId = new T2<>(info.type(), info.taskNumber()); - GridFutureAdapter fut = ctxs.get(locTaskId); + GridFutureAdapter fut = ctxs.get(locTaskId); if (fut != null) return fut.get(); - GridFutureAdapter old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>()); + GridFutureAdapter old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>()); if (old != null) return old.get(); @@ -196,13 +196,13 @@ public class GridHadoopV2Job implements GridHadoopJob { try { if (cls == null) { // If there is no pooled class, then load new one. - GridHadoopClassLoader ldr = new GridHadoopClassLoader(rsrcMgr.classPath()); + HadoopClassLoader ldr = new HadoopClassLoader(rsrcMgr.classPath()); - cls = ldr.loadClass(GridHadoopV2TaskContext.class.getName()); + cls = ldr.loadClass(HadoopV2TaskContext.class.getName()); } - Constructor ctr = cls.getConstructor(GridHadoopTaskInfo.class, GridHadoopJob.class, - GridHadoopJobId.class, UUID.class, DataInput.class); + Constructor ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJob.class, + HadoopJobId.class, UUID.class, DataInput.class); if (jobConfData == null) synchronized(jobConf) { @@ -215,7 +215,7 @@ public class GridHadoopV2Job implements GridHadoopJob { } } - GridHadoopTaskContext res = (GridHadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId, + HadoopTaskContext res = (HadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId, new DataInputStream(new ByteArrayInputStream(jobConfData))); fut.onDone(res); @@ -256,13 +256,13 @@ public class GridHadoopV2Job implements GridHadoopJob { } /** {@inheritDoc} */ - @Override public void prepareTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException { + @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException { rsrcMgr.prepareTaskWorkDir(taskLocalDir(locNodeId, info)); } /** {@inheritDoc} */ - @Override public void cleanupTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException { - GridHadoopTaskContext ctx = ctxs.remove(new T2<>(info.type(), info.taskNumber())).get(); + @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException { + HadoopTaskContext ctx = ctxs.remove(new T2<>(info.type(), info.taskNumber())).get(); taskCtxClsPool.offer(ctx.getClass()); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2JobResourceManager.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java similarity index 96% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2JobResourceManager.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java index be619c7..6f6bfa1 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2JobResourceManager.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2JobResourceManager.java @@ -39,7 +39,7 @@ import java.util.*; * Provides 
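
getTaskContext() above loads HadoopV2TaskContext through a fresh HadoopClassLoader and instantiates it reflectively, so each job gets an isolated class-loading environment; the constructor similarly swaps the thread's context class loader around JobConf creation. A sketch of both moves — URLClassLoader only illustrates the reflective plumbing and stands in for Ignite's own loader:

    import java.lang.reflect.Constructor;
    import java.net.URL;
    import java.net.URLClassLoader;
    import java.util.concurrent.Callable;

    /** Sketch of HadoopV2Job's isolation plumbing: per-job loader plus context-loader swap. */
    public class IsolatedTaskContexts {
        /** Loads clsName in its own loader and instantiates it reflectively, as getTaskContext() does. */
        static Object newIsolated(URL[] clsPath, String clsName, Class<?>[] argTypes, Object... args)
            throws Exception {
            ClassLoader ldr = new URLClassLoader(clsPath, IsolatedTaskContexts.class.getClassLoader());

            Class<?> cls = ldr.loadClass(clsName);

            Constructor<?> ctr = cls.getConstructor(argTypes);

            return ctr.newInstance(args);
        }

        /** The context-class-loader swap that surrounds JobConf creation in the constructor. */
        static <T> T withContextClassLoader(ClassLoader ldr, Callable<T> body) throws Exception {
            Thread t = Thread.currentThread();

            ClassLoader old = t.getContextClassLoader();

            t.setContextClassLoader(ldr);

            try {
                return body.call(); // E.g. new JobConf(), which snapshots the context class loader.
            }
            finally {
                t.setContextClassLoader(old); // Restoring the old loader; the real code resets to null.
            }
        }
    }
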
all resources needed for the job execution. Downloads the main jar, the configuration and additional * files that need to be placed on the local file system. */ -public class GridHadoopV2JobResourceManager { +public class HadoopV2JobResourceManager { /** Hadoop job context. */ private final JobContextImpl ctx; @@ -47,7 +47,7 @@ public class GridHadoopV2JobResourceManager { private final IgniteLogger log; /** Job ID. */ - private final GridHadoopJobId jobId; + private final HadoopJobId jobId; /** Class path list. */ private URL[] clsPath; @@ -64,10 +64,10 @@ public class GridHadoopV2JobResourceManager { * @param ctx Hadoop job context. * @param log Logger. */ - public GridHadoopV2JobResourceManager(GridHadoopJobId jobId, JobContextImpl ctx, IgniteLogger log) { + public HadoopV2JobResourceManager(HadoopJobId jobId, JobContextImpl ctx, IgniteLogger log) { this.jobId = jobId; this.ctx = ctx; - this.log = log.getLogger(GridHadoopV2JobResourceManager.class); + this.log = log.getLogger(HadoopV2JobResourceManager.class); } /** @@ -82,7 +82,7 @@ public class GridHadoopV2JobResourceManager { Thread.currentThread().setContextClassLoader(cfg.getClassLoader()); try { - cfg.set(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, dir.getAbsolutePath()); + cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, dir.getAbsolutePath()); if(!cfg.getBoolean("fs.file.impl.disable.cache", false)) FileSystem.getLocal(cfg).setWorkingDirectory(new Path(dir.getAbsolutePath())); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2MapTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java similarity index 87% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2MapTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java index be0bea2..2bf4292 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2MapTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2MapTask.java @@ -34,23 +34,23 @@ import org.apache.ignite.internal.*; /** * Hadoop map task implementation for v2 API. */ -public class GridHadoopV2MapTask extends GridHadoopV2Task { +public class HadoopV2MapTask extends HadoopV2Task { /** * @param taskInfo Task info. */ - public GridHadoopV2MapTask(GridHadoopTaskInfo taskInfo) { + public HadoopV2MapTask(HadoopTaskInfo taskInfo) { super(taskInfo); } /** {@inheritDoc} */ @SuppressWarnings({"ConstantConditions", "unchecked"}) - @Override public void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException { - GridHadoopInputSplit split = info().inputSplit(); + @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { + HadoopInputSplit split = info().inputSplit(); InputSplit nativeSplit; - if (split instanceof GridHadoopFileBlock) { - GridHadoopFileBlock block = (GridHadoopFileBlock)split; + if (split instanceof HadoopFileBlock) { + HadoopFileBlock block = (HadoopFileBlock)split; nativeSplit = new FileSplit(new Path(block.file().toString()), block.start(), block.length(), null); } @@ -74,7 +74,7 @@ public class GridHadoopV2MapTask extends GridHadoopV2Task { hadoopContext().reader(reader); - GridHadoopJobInfo jobInfo = taskCtx.job().info(); + HadoopJobInfo jobInfo = taskCtx.job().info(); outputFormat = jobInfo.hasCombiner() || jobInfo.hasReducer() ?
null : prepareWriter(jobCtx); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Partitioner.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Partitioner.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java index 0883520..36382d4 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Partitioner.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Partitioner.java @@ -25,7 +25,7 @@ import org.apache.ignite.internal.processors.hadoop.*; /** * Hadoop partitioner adapter for v2 API. */ -public class GridHadoopV2Partitioner implements GridHadoopPartitioner { +public class HadoopV2Partitioner implements HadoopPartitioner { /** Partitioner instance. */ private Partitioner part; @@ -33,7 +33,7 @@ public class GridHadoopV2Partitioner implements GridHadoopPartitioner { * @param cls Hadoop partitioner class. * @param conf Job configuration. */ - public GridHadoopV2Partitioner(Class> cls, Configuration conf) { + public HadoopV2Partitioner(Class> cls, Configuration conf) { part = (Partitioner) ReflectionUtils.newInstance(cls, conf); } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2ReduceTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java similarity index 91% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2ReduceTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java index 146e05c..250c41b 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2ReduceTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2ReduceTask.java @@ -29,7 +29,7 @@ import org.apache.ignite.internal.*; /** * Hadoop reduce task implementation for v2 API. */ -public class GridHadoopV2ReduceTask extends GridHadoopV2Task { +public class HadoopV2ReduceTask extends HadoopV2Task { /** {@code True} if reduce, {@code false} if combine. */ private final boolean reduce; @@ -39,7 +39,7 @@ public class GridHadoopV2ReduceTask extends GridHadoopV2Task { * @param taskInfo Task info. * @param reduce {@code True} if reduce, {@code false} if combine. 
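For orientation, the HadoopV2Partitioner renamed above is a thin adapter: it reflectively instantiates the user's Hadoop v2 Partitioner and delegates partition lookups to it so the rest of the engine stays Hadoop-agnostic. A minimal sketch of that pattern (the wrapper class and its partition() signature are illustrative stand-ins for Ignite's HadoopPartitioner interface):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Partitioner;
    import org.apache.hadoop.util.ReflectionUtils;

    /** Sketch: adapt a Hadoop v2 partitioner behind an engine-neutral facade. */
    public class PartitionerAdapterSketch {
        /** Wrapped Hadoop partitioner instance. */
        private final Partitioner<Object, Object> part;

        @SuppressWarnings("unchecked")
        public PartitionerAdapterSketch(Class<? extends Partitioner<?, ?>> cls, Configuration conf) {
            // ReflectionUtils.newInstance honors Configurable, so the partitioner sees the job config.
            part = (Partitioner<Object, Object>)ReflectionUtils.newInstance(cls, conf);
        }

        /** Delegates to the user-supplied partitioner. */
        public int partition(Object key, Object val, int parts) {
            return part.getPartition(key, val, parts);
        }
    }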
*/ - public GridHadoopV2ReduceTask(GridHadoopTaskInfo taskInfo, boolean reduce) { + public HadoopV2ReduceTask(HadoopTaskInfo taskInfo, boolean reduce) { super(taskInfo); this.reduce = reduce; @@ -47,7 +47,7 @@ public class GridHadoopV2ReduceTask extends GridHadoopV2Task { /** {@inheritDoc} */ @SuppressWarnings({"ConstantConditions", "unchecked"}) - @Override public void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException { + @Override public void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { OutputFormat outputFormat = null; Exception err = null; diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2SetupTask.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java similarity index 89% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2SetupTask.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java index 54eda25..81587c1 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2SetupTask.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2SetupTask.java @@ -29,19 +29,19 @@ import java.io.*; /** * Hadoop setup task (prepares job). */ -public class GridHadoopV2SetupTask extends GridHadoopV2Task { +public class HadoopV2SetupTask extends HadoopV2Task { /** * Constructor. * * @param taskInfo task info. */ - public GridHadoopV2SetupTask(GridHadoopTaskInfo taskInfo) { + public HadoopV2SetupTask(HadoopTaskInfo taskInfo) { super(taskInfo); } /** {@inheritDoc} */ @SuppressWarnings("ConstantConditions") - @Override protected void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException { + @Override protected void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException { try { JobContextImpl jobCtx = taskCtx.jobContext(); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Splitter.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java similarity index 81% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Splitter.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java index e8ce70b..76a3329 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Splitter.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Splitter.java @@ -31,7 +31,7 @@ import java.util.*; /** * Hadoop API v2 splitter. */ -public class GridHadoopV2Splitter { +public class HadoopV2Splitter { /** */ private static final String[] EMPTY_HOSTS = {}; @@ -40,7 +40,7 @@ public class GridHadoopV2Splitter { * @return Collection of mapped splits. * @throws IgniteCheckedException If mapping failed. 
*/ - public static Collection splitJob(JobContext ctx) throws IgniteCheckedException { + public static Collection splitJob(JobContext ctx) throws IgniteCheckedException { try { InputFormat format = ReflectionUtils.newInstance(ctx.getInputFormatClass(), ctx.getConfiguration()); @@ -48,7 +48,7 @@ public class GridHadoopV2Splitter { List splits = format.getSplits(ctx); - Collection res = new ArrayList<>(splits.size()); + Collection res = new ArrayList<>(splits.size()); int id = 0; @@ -56,10 +56,10 @@ public class GridHadoopV2Splitter { if (nativeSplit instanceof FileSplit) { FileSplit s = (FileSplit)nativeSplit; - res.add(new GridHadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength())); + res.add(new HadoopFileBlock(s.getLocations(), s.getPath().toUri(), s.getStart(), s.getLength())); } else - res.add(GridHadoopUtils.wrapSplit(id, nativeSplit, nativeSplit.getLocations())); + res.add(HadoopUtils.wrapSplit(id, nativeSplit, nativeSplit.getLocations())); id++; } @@ -83,7 +83,7 @@ public class GridHadoopV2Splitter { * @return File block or {@code null} if it is not a {@link FileSplit} instance. * @throws IgniteCheckedException If failed. */ - public static GridHadoopFileBlock readFileBlock(String clsName, DataInput in, @Nullable String[] hosts) + public static HadoopFileBlock readFileBlock(String clsName, DataInput in, @Nullable String[] hosts) throws IgniteCheckedException { if (!FileSplit.class.getName().equals(clsName)) return null; @@ -100,6 +100,6 @@ public class GridHadoopV2Splitter { if (hosts == null) hosts = EMPTY_HOSTS; - return new GridHadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength()); + return new HadoopFileBlock(hosts, split.getPath().toUri(), split.getStart(), split.getLength()); } } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Task.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java similarity index 90% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Task.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java index 37697c6..5ade3fb 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2Task.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2Task.java @@ -28,24 +28,24 @@ import java.io.*; /** * Extended Hadoop v2 task. */ -public abstract class GridHadoopV2Task extends GridHadoopTask { +public abstract class HadoopV2Task extends HadoopTask { /** Hadoop context. */ - private GridHadoopV2Context hadoopCtx; + private HadoopV2Context hadoopCtx; /** * Constructor. * * @param taskInfo Task info. */ - protected GridHadoopV2Task(GridHadoopTaskInfo taskInfo) { + protected HadoopV2Task(HadoopTaskInfo taskInfo) { super(taskInfo); } /** {@inheritDoc} */ - @Override public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException { - GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext)taskCtx; + @Override public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException { + HadoopV2TaskContext ctx = (HadoopV2TaskContext)taskCtx; - hadoopCtx = new GridHadoopV2Context(ctx); + hadoopCtx = new HadoopV2Context(ctx); run0(ctx); } @@ -56,12 +56,12 @@ public abstract class GridHadoopV2Task extends GridHadoopTask { * @param taskCtx Task context. 
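Stepping back, HadoopV2Splitter.splitJob() above shows the core planning move: ask the job's InputFormat for its splits, then convert each FileSplit into a plain (hosts, file, start, length) block that Ignite can route to data-local nodes, wrapping anything else opaquely. A condensed sketch of that loop, with Block standing in for HadoopFileBlock and plain Object for the wrapper type:

    import java.net.URI;
    import java.util.*;

    import org.apache.hadoop.mapreduce.*;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.apache.hadoop.util.ReflectionUtils;

    /** Sketch: map Hadoop v2 input splits to lightweight location-aware blocks. */
    public class SplitMappingSketch {
        /** Stand-in for HadoopFileBlock: just the routing facts. */
        public static class Block {
            final String[] hosts; final URI file; final long start; final long len;

            Block(String[] hosts, URI file, long start, long len) {
                this.hosts = hosts; this.file = file; this.start = start; this.len = len;
            }
        }

        public static Collection<Object> mapSplits(JobContext ctx) throws Exception {
            InputFormat<?, ?> format = ReflectionUtils.newInstance(ctx.getInputFormatClass(), ctx.getConfiguration());

            Collection<Object> res = new ArrayList<>();

            for (InputSplit s : format.getSplits(ctx)) {
                if (s instanceof FileSplit) {
                    FileSplit f = (FileSplit)s;

                    res.add(new Block(f.getLocations(), f.getPath().toUri(), f.getStart(), f.getLength()));
                }
                else
                    res.add(s); // The real code ships these via a serializable HadoopSplitWrapper.
            }

            return res;
        }
    }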
* @throws IgniteCheckedException If failed. */ - protected abstract void run0(GridHadoopV2TaskContext taskCtx) throws IgniteCheckedException; + protected abstract void run0(HadoopV2TaskContext taskCtx) throws IgniteCheckedException; /** * @return Hadoop context. */ - protected GridHadoopV2Context hadoopContext() { + protected HadoopV2Context hadoopContext() { return hadoopCtx; } diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2TaskContext.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java similarity index 79% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2TaskContext.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java index 41bd24a..24f10a6 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopV2TaskContext.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopV2TaskContext.java @@ -31,6 +31,7 @@ import org.apache.hadoop.mapreduce.*; import org.apache.ignite.*; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.processors.hadoop.fs.*; import org.apache.ignite.internal.processors.hadoop.v1.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -39,13 +40,13 @@ import org.jetbrains.annotations.*; import java.io.*; import java.util.*; -import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Context for task execution. */ -public class GridHadoopV2TaskContext extends GridHadoopTaskContext { +public class HadoopV2TaskContext extends HadoopTaskContext { /** */ private static final boolean COMBINE_KEY_GROUPING_SUPPORTED; @@ -83,13 +84,13 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { private volatile boolean cancelled; /** Current task. */ - private volatile GridHadoopTask task; + private volatile HadoopTask task; /** Local node ID. */ private UUID locNodeId; /** Counters for task. */ - private final GridHadoopCounters cntrs = new GridHadoopCountersImpl(); + private final HadoopCounters cntrs = new HadoopCountersImpl(); /** * @param taskInfo Task info. @@ -98,7 +99,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * @param locNodeId Local node ID. * @param jobConfDataInput DataInput to read the JobConf from.
*/ - public GridHadoopV2TaskContext(GridHadoopTaskInfo taskInfo, GridHadoopJob job, GridHadoopJobId jobId, + public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJob job, HadoopJobId jobId, @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException { super(taskInfo, job); this.locNodeId = locNodeId; @@ -131,12 +132,12 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { } /** {@inheritDoc} */ - @Override public T counter(String grp, String name, Class cls) { + @Override public T counter(String grp, String name, Class cls) { return cntrs.counter(grp, name, cls); } /** {@inheritDoc} */ - @Override public GridHadoopCounters counters() { + @Override public HadoopCounters counters() { return cntrs; } @@ -145,28 +146,28 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * * @return Task. */ - private GridHadoopTask createTask() { - boolean isAbort = taskInfo().type() == GridHadoopTaskType.ABORT; + private HadoopTask createTask() { + boolean isAbort = taskInfo().type() == HadoopTaskType.ABORT; switch (taskInfo().type()) { case SETUP: - return useNewMapper ? new GridHadoopV2SetupTask(taskInfo()) : new GridHadoopV1SetupTask(taskInfo()); + return useNewMapper ? new HadoopV2SetupTask(taskInfo()) : new HadoopV1SetupTask(taskInfo()); case MAP: - return useNewMapper ? new GridHadoopV2MapTask(taskInfo()) : new GridHadoopV1MapTask(taskInfo()); + return useNewMapper ? new HadoopV2MapTask(taskInfo()) : new HadoopV1MapTask(taskInfo()); case REDUCE: - return useNewReducer ? new GridHadoopV2ReduceTask(taskInfo(), true) : - new GridHadoopV1ReduceTask(taskInfo(), true); + return useNewReducer ? new HadoopV2ReduceTask(taskInfo(), true) : + new HadoopV1ReduceTask(taskInfo(), true); case COMBINE: - return useNewCombiner ? new GridHadoopV2ReduceTask(taskInfo(), false) : - new GridHadoopV1ReduceTask(taskInfo(), false); + return useNewCombiner ? new HadoopV2ReduceTask(taskInfo(), false) : + new HadoopV1ReduceTask(taskInfo(), false); case COMMIT: case ABORT: - return useNewReducer ? new GridHadoopV2CleanupTask(taskInfo(), isAbort) : - new GridHadoopV1CleanupTask(taskInfo(), isAbort); + return useNewReducer ? new HadoopV2CleanupTask(taskInfo(), isAbort) : + new HadoopV1CleanupTask(taskInfo(), isAbort); default: return null; @@ -186,7 +187,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { } if (cancelled) - throw new GridHadoopTaskCancelledException("Task cancelled."); + throw new HadoopTaskCancelledException("Task cancelled."); try { task.run(this); @@ -206,7 +207,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { @Override public void cancel() { cancelled = true; - GridHadoopTask t = task; + HadoopTask t = task; if (t != null) t.cancel(); @@ -234,7 +235,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { try { FileSystem fs = FileSystem.get(jobConf()); - GridHadoopFileSystemsUtils.setUser(fs, jobConf().getUser()); + HadoopFileSystemsUtils.setUser(fs, jobConf().getUser()); LocalFileSystem locFs = FileSystem.getLocal(jobConf()); @@ -268,7 +269,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * @param type Task type. * @return Hadoop task type. 
*/ - private TaskType taskType(GridHadoopTaskType type) { + private TaskType taskType(HadoopTaskType type) { switch (type) { case SETUP: return TaskType.JOB_SETUP; @@ -307,14 +308,14 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { } /** {@inheritDoc} */ - @Override public GridHadoopPartitioner partitioner() throws IgniteCheckedException { + @Override public HadoopPartitioner partitioner() throws IgniteCheckedException { Class partClsOld = jobConf().getClass("mapred.partitioner.class", null); if (partClsOld != null) - return new GridHadoopV1Partitioner(jobConf().getPartitionerClass(), jobConf()); + return new HadoopV1Partitioner(jobConf().getPartitionerClass(), jobConf()); try { - return new GridHadoopV2Partitioner(jobCtx.getPartitionerClass(), jobConf()); + return new HadoopV2Partitioner(jobCtx.getPartitionerClass(), jobConf()); } catch (ClassNotFoundException e) { throw new IgniteCheckedException(e); @@ -329,7 +330,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * @return Appropriate serializer. */ @SuppressWarnings("unchecked") - private GridHadoopSerialization getSerialization(Class cls, Configuration jobConf) throws IgniteCheckedException { + private HadoopSerialization getSerialization(Class cls, Configuration jobConf) throws IgniteCheckedException { A.notNull(cls, "cls"); SerializationFactory factory = new SerializationFactory(jobConf); @@ -340,18 +341,18 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { throw new IgniteCheckedException("Failed to find serialization for: " + cls.getName()); if (serialization.getClass() == WritableSerialization.class) - return new GridHadoopWritableSerialization((Class)cls); + return new HadoopWritableSerialization((Class)cls); - return new GridHadoopSerializationWrapper(serialization, cls); + return new HadoopSerializationWrapper(serialization, cls); } /** {@inheritDoc} */ - @Override public GridHadoopSerialization keySerialization() throws IgniteCheckedException { + @Override public HadoopSerialization keySerialization() throws IgniteCheckedException { return getSerialization(jobCtx.getMapOutputKeyClass(), jobConf()); } /** {@inheritDoc} */ - @Override public GridHadoopSerialization valueSerialization() throws IgniteCheckedException { + @Override public HadoopSerialization valueSerialization() throws IgniteCheckedException { return getSerialization(jobCtx.getMapOutputValueClass(), jobConf()); } @@ -392,12 +393,12 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * @throws IgniteCheckedException if failed. */ @SuppressWarnings("unchecked") - public Object getNativeSplit(GridHadoopInputSplit split) throws IgniteCheckedException { - if (split instanceof GridHadoopExternalSplit) - return readExternalSplit((GridHadoopExternalSplit)split); + public Object getNativeSplit(HadoopInputSplit split) throws IgniteCheckedException { + if (split instanceof HadoopExternalSplit) + return readExternalSplit((HadoopExternalSplit)split); - if (split instanceof GridHadoopSplitWrapper) - return unwrapSplit((GridHadoopSplitWrapper)split); + if (split instanceof HadoopSplitWrapper) + return unwrapSplit((HadoopSplitWrapper)split); throw new IllegalStateException("Unknown split: " + split); } @@ -408,7 +409,7 @@ public class GridHadoopV2TaskContext extends GridHadoopTaskContext { * @throws IgniteCheckedException If failed. 
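The getSerialization() helper above captures how Ignite defers to Hadoop's own plugin mechanism: a SerializationFactory resolves the codec for each key or value class, and the common Writable case is swapped for an optimized Ignite implementation. A minimal sketch of the factory lookup (the optimized-wrapper branch is reduced to a comment, since its class lives in Ignite internals):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.serializer.Serialization;
    import org.apache.hadoop.io.serializer.SerializationFactory;

    /** Sketch: resolve a Hadoop serialization for a class via SerializationFactory. */
    public class SerializationPickSketch {
        @SuppressWarnings({"unchecked", "rawtypes"})
        public static Serialization<?> pick(Class<?> cls, Configuration conf) {
            SerializationFactory factory = new SerializationFactory(conf);

            Serialization<?> ser = factory.getSerialization((Class)cls);

            if (ser == null)
                throw new IllegalStateException("Failed to find serialization for: " + cls.getName());

            // The real code branches here: WritableSerialization is replaced by
            // HadoopWritableSerialization; everything else is wrapped generically.
            return ser;
        }
    }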
*/ @SuppressWarnings("unchecked") - private Object readExternalSplit(GridHadoopExternalSplit split) throws IgniteCheckedException { + private Object readExternalSplit(HadoopExternalSplit split) throws IgniteCheckedException { Path jobDir = new Path(jobConf().get(MRJobConfig.MAPREDUCE_JOB_DIR)); try (FileSystem fs = FileSystem.get(jobDir.toUri(), jobConf()); diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopWritableSerialization.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java similarity index 93% rename from modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopWritableSerialization.java rename to modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java index 4361ad4..3920dd5 100644 --- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/GridHadoopWritableSerialization.java +++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/v2/HadoopWritableSerialization.java @@ -28,14 +28,14 @@ import java.io.*; /** * Optimized serialization for Hadoop {@link Writable} types. */ -public class GridHadoopWritableSerialization implements GridHadoopSerialization { +public class HadoopWritableSerialization implements HadoopSerialization { /** */ private final Class cls; /** * @param cls Class. */ - public GridHadoopWritableSerialization(Class cls) { + public HadoopWritableSerialization(Class cls) { assert cls != null; this.cls = cls; diff --git a/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider index fe35d5e..8d5957b 100644 --- a/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider +++ b/modules/hadoop/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider @@ -1 +1 @@ -org.apache.ignite.client.hadoop.GridHadoopClientProtocolProvider +org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider diff --git a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolEmbeddedSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java similarity index 76% rename from modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolEmbeddedSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java index 780ce67..ffa20d1 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolEmbeddedSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolEmbeddedSelfTest.java @@ -17,15 +17,15 @@ package org.apache.ignite.client.hadoop; -import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.configuration.*; /** * Hadoop client protocol tests in embedded process mode. 
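The META-INF/services entry renamed above is what makes the integration drop-in: Hadoop's Cluster class discovers ClientProtocolProvider implementations through the JDK ServiceLoader, so once the Ignite provider is on the client classpath a vanilla MapReduce job can be routed to an Ignite node purely through configuration. A hedged sketch (the framework name and address are illustrative; the tests below use HadoopClientProtocol.FRAMEWORK_NAME and a REST port):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRConfig;

    /** Sketch: submit a standard MapReduce job through the Ignite client protocol. */
    public class IgniteFrameworkClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Select the Ignite provider instead of "local" or "yarn".
            conf.set(MRConfig.FRAMEWORK_NAME, "ignite");

            // Address of an Ignite node accepting submissions (illustrative).
            conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:11211");

            // Cluster.initialize() walks the ServiceLoader-registered providers.
            Job job = Job.getInstance(conf);

            // Configure mapper/reducer and call job.waitForCompletion(true) as usual.
        }
    }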
*/ -public class GridHadoopClientProtocolEmbeddedSelfTest extends GridHadoopClientProtocolSelfTest { +public class HadoopClientProtocolEmbeddedSelfTest extends HadoopClientProtocolSelfTest { /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(false); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java index ff8798b..d19a8ea 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/GridHadoopClientProtocolSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/client/hadoop/HadoopClientProtocolSelfTest.java @@ -25,8 +25,10 @@ import org.apache.hadoop.mapreduce.lib.input.*; import org.apache.hadoop.mapreduce.lib.output.*; import org.apache.hadoop.mapreduce.protocol.*; import org.apache.ignite.*; +import org.apache.ignite.hadoop.mapreduce.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.proto.*; import org.apache.ignite.internal.util.lang.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -39,7 +41,7 @@ import java.util.*; * Hadoop client protocol tests in external process mode. */ @SuppressWarnings("ResultOfMethodCallIgnored") -public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopClientProtocolSelfTest extends HadoopAbstractSelfTest { /** Input path. */ private static final String PATH_INPUT = "/input"; @@ -93,7 +95,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest super.afterTestsStopped(); -// GridHadoopClientProtocolProvider.cliMap.clear(); +// IgniteHadoopClientProtocolProvider.cliMap.clear(); } /** {@inheritDoc} */ @@ -111,7 +113,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName).format(); + grid(0).fileSystem(HadoopAbstractSelfTest.igfsName).format(); setupLockFile.delete(); mapLockFile.delete(); @@ -127,9 +129,9 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest */ @SuppressWarnings("ConstantConditions") private void tstNextJobId() throws Exception { - GridHadoopClientProtocolProvider provider = provider(); + IgniteHadoopClientProtocolProvider provider = provider(); - ClientProtocol proto = provider.create(config(GridHadoopAbstractSelfTest.REST_PORT)); + ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT)); JobID jobId = proto.getNewJobID(); @@ -150,7 +152,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. 
*/ public void testJobCounters() throws Exception { - IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName); + IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName); igfs.mkdirs(new IgfsPath(PATH_INPUT)); @@ -170,7 +172,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest ); } - Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT); + Configuration conf = config(HadoopAbstractSelfTest.REST_PORT); final Job job = Job.getInstance(conf); @@ -219,9 +221,9 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. */ private void tstUnknownJobCounters() throws Exception { - GridHadoopClientProtocolProvider provider = provider(); + IgniteHadoopClientProtocolProvider provider = provider(); - ClientProtocol proto = provider.create(config(GridHadoopAbstractSelfTest.REST_PORT)); + ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT)); try { proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1)); @@ -268,7 +270,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. */ public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception { - IgniteFs igfs = grid(0).fileSystem(GridHadoopAbstractSelfTest.igfsName); + IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName); igfs.mkdirs(new IgfsPath(PATH_INPUT)); @@ -278,7 +280,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest bw.write("word"); } - Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT); + Configuration conf = config(HadoopAbstractSelfTest.REST_PORT); final Job job = Job.getInstance(conf); @@ -400,7 +402,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. */ @SuppressWarnings("ConstantConditions") - private static void dumpIgfs(IgniteFs igfs, IgfsPath path) throws Exception { + private static void dumpIgfs(IgniteFileSystem igfs, IgfsPath path) throws Exception { IgfsFile file = igfs.info(path); assert file != null; @@ -451,7 +453,7 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest setupFileSystems(conf); - conf.set(MRConfig.FRAMEWORK_NAME, GridHadoopClientProtocol.FRAMEWORK_NAME); + conf.set(MRConfig.FRAMEWORK_NAME, HadoopClientProtocol.FRAMEWORK_NAME); conf.set(MRConfig.MASTER_ADDRESS, "127.0.0.1:" + port); conf.set("fs.defaultFS", "igfs://:" + getTestGridName(0) + "@/"); @@ -462,8 +464,8 @@ public class GridHadoopClientProtocolSelfTest extends GridHadoopAbstractSelfTest /** * @return Protocol provider. 
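Throughout these tests the IgniteFs facade becomes IgniteFileSystem, but the file API itself is unchanged: look the file system up by name, create directories, and stream data in. A small usage sketch along the lines of the test setup above (path and content are illustrative):

    import java.io.OutputStream;

    import org.apache.ignite.Ignite;
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.igfs.IgfsPath;

    /** Sketch: basic IGFS usage after the IgniteFs -> IgniteFileSystem rename. */
    public class IgfsUsageSketch {
        public static void populate(Ignite ignite) throws Exception {
            IgniteFileSystem igfs = ignite.fileSystem("igfs");

            igfs.mkdirs(new IgfsPath("/input"));

            // create(path, overwrite) returns a regular OutputStream.
            try (OutputStream os = igfs.create(new IgfsPath("/input/test.file"), true)) {
                os.write("word".getBytes());
            }
        }
    }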
*/ - private GridHadoopClientProtocolProvider provider() { - return new GridHadoopClientProtocolProvider(); + private IgniteHadoopClientProtocolProvider provider() { + return new IgniteHadoopClientProtocolProvider(); } /** diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java similarity index 98% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java index 9f9a6d8..d907a6c 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemAbstractSelfTest.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.permission.*; import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; +import org.apache.ignite.hadoop.fs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.typedef.*; @@ -53,7 +53,7 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * Hadoop 2.x compliant file system. */ -public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonAbstractTest { +public abstract class HadoopIgfs20FileSystemAbstractSelfTest extends IgfsCommonAbstractTest { /** Group size. */ public static final int GRP_SIZE = 128; @@ -83,7 +83,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA * * @param mode Default IGFS mode. */ - protected IgfsHadoop20FileSystemAbstractSelfTest(IgfsMode mode) { + protected HadoopIgfs20FileSystemAbstractSelfTest(IgfsMode mode) { this.mode = mode; } @@ -143,7 +143,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA private void startNodes() throws Exception { if (mode != PRIMARY) { // Start secondary IGFS. - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -182,7 +182,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setLocalHost(U.getLocalHost().getHostAddress()); cfg.setCommunicationSpi(communicationSpi()); @@ -208,7 +208,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(cacheConfiguration(gridName)); - cfg.setIgfsConfiguration(igfsConfiguration(gridName)); + cfg.setFileSystemConfiguration(igfsConfiguration(gridName)); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setLocalHost("127.0.0.1"); cfg.setCommunicationSpi(communicationSpi()); @@ -251,8 +251,8 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA * @param gridName Grid name. * @return IGFS configuration. 
*/ - protected IgfsConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { - IgfsConfiguration cfg = new IgfsConfiguration(); + protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { + FileSystemConfiguration cfg = new FileSystemConfiguration(); cfg.setDataCacheName("partitioned"); cfg.setMetaCacheName("replicated"); @@ -262,7 +262,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA cfg.setDefaultMode(mode); if (mode != PRIMARY) - cfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper(secondaryFileSystemUriPath(), + cfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(secondaryFileSystemUriPath(), secondaryFileSystemConfigPath())); cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName)); @@ -327,7 +327,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA long used = 0, max = 0; for (int i = 0; i < 4; i++) { - IgniteFs igfs = grid(i).fileSystem("igfs"); + IgniteFileSystem igfs = grid(i).fileSystem("igfs"); IgfsMetrics metrics = igfs.metrics(); @@ -466,7 +466,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())); - final int cnt = 5 * IgfsConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks. + final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks. for (int i = 0; i < cnt; i++) os.writeInt(i); @@ -1298,7 +1298,7 @@ public abstract class IgfsHadoop20FileSystemAbstractSelfTest extends IgfsCommonA out.write(new byte[1024 * 1024]); } - IgniteFs igfs = grid(0).fileSystem("igfs"); + IgniteFileSystem igfs = grid(0).fileSystem("igfs"); IgfsPath filePath = new IgfsPath("/someFile"); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemLoopbackPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java similarity index 92% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemLoopbackPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java index 47e1c86..2be65fd 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemLoopbackPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemLoopbackPrimarySelfTest.java @@ -25,11 +25,11 @@ import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEnd /** * Tests Hadoop 2.x file system in primary mode. */ -public class IgfsHadoop20FileSystemLoopbackPrimarySelfTest extends IgfsHadoop20FileSystemAbstractSelfTest { +public class HadoopIgfs20FileSystemLoopbackPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest { /** * Creates test in primary mode. 
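The recurring pattern in these test changes is the configuration rename: IgfsConfiguration becomes FileSystemConfiguration, setIgfsConfiguration() becomes setFileSystemConfiguration(), and the secondary file system is now wired through IgniteHadoopIgfsSecondaryFileSystem. Pulled together in one place (cache names, URI, and config path mirror the test values above; treat them as placeholders):

    import org.apache.ignite.configuration.FileSystemConfiguration;
    import org.apache.ignite.configuration.IgniteConfiguration;
    import org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem;

    /** Sketch: post-rename IGFS configuration with a secondary file system. */
    public class FsConfigSketch {
        public static IgniteConfiguration config() throws Exception {
            FileSystemConfiguration fsCfg = new FileSystemConfiguration();

            fsCfg.setName("igfs");
            fsCfg.setDataCacheName("partitioned");
            fsCfg.setMetaCacheName("replicated");

            // DUAL modes pass reads/writes through to the secondary Hadoop file system.
            fsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
                "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/",
                "modules/core/src/test/config/hadoop/core-site-secondary.xml"));

            IgniteConfiguration cfg = new IgniteConfiguration();

            cfg.setFileSystemConfiguration(fsCfg);

            return cfg;
        }
    }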
*/ - public IgfsHadoop20FileSystemLoopbackPrimarySelfTest() { + public HadoopIgfs20FileSystemLoopbackPrimarySelfTest() { super(PRIMARY); } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemShmemPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemShmemPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java index 631f188..93f2d4a 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoop20FileSystemShmemPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfs20FileSystemShmemPrimarySelfTest.java @@ -25,11 +25,11 @@ import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEnd /** * Tests Hadoop 2.x file system in primary mode. */ -public class IgfsHadoop20FileSystemShmemPrimarySelfTest extends IgfsHadoop20FileSystemAbstractSelfTest { +public class HadoopIgfs20FileSystemShmemPrimarySelfTest extends HadoopIgfs20FileSystemAbstractSelfTest { /** * Creates test in primary mode. */ - public IgfsHadoop20FileSystemShmemPrimarySelfTest() { + public HadoopIgfs20FileSystemShmemPrimarySelfTest() { super(PRIMARY); } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java index a54e264..e89d015 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAbstractSelfTest.java @@ -23,7 +23,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; +import org.apache.ignite.hadoop.fs.*; +import org.apache.ignite.igfs.secondary.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -40,13 +41,13 @@ import java.util.concurrent.*; import static org.apache.ignite.cache.CacheAtomicityMode.*; import static org.apache.ignite.cache.CacheMode.*; import static org.apache.ignite.igfs.IgfsMode.*; -import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; +import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.*; import static org.apache.ignite.internal.processors.igfs.IgfsAbstractSelfTest.*; /** * Tests for IGFS working in mode when remote file system exists: DUAL_SYNC, DUAL_ASYNC. */ -public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractTest { +public abstract class HadoopIgfsDualAbstractSelfTest extends IgfsCommonAbstractTest { /** IGFS block size. */ protected static final int IGFS_BLOCK_SIZE = 512 * 1024; @@ -106,7 +107,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT * * @param mode IGFS mode. 
*/ - protected IgfsHadoopDualAbstractSelfTest(IgfsMode mode) { + protected HadoopIgfsDualAbstractSelfTest(IgfsMode mode) { this.mode = mode; assert mode == DUAL_SYNC || mode == DUAL_ASYNC; } @@ -123,8 +124,8 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT * @throws Exception If failed. */ protected Ignite startGridWithIgfs(String gridName, String igfsName, IgfsMode mode, - @Nullable Igfs secondaryFs, @Nullable Map restCfg) throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + @Nullable IgfsSecondaryFileSystem secondaryFs, @Nullable Map restCfg) throws Exception { + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("dataCache"); igfsCfg.setMetaCacheName("metaCache"); @@ -166,7 +167,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(dataCacheCfg, metaCacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -183,7 +184,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT Ignite igniteSecondary = startGridWithIgfs("grid-secondary", "igfs-secondary", PRIMARY, null, SECONDARY_REST_CFG); - Igfs hadoopFs = new IgfsHadoopFileSystemWrapper(SECONDARY_URI, SECONDARY_CFG); + IgfsSecondaryFileSystem hadoopFs = new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG); Ignite ignite = startGridWithIgfs("grid", "igfs", mode, hadoopFs, PRIMARY_REST_CFG); @@ -235,7 +236,7 @@ public abstract class IgfsHadoopDualAbstractSelfTest extends IgfsCommonAbstractT out.close(); - awaitFileClose(igfsSecondary, FILE); + awaitFileClose(igfsSecondary.asSecondary(), FILE); // Instantiate file system with overridden "seq reads before prefetch" property. Configuration cfg = new Configuration(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java similarity index 90% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAsyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java index c99b3c8..c518b9e 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualAsyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualAsyncSelfTest.java @@ -22,11 +22,11 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * Tests for DUAL_ASYNC mode. */ -public class IgfsHadoopDualAsyncSelfTest extends IgfsHadoopDualAbstractSelfTest { +public class HadoopIgfsDualAsyncSelfTest extends HadoopIgfsDualAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopDualAsyncSelfTest() { + public HadoopIgfsDualAsyncSelfTest() { super(DUAL_ASYNC); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java similarity index 90% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualSyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java index ffcd092..6739535 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopDualSyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopIgfsDualSyncSelfTest.java @@ -22,11 +22,11 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * Tests for DUAL_SYNC mode. */ -public class IgfsHadoopDualSyncSelfTest extends IgfsHadoopDualAbstractSelfTest { +public class HadoopIgfsDualSyncSelfTest extends HadoopIgfsDualAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopDualSyncSelfTest() { + public HadoopIgfsDualSyncSelfTest() { super(DUAL_SYNC); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java index 4ad74d0..2994107 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/HadoopSecondaryFileSystemConfigurationTest.java @@ -2,14 +2,15 @@ package org.apache.ignite.igfs; import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.permission.*; import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; -import org.apache.ignite.igfs.hadoop.v1.*; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.hadoop.fs.*; +import org.apache.ignite.hadoop.fs.v1.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -221,7 +222,7 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra * Starts secondary IGFS */ private void startSecondary() { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -259,7 +260,7 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -295,7 +296,7 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(cacheConfiguration()); - cfg.setIgfsConfiguration(igfsConfiguration(gridName)); + cfg.setFileSystemConfiguration(fsConfiguration(gridName)); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -336,8 +337,8 @@ public 
class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra * @param gridName Grid name. * @return IGFS configuration. */ - protected IgfsConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { - IgfsConfiguration cfg = new IgfsConfiguration(); + protected FileSystemConfiguration fsConfiguration(String gridName) throws IgniteCheckedException { + FileSystemConfiguration cfg = new FileSystemConfiguration(); cfg.setDataCacheName("partitioned"); cfg.setMetaCacheName("replicated"); @@ -347,7 +348,7 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra if (mode != PRIMARY) cfg.setSecondaryFileSystem( - new IgfsHadoopFileSystemWrapper(secondaryFsUriStr, secondaryConfFullPath)); + new IgniteHadoopIgfsSecondaryFileSystem(secondaryFsUriStr, secondaryConfFullPath)); cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName)); @@ -462,10 +463,10 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra if (authority != null) { if (skipEmbed) - cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); + cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); if (skipLocShmem) - cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); + cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); } return cfg; @@ -477,10 +478,10 @@ public class HadoopSecondaryFileSystemConfigurationTest extends IgfsCommonAbstra * @param cfg the configuration to set parameters into. */ private static void setImplClasses(Configuration cfg) { - cfg.set("fs.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.class.getName()); + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName()); } /** diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java index 29696bf..f6f5bae 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsEventsTestSuite.java @@ -20,7 +20,7 @@ package org.apache.ignite.igfs; import junit.framework.*; import org.apache.ignite.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; +import org.apache.ignite.hadoop.fs.*; import org.apache.ignite.internal.processors.hadoop.*; import org.apache.ignite.internal.util.ipc.shmem.*; import org.apache.ignite.internal.util.typedef.*; @@ -40,7 +40,7 @@ public class IgfsEventsTestSuite extends TestSuite { * @throws Exception Thrown in case of the failure. */ public static TestSuite suite() throws Exception { - GridHadoopClassLoader ldr = new GridHadoopClassLoader(null); + HadoopClassLoader ldr = new HadoopClassLoader(null); TestSuite suite = new TestSuite("Ignite FS Events Test Suite"); @@ -60,7 +60,7 @@ public class IgfsEventsTestSuite extends TestSuite { * @throws Exception Thrown in case of the failure. 
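The setImplClasses() helper above is the client-side half of the package move: a Hadoop Configuration (or core-site.xml) must now point the igfs scheme at the relocated driver classes. The equivalent explicit settings:

    import org.apache.hadoop.conf.Configuration;

    /** Sketch: register the relocated IGFS drivers with Hadoop. */
    public class IgfsSchemeSketch {
        public static Configuration igfsEnabled() {
            Configuration cfg = new Configuration();

            // Hadoop 1.x FileSystem API driver.
            cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

            // Hadoop 2.x AbstractFileSystem API driver.
            cfg.set("fs.AbstractFileSystem.igfs.impl", "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

            return cfg;
        }
    }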
*/ public static TestSuite suiteNoarchOnly() throws Exception { - GridHadoopClassLoader ldr = new GridHadoopClassLoader(null); + HadoopClassLoader ldr = new HadoopClassLoader(null); TestSuite suite = new TestSuite("Ignite IGFS Events Test Suite Noarch Only"); @@ -76,8 +76,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class ShmemPrivate extends IgfsEventsAbstractSelfTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setIpcEndpointConfiguration(new HashMap() {{ put("type", "shmem"); @@ -93,8 +93,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class LoopbackPrivate extends IgfsEventsAbstractSelfTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setIpcEndpointConfiguration(new HashMap() {{ put("type", "tcp"); @@ -110,13 +110,13 @@ public class IgfsEventsTestSuite extends TestSuite { */ public abstract static class PrimarySecondaryTest extends IgfsEventsAbstractSelfTest { /** Secondary file system. */ - private static IgniteFs igfsSec; + private static IgniteFileSystem igfsSec; /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); - igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem( "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-secondary.xml")); @@ -126,8 +126,8 @@ public class IgfsEventsTestSuite extends TestSuite { /** * @return IGFS configuration for secondary file system. */ - protected IgfsConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setName("igfs-secondary"); igfsCfg.setDefaultMode(PRIMARY); @@ -167,7 +167,7 @@ public class IgfsEventsTestSuite extends TestSuite { * @return Secondary file system handle. * @throws Exception If failed. 
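The endpoint blocks above show the two IPC transports the suite exercises: shared memory for same-host clients and TCP loopback otherwise, both supplied as a plain property map. A condensed sketch (the port value is illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.ignite.configuration.FileSystemConfiguration;

    /** Sketch: choose the IGFS IPC transport via the endpoint property map. */
    public class IpcEndpointSketch {
        public static void configure(FileSystemConfiguration fsCfg, boolean sameHost) {
            Map<String, String> endpointCfg = new HashMap<>();

            endpointCfg.put("type", sameHost ? "shmem" : "tcp");
            endpointCfg.put("port", "10500"); // Illustrative port.

            fsCfg.setIpcEndpointConfiguration(endpointCfg);
        }
    }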
*/ - @Nullable private IgniteFs startSecondary() throws Exception { + @Nullable private IgniteFileSystem startSecondary() throws Exception { IgniteConfiguration cfg = getConfiguration("grid-secondary", getSecondaryIgfsConfiguration()); cfg.setLocalHost("127.0.0.1"); @@ -184,8 +184,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class ShmemDualSync extends PrimarySecondaryTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setDefaultMode(DUAL_SYNC); @@ -198,8 +198,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class ShmemDualAsync extends PrimarySecondaryTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setDefaultMode(DUAL_ASYNC); @@ -212,10 +212,10 @@ public class IgfsEventsTestSuite extends TestSuite { */ public abstract static class LoopbackPrimarySecondaryTest extends PrimarySecondaryTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); - igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem( "igfs://igfs-secondary:grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml")); @@ -223,8 +223,8 @@ public class IgfsEventsTestSuite extends TestSuite { } /** {@inheritDoc} */ - @Override protected IgfsConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getSecondaryIgfsConfiguration(); + @Override protected FileSystemConfiguration getSecondaryIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getSecondaryIgfsConfiguration(); igfsCfg.setName("igfs-secondary"); igfsCfg.setDefaultMode(PRIMARY); @@ -242,8 +242,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class LoopbackDualSync extends LoopbackPrimarySecondaryTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setDefaultMode(DUAL_SYNC); @@ -256,8 +256,8 @@ public class IgfsEventsTestSuite extends TestSuite { */ public static class LoopbackDualAsync extends LoopbackPrimarySecondaryTest { /** {@inheritDoc} */ - @Override protected IgfsConfiguration getIgfsConfiguration() throws IgniteCheckedException { - IgfsConfiguration igfsCfg = super.getIgfsConfiguration(); + @Override protected FileSystemConfiguration getIgfsConfiguration() throws 
IgniteCheckedException { + FileSystemConfiguration igfsCfg = super.getIgfsConfiguration(); igfsCfg.setDefaultMode(DUAL_ASYNC); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java index d27d93d..d128731 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsNearOnlyMultiNodeSelfTest.java @@ -75,7 +75,7 @@ public class IgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("partitioned"); @@ -88,7 +88,7 @@ public class IgfsNearOnlyMultiNodeSelfTest extends GridCommonAbstractTest { igfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setCacheConfiguration(cacheConfiguration(gridName)); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java similarity index 97% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java index 7359fdf..be25c61 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemAbstractSelfTest.java @@ -24,9 +24,9 @@ import org.apache.hadoop.fs.permission.*; import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; -import org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.hadoop.fs.*; +import org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.lang.*; @@ -58,7 +58,7 @@ import static org.apache.ignite.igfs.IgfsMode.*; * Test hadoop file system implementation. */ @SuppressWarnings("all") -public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbstractTest { +public abstract class IgniteHadoopFileSystemAbstractSelfTest extends IgfsCommonAbstractTest { /** Primary file system authority. */ private static final String PRIMARY_AUTHORITY = "igfs:grid0@"; @@ -135,7 +135,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs * @param skipLocShmem Whether to skip local shmem mode. * @param skipLocTcp Whether to skip local TCP mode. 
*/ - protected IgfsHadoopFileSystemAbstractSelfTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) { + protected IgniteHadoopFileSystemAbstractSelfTest(IgfsMode mode, boolean skipEmbed, boolean skipLocShmem) { this.mode = mode; this.skipEmbed = skipEmbed; this.skipLocShmem = skipLocShmem; @@ -173,7 +173,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs private void startNodes() throws Exception { if (mode != PRIMARY) { // Start secondary IGFS. - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -211,7 +211,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -277,7 +277,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(cacheConfiguration(gridName)); - cfg.setIgfsConfiguration(igfsConfiguration(gridName)); + cfg.setFileSystemConfiguration(igfsConfiguration(gridName)); cfg.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); cfg.setCommunicationSpi(communicationSpi()); @@ -319,8 +319,8 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs * @param gridName Grid name. * @return IGFS configuration. */ - protected IgfsConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { - IgfsConfiguration cfg = new IgfsConfiguration(); + protected FileSystemConfiguration igfsConfiguration(String gridName) throws IgniteCheckedException { + FileSystemConfiguration cfg = new FileSystemConfiguration(); cfg.setDataCacheName("partitioned"); cfg.setMetaCacheName("replicated"); @@ -329,7 +329,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs cfg.setDefaultMode(mode); if (mode != PRIMARY) - cfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper(SECONDARY_URI, SECONDARY_CFG_PATH)); + cfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(SECONDARY_URI, SECONDARY_CFG_PATH)); cfg.setIpcEndpointConfiguration(primaryIpcEndpointConfiguration(gridName)); @@ -352,9 +352,9 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs public void testGetUriIfFSIsNotInitialized() throws Exception { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { - return new IgfsHadoopFileSystem().getUri(); + return new IgniteHadoopFileSystem().getUri(); } - }, IllegalStateException.class, "URI is null (was IgfsHadoopFileSystem properly initialized?)."); + }, IllegalStateException.class, "URI is null (was IgniteHadoopFileSystem properly initialized?)."); } /** @throws Exception If failed. 
*/ @@ -362,7 +362,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs public void testInitializeCheckParametersNameIsNull() throws Exception { GridTestUtils.assertThrows(log, new Callable<Object>() { @Override public Object call() throws Exception { - new IgfsHadoopFileSystem().initialize(null, new Configuration()); + new IgniteHadoopFileSystem().initialize(null, new Configuration()); return null; } @@ -374,7 +374,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs public void testInitializeCheckParametersCfgIsNull() throws Exception { GridTestUtils.assertThrows(log, new Callable<Object>() { @Override public Object call() throws Exception { - new IgfsHadoopFileSystem().initialize(new URI(""), null); + new IgniteHadoopFileSystem().initialize(new URI(""), null); return null; } @@ -383,7 +383,7 @@ /** @throws Exception If failed. */ public void testInitialize() throws Exception { - final IgfsHadoopFileSystem fs = new IgfsHadoopFileSystem(); + final IgniteHadoopFileSystem fs = new IgniteHadoopFileSystem(); fs.initialize(primaryFsUri, primaryFsCfg); @@ -410,17 +410,17 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs * @throws Exception If failed. */ public void testIpcCache() throws Exception { - IgfsHadoopEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop"); + HadoopIgfsEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop"); - if (hadoop instanceof IgfsHadoopOutProc) { + if (hadoop instanceof HadoopIgfsOutProc) { FileSystem fsOther = null; try { - Field field = IgfsHadoopIpcIo.class.getDeclaredField("ipcCache"); + Field field = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache"); field.setAccessible(true); - Map<String, IgfsHadoopIpcIo> cache = (Map<String, IgfsHadoopIpcIo>)field.get(null); + Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)field.get(null); Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem); @@ -441,13 +441,13 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs assertEquals(initSize, cache.size()); - Field stopField = IgfsHadoopIpcIo.class.getDeclaredField("stopping"); + Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping"); stopField.setAccessible(true); - IgfsHadoopIpcIo io = null; + HadoopIgfsIpcIo io = null; - for (Map.Entry<String, IgfsHadoopIpcIo> ioEntry : cache.entrySet()) { + for (Map.Entry<String, HadoopIgfsIpcIo> ioEntry : cache.entrySet()) { if (endpoint.contains(ioEntry.getKey())) { io = ioEntry.getValue(); @@ -474,7 +474,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs /** @throws Exception If failed. */ public void testCloseIfNotInitialized() throws Exception { - final FileSystem fs = new IgfsHadoopFileSystem(); + final FileSystem fs = new IgniteHadoopFileSystem(); // Check close makes nothing harmful. fs.close(); @@ -649,7 +649,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs FSDataOutputStream os = fs.create(file, false, 128); - final int cnt = 5 * IgfsConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks. + final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks.
for (int i = 0; i < cnt; i++) os.writeInt(i); @@ -1537,7 +1537,7 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs out.write(new byte[1024 * 1024]); } - IgniteFs igfs = grid(0).fileSystem("igfs"); + IgniteFileSystem igfs = grid(0).fileSystem("igfs"); IgfsPath filePath = new IgfsPath("/someFile"); @@ -2349,17 +2349,17 @@ public abstract class IgfsHadoopFileSystemAbstractSelfTest extends IgfsCommonAbs Configuration cfg = new Configuration(); cfg.set("fs.defaultFS", "igfs://" + authority + "/"); - cfg.set("fs.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.set("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", - org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem.class.getName()); + org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.class.getName()); cfg.setBoolean("fs.igfs.impl.disable.cache", true); if (skipEmbed) - cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); + cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, authority), true); if (skipLocShmem) - cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); + cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority), true); return cfg; } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java similarity index 91% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java index 599fd1d..29dd996 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemClientSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemClientSelfTest.java @@ -21,7 +21,7 @@ import org.apache.commons.logging.*; import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.igfs.common.*; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -43,9 +43,9 @@ import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEnd /** * Test interaction between an IGFS client and an IGFS server. */ -public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { /** Logger.
*/ - private static final Log LOG = LogFactory.getLog(IgfsHadoopFileSystemClientSelfTest.class); + private static final Log LOG = LogFactory.getLog(IgniteHadoopFileSystemClientSelfTest.class); /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { @@ -66,7 +66,7 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -78,7 +78,7 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { }}); cfg.setCacheConfiguration(cacheConfiguration()); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } @@ -125,15 +125,15 @@ public class IgfsHadoopFileSystemClientSelfTest extends IgfsCommonAbstractTest { try { switchHandlerErrorFlag(true); - IgfsHadoop client = new IgfsHadoopOutProc("127.0.0.1", 10500, getTestGridName(0), "igfs", LOG); + HadoopIgfs client = new HadoopIgfsOutProc("127.0.0.1", 10500, getTestGridName(0), "igfs", LOG); client.handshake(null); IgfsPath path = new IgfsPath("/test1.file"); - IgfsHadoopStreamDelegate delegate = client.create(path, true, false, 1, 1024, null); + HadoopIgfsStreamDelegate delegate = client.create(path, true, false, 1, 1024, null); - final IgfsHadoopOutputStream igfsOut = new IgfsHadoopOutputStream(delegate, LOG, + final HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(delegate, LOG, IgfsLogger.disabledLogger(), 0); // This call should return fine as exception is thrown for the first time. diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java index 137db6d..3b4c5c2 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemHandshakeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemHandshakeSelfTest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.*; import org.apache.ignite.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem; +import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.spi.communication.tcp.*; @@ -41,13 +41,13 @@ import static org.apache.ignite.cache.CacheDistributionMode.*; import static org.apache.ignite.cache.CacheMode.*; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.*; import static org.apache.ignite.igfs.IgfsMode.*; -import static org.apache.ignite.internal.igfs.hadoop.IgfsHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsUtils.*; import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint.*; /** * Tests for IGFS file system handshake. 
*/ -public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTest { /** IP finder. */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -175,7 +175,7 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes private void startUp(boolean dfltGridName, boolean dfltIgfsName) throws Exception { Ignite ignite = G.start(gridConfiguration(dfltGridName, dfltIgfsName)); - IgniteFs igfs = ignite.fileSystem(dfltIgfsName ? null : IGFS_NAME); + IgniteFileSystem igfs = ignite.fileSystem(dfltIgfsName ? null : IGFS_NAME); igfs.mkdirs(PATH); } @@ -227,7 +227,7 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes cfg.setCacheConfiguration(metaCacheCfg, dataCacheCfg); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -242,7 +242,7 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes igfsCfg.setManagementPort(-1); igfsCfg.setBlockSize(512 * 1024); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); return cfg; } @@ -297,9 +297,9 @@ public class IgfsHadoopFileSystemHandshakeSelfTest extends IgfsCommonAbstractTes Configuration cfg = new Configuration(); cfg.set("fs.defaultFS", "igfs://" + authority + "/"); - cfg.set("fs.igfs.impl", org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.class.getName()); + cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName()); cfg.set("fs.AbstractFileSystem.igfs.impl", - IgfsHadoopFileSystem.class.getName()); + IgniteHadoopFileSystem.class.getName()); cfg.setBoolean("fs.igfs.impl.disable.cache", true); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java similarity index 89% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java index a6357f8..135a488 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemIpcCacheSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemIpcCacheSelfTest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.internal.igfs.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.igfs.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.ipc.shmem.*; import org.apache.ignite.internal.util.typedef.*; @@ -43,7 +43,7 @@ import static org.apache.ignite.events.EventType.*; /** * IPC cache test. */ -public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest { /** IP finder. 
*/ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -65,12 +65,12 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest cfg.setDiscoverySpi(discoSpi); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); igfsCfg.setName("igfs"); - igfsCfg.setManagementPort(IgfsConfiguration.DFLT_MGMT_PORT + cnt); + igfsCfg.setManagementPort(FileSystemConfiguration.DFLT_MGMT_PORT + cnt); igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {{ put("type", "shmem"); @@ -79,7 +79,7 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest igfsCfg.setBlockSize(512 * 1024); // Together with group blocks mapper will yield 64M per node groups. - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setCacheConfiguration(cacheConfiguration()); @@ -141,15 +141,15 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest */ @SuppressWarnings("unchecked") public void testIpcCache() throws Exception { - Field cacheField = IgfsHadoopIpcIo.class.getDeclaredField("ipcCache"); + Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache"); cacheField.setAccessible(true); - Field activeCntField = IgfsHadoopIpcIo.class.getDeclaredField("activeCnt"); + Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt"); activeCntField.setAccessible(true); - Map<String, IgfsHadoopIpcIo> cache = (Map<String, IgfsHadoopIpcIo>)cacheField.get(null); + Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>)cacheField.get(null); String name = "igfs:" + getTestGridName(0) + "@"; @@ -157,14 +157,14 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG)); cfg.setBoolean("fs.igfs.impl.disable.cache", true); - cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true); + cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true); // Ensure that existing IO is reused. FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg); assertEquals(1, cache.size()); - IgfsHadoopIpcIo io = null; + HadoopIgfsIpcIo io = null; System.out.println("CACHE: " + cache); @@ -191,7 +191,7 @@ public class IgfsHadoopFileSystemIpcCacheSelfTest extends IgfsCommonAbstractTest assertEquals(1, cache.size()); assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get()); - Field stopField = IgfsHadoopIpcIo.class.getDeclaredField("stopping"); + Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping"); stopField.setAccessible(true); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java similarity index 99% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java index 2e3a3ec..8d43e08 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerSelfTest.java @@ -30,7 +30,7 @@ import static org.apache.ignite.internal.igfs.common.IgfsLogger.*; /** * Grid IGFS client logger test.
*/ -public class IgfsHadoopFileSystemLoggerSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemLoggerSelfTest extends IgfsCommonAbstractTest { /** Path string. */ private static final String PATH_STR = "/dir1/dir2/file;test"; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java index cbe83f0..1f6a204 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoggerStateSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoggerStateSelfTest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.ignite.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.v1.*; +import org.apache.ignite.hadoop.fs.v1.*; import org.apache.ignite.internal.igfs.common.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; @@ -38,12 +38,12 @@ import java.util.*; import static org.apache.ignite.cache.CacheAtomicityMode.*; import static org.apache.ignite.cache.CacheMode.*; import static org.apache.ignite.igfs.IgfsMode.*; -import static org.apache.ignite.igfs.hadoop.IgfsHadoopParameters.*; +import static org.apache.ignite.internal.processors.hadoop.fs.HadoopParameters.*; /** * Ensures that sampling is really turned on/off. */ -public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractTest { /** IGFS. */ private IgfsEx igfs; @@ -75,7 +75,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT * @throws Exception If failed. */ private void startUp() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -116,7 +116,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); cfg.setConnectorConfiguration(null); @@ -292,7 +292,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT * @return New file system. * @throws Exception If failed. 
*/ - private IgfsHadoopFileSystem fileSystem() throws Exception { + private IgniteHadoopFileSystem fileSystem() throws Exception { Configuration fsCfg = new Configuration(); fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml")); @@ -304,7 +304,7 @@ public class IgfsHadoopFileSystemLoggerStateSelfTest extends IgfsCommonAbstractT fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome()); - return (IgfsHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg); + return (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg); } /** diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java similarity index 87% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java index bd9b031..e33cde7 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackAbstractSelfTest.java @@ -24,15 +24,15 @@ import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEnd /** * IGFS Hadoop file system IPC loopback self test. */ -public abstract class IgfsHadoopFileSystemLoopbackAbstractSelfTest extends - IgfsHadoopFileSystemAbstractSelfTest { +public abstract class IgniteHadoopFileSystemLoopbackAbstractSelfTest extends + IgniteHadoopFileSystemAbstractSelfTest { /** * Constructor. * * @param mode IGFS mode. * @param skipEmbed Skip embedded mode flag. */ - protected IgfsHadoopFileSystemLoopbackAbstractSelfTest(IgfsMode mode, boolean skipEmbed) { + protected IgniteHadoopFileSystemLoopbackAbstractSelfTest(IgfsMode mode, boolean skipEmbed) { super(mode, skipEmbed, true); } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java index 6035c7a..d46001d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode. */ -public class IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest extends - IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest extends + IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest() { + public IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest() { super(DUAL_ASYNC, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java index 3900b0b..fa64734 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode. */ -public class IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest - extends IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest + extends IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest() { + public IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest() { super(DUAL_SYNC, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java index dee717e..9035acd 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in PRIMARY mode. */ -public class IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest - extends IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest + extends IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest() { + public IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest() { super(PRIMARY, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java index 1f13019..8198c52 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.java @@ -22,13 +22,13 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in SECONDARY mode. */ -public class IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest extends - IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest extends + IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest() { + public IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest() { super(PROXY, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java index 4a66da6..246c516 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in DUAL_ASYNC mode. */ -public class IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest extends - IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest extends + IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest() { + public IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest() { super(DUAL_ASYNC, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java index f5f49ed..b5140af 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in DUAL_SYNC mode. */ -public class IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest - extends IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest + extends IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest() { + public IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest() { super(DUAL_SYNC, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java index 5903704..572ac4b 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in PRIMARY mode. */ -public class IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest - extends IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest + extends IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest() { + public IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest() { super(PRIMARY, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java index 0e95f83..006c271 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.java @@ -22,13 +22,13 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC loopback self test in SECONDARY mode. */ -public class IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest extends - IgfsHadoopFileSystemLoopbackAbstractSelfTest { +public class IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest extends + IgniteHadoopFileSystemLoopbackAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest() { + public IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest() { super(PROXY, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryModeSelfTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryModeSelfTest.java index b88816a..3f20070 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemSecondaryModeSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemSecondaryModeSelfTest.java @@ -21,8 +21,8 @@ import org.apache.hadoop.conf.*; import org.apache.hadoop.fs.*; import org.apache.ignite.cache.*; import org.apache.ignite.configuration.*; -import org.apache.ignite.igfs.hadoop.*; -import org.apache.ignite.igfs.hadoop.v1.*; +import org.apache.ignite.hadoop.fs.*; +import org.apache.ignite.hadoop.fs.v1.*; import org.apache.ignite.internal.processors.igfs.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -40,7 +40,7 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * Ensures correct modes resolution for SECONDARY paths. */ -public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstractTest { +public class IgniteHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstractTest { /** Path to check. */ private static final Path PATH = new Path("/dir"); @@ -57,7 +57,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac private Map pathModes; /** File system. 
*/ - private IgfsHadoopFileSystem fs; + private IgniteHadoopFileSystem fs; /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { @@ -83,7 +83,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac private void startUp() throws Exception { startUpSecondary(); - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -97,7 +97,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac }}); igfsCfg.setManagementPort(-1); - igfsCfg.setSecondaryFileSystem(new IgfsHadoopFileSystemWrapper( + igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem( "igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/", "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml")); @@ -130,7 +130,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); @@ -142,7 +142,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac fsCfg.setBoolean("fs.igfs.impl.disable.cache", true); - fs = (IgfsHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg); + fs = (IgniteHadoopFileSystem)FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg); } /** @@ -151,7 +151,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac * @throws Exception If failed. */ private void startUpSecondary() throws Exception { - IgfsConfiguration igfsCfg = new IgfsConfiguration(); + FileSystemConfiguration igfsCfg = new FileSystemConfiguration(); igfsCfg.setDataCacheName("partitioned"); igfsCfg.setMetaCacheName("replicated"); @@ -192,7 +192,7 @@ public class IgfsHadoopFileSystemSecondaryModeSelfTest extends IgfsCommonAbstrac cfg.setDiscoverySpi(discoSpi); cfg.setCacheConfiguration(metaCacheCfg, cacheCfg); - cfg.setIgfsConfiguration(igfsCfg); + cfg.setFileSystemConfiguration(igfsCfg); cfg.setLocalHost("127.0.0.1"); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java index ed7b7cc..991045b 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemAbstractSelfTest.java @@ -30,14 +30,14 @@ import static org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEnd /** * IGFS Hadoop file system IPC self test. */ -public abstract class IgfsHadoopFileSystemShmemAbstractSelfTest extends IgfsHadoopFileSystemAbstractSelfTest { +public abstract class IgniteHadoopFileSystemShmemAbstractSelfTest extends IgniteHadoopFileSystemAbstractSelfTest { /** * Constructor. * * @param mode IGFS mode. * @param skipEmbed Skip embedded mode flag. 
*/ - protected IgfsHadoopFileSystemShmemAbstractSelfTest(IgfsMode mode, boolean skipEmbed) { + protected IgniteHadoopFileSystemShmemAbstractSelfTest(IgfsMode mode, boolean skipEmbed) { super(mode, skipEmbed, false); } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java index d11e8d1..ed34398 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode. */ -public class IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest() { + public IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest() { super(DUAL_ASYNC, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java index 6138215..4b7bfb1 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode. */ -public class IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest() { + public IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest() { super(DUAL_SYNC, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java index 9fc2d33..c1393d3 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in PRIMARY mode. */ -public class IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest() { + public IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest() { super(PRIMARY, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java index a05158c..2c97e7b 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in SECONDARY mode. */ -public class IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest() { + public IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest() { super(PROXY, false); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java index c132e5b..6d9973f 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in DUAL_ASYNC mode. */ -public class IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest() { + public IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest() { super(DUAL_ASYNC, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualSyncSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualSyncSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java index 21b393d..6ec084d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalDualSyncSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in DUAL_SYNC mode. */ -public class IgfsHadoopFileSystemShmemExternalDualSyncSelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemExternalDualSyncSelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemShmemExternalDualSyncSelfTest() { + public IgniteHadoopFileSystemShmemExternalDualSyncSelfTest() { super(DUAL_SYNC, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalPrimarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalPrimarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java index f147929..03f3fee 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalPrimarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalPrimarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in PRIMARY mode. */ -public class IgfsHadoopFileSystemShmemExternalPrimarySelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemExternalPrimarySelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. */ - public IgfsHadoopFileSystemShmemExternalPrimarySelfTest() { + public IgniteHadoopFileSystemShmemExternalPrimarySelfTest() { super(PRIMARY, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalSecondarySelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java similarity index 84% rename from modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalSecondarySelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java index 7c77740..bc27674 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgfsHadoopFileSystemShmemExternalSecondarySelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/igfs/IgniteHadoopFileSystemShmemExternalSecondarySelfTest.java @@ -22,12 +22,12 @@ import static org.apache.ignite.igfs.IgfsMode.*; /** * IGFS Hadoop file system IPC shmem self test in SECONDARY mode. */ -public class IgfsHadoopFileSystemShmemExternalSecondarySelfTest - extends IgfsHadoopFileSystemShmemAbstractSelfTest { +public class IgniteHadoopFileSystemShmemExternalSecondarySelfTest + extends IgniteHadoopFileSystemShmemAbstractSelfTest { /** * Constructor. 
*/ - public IgfsHadoopFileSystemShmemExternalSecondarySelfTest() { + public IgniteHadoopFileSystemShmemExternalSecondarySelfTest() { super(PROXY, true); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java similarity index 89% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java index 8319255..7fda532 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractSelfTest.java @@ -20,7 +20,7 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.hadoop.conf.*; import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; -import org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem; +import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem; import org.apache.ignite.internal.processors.hadoop.fs.*; import org.apache.ignite.spi.communication.tcp.*; import org.apache.ignite.spi.discovery.tcp.*; @@ -37,7 +37,7 @@ import static org.apache.ignite.cache.CacheWriteSynchronizationMode.*; /** * Abstract class for Hadoop tests. */ -public abstract class GridHadoopAbstractSelfTest extends GridCommonAbstractTest { +public abstract class HadoopAbstractSelfTest extends GridCommonAbstractTest { /** */ private static TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -107,7 +107,7 @@ public abstract class GridHadoopAbstractSelfTest extends GridCommonAbstractTest if (igfsEnabled()) { cfg.setCacheConfiguration(metaCacheConfiguration(), dataCacheConfiguration()); - cfg.setIgfsConfiguration(igfsConfiguration()); + cfg.setFileSystemConfiguration(igfsConfiguration()); } if (restEnabled()) { @@ -128,8 +128,8 @@ public abstract class GridHadoopAbstractSelfTest extends GridCommonAbstractTest * @param gridName Grid name. * @return Hadoop configuration. */ - public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = new GridHadoopConfiguration(); + public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = new HadoopConfiguration(); cfg.setMaxParallelTasks(3); @@ -139,8 +139,8 @@ public abstract class GridHadoopAbstractSelfTest extends GridCommonAbstractTest /** * @return IGFS configuration. */ - public IgfsConfiguration igfsConfiguration() { - IgfsConfiguration cfg = new IgfsConfiguration(); + public FileSystemConfiguration igfsConfiguration() { + FileSystemConfiguration cfg = new FileSystemConfiguration(); cfg.setName(igfsName); cfg.setBlockSize(igfsBlockSize); @@ -206,11 +206,11 @@ public abstract class GridHadoopAbstractSelfTest extends GridCommonAbstractTest */ protected void setupFileSystems(Configuration cfg) { cfg.set("fs.defaultFS", igfsScheme()); - cfg.set("fs.igfs.impl", org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.class.getName()); - cfg.set("fs.AbstractFileSystem.igfs.impl", IgfsHadoopFileSystem. + cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName()); + cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem. 
class.getName()); - GridHadoopFileSystemsUtils.setupFileSystems(cfg); + HadoopFileSystemsUtils.setupFileSystems(cfg); } /** diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractWordCountTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java similarity index 97% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractWordCountTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java index ebbc0a6..1390982 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopAbstractWordCountTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopAbstractWordCountTest.java @@ -29,7 +29,7 @@ import java.util.*; /** * Abstract class for tests based on WordCount test job. */ -public abstract class GridHadoopAbstractWordCountTest extends GridHadoopAbstractSelfTest { +public abstract class HadoopAbstractWordCountTest extends HadoopAbstractSelfTest { /** Input path. */ protected static final String PATH_INPUT = "/input"; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoaderTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoaderTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java index 767be7c..a3289cb 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopClassLoaderTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopClassLoaderTest.java @@ -23,9 +23,9 @@ import org.apache.hadoop.mapreduce.*; /** * */ -public class GridHadoopClassLoaderTest extends TestCase { +public class HadoopClassLoaderTest extends TestCase { /** */ - GridHadoopClassLoader ldr = new GridHadoopClassLoader(null); + HadoopClassLoader ldr = new HadoopClassLoader(null); /** * @throws Exception If failed. 
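[Editor's aside, not part of the patch: the setupFileSystems() hunk above shows the client-facing effect of this rename. The sketch below is illustrative only; it uses just the property keys and class names that appear in this patch, and the igfs://igfs@localhost URI that the patch itself uses in HadoopStartup further down.]

    import org.apache.hadoop.conf.Configuration;

    /** Minimal sketch: a Hadoop client Configuration pointing at IGFS after the rename. */
    public class IgfsClientConfigSketch {
        public static Configuration igfsClientConfiguration() {
            Configuration cfg = new Configuration();

            // Default file system; authority format is igfs://<igfs-name>@<host> (illustrative).
            cfg.set("fs.defaultFS", "igfs://igfs@localhost");

            // Renamed v1 (FileSystem API) implementation, was org.apache.ignite.igfs.hadoop.v1.
            cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

            // Renamed v2 (AbstractFileSystem API) implementation, was org.apache.ignite.igfs.hadoop.v2.
            cfg.set("fs.AbstractFileSystem.igfs.impl", "org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem");

            return cfg;
        }
    }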
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCommandLineTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
similarity index 95%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCommandLineTest.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
index 80cd226..33fa358 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopCommandLineTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopCommandLineTest.java
@@ -19,9 +19,9 @@ package org.apache.ignite.internal.processors.hadoop;
 
 import com.google.common.base.*;
 import org.apache.ignite.*;
+import org.apache.ignite.hadoop.fs.*;
 import org.apache.ignite.igfs.*;
 import org.apache.ignite.internal.processors.igfs.*;
-import org.apache.ignite.internal.processors.hadoop.counter.*;
 import org.apache.ignite.internal.processors.hadoop.jobtracker.*;
 import org.apache.ignite.internal.util.typedef.*;
 import org.apache.ignite.internal.util.typedef.internal.*;
@@ -35,7 +35,7 @@ import java.util.*;
 
 /**
  * Test of integration with Hadoop client via command line interface.
  */
-public class GridHadoopCommandLineTest extends GridCommonAbstractTest {
+public class HadoopCommandLineTest extends GridCommonAbstractTest {
    /** IGFS instance. */
    private IgfsEx igfs;
@@ -169,8 +169,8 @@ public class GridHadoopCommandLineTest extends GridCommonAbstractTest {
            if (line.startsWith("<configuration>"))
                out.println(
                    "    <property>\n" +
-                   "        <name>" + GridHadoopUtils.JOB_COUNTER_WRITER_PROPERTY + "</name>\n" +
-                   "        <value>" + GridHadoopFSCounterWriter.class.getName() + "</value>\n" +
+                   "        <name>" + HadoopUtils.JOB_COUNTER_WRITER_PROPERTY + "</name>\n" +
+                   "        <value>" + IgniteHadoopFileSystemCounterWriter.class.getName() + "</value>\n" +
                    "    </property>\n");
 
            out.println(line);
@@ -209,8 +209,8 @@ public class GridHadoopCommandLineTest extends GridCommonAbstractTest {
    private ProcessBuilder createProcessBuilder() {
        String sep = ":";
 
-       String ggClsPath = GridHadoopJob.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
-           GridHadoopJobTracker.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
+       String ggClsPath = HadoopJob.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
+           HadoopJobTracker.class.getProtectionDomain().getCodeSource().getLocation().getPath() + sep +
            ConcurrentHashMap8.class.getProtectionDomain().getCodeSource().getLocation().getPath();
 
        ProcessBuilder res = new ProcessBuilder();
@@ -327,7 +327,7 @@ public class GridHadoopCommandLineTest extends GridCommonAbstractTest {
        assertEquals(0, executeHadoopCmd("fs", "-get", jobStatPath.toString() + "/performance", locStatFile.toString()));
 
-       long evtCnt = GridHadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));
+       long evtCnt = HadoopTestUtils.simpleCheckJobStatFile(new BufferedReader(new FileReader(locStatFile)));
 
        assertTrue(evtCnt >= 22); //It's the minimum amount of events for job with combiner.
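[Editor's aside, not part of the patch: the hunk above injects the renamed counter writer into the generated XML configuration; HadoopMapReduceTest later in this patch does the same thing programmatically. A minimal sketch, assuming only the constants these tests exercise; "/xxx/${USER}/zzz" is the tests' placeholder directory, with ${USER} apparently substituted with the submitting user.]

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.ignite.hadoop.fs.IgniteHadoopFileSystemCounterWriter;
    import org.apache.ignite.internal.processors.hadoop.HadoopUtils;

    /** Sketch: route Ignite job performance counters through the renamed writer. */
    public class CounterWriterConfigSketch {
        public static JobConf counterWriterConf() {
            JobConf jobConf = new JobConf();

            // IgniteHadoopFileSystemCounterWriter replaces the old GridHadoopFSCounterWriter.
            jobConf.set(HadoopUtils.JOB_COUNTER_WRITER_PROPERTY,
                IgniteHadoopFileSystemCounterWriter.class.getName());

            // Target directory template; the path itself is just the tests' placeholder.
            jobConf.set(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY,
                "/xxx/${USER}/zzz");

            return jobConf;
        }
    }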
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultMapReducePlannerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
similarity index 89%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultMapReducePlannerSelfTest.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
index b1b0275..76988a3 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopDefaultMapReducePlannerSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopDefaultMapReducePlannerSelfTest.java
@@ -21,13 +21,14 @@ import org.apache.ignite.*;
 import org.apache.ignite.cache.*;
 import org.apache.ignite.cluster.*;
 import org.apache.ignite.configuration.*;
+import org.apache.ignite.hadoop.mapreduce.*;
 import org.apache.ignite.igfs.*;
 import org.apache.ignite.igfs.mapreduce.*;
+import org.apache.ignite.igfs.secondary.*;
 import org.apache.ignite.internal.*;
 import org.apache.ignite.internal.cluster.*;
 import org.apache.ignite.internal.processors.cache.*;
 import org.apache.ignite.internal.processors.igfs.*;
-import org.apache.ignite.internal.processors.hadoop.planner.*;
 import org.apache.ignite.internal.util.typedef.*;
 import org.apache.ignite.lang.*;
 import org.apache.ignite.testframework.*;
@@ -39,7 +40,7 @@ import java.util.*;
 
 /**
  *
  */
-public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstractSelfTest {
+public class HadoopDefaultMapReducePlannerSelfTest extends HadoopAbstractSelfTest {
    /** */
    private static final UUID ID_1 = new UUID(0, 1);
@@ -71,10 +72,10 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
    private static final MockIgnite GRID = new MockIgnite();
 
    /** Mocked IGFS. */
-   private static final IgniteFs IGFS = new MockIgfs();
+   private static final IgniteFileSystem IGFS = new MockIgfs();
 
    /** Planner. */
-   private static final GridHadoopMapReducePlanner PLANNER = new GridHadoopDefaultMapReducePlanner();
+   private static final HadoopMapReducePlanner PLANNER = new IgniteHadoopMapReducePlanner();
 
    /** Block locations. */
    private static final Map> BLOCK_MAP = new HashMap<>();
@@ -83,7 +84,7 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
    private static final Map PROXY_MAP = new HashMap<>();
 
    /** Last created plan. */
-   private static final ThreadLocal<GridHadoopMapReducePlan> PLAN = new ThreadLocal<>();
+   private static final ThreadLocal<HadoopMapReducePlan> PLAN = new ThreadLocal<>();
 
    /**
     *
@@ -104,9 +105,9 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
     * @throws IgniteCheckedException If failed.
*/ public void testIgfsOneBlockPerNode() throws IgniteCheckedException { - GridHadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1); - GridHadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_2); - GridHadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_3); + HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1); + HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_2); + HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_3); mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1)); mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_2)); @@ -164,9 +165,9 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac * @throws IgniteCheckedException If failed. */ public void testNonIgfsOneBlockPerNode() throws IgniteCheckedException { - GridHadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1); - GridHadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_2); - GridHadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_3); + HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1); + HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_2); + HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_3); plan(1, split1); assert ensureMappers(ID_1, split1); @@ -220,9 +221,9 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac * @throws IgniteCheckedException If failed. */ public void testIgfsSeveralBlocksPerNode() throws IgniteCheckedException { - GridHadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2); - GridHadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2); - GridHadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_1, HOST_3); + HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2); + HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2); + HadoopFileBlock split3 = split(true, "/file3", 0, 100, HOST_1, HOST_3); mapIgfsBlock(split1.file(), 0, 100, location(0, 100, ID_1, ID_2)); mapIgfsBlock(split2.file(), 0, 100, location(0, 100, ID_1, ID_2)); @@ -266,9 +267,9 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac * @throws IgniteCheckedException If failed. */ public void testNonIgfsSeveralBlocksPerNode() throws IgniteCheckedException { - GridHadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1, HOST_2); - GridHadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_1, HOST_2); - GridHadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_1, HOST_3); + HadoopFileBlock split1 = split(false, "/file1", 0, 100, HOST_1, HOST_2); + HadoopFileBlock split2 = split(false, "/file2", 0, 100, HOST_1, HOST_2); + HadoopFileBlock split3 = split(false, "/file3", 0, 100, HOST_1, HOST_3); plan(1, split1); assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) || @@ -308,8 +309,8 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac * @throws IgniteCheckedException If failed. 
     */
    public void testIgfsSeveralComplexBlocksPerNode() throws IgniteCheckedException {
-       GridHadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2, HOST_3);
-       GridHadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2, HOST_3);
+       HadoopFileBlock split1 = split(true, "/file1", 0, 100, HOST_1, HOST_2, HOST_3);
+       HadoopFileBlock split2 = split(true, "/file2", 0, 100, HOST_1, HOST_2, HOST_3);
 
        mapIgfsBlock(split1.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_1, ID_3));
        mapIgfsBlock(split2.file(), 0, 100, location(0, 50, ID_1, ID_2), location(51, 100, ID_2, ID_3));
@@ -344,9 +345,9 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
     * @throws IgniteCheckedException If failed.
     */
    public void testNonIgfsOrphans() throws IgniteCheckedException {
-       GridHadoopFileBlock split1 = split(false, "/file1", 0, 100, INVALID_HOST_1, INVALID_HOST_2);
-       GridHadoopFileBlock split2 = split(false, "/file2", 0, 100, INVALID_HOST_1, INVALID_HOST_3);
-       GridHadoopFileBlock split3 = split(false, "/file3", 0, 100, INVALID_HOST_2, INVALID_HOST_3);
+       HadoopFileBlock split1 = split(false, "/file1", 0, 100, INVALID_HOST_1, INVALID_HOST_2);
+       HadoopFileBlock split2 = split(false, "/file2", 0, 100, INVALID_HOST_1, INVALID_HOST_3);
+       HadoopFileBlock split3 = split(false, "/file3", 0, 100, INVALID_HOST_2, INVALID_HOST_3);
 
        plan(1, split1);
 
        assert ensureMappers(ID_1, split1) && ensureReducers(ID_1, 1) && ensureEmpty(ID_2) && ensureEmpty(ID_3) ||
@@ -400,11 +401,11 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
     * @return Plan.
     * @throws IgniteCheckedException If failed.
     */
-   private static GridHadoopMapReducePlan plan(int reducers, GridHadoopInputSplit... splits) throws IgniteCheckedException {
+   private static HadoopMapReducePlan plan(int reducers, HadoopInputSplit... splits) throws IgniteCheckedException {
        assert reducers > 0;
        assert splits != null && splits.length > 0;
 
-       Collection<GridHadoopInputSplit> splitList = new ArrayList<>(splits.length);
+       Collection<HadoopInputSplit> splitList = new ArrayList<>(splits.length);
 
        Collections.addAll(splitList, splits);
 
@@ -422,7 +423,7 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        top.add(node2);
        top.add(node3);
 
-       GridHadoopMapReducePlan plan = PLANNER.preparePlan(new MockJob(reducers, splitList), top, null);
+       HadoopMapReducePlan plan = PLANNER.preparePlan(new MockJob(reducers, splitList), top, null);
 
        PLAN.set(plan);
 
@@ -436,12 +437,12 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
     * @param expSplits Expected splits.
     * @return {@code True} if this assumption is valid.
     */
-   private static boolean ensureMappers(UUID nodeId, GridHadoopInputSplit... expSplits) {
-       Collection<GridHadoopInputSplit> expSplitsCol = new ArrayList<>();
+   private static boolean ensureMappers(UUID nodeId, HadoopInputSplit... expSplits) {
+       Collection<HadoopInputSplit> expSplitsCol = new ArrayList<>();
 
        Collections.addAll(expSplitsCol, expSplits);
 
-       Collection<GridHadoopInputSplit> splits = PLAN.get().mappers(nodeId);
+       Collection<HadoopInputSplit> splits = PLAN.get().mappers(nodeId);
 
        return F.eq(expSplitsCol, splits);
    }
 
@@ -479,10 +480,10 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
     * @param hosts Hosts.
     * @return Split.
     */
-   private static GridHadoopFileBlock split(boolean igfs, String file, long start, long len, String... hosts) {
+   private static HadoopFileBlock split(boolean igfs, String file, long start, long len, String... hosts) {
        URI uri = URI.create((igfs ? "igfs://igfs@" : "hdfs://") + file);
 
-       return new GridHadoopFileBlock(hosts, uri, start, len);
+       return new HadoopFileBlock(hosts, uri, start, len);
    }
 
@@ -586,12 +587,12 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
    /**
     * Mocked job.
     */
-   private static class MockJob implements GridHadoopJob {
+   private static class MockJob implements HadoopJob {
        /** Reducers count. */
        private final int reducers;
 
        /** */
-       private Collection<GridHadoopInputSplit> splitList;
+       private Collection<HadoopInputSplit> splitList;
 
        /**
         * Constructor.
@@ -599,19 +600,19 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
         * @param reducers Reducers count.
         * @param splitList Splits.
         */
-       private MockJob(int reducers, Collection<GridHadoopInputSplit> splitList) {
+       private MockJob(int reducers, Collection<HadoopInputSplit> splitList) {
            this.reducers = reducers;
            this.splitList = splitList;
        }
 
        /** {@inheritDoc} */
-       @Override public GridHadoopJobId id() {
+       @Override public HadoopJobId id() {
            return null;
        }
 
        /** {@inheritDoc} */
-       @Override public GridHadoopJobInfo info() {
-           return new GridHadoopDefaultJobInfo() {
+       @Override public HadoopJobInfo info() {
+           return new HadoopDefaultJobInfo() {
                @Override public int reducers() {
                    return reducers;
                }
@@ -619,12 +620,12 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public Collection<GridHadoopInputSplit> input() throws IgniteCheckedException {
+       @Override public Collection<HadoopInputSplit> input() throws IgniteCheckedException {
            return splitList;
        }
 
        /** {@inheritDoc} */
-       @Override public GridHadoopTaskContext getTaskContext(GridHadoopTaskInfo info) throws IgniteCheckedException {
+       @Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
            return null;
        }
 
@@ -639,12 +640,12 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public void prepareTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException {
+       @Override public void prepareTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
            // No-op.
        }
 
        /** {@inheritDoc} */
-       @Override public void cleanupTaskEnvironment(GridHadoopTaskInfo info) throws IgniteCheckedException {
+       @Override public void cleanupTaskEnvironment(HadoopTaskInfo info) throws IgniteCheckedException {
            // No-op.
        }
 
@@ -755,7 +756,7 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public IgfsConfiguration configuration() {
+       @Override public FileSystemConfiguration configuration() {
            return null;
        }
 
@@ -815,11 +816,6 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public Map<String, String> properties() {
-           return Collections.emptyMap();
-       }
-
-       /** {@inheritDoc} */
        @Override public IgfsOutputStream create(IgfsPath path, boolean overwrite) {
            return null;
        }
@@ -903,7 +899,7 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public IgniteFs withAsync() {
+       @Override public IgniteFileSystem withAsync() {
            return null;
        }
 
@@ -916,6 +912,11 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        @Override public <R> IgniteFuture<R> future() {
            return null;
        }
+
+       /** {@inheritDoc} */
+       @Override public IgfsSecondaryFileSystem asSecondary() {
+           return null;
+       }
    }
 
    /**
@@ -929,14 +930,14 @@ public class GridHadoopDefaultMapReducePlannerSelfTest extends GridHadoopAbstrac
        }
 
        /** {@inheritDoc} */
-       @Override public IgniteFs igfsx(String name) {
+       @Override public IgniteFileSystem igfsx(String name) {
            assert F.eq("igfs", name);
 
            return IGFS;
        }
 
        /** {@inheritDoc} */
-       @Override public GridHadoop hadoop() {
+       @Override public Hadoop hadoop() {
            return null;
        }
 
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileSystemsTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
similarity index 94%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileSystemsTest.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
index 18e5c03..8cf31a2 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopFileSystemsTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopFileSystemsTest.java
@@ -32,7 +32,7 @@ import java.util.concurrent.atomic.*;
 
 /**
  * Test file systems for the working directory multi-threading support.
*/ -public class GridHadoopFileSystemsTest extends GridHadoopAbstractSelfTest { +public class HadoopFileSystemsTest extends HadoopAbstractSelfTest { private static final int THREAD_COUNT = 3; /** {@inheritDoc} */ @@ -67,7 +67,7 @@ public class GridHadoopFileSystemsTest extends GridHadoopAbstractSelfTest { setupFileSystems(cfg); - cfg.set(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, + cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, new Path(new Path(uri), "user/" + System.getProperty("user.name")).toString()); final CountDownLatch changeUserPhase = new CountDownLatch(THREAD_COUNT); @@ -89,7 +89,7 @@ public class GridHadoopFileSystemsTest extends GridHadoopAbstractSelfTest { FileSystem fs = FileSystem.get(uri, cfg); - GridHadoopFileSystemsUtils.setUser(fs, "user" + curThreadNum); + HadoopFileSystemsUtils.setUser(fs, "user" + curThreadNum); if ("file".equals(uri.getScheme())) FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum)); @@ -132,7 +132,7 @@ public class GridHadoopFileSystemsTest extends GridHadoopAbstractSelfTest { Path workDir = new Path(new Path(uri), "user/user" + i); - cfg.set(GridHadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString()); + cfg.set(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP, workDir.toString()); assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory()); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopGroupingTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopGroupingTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java index 49099fc..e385ca7 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopGroupingTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopGroupingTest.java @@ -19,6 +19,7 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.hadoop.io.*; import org.apache.hadoop.mapreduce.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.util.*; import org.apache.ignite.internal.util.typedef.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -26,17 +27,17 @@ import org.apache.ignite.internal.util.typedef.internal.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Grouping test. 
*/ -public class GridHadoopGroupingTest extends GridHadoopAbstractSelfTest { +public class HadoopGroupingTest extends HadoopAbstractSelfTest { /** */ private static final String PATH_OUTPUT = "/test-out"; /** */ - private static final GridConcurrentHashSet vals = GridHadoopSharedMap.map(GridHadoopGroupingTest.class) + private static final GridConcurrentHashSet vals = HadoopSharedMap.map(HadoopGroupingTest.class) .put("vals", new GridConcurrentHashSet()); /** {@inheritDoc} */ @@ -60,8 +61,8 @@ public class GridHadoopGroupingTest extends GridHadoopAbstractSelfTest { } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(false); @@ -110,7 +111,7 @@ public class GridHadoopGroupingTest extends GridHadoopAbstractSelfTest { job.setGroupingComparatorClass(YearComparator.class); } - grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 2), + grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2), createJobInfo(job.getConfiguration())).get(30000); assertTrue(vals.isEmpty()); @@ -207,7 +208,7 @@ public class GridHadoopGroupingTest extends GridHadoopAbstractSelfTest { ArrayList list = new ArrayList<>(); for (int i = 0; i < 10; i++) - list.add(new GridHadoopSortingTest.FakeSplit(20)); + list.add(new HadoopSortingTest.FakeSplit(20)); return list; } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobTrackerSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java similarity index 92% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobTrackerSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java index 3aa74d0..943d89f 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopJobTrackerSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopJobTrackerSelfTest.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.*; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.*; import org.apache.hadoop.mapreduce.lib.output.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.typedef.internal.*; @@ -30,12 +31,12 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Job tracker self test. */ -public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopJobTrackerSelfTest extends HadoopAbstractSelfTest { /** */ private static final String PATH_OUTPUT = "/test-out"; @@ -43,7 +44,7 @@ public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { private static final int BLOCK_CNT = 10; /** */ - private static GridHadoopSharedMap m = GridHadoopSharedMap.map(GridHadoopJobTrackerSelfTest.class); + private static HadoopSharedMap m = HadoopSharedMap.map(HadoopJobTrackerSelfTest.class); /** Map task execution count. 
*/ private static final AtomicInteger mapExecCnt = m.put("mapExecCnt", new AtomicInteger()); @@ -91,10 +92,10 @@ public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); - cfg.setMapReducePlanner(new GridHadoopTestRoundRobinMrPlanner()); + cfg.setMapReducePlanner(new HadoopTestRoundRobinMrPlanner()); cfg.setExternalExecution(false); return cfg; @@ -116,7 +117,7 @@ public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "1")); - GridHadoopJobId jobId = new GridHadoopJobId(globalId, 1); + HadoopJobId jobId = new HadoopJobId(globalId, 1); grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration())); @@ -163,7 +164,7 @@ public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT + "2")); - GridHadoopJobId jobId = new GridHadoopJobId(globalId, 1); + HadoopJobId jobId = new HadoopJobId(globalId, 1); grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration())); @@ -213,13 +214,13 @@ public class GridHadoopJobTrackerSelfTest extends GridHadoopAbstractSelfTest { * @param complete Completion status. * @throws Exception If failed. */ - private void checkStatus(GridHadoopJobId jobId, boolean complete) throws Exception { + private void checkStatus(HadoopJobId jobId, boolean complete) throws Exception { for (int i = 0; i < gridCount(); i++) { IgniteKernal kernal = (IgniteKernal)grid(i); - GridHadoop hadoop = kernal.hadoop(); + Hadoop hadoop = kernal.hadoop(); - GridHadoopJobStatus stat = hadoop.status(jobId); + HadoopJobStatus stat = hadoop.status(jobId); assert stat != null; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceEmbeddedSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java similarity index 90% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceEmbeddedSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java index dda041c..4a6e1ef 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceEmbeddedSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceEmbeddedSelfTest.java @@ -25,25 +25,26 @@ import org.apache.hadoop.mapred.*; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.hadoop.examples.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Tests map-reduce execution with embedded mode. 
*/ -public class GridHadoopMapReduceEmbeddedSelfTest extends GridHadoopMapReduceTest { +public class HadoopMapReduceEmbeddedSelfTest extends HadoopMapReduceTest { /** */ - private static Map flags = GridHadoopSharedMap.map(GridHadoopMapReduceEmbeddedSelfTest.class) + private static Map flags = HadoopSharedMap.map(HadoopMapReduceEmbeddedSelfTest.class) .put("flags", new HashMap()); /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(false); @@ -60,7 +61,7 @@ public class GridHadoopMapReduceEmbeddedSelfTest extends GridHadoopMapReduceTest igfs.mkdirs(inDir); - IgfsPath inFile = new IgfsPath(inDir, GridHadoopWordCount2.class.getSimpleName() + "-input"); + IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input"); generateTestFile(inFile.toString(), "key1", 10000, "key2", 20000, "key3", 15000, "key4", 7000, "key5", 12000, "key6", 18000 ); @@ -88,7 +89,7 @@ public class GridHadoopMapReduceEmbeddedSelfTest extends GridHadoopMapReduceTest // File system coordinates. setupFileSystems(jobConf); - GridHadoopWordCount1.setTasksClasses(jobConf, !useNewAPI, !useNewAPI, !useNewAPI); + HadoopWordCount1.setTasksClasses(jobConf, !useNewAPI, !useNewAPI, !useNewAPI); if (!useNewAPI) { jobConf.setPartitionerClass(CustomV1Partitioner.class); @@ -98,7 +99,7 @@ public class GridHadoopMapReduceEmbeddedSelfTest extends GridHadoopMapReduceTest Job job = Job.getInstance(jobConf); - GridHadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI); + HadoopWordCount2.setTasksClasses(job, useNewAPI, useNewAPI, useNewAPI); if (useNewAPI) { job.setPartitionerClass(CustomV2Partitioner.class); @@ -114,9 +115,9 @@ public class GridHadoopMapReduceEmbeddedSelfTest extends GridHadoopMapReduceTest job.setNumReduceTasks(3); - job.setJarByClass(GridHadoopWordCount2.class); + job.setJarByClass(HadoopWordCount2.class); - IgniteInternalFuture fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1), + IgniteInternalFuture fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(job.getConfiguration())); fut.get(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java similarity index 82% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java index 072e764..6242ecc 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopMapReduceTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopMapReduceTest.java @@ -24,9 +24,11 @@ import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.ignite.*; +import org.apache.ignite.hadoop.fs.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import 
org.apache.ignite.internal.processors.hadoop.examples.*; import org.apache.ignite.internal.util.lang.*; import org.apache.ignite.internal.util.typedef.*; @@ -35,12 +37,12 @@ import org.apache.ignite.testframework.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Test of whole cycle of map-reduce processing via Job tracker. */ -public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { +public class HadoopMapReduceTest extends HadoopAbstractWordCountTest { /** {@inheritDoc} */ @Override protected int gridCount() { return 3; @@ -55,7 +57,7 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { igfs.mkdirs(inDir); - IgfsPath inFile = new IgfsPath(inDir, GridHadoopWordCount2.class.getSimpleName() + "-input"); + IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input"); generateTestFile(inFile.toString(), "red", 100000, "blue", 200000, "green", 150000, "yellow", 70000 ); @@ -68,9 +70,9 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { JobConf jobConf = new JobConf(); - jobConf.set(JOB_COUNTER_WRITER_PROPERTY, GridHadoopFSCounterWriter.class.getName()); + jobConf.set(JOB_COUNTER_WRITER_PROPERTY, IgniteHadoopFileSystemCounterWriter.class.getName()); jobConf.setUser("yyy"); - jobConf.set(GridHadoopFSCounterWriter.COUNTER_WRITER_DIR_PROPERTY, "/xxx/${USER}/zzz"); + jobConf.set(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY, "/xxx/${USER}/zzz"); //To split into about 40 items for v2 jobConf.setInt(FileInputFormat.SPLIT_MAXSIZE, 65000); @@ -81,11 +83,11 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { // File system coordinates. setupFileSystems(jobConf); - GridHadoopWordCount1.setTasksClasses(jobConf, !useNewMapper, !useNewCombiner, !useNewReducer); + HadoopWordCount1.setTasksClasses(jobConf, !useNewMapper, !useNewCombiner, !useNewReducer); Job job = Job.getInstance(jobConf); - GridHadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer); + HadoopWordCount2.setTasksClasses(job, useNewMapper, useNewCombiner, useNewReducer); job.setOutputKeyClass(Text.class); job.setOutputValueClass(IntWritable.class); @@ -93,9 +95,9 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { FileInputFormat.setInputPaths(job, new Path(igfsScheme() + inFile.toString())); FileOutputFormat.setOutputPath(job, new Path(igfsScheme() + PATH_OUTPUT)); - job.setJarByClass(GridHadoopWordCount2.class); + job.setJarByClass(HadoopWordCount2.class); - GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1); + HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1); IgniteInternalFuture fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration())); @@ -120,10 +122,10 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { * @param jobId Job id. 
* @throws IgniteCheckedException */ - private void checkJobStatistics(GridHadoopJobId jobId) throws IgniteCheckedException, IOException { - GridHadoopCounters cntrs = grid(0).hadoop().counters(jobId); + private void checkJobStatistics(HadoopJobId jobId) throws IgniteCheckedException, IOException { + HadoopCounters cntrs = grid(0).hadoop().counters(jobId); - GridHadoopPerformanceCounter perfCntr = GridHadoopPerformanceCounter.getCounter(cntrs, null); + HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(cntrs, null); Map> tasks = new TreeMap<>(); @@ -190,6 +192,6 @@ public class GridHadoopMapReduceTest extends GridHadoopAbstractWordCountTest { BufferedReader reader = new BufferedReader(new InputStreamReader(igfs.open(statPath))); - assertEquals(apiEvtCnt, GridHadoopTestUtils.simpleCheckJobStatFile(reader)); + assertEquals(apiEvtCnt, HadoopTestUtils.simpleCheckJobStatFile(reader)); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPopularWordsTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java similarity index 98% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPopularWordsTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java index 3e8a95a..a2f2ac3 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopPopularWordsTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopPopularWordsTest.java @@ -44,7 +44,7 @@ import static java.util.Collections.*; * NOTE: in order to run this example on Windows please ensure that cygwin is installed and available in the system * path. */ -public class GridHadoopPopularWordsTest { +public class HadoopPopularWordsTest { /** Ignite home. 
*/ private static final String IGNITE_HOME = U.getIgniteHome(); @@ -183,7 +183,7 @@ public class GridHadoopPopularWordsTest { cfg.addResource(U.resolveIgniteUrl(DFS_CFG)); jobCfg.setJobName("HadoopPopularWordExample"); - jobCfg.setJarByClass(GridHadoopPopularWordsTest.class); + jobCfg.setJarByClass(HadoopPopularWordsTest.class); jobCfg.setInputFormatClass(TextInputFormat.class); jobCfg.setOutputKeyClass(Text.class); jobCfg.setOutputValueClass(IntWritable.class); @@ -283,7 +283,7 @@ public class GridHadoopPopularWordsTest { */ public static void main(String[] args) { try { - new GridHadoopPopularWordsTest().runWordCountConfigBasedHadoopJob(); + new HadoopPopularWordsTest().runWordCountConfigBasedHadoopJob(); } catch (Exception e) { X.println(">>> Failed to run word count example: " + e.getMessage()); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerializationWrapperSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java similarity index 88% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerializationWrapperSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java index 79b9965..5d5bb94 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSerializationWrapperSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSerializationWrapperSelfTest.java @@ -28,13 +28,13 @@ import java.util.*; /** * Test of wrapper of the native serialization. */ -public class GridHadoopSerializationWrapperSelfTest extends GridCommonAbstractTest { +public class HadoopSerializationWrapperSelfTest extends GridCommonAbstractTest { /** * Tests read/write of IntWritable via native WritableSerialization. * @throws Exception If fails. */ public void testIntWritableSerialization() throws Exception { - GridHadoopSerialization ser = new GridHadoopSerializationWrapper(new WritableSerialization(), IntWritable.class); + HadoopSerialization ser = new HadoopSerializationWrapper(new WritableSerialization(), IntWritable.class); ByteArrayOutputStream buf = new ByteArrayOutputStream(); @@ -56,7 +56,7 @@ public class GridHadoopSerializationWrapperSelfTest extends GridCommonAbstractTe * @throws Exception If fails. */ public void testIntJavaSerialization() throws Exception { - GridHadoopSerialization ser = new GridHadoopSerializationWrapper(new JavaSerialization(), Integer.class); + HadoopSerialization ser = new HadoopSerializationWrapper(new JavaSerialization(), Integer.class); ByteArrayOutputStream buf = new ByteArrayOutputStream(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSharedMap.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java similarity index 79% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSharedMap.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java index 689fb58..c73ee9f 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSharedMap.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSharedMap.java @@ -24,9 +24,9 @@ import java.util.concurrent.*; /** * For tests. 
  */
-public class GridHadoopSharedMap {
+public class HadoopSharedMap {
    /** */
-   private static final ConcurrentMap<String, GridHadoopSharedMap> maps = new ConcurrentHashMap8<>();
+   private static final ConcurrentMap<String, HadoopSharedMap> maps = new ConcurrentHashMap8<>();
 
    /** */
    private final ConcurrentMap<String, Object> map = new ConcurrentHashMap8<>();
 
@@ -34,7 +34,7 @@ public class GridHadoopSharedMap {
    /**
     * Private.
     */
-   private GridHadoopSharedMap() {
+   private HadoopSharedMap() {
        // No-op.
    }
 
@@ -54,13 +54,13 @@ public class GridHadoopSharedMap {
     * @param cls Class.
     * @return Map of static fields.
     */
-   public static GridHadoopSharedMap map(Class<?> cls) {
-       GridHadoopSharedMap m = maps.get(cls.getName());
+   public static HadoopSharedMap map(Class<?> cls) {
+       HadoopSharedMap m = maps.get(cls.getName());
 
        if (m != null)
            return m;
 
-       GridHadoopSharedMap old = maps.putIfAbsent(cls.getName(), m = new GridHadoopSharedMap());
+       HadoopSharedMap old = maps.putIfAbsent(cls.getName(), m = new HadoopSharedMap());
 
        return old == null ? m : old;
    }
}
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingExternalTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
similarity index 79%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingExternalTest.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
index 23884ef..772e77d 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingExternalTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingExternalTest.java
@@ -17,13 +17,15 @@
 
 package org.apache.ignite.internal.processors.hadoop;
 
+import org.apache.ignite.configuration.*;
+
 /**
  * External test for sorting.
*/ -public class GridHadoopSortingExternalTest extends GridHadoopSortingTest { +public class HadoopSortingExternalTest extends HadoopSortingTest { /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(true); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java index 3a2c397..3f6594a 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSortingTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSortingTest.java @@ -23,18 +23,19 @@ import org.apache.hadoop.io.serializer.*; import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.*; import org.apache.hadoop.mapreduce.lib.output.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.internal.util.typedef.*; import java.io.*; import java.net.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Tests correct sorting. */ -public class GridHadoopSortingTest extends GridHadoopAbstractSelfTest { +public class HadoopSortingTest extends HadoopAbstractSelfTest { /** */ private static final String PATH_INPUT = "/test-in"; @@ -64,8 +65,8 @@ public class GridHadoopSortingTest extends GridHadoopAbstractSelfTest { } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(false); @@ -93,7 +94,7 @@ public class GridHadoopSortingTest extends GridHadoopAbstractSelfTest { X.printerrln("Data generation started."); - grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1), + grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(job.getConfiguration())).get(180000); X.printerrln("Data generation complete."); @@ -124,7 +125,7 @@ public class GridHadoopSortingTest extends GridHadoopAbstractSelfTest { X.printerrln("Job started."); - grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 2), + grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 2), createJobInfo(job.getConfiguration())).get(180000); X.printerrln("Job complete."); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSplitWrapperSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java similarity index 82% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSplitWrapperSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java index 
0b15a2c..ee490be 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopSplitWrapperSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopSplitWrapperSelfTest.java @@ -27,9 +27,9 @@ import java.util.*; import java.util.concurrent.*; /** - * Self test of {@link GridHadoopSplitWrapper}. + * Self test of {@link org.apache.ignite.internal.processors.hadoop.v2.HadoopSplitWrapper}. */ -public class GridHadoopSplitWrapperSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopSplitWrapperSelfTest extends HadoopAbstractSelfTest { /** * Tests serialization of wrapper and the wrapped native split. * @throws Exception If fails. @@ -39,7 +39,7 @@ public class GridHadoopSplitWrapperSelfTest extends GridHadoopAbstractSelfTest { assertEquals("/path/to/file:100+500", nativeSplit.toString()); - GridHadoopSplitWrapper split = GridHadoopUtils.wrapSplit(10, nativeSplit, nativeSplit.getLocations()); + HadoopSplitWrapper split = HadoopUtils.wrapSplit(10, nativeSplit, nativeSplit.getLocations()); assertEquals("[host1, host2]", Arrays.toString(split.hosts())); @@ -51,9 +51,9 @@ public class GridHadoopSplitWrapperSelfTest extends GridHadoopAbstractSelfTest { ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray())); - final GridHadoopSplitWrapper res = (GridHadoopSplitWrapper)in.readObject(); + final HadoopSplitWrapper res = (HadoopSplitWrapper)in.readObject(); - assertEquals("/path/to/file:100+500", GridHadoopUtils.unwrapSplit(res).toString()); + assertEquals("/path/to/file:100+500", HadoopUtils.unwrapSplit(res).toString()); GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopStartup.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java similarity index 83% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopStartup.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java index 6cc7635..1a93223 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopStartup.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopStartup.java @@ -18,14 +18,13 @@ package org.apache.ignite.internal.processors.hadoop; import org.apache.hadoop.conf.*; -import org.apache.ignite.*; -import org.apache.ignite.igfs.hadoop.v2.IgfsHadoopFileSystem; +import org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem; import org.apache.ignite.internal.util.typedef.*; /** * Hadoop node startup. */ -public class GridHadoopStartup { +public class HadoopStartup { /** * @param args Arguments. 
*/ @@ -42,8 +41,8 @@ public class GridHadoopStartup { cfg.set("fs.defaultFS", "igfs://igfs@localhost"); - cfg.set("fs.igfs.impl", org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.class.getName()); - cfg.set("fs.AbstractFileSystem.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.set("fs.igfs.impl", org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.class.getName()); + cfg.set("fs.AbstractFileSystem.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER"); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java similarity index 92% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java index 40546bb..20c5db2 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTaskExecutionSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTaskExecutionSelfTest.java @@ -26,8 +26,8 @@ import org.apache.hadoop.mapreduce.lib.input.*; import org.apache.hadoop.mapreduce.lib.output.*; import org.apache.ignite.*; import org.apache.ignite.configuration.*; +import org.apache.ignite.hadoop.fs.v1.*; import org.apache.ignite.igfs.*; -import org.apache.ignite.igfs.hadoop.v1.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.util.lang.*; import org.apache.ignite.internal.util.typedef.*; @@ -39,14 +39,14 @@ import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Tests map-reduce task execution basics. */ -public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopTaskExecutionSelfTest extends HadoopAbstractSelfTest { /** */ - private static GridHadoopSharedMap m = GridHadoopSharedMap.map(GridHadoopTaskExecutionSelfTest.class); + private static HadoopSharedMap m = HadoopSharedMap.map(HadoopTaskExecutionSelfTest.class); /** Line count. 
*/ private static final AtomicInteger totalLineCnt = m.put("totalLineCnt", new AtomicInteger()); @@ -72,8 +72,8 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest /** {@inheritDoc} */ - @Override public IgfsConfiguration igfsConfiguration() { - IgfsConfiguration cfg = super.igfsConfiguration(); + @Override public FileSystemConfiguration igfsConfiguration() { + FileSystemConfiguration cfg = super.igfsConfiguration(); cfg.setFragmentizerEnabled(false); @@ -105,8 +105,8 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setMaxParallelTasks(5); cfg.setExternalExecution(false); @@ -128,7 +128,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest Configuration cfg = new Configuration(); - cfg.setStrings("fs.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); Job job = Job.getInstance(cfg); job.setOutputKeyClass(Text.class); @@ -145,7 +145,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest job.setJarByClass(getClass()); - IgniteInternalFuture fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1), + IgniteInternalFuture fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(job.getConfiguration())); fut.get(); @@ -169,7 +169,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest Configuration cfg = new Configuration(); - cfg.setStrings("fs.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); cfg.setBoolean(MAP_WRITE, true); Job job = Job.getInstance(cfg); @@ -189,7 +189,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest job.setJarByClass(getClass()); - GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 2); + HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 2); IgniteInternalFuture fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration())); @@ -211,7 +211,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest Configuration cfg = new Configuration(); - cfg.setStrings("fs.igfs.impl", IgfsHadoopFileSystem.class.getName()); + cfg.setStrings("fs.igfs.impl", IgniteHadoopFileSystem.class.getName()); Job job = Job.getInstance(cfg); job.setOutputKeyClass(Text.class); @@ -228,7 +228,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest job.setJarByClass(getClass()); - final IgniteInternalFuture fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 3), + final IgniteInternalFuture fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 3), createJobInfo(job.getConfiguration())); GridTestUtils.assertThrows(log, new Callable() { @@ -246,7 +246,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest * @throws Exception If failed. 
*/ private void prepareFile(String fileName, int lineCnt) throws Exception { - IgniteFs igfs = grid(0).fileSystem(igfsName); + IgniteFileSystem igfs = grid(0).fileSystem(igfsName); try (OutputStream os = igfs.create(new IgfsPath(fileName), true)) { PrintWriter w = new PrintWriter(new OutputStreamWriter(os)); @@ -314,7 +314,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest public void testTaskCancelling() throws Exception { Configuration cfg = prepareJobForCancelling(); - GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1); + HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1); final IgniteInternalFuture fut = grid(0).hadoop().submit(jobId, createJobInfo(cfg)); @@ -358,9 +358,9 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest public void testJobKill() throws Exception { Configuration cfg = prepareJobForCancelling(); - GridHadoop hadoop = grid(0).hadoop(); + Hadoop hadoop = grid(0).hadoop(); - GridHadoopJobId jobId = new GridHadoopJobId(UUID.randomUUID(), 1); + HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1); //Kill unknown job. boolean killRes = hadoop.kill(jobId); @@ -425,7 +425,7 @@ public class GridHadoopTaskExecutionSelfTest extends GridHadoopAbstractSelfTest try { super.run(ctx); } - catch (GridHadoopTaskCancelledException e) { + catch (HadoopTaskCancelledException e) { cancelledTasks.incrementAndGet(); throw e; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksAllVersionsTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java similarity index 66% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksAllVersionsTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java index af3f872..aaf0f92 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksAllVersionsTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksAllVersionsTest.java @@ -31,7 +31,7 @@ import java.util.*; /** * Tests of Map, Combine and Reduce task executions of any version of hadoop API. */ -abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCountTest { +abstract class HadoopTasksAllVersionsTest extends HadoopAbstractWordCountTest { /** Empty hosts array. */ private static final String[] HOSTS = new String[0]; @@ -43,7 +43,7 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun * @return Hadoop job. * @throws IOException If fails. */ - public abstract GridHadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception; + public abstract HadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception; /** * @return prefix of reducer output file name. 
It's "part-" for v1 and "part-r-" for v2 API @@ -61,7 +61,7 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun igfs.mkdirs(inDir); - IgfsPath inFile = new IgfsPath(inDir, GridHadoopWordCount2.class.getSimpleName() + "-input"); + IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input"); URI inFileUri = URI.create(igfsScheme() + inFile.toString()); @@ -70,20 +70,20 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun pw.println("world1 hello1"); } - GridHadoopFileBlock fileBlock1 = new GridHadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1); + HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1); try (PrintWriter pw = new PrintWriter(igfs.append(inFile, false))) { pw.println("hello2 world2"); pw.println("world3 hello3"); } - GridHadoopFileBlock fileBlock2 = new GridHadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(), + HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, fileBlock1.length(), igfs.info(inFile).length() - fileBlock1.length()); - GridHadoopV2Job gridJob = getHadoopJob(igfsScheme() + inFile.toString(), igfsScheme() + PATH_OUTPUT); + HadoopV2Job gridJob = getHadoopJob(igfsScheme() + inFile.toString(), igfsScheme() + PATH_OUTPUT); - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(GridHadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock1); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock1); - GridHadoopTestTaskContext ctx = new GridHadoopTestTaskContext(taskInfo, gridJob); + HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob); ctx.mockOutput().clear(); @@ -93,7 +93,7 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun ctx.mockOutput().clear(); - ctx.taskInfo(new GridHadoopTaskInfo(GridHadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock2)); + ctx.taskInfo(new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock2)); ctx.run(); @@ -110,11 +110,11 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun * @return Context with mock output. * @throws IgniteCheckedException If fails. */ - private GridHadoopTestTaskContext runTaskWithInput(GridHadoopV2Job gridJob, GridHadoopTaskType taskType, + private HadoopTestTaskContext runTaskWithInput(HadoopV2Job gridJob, HadoopTaskType taskType, int taskNum, String... words) throws IgniteCheckedException { - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(taskType, gridJob.id(), taskNum, 0, null); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(taskType, gridJob.id(), taskNum, 0, null); - GridHadoopTestTaskContext ctx = new GridHadoopTestTaskContext(taskInfo, gridJob); + HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob); for (int i = 0; i < words.length; i+=2) { List valList = new ArrayList<>(); @@ -136,10 +136,10 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun * @throws Exception If fails. 
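Assembled from the hunks above, the renamed harness drives a single MAP task over one block of the input file like this (all calls appear verbatim in the diff):

    HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, igfs.info(inFile).length() - 1);

    HadoopV2Job gridJob = getHadoopJob(igfsScheme() + inFile.toString(), igfsScheme() + PATH_OUTPUT);

    HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock1);

    HadoopTestTaskContext ctx = new HadoopTestTaskContext(taskInfo, gridJob);

    ctx.mockOutput().clear(); // Mock output collects (word, count) pairs for assertions.

    ctx.run();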
*/ public void testReduceTask() throws Exception { - GridHadoopV2Job gridJob = getHadoopJob(igfsScheme() + PATH_INPUT, igfsScheme() + PATH_OUTPUT); + HadoopV2Job gridJob = getHadoopJob(igfsScheme() + PATH_INPUT, igfsScheme() + PATH_OUTPUT); - runTaskWithInput(gridJob, GridHadoopTaskType.REDUCE, 0, "word1", "5", "word2", "10"); - runTaskWithInput(gridJob, GridHadoopTaskType.REDUCE, 1, "word3", "7", "word4", "15"); + runTaskWithInput(gridJob, HadoopTaskType.REDUCE, 0, "word1", "5", "word2", "10"); + runTaskWithInput(gridJob, HadoopTaskType.REDUCE, 1, "word3", "7", "word4", "15"); assertEquals( "word1\t5\n" + @@ -162,14 +162,14 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun * @throws Exception If fails. */ public void testCombinerTask() throws Exception { - GridHadoopV2Job gridJob = getHadoopJob("/", "/"); + HadoopV2Job gridJob = getHadoopJob("/", "/"); - GridHadoopTestTaskContext ctx = - runTaskWithInput(gridJob, GridHadoopTaskType.COMBINE, 0, "word1", "5", "word2", "10"); + HadoopTestTaskContext ctx = + runTaskWithInput(gridJob, HadoopTaskType.COMBINE, 0, "word1", "5", "word2", "10"); assertEquals("word1,5; word2,10", Joiner.on("; ").join(ctx.mockOutput())); - ctx = runTaskWithInput(gridJob, GridHadoopTaskType.COMBINE, 1, "word3", "7", "word4", "15"); + ctx = runTaskWithInput(gridJob, HadoopTaskType.COMBINE, 1, "word3", "7", "word4", "15"); assertEquals("word3,7; word4,15", Joiner.on("; ").join(ctx.mockOutput())); } @@ -182,18 +182,18 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun * @return Context of combine task with mock output. * @throws IgniteCheckedException If fails. */ - private GridHadoopTestTaskContext runMapCombineTask(GridHadoopFileBlock fileBlock, GridHadoopV2Job gridJob) + private HadoopTestTaskContext runMapCombineTask(HadoopFileBlock fileBlock, HadoopV2Job gridJob) throws IgniteCheckedException { - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(GridHadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.MAP, gridJob.id(), 0, 0, fileBlock); - GridHadoopTestTaskContext mapCtx = new GridHadoopTestTaskContext(taskInfo, gridJob); + HadoopTestTaskContext mapCtx = new HadoopTestTaskContext(taskInfo, gridJob); mapCtx.run(); //Prepare input for combine - taskInfo = new GridHadoopTaskInfo(GridHadoopTaskType.COMBINE, gridJob.id(), 0, 0, null); + taskInfo = new HadoopTaskInfo(HadoopTaskType.COMBINE, gridJob.id(), 0, 0, null); - GridHadoopTestTaskContext combineCtx = new GridHadoopTestTaskContext(taskInfo, gridJob); + HadoopTestTaskContext combineCtx = new HadoopTestTaskContext(taskInfo, gridJob); combineCtx.makeTreeOfWritables(mapCtx.mockOutput()); @@ -214,7 +214,7 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun igfs.mkdirs(inDir); - IgfsPath inFile = new IgfsPath(inDir, GridHadoopWordCount2.class.getSimpleName() + "-input"); + IgfsPath inFile = new IgfsPath(inDir, HadoopWordCount2.class.getSimpleName() + "-input"); URI inFileUri = URI.create(igfsScheme() + inFile.toString()); @@ -225,26 +225,26 @@ abstract class GridHadoopTasksAllVersionsTest extends GridHadoopAbstractWordCoun Long l = fileLen / 2; - GridHadoopFileBlock fileBlock1 = new GridHadoopFileBlock(HOSTS, inFileUri, 0, l); - GridHadoopFileBlock fileBlock2 = new GridHadoopFileBlock(HOSTS, inFileUri, l, fileLen - l); + HadoopFileBlock fileBlock1 = new HadoopFileBlock(HOSTS, inFileUri, 0, l); + HadoopFileBlock fileBlock2 = new HadoopFileBlock(HOSTS, inFileUri, 
l, fileLen - l); - GridHadoopV2Job gridJob = getHadoopJob(inFileUri.toString(), igfsScheme() + PATH_OUTPUT); + HadoopV2Job gridJob = getHadoopJob(inFileUri.toString(), igfsScheme() + PATH_OUTPUT); - GridHadoopTestTaskContext combine1Ctx = runMapCombineTask(fileBlock1, gridJob); + HadoopTestTaskContext combine1Ctx = runMapCombineTask(fileBlock1, gridJob); - GridHadoopTestTaskContext combine2Ctx = runMapCombineTask(fileBlock2, gridJob); + HadoopTestTaskContext combine2Ctx = runMapCombineTask(fileBlock2, gridJob); //Prepare input for combine - GridHadoopTaskInfo taskInfo = new GridHadoopTaskInfo(GridHadoopTaskType.REDUCE, gridJob.id(), 0, 0, null); + HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, gridJob.id(), 0, 0, null); - GridHadoopTestTaskContext reduceCtx = new GridHadoopTestTaskContext(taskInfo, gridJob); + HadoopTestTaskContext reduceCtx = new HadoopTestTaskContext(taskInfo, gridJob); reduceCtx.makeTreeOfWritables(combine1Ctx.mockOutput()); reduceCtx.makeTreeOfWritables(combine2Ctx.mockOutput()); reduceCtx.run(); - reduceCtx.taskInfo(new GridHadoopTaskInfo(GridHadoopTaskType.COMMIT, gridJob.id(), 0, 0, null)); + reduceCtx.taskInfo(new HadoopTaskInfo(HadoopTaskType.COMMIT, gridJob.id(), 0, 0, null)); reduceCtx.run(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV1Test.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java similarity index 74% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV1Test.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java index 15ac125..b41a260 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV1Test.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV1Test.java @@ -24,12 +24,12 @@ import org.apache.ignite.internal.processors.hadoop.v2.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Tests of Map, Combine and Reduce task executions via running of job of hadoop API v1. */ -public class GridHadoopTasksV1Test extends GridHadoopTasksAllVersionsTest { +public class HadoopTasksV1Test extends HadoopTasksAllVersionsTest { /** * Creates WordCount hadoop job for API v1. * @@ -38,16 +38,16 @@ public class GridHadoopTasksV1Test extends GridHadoopTasksAllVersionsTest { * @return Hadoop job. * @throws IOException If fails. 
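Read in one piece, the reduce-side hunk above chains the phases as follows: map and combine each block, merge both combiner outputs into the reducer's mock input, then run REDUCE followed by COMMIT on the same context:

    HadoopTestTaskContext combine1Ctx = runMapCombineTask(fileBlock1, gridJob);
    HadoopTestTaskContext combine2Ctx = runMapCombineTask(fileBlock2, gridJob);

    HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, gridJob.id(), 0, 0, null);

    HadoopTestTaskContext reduceCtx = new HadoopTestTaskContext(taskInfo, gridJob);

    reduceCtx.makeTreeOfWritables(combine1Ctx.mockOutput());
    reduceCtx.makeTreeOfWritables(combine2Ctx.mockOutput());

    reduceCtx.run();

    reduceCtx.taskInfo(new HadoopTaskInfo(HadoopTaskType.COMMIT, gridJob.id(), 0, 0, null));

    reduceCtx.run();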
*/ - @Override public GridHadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception { - JobConf jobConf = GridHadoopWordCount1.getJob(inFile, outFile); + @Override public HadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception { + JobConf jobConf = HadoopWordCount1.getJob(inFile, outFile); setupFileSystems(jobConf); - GridHadoopDefaultJobInfo jobInfo = createJobInfo(jobConf); + HadoopDefaultJobInfo jobInfo = createJobInfo(jobConf); - GridHadoopJobId jobId = new GridHadoopJobId(new UUID(0, 0), 0); + HadoopJobId jobId = new HadoopJobId(new UUID(0, 0), 0); - return new GridHadoopV2Job(jobId, jobInfo, log); + return new HadoopV2Job(jobId, jobInfo, log); } /** {@inheritDoc} */ diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV2Test.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java similarity index 75% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV2Test.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java index e48eb01..b677c63 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTasksV2Test.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTasksV2Test.java @@ -28,12 +28,12 @@ import org.apache.ignite.internal.processors.hadoop.v2.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Tests of Map, Combine and Reduce task executions via running of job of hadoop API v2. */ -public class GridHadoopTasksV2Test extends GridHadoopTasksAllVersionsTest { +public class HadoopTasksV2Test extends HadoopTasksAllVersionsTest { /** * Creates WordCount hadoop job for API v2. * @@ -42,13 +42,13 @@ public class GridHadoopTasksV2Test extends GridHadoopTasksAllVersionsTest { * @return Hadoop job. * @throws Exception if fails. 
 */
-    @Override public GridHadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception {
+    @Override public HadoopV2Job getHadoopJob(String inFile, String outFile) throws Exception {
         Job job = Job.getInstance();

         job.setOutputKeyClass(Text.class);
         job.setOutputValueClass(IntWritable.class);

-        GridHadoopWordCount2.setTasksClasses(job, true, true, true);
+        HadoopWordCount2.setTasksClasses(job, true, true, true);

         Configuration conf = job.getConfiguration();
@@ -57,15 +57,15 @@ public class GridHadoopTasksV2Test extends GridHadoopTasksAllVersionsTest {
         FileInputFormat.setInputPaths(job, new Path(inFile));
         FileOutputFormat.setOutputPath(job, new Path(outFile));

-        job.setJarByClass(GridHadoopWordCount2.class);
+        job.setJarByClass(HadoopWordCount2.class);

-        Job hadoopJob = GridHadoopWordCount2.getJob(inFile, outFile);
+        Job hadoopJob = HadoopWordCount2.getJob(inFile, outFile);

-        GridHadoopDefaultJobInfo jobInfo = createJobInfo(hadoopJob.getConfiguration());
+        HadoopDefaultJobInfo jobInfo = createJobInfo(hadoopJob.getConfiguration());

-        GridHadoopJobId jobId = new GridHadoopJobId(new UUID(0, 0), 0);
+        HadoopJobId jobId = new HadoopJobId(new UUID(0, 0), 0);

-        return new GridHadoopV2Job(jobId, jobInfo, log);
+        return new HadoopV2Job(jobId, jobInfo, log);
     }

     /** {@inheritDoc} */
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestRoundRobinMrPlanner.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
similarity index 74%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestRoundRobinMrPlanner.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
index 5baa8cd..a56c7c7 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestRoundRobinMrPlanner.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestRoundRobinMrPlanner.java
@@ -27,22 +27,22 @@ import java.util.*;
 /**
  * Round-robin mr planner.
  */
-public class GridHadoopTestRoundRobinMrPlanner implements GridHadoopMapReducePlanner {
+public class HadoopTestRoundRobinMrPlanner implements HadoopMapReducePlanner {
     /** {@inheritDoc} */
-    @Override public GridHadoopMapReducePlan preparePlan(GridHadoopJob job, Collection<ClusterNode> top,
-        @Nullable GridHadoopMapReducePlan oldPlan) throws IgniteCheckedException {
+    @Override public HadoopMapReducePlan preparePlan(HadoopJob job, Collection<ClusterNode> top,
+        @Nullable HadoopMapReducePlan oldPlan) throws IgniteCheckedException {
         if (top.isEmpty())
             throw new IllegalArgumentException("Topology is empty");

         // Has at least one element.
         Iterator<ClusterNode> it = top.iterator();

-        Map<UUID, Collection<GridHadoopInputSplit>> mappers = new HashMap<>();
+        Map<UUID, Collection<HadoopInputSplit>> mappers = new HashMap<>();

-        for (GridHadoopInputSplit block : job.input()) {
+        for (HadoopInputSplit block : job.input()) {
             ClusterNode node = it.next();

-            Collection<GridHadoopInputSplit> nodeBlocks = mappers.get(node.id());
+            Collection<HadoopInputSplit> nodeBlocks = mappers.get(node.id());

             if (nodeBlocks == null) {
                 nodeBlocks = new ArrayList<>();
@@ -61,6 +61,6 @@ public class GridHadoopTestRoundRobinMrPlanner implements GridHadoopMapReducePlanner {
         for (int i = 0; i < rdc.length; i++)
             rdc[i] = i;

-        return new GridHadoopDefaultMapReducePlan(mappers, Collections.singletonMap(it.next().id(), rdc));
+        return new HadoopDefaultMapReducePlan(mappers, Collections.singletonMap(it.next().id(), rdc));
     }
 }
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestTaskContext.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
similarity index 89%
rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestTaskContext.java
rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
index 4e0aa9b..e444270 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestTaskContext.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestTaskContext.java
@@ -28,7 +28,7 @@ import java.util.*;
 /**
  * Context for test purpose.
  */
-class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
+class HadoopTestTaskContext extends HadoopV2TaskContext {
     /**
      * Simple key-vale pair.
      * @param <K> Key class.
@@ -79,7 +79,7 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
     private Map<Object, List> mockInput = new TreeMap<>();

     /** Context output implementation to write data into mockOutput. */
-    private GridHadoopTaskOutput output = new GridHadoopTaskOutput() {
+    private HadoopTaskOutput output = new HadoopTaskOutput() {
         /** {@inheritDoc} */
         @Override public void write(Object key, Object val) {
             //Check of casting and extract/copy values
@@ -96,7 +96,7 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
     };

     /** Context input implementation to read data from mockInput. */
-    private GridHadoopTaskInput input = new GridHadoopTaskInput() {
+    private HadoopTaskInput input = new HadoopTaskInput() {
         /** Iterator of keys and associated lists of values. */
         Iterator<Map.Entry<Object, List>> iter;
@@ -159,7 +159,7 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
     public void makeTreeOfWritables(Iterable<Pair<String, Integer>> flatData) {
         Text key = new Text();

-        for (GridHadoopTestTaskContext.Pair<String, Integer> pair : flatData) {
+        for (HadoopTestTaskContext.Pair<String, Integer> pair : flatData) {
             key.set(pair.key);

             ArrayList<IntWritable> valList;
@@ -178,7 +178,7 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
      * @param taskInfo Task info.
      * @param gridJob Grid Hadoop job.
      */
-    public GridHadoopTestTaskContext(GridHadoopTaskInfo taskInfo, GridHadoopJob gridJob) throws IgniteCheckedException {
+    public HadoopTestTaskContext(HadoopTaskInfo taskInfo, HadoopJob gridJob) throws IgniteCheckedException {
         super(taskInfo, gridJob, gridJob.id(), null, jobConfDataInput(gridJob));
     }
@@ -189,10 +189,10 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext {
      * @return DataInput with JobConf.
      * @throws IgniteCheckedException If failed.
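For reference, the planner's round-robin loop in full. The map-population tail is elided by the hunk above, so those two lines are an assumption about the elided code, not patch text:

    Map<UUID, Collection<HadoopInputSplit>> mappers = new HashMap<>();

    Iterator<ClusterNode> it = top.iterator();

    for (HadoopInputSplit block : job.input()) {
        ClusterNode node = it.next(); // Next node in round-robin order.

        Collection<HadoopInputSplit> nodeBlocks = mappers.get(node.id());

        if (nodeBlocks == null) {
            nodeBlocks = new ArrayList<>();

            mappers.put(node.id(), nodeBlocks); // Assumed: elided by the hunk.
        }

        nodeBlocks.add(block); // Assumed: elided by the hunk.
    }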
*/ - private static DataInput jobConfDataInput(GridHadoopJob job) throws IgniteCheckedException { + private static DataInput jobConfDataInput(HadoopJob job) throws IgniteCheckedException { JobConf jobConf = new JobConf(); - for (Map.Entry e : ((GridHadoopDefaultJobInfo)job.info()).properties().entrySet()) + for (Map.Entry e : ((HadoopDefaultJobInfo)job.info()).properties().entrySet()) jobConf.set(e.getKey(), e.getValue()); ByteArrayOutputStream buf = new ByteArrayOutputStream(); @@ -208,12 +208,12 @@ class GridHadoopTestTaskContext extends GridHadoopV2TaskContext { } /** {@inheritDoc} */ - @Override public GridHadoopTaskOutput output() { + @Override public HadoopTaskOutput output() { return output; } /** {@inheritDoc} */ - @Override public GridHadoopTaskInput input() { + @Override public HadoopTaskInput input() { return input; } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestUtils.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java similarity index 98% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestUtils.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java index cdbb809..ef60762 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopTestUtils.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopTestUtils.java @@ -27,7 +27,7 @@ import static org.junit.Assert.*; /** * Utility class for tests. */ -public class GridHadoopTestUtils { +public class HadoopTestUtils { /** * Checks that job statistics file contains valid strings only. * diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopV2JobSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java similarity index 76% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopV2JobSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java index b201614..ebc89f4 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopV2JobSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopV2JobSelfTest.java @@ -27,12 +27,12 @@ import org.apache.ignite.internal.processors.hadoop.v2.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** - * Self test of {@link GridHadoopV2Job}. + * Self test of {@link org.apache.ignite.internal.processors.hadoop.v2.HadoopV2Job}. */ -public class GridHadoopV2JobSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopV2JobSelfTest extends HadoopAbstractSelfTest { /** */ private static final String TEST_SERIALIZED_VALUE = "Test serialized value"; @@ -55,7 +55,7 @@ public class GridHadoopV2JobSelfTest extends GridHadoopAbstractSelfTest { } /** - * Tests that {@link GridHadoopJob} provides wrapped serializer if it's set in configuration. + * Tests that {@link HadoopJob} provides wrapped serializer if it's set in configuration. * * @throws IgniteCheckedException If fails. 
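jobConfDataInput(...) above works because Hadoop's Configuration (and therefore JobConf) is Writable and can serialize itself. A minimal self-contained sketch of that round trip; the property name is hypothetical:

    JobConf jobConf = new JobConf();

    jobConf.set("example.key", "example.value"); // Hypothetical property.

    ByteArrayOutputStream buf = new ByteArrayOutputStream();

    jobConf.write(new DataOutputStream(buf)); // Writable: serialize the whole config.

    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));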
*/ @@ -66,14 +66,14 @@ public class GridHadoopV2JobSelfTest extends GridHadoopAbstractSelfTest { cfg.setMapOutputValueClass(Text.class); cfg.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, CustomSerialization.class.getName()); - GridHadoopJob job = new GridHadoopV2Job(new GridHadoopJobId(UUID.randomUUID(), 1), createJobInfo(cfg), log); + HadoopJob job = new HadoopV2Job(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(cfg), log); - GridHadoopTaskContext taskCtx = job.getTaskContext(new GridHadoopTaskInfo(GridHadoopTaskType.MAP, null, 0, 0, + HadoopTaskContext taskCtx = job.getTaskContext(new HadoopTaskInfo(HadoopTaskType.MAP, null, 0, 0, null)); - GridHadoopSerialization ser = taskCtx.keySerialization(); + HadoopSerialization ser = taskCtx.keySerialization(); - assertEquals(GridHadoopSerializationWrapper.class.getName(), ser.getClass().getName()); + assertEquals(HadoopSerializationWrapper.class.getName(), ser.getClass().getName()); DataInput in = new DataInputStream(new ByteArrayInputStream(new byte[0])); @@ -81,7 +81,7 @@ public class GridHadoopV2JobSelfTest extends GridHadoopAbstractSelfTest { ser = taskCtx.valueSerialization(); - assertEquals(GridHadoopSerializationWrapper.class.getName(), ser.getClass().getName()); + assertEquals(HadoopSerializationWrapper.class.getName(), ser.getClass().getName()); assertEquals(TEST_SERIALIZED_VALUE, ser.read(in, null).toString()); } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopValidationSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java similarity index 95% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopValidationSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java index 051d073..558dec5 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/GridHadoopValidationSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/HadoopValidationSelfTest.java @@ -22,7 +22,7 @@ import org.apache.ignite.configuration.*; /** * Configuration validation tests. */ -public class GridHadoopValidationSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopValidationSelfTest extends HadoopAbstractSelfTest { /** Peer class loading enabled flag. */ public boolean peerClassLoading; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java similarity index 89% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java index 40cf636..dd9058d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1.java @@ -24,7 +24,7 @@ import org.apache.hadoop.mapred.*; /** * Example job for testing hadoop task execution. */ -public class GridHadoopWordCount1 { +public class HadoopWordCount1 { /** * Entry point to start job. * @param args command line parameters. 
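The hunks below move HadoopWordCount1 onto the renamed mapper and reducer classes. Assembled into one hedged sketch, the v1 wiring that getJob(...) plus setTasksClasses(conf, true, true, true) produces looks roughly like this:

    JobConf conf = new JobConf(HadoopWordCount1.class);

    conf.setJobName("wordcount");

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class); // Assumed from elided context.

    conf.setMapperClass(HadoopWordCount1Map.class);
    conf.setInputFormat(TextInputFormat.class);

    conf.setCombinerClass(HadoopWordCount1Reduce.class);

    conf.setReducerClass(HadoopWordCount1Reduce.class);
    conf.setOutputFormat(TextOutputFormat.class);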
@@ -49,7 +49,7 @@ public class GridHadoopWordCount1 { * @return Job configuration */ public static JobConf getJob(String input, String output) { - JobConf conf = new JobConf(GridHadoopWordCount1.class); + JobConf conf = new JobConf(HadoopWordCount1.class); conf.setJobName("wordcount"); conf.setOutputKeyClass(Text.class); @@ -73,15 +73,15 @@ public class GridHadoopWordCount1 { */ public static void setTasksClasses(JobConf jobConf, boolean setMapper, boolean setCombiner, boolean setReducer) { if (setMapper) { - jobConf.setMapperClass(GridHadoopWordCount1Map.class); + jobConf.setMapperClass(HadoopWordCount1Map.class); jobConf.setInputFormat(TextInputFormat.class); } if (setCombiner) - jobConf.setCombinerClass(GridHadoopWordCount1Reduce.class); + jobConf.setCombinerClass(HadoopWordCount1Reduce.class); if (setReducer) { - jobConf.setReducerClass(GridHadoopWordCount1Reduce.class); + jobConf.setReducerClass(HadoopWordCount1Reduce.class); jobConf.setOutputFormat(TextOutputFormat.class); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Map.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Map.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java index 5d8e0cc..c10a7fb 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Map.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Map.java @@ -26,7 +26,7 @@ import java.util.*; /** * Mapper phase of WordCount job. */ -public class GridHadoopWordCount1Map extends MapReduceBase implements Mapper { +public class HadoopWordCount1Map extends MapReduceBase implements Mapper { /** Writable integer constant of '1' is writing as count of found words. */ private static final IntWritable one = new IntWritable(1); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Reduce.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java similarity index 93% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Reduce.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java index 1b69a43..76cd1c3 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount1Reduce.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount1Reduce.java @@ -26,7 +26,7 @@ import java.util.*; /** * Combiner and Reducer phase of WordCount job. */ -public class GridHadoopWordCount1Reduce extends MapReduceBase implements Reducer { +public class HadoopWordCount1Reduce extends MapReduceBase implements Reducer { /** Flag is to check that mapper was configured before run. 
*/ private boolean wasConfigured; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java similarity index 90% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java index 6310363..dc68df7 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2.java @@ -28,7 +28,7 @@ import java.io.*; /** * Example job for testing hadoop task execution. */ -public class GridHadoopWordCount2 { +public class HadoopWordCount2 { /** * Entry point to start job. * @@ -65,7 +65,7 @@ public class GridHadoopWordCount2 { FileInputFormat.setInputPaths(job, new Path(input)); FileOutputFormat.setOutputPath(job, new Path(output)); - job.setJarByClass(GridHadoopWordCount2.class); + job.setJarByClass(HadoopWordCount2.class); return job; } @@ -80,15 +80,15 @@ public class GridHadoopWordCount2 { */ public static void setTasksClasses(Job job, boolean setMapper, boolean setCombiner, boolean setReducer) { if (setMapper) { - job.setMapperClass(GridHadoopWordCount2Mapper.class); + job.setMapperClass(HadoopWordCount2Mapper.class); job.setInputFormatClass(TextInputFormat.class); } if (setCombiner) - job.setCombinerClass(GridHadoopWordCount2Reducer.class); + job.setCombinerClass(HadoopWordCount2Reducer.class); if (setReducer) { - job.setReducerClass(GridHadoopWordCount2Reducer.class); + job.setReducerClass(HadoopWordCount2Reducer.class); job.setOutputFormatClass(TextOutputFormat.class); } } diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Mapper.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java similarity index 95% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Mapper.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java index 849928a..6ca7ccd 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Mapper.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Mapper.java @@ -27,7 +27,7 @@ import java.util.*; /** * Mapper phase of WordCount job. */ -public class GridHadoopWordCount2Mapper extends Mapper implements Configurable { +public class HadoopWordCount2Mapper extends Mapper implements Configurable { /** Writable container for writing word. 
*/ private Text word = new Text(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Reducer.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java similarity index 94% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Reducer.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java index 922bb2f..fedaaf9 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/GridHadoopWordCount2Reducer.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/examples/HadoopWordCount2Reducer.java @@ -26,7 +26,7 @@ import java.io.*; /** * Combiner and Reducer phase of WordCount job. */ -public class GridHadoopWordCount2Reducer extends Reducer implements Configurable { +public class HadoopWordCount2Reducer extends Reducer implements Configurable { /** Writable container for writing sum of word counts. */ private IntWritable totalWordCnt = new IntWritable(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopAbstractMapTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java similarity index 77% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopAbstractMapTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java index 716fe19..b4ed5e1 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopAbstractMapTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopAbstractMapTest.java @@ -21,6 +21,8 @@ import org.apache.commons.collections.comparators.*; import org.apache.hadoop.io.*; import org.apache.ignite.*; import org.apache.ignite.internal.processors.hadoop.*; +import org.apache.ignite.internal.processors.hadoop.counter.*; +import org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters; import org.apache.ignite.internal.processors.hadoop.v2.*; import org.apache.ignite.testframework.junits.common.*; import org.jetbrains.annotations.*; @@ -30,11 +32,11 @@ import java.util.*; /** * Abstract class for maps test. */ -public abstract class GridHadoopAbstractMapTest extends GridCommonAbstractTest { +public abstract class HadoopAbstractMapTest extends GridCommonAbstractTest { /** * Test task context. 
*/ - protected static class TaskContext extends GridHadoopTaskContext { + protected static class TaskContext extends HadoopTaskContext { /** */ protected TaskContext() { @@ -42,30 +44,30 @@ public abstract class GridHadoopAbstractMapTest extends GridCommonAbstractTest { } /** {@inheritDoc} */ - @Override public T counter(String grp, String name, Class cls) { + @Override public T counter(String grp, String name, Class cls) { return null; } /** {@inheritDoc} */ - @Override public GridHadoopCounters counters() { + @Override public HadoopCounters counters() { return null; } /** {@inheritDoc} */ - @Override public GridHadoopPartitioner partitioner() throws IgniteCheckedException { + @Override public HadoopPartitioner partitioner() throws IgniteCheckedException { assert false; return null; } /** {@inheritDoc} */ - @Override public GridHadoopSerialization keySerialization() throws IgniteCheckedException { - return new GridHadoopWritableSerialization(IntWritable.class); + @Override public HadoopSerialization keySerialization() throws IgniteCheckedException { + return new HadoopWritableSerialization(IntWritable.class); } /** {@inheritDoc} */ - @Override public GridHadoopSerialization valueSerialization() throws IgniteCheckedException { - return new GridHadoopWritableSerialization(IntWritable.class); + @Override public HadoopSerialization valueSerialization() throws IgniteCheckedException { + return new HadoopWritableSerialization(IntWritable.class); } /** {@inheritDoc} */ @@ -101,7 +103,7 @@ public abstract class GridHadoopAbstractMapTest extends GridCommonAbstractTest { /** * Test job info. */ - protected static class JobInfo implements GridHadoopJobInfo { + protected static class JobInfo implements HadoopJobInfo { /** {@inheritDoc} */ @Nullable @Override public String property(String name) { return null; @@ -122,7 +124,7 @@ public abstract class GridHadoopAbstractMapTest extends GridCommonAbstractTest { } /** {@inheritDoc} */ - @Override public GridHadoopJob createJob(GridHadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException { + @Override public HadoopJob createJob(HadoopJobId jobId, IgniteLogger log) throws IgniteCheckedException { assert false; return null; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimapSelftest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java similarity index 86% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimapSelftest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java index 88dfd2b..ae6bafa 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopConcurrentHashMultimapSelftest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopConcurrentHashMultimapSelftest.java @@ -34,7 +34,7 @@ import static org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory.*; /** * */ -public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstractMapTest { +public class HadoopConcurrentHashMultimapSelftest extends HadoopAbstractMapTest { /** */ public void testMapSimple() throws Exception { GridUnsafeMemory mem = new GridUnsafeMemory(0); @@ -50,13 +50,13 @@ public class 
GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract int mapSize = 16 << rnd.nextInt(3); - GridHadoopJobInfo job = new JobInfo(); + HadoopJobInfo job = new JobInfo(); - GridHadoopTaskContext taskCtx = new TaskContext(); + HadoopTaskContext taskCtx = new TaskContext(); - GridHadoopConcurrentHashMultimap m = new GridHadoopConcurrentHashMultimap(job, mem, mapSize); + HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, mapSize); - GridHadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx); + HadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx); Multimap mm = ArrayListMultimap.create(); Multimap vis = ArrayListMultimap.create(); @@ -90,9 +90,9 @@ public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract assertEquals(0, mem.allocatedSize()); } - private void check(GridHadoopConcurrentHashMultimap m, Multimap mm, - final Multimap vis, GridHadoopTaskContext taskCtx) throws Exception { - final GridHadoopTaskInput in = m.input(taskCtx); + private void check(HadoopConcurrentHashMultimap m, Multimap mm, + final Multimap vis, HadoopTaskContext taskCtx) throws Exception { + final HadoopTaskInput in = m.input(taskCtx); Map> mmm = mm.asMap(); @@ -129,7 +129,7 @@ public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract final GridDataInput dataInput = new GridUnsafeDataInput(); - m.visit(false, new GridHadoopConcurrentHashMultimap.Visitor() { + m.visit(false, new HadoopConcurrentHashMultimap.Visitor() { /** */ IntWritable key = new IntWritable(); @@ -180,11 +180,11 @@ public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract Random rnd = new GridRandom(); for (int i = 0; i < 20; i++) { - GridHadoopJobInfo job = new JobInfo(); + HadoopJobInfo job = new JobInfo(); - final GridHadoopTaskContext taskCtx = new TaskContext(); + final HadoopTaskContext taskCtx = new TaskContext(); - final GridHadoopConcurrentHashMultimap m = new GridHadoopConcurrentHashMultimap(job, mem, 16); + final HadoopConcurrentHashMultimap m = new HadoopConcurrentHashMultimap(job, mem, 16); final ConcurrentMap> mm = new ConcurrentHashMap<>(); @@ -199,7 +199,7 @@ public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract IntWritable key = new IntWritable(); IntWritable val = new IntWritable(); - GridHadoopMultimap.Adder a = m.startAdding(taskCtx); + HadoopMultimap.Adder a = m.startAdding(taskCtx); for (int i = 0; i < 50000; i++) { int k = rnd.nextInt(32000); @@ -238,7 +238,7 @@ public class GridHadoopConcurrentHashMultimapSelftest extends GridHadoopAbstract assertTrue(m.capacity() > 32000); - GridHadoopTaskInput in = m.input(taskCtx); + HadoopTaskInput in = m.input(taskCtx); while (in.next()) { IntWritable key = (IntWritable) in.key(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMapSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java similarity index 90% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMapSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java index 92177ad..5b1b6a8 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopHashMapSelfTest.java +++ 
b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopHashMapSelfTest.java @@ -30,7 +30,7 @@ import java.util.concurrent.*; /** * */ -public class GridHadoopHashMapSelfTest extends GridHadoopAbstractMapTest { +public class HadoopHashMapSelfTest extends HadoopAbstractMapTest { public void _testAllocation() throws Exception { final GridUnsafeMemory mem = new GridUnsafeMemory(0); @@ -87,11 +87,11 @@ public class GridHadoopHashMapSelfTest extends GridHadoopAbstractMapTest { int mapSize = 16 << rnd.nextInt(3); - GridHadoopTaskContext taskCtx = new TaskContext(); + HadoopTaskContext taskCtx = new TaskContext(); - final GridHadoopHashMultimap m = new GridHadoopHashMultimap(new JobInfo(), mem, mapSize); + final HadoopHashMultimap m = new HadoopHashMultimap(new JobInfo(), mem, mapSize); - GridHadoopMultimap.Adder a = m.startAdding(taskCtx); + HadoopMultimap.Adder a = m.startAdding(taskCtx); Multimap mm = ArrayListMultimap.create(); @@ -124,8 +124,8 @@ public class GridHadoopHashMapSelfTest extends GridHadoopAbstractMapTest { assertEquals(0, mem.allocatedSize()); } - private void check(GridHadoopHashMultimap m, Multimap mm, GridHadoopTaskContext taskCtx) throws Exception { - final GridHadoopTaskInput in = m.input(taskCtx); + private void check(HadoopHashMultimap m, Multimap mm, HadoopTaskContext taskCtx) throws Exception { + final HadoopTaskInput in = m.input(taskCtx); Map> mmm = mm.asMap(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipListSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java similarity index 88% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipListSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java index 6ba00ad..8a046e0 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/GridHadoopSkipListSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/collections/HadoopSkipListSelfTest.java @@ -35,7 +35,7 @@ import static org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory.*; /** * Skip list tests. 
*/ -public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { +public class HadoopSkipListSelfTest extends HadoopAbstractMapTest { /** * */ @@ -47,7 +47,7 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { int all = 10000; for (int i = 0; i < all; i++) { - int level = GridHadoopSkipList.randomLevel(rnd); + int level = HadoopSkipList.randomLevel(rnd); levelsCnts[level]++; } @@ -82,13 +82,13 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { int mapSize = 16 << rnd.nextInt(6); - GridHadoopJobInfo job = new JobInfo(); + HadoopJobInfo job = new JobInfo(); - GridHadoopTaskContext taskCtx = new TaskContext(); + HadoopTaskContext taskCtx = new TaskContext(); - GridHadoopMultimap m = new GridHadoopSkipList(job, mem); + HadoopMultimap m = new HadoopSkipList(job, mem); - GridHadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx); + HadoopConcurrentHashMultimap.Adder a = m.startAdding(taskCtx); Multimap mm = ArrayListMultimap.create(); Multimap vis = ArrayListMultimap.create(); @@ -122,9 +122,9 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { assertEquals(0, mem.allocatedSize()); } - private void check(GridHadoopMultimap m, Multimap mm, final Multimap vis, GridHadoopTaskContext taskCtx) + private void check(HadoopMultimap m, Multimap mm, final Multimap vis, HadoopTaskContext taskCtx) throws Exception { - final GridHadoopTaskInput in = m.input(taskCtx); + final HadoopTaskInput in = m.input(taskCtx); Map> mmm = mm.asMap(); @@ -165,7 +165,7 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { final GridDataInput dataInput = new GridUnsafeDataInput(); - m.visit(false, new GridHadoopConcurrentHashMultimap.Visitor() { + m.visit(false, new HadoopConcurrentHashMultimap.Visitor() { /** */ IntWritable key = new IntWritable(); @@ -216,11 +216,11 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { Random rnd = new GridRandom(); for (int i = 0; i < 20; i++) { - GridHadoopJobInfo job = new JobInfo(); + HadoopJobInfo job = new JobInfo(); - final GridHadoopTaskContext taskCtx = new TaskContext(); + final HadoopTaskContext taskCtx = new TaskContext(); - final GridHadoopMultimap m = new GridHadoopSkipList(job, mem); + final HadoopMultimap m = new HadoopSkipList(job, mem); final ConcurrentMap> mm = new ConcurrentHashMap<>(); @@ -235,7 +235,7 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { IntWritable key = new IntWritable(); IntWritable val = new IntWritable(); - GridHadoopMultimap.Adder a = m.startAdding(taskCtx); + HadoopMultimap.Adder a = m.startAdding(taskCtx); for (int i = 0; i < 50000; i++) { int k = rnd.nextInt(32000); @@ -268,7 +268,7 @@ public class GridHadoopSkipListSelfTest extends GridHadoopAbstractMapTest { } }, 3 + rnd.nextInt(27)); - GridHadoopTaskInput in = m.input(taskCtx); + HadoopTaskInput in = m.input(taskCtx); int prevKey = Integer.MIN_VALUE; diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataStreamSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java similarity index 96% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataStreamSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java index 39a537b..48b99ab 100644 
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/GridHadoopDataStreamSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/shuffle/streams/HadoopDataStreamSelfTest.java @@ -26,12 +26,12 @@ import java.util.*; /** * */ -public class GridHadoopDataStreamSelfTest extends GridCommonAbstractTest { +public class HadoopDataStreamSelfTest extends GridCommonAbstractTest { public void testStreams() throws IOException { GridUnsafeMemory mem = new GridUnsafeMemory(0); - GridHadoopDataOutStream out = new GridHadoopDataOutStream(mem); + HadoopDataOutStream out = new HadoopDataOutStream(mem); int size = 4 * 1024; @@ -86,7 +86,7 @@ public class GridHadoopDataStreamSelfTest extends GridCommonAbstractTest { out.write(new byte[]{0,1,2,3}, 1, 2); out.writeUTF("mom washes rum"); - GridHadoopDataInStream in = new GridHadoopDataInStream(mem); + HadoopDataInStream in = new HadoopDataInStream(mem); in.buffer().set(ptr, out.buffer().pointer()); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorServiceTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java similarity index 92% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorServiceTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java index c97b6ab..aa50fa9 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/GridHadoopExecutorServiceTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/HadoopExecutorServiceTest.java @@ -28,12 +28,12 @@ import java.util.concurrent.atomic.*; /** * */ -public class GridHadoopExecutorServiceTest extends GridCommonAbstractTest { +public class HadoopExecutorServiceTest extends GridCommonAbstractTest { /** * @throws Exception If failed. 
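The data-stream test above round-trips values through one off-heap region. A hedged sketch of its shape; the allocation line is elided by the hunks, so the origin of ptr is an assumption about the test's internals:

    GridUnsafeMemory mem = new GridUnsafeMemory(0);

    HadoopDataOutStream out = new HadoopDataOutStream(mem);

    int size = 4 * 1024;

    long ptr = mem.allocate(size); // Assumed: the hunks elide how ptr is obtained.

    out.buffer().set(ptr, size);   // Assumed sizing call, mirrored from the read side.

    out.write(new byte[] {0, 1, 2, 3}, 1, 2);
    out.writeUTF("mom washes rum");

    HadoopDataInStream in = new HadoopDataInStream(mem);

    in.buffer().set(ptr, out.buffer().pointer()); // Verbatim from the diff.

    in.readFully(new byte[2]);                    // Skip the two bytes written above.
    assertEquals("mom washes rum", in.readUTF());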
*/ public void testExecutesAll() throws Exception { - final GridHadoopExecutorService exec = new GridHadoopExecutorService(log, "_GRID_NAME_", 10, 5); + final HadoopExecutorService exec = new HadoopExecutorService(log, "_GRID_NAME_", 10, 5); for (int i = 0; i < 5; i++) { final int loops = 5000; @@ -77,7 +77,7 @@ public class GridHadoopExecutorServiceTest extends GridCommonAbstractTest { */ public void testShutdown() throws Exception { for (int i = 0; i < 5; i++) { - final GridHadoopExecutorService exec = new GridHadoopExecutorService(log, "_GRID_NAME_", 10, 5); + final HadoopExecutorService exec = new HadoopExecutorService(log, "_GRID_NAME_", 10, 5); final LongAdder sum = new LongAdder(); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutionSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java similarity index 92% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutionSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java index 52bfa98..59ac445 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/GridHadoopExternalTaskExecutionSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/HadoopExternalTaskExecutionSelfTest.java @@ -24,6 +24,7 @@ import org.apache.hadoop.mapreduce.*; import org.apache.hadoop.mapreduce.lib.input.*; import org.apache.hadoop.mapreduce.lib.output.*; import org.apache.ignite.*; +import org.apache.ignite.configuration.*; import org.apache.ignite.igfs.*; import org.apache.ignite.internal.*; import org.apache.ignite.internal.processors.hadoop.*; @@ -32,12 +33,12 @@ import org.apache.ignite.internal.util.typedef.*; import java.io.*; import java.util.*; -import static org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.*; +import static org.apache.ignite.internal.processors.hadoop.HadoopUtils.*; /** * Job tracker self test. 
*/ -public class GridHadoopExternalTaskExecutionSelfTest extends GridHadoopAbstractSelfTest { +public class HadoopExternalTaskExecutionSelfTest extends HadoopAbstractSelfTest { /** {@inheritDoc} */ @Override protected boolean igfsEnabled() { return true; @@ -54,8 +55,8 @@ public class GridHadoopExternalTaskExecutionSelfTest extends GridHadoopAbstractS } /** {@inheritDoc} */ - @Override public GridHadoopConfiguration hadoopConfiguration(String gridName) { - GridHadoopConfiguration cfg = super.hadoopConfiguration(gridName); + @Override public HadoopConfiguration hadoopConfiguration(String gridName) { + HadoopConfiguration cfg = super.hadoopConfiguration(gridName); cfg.setExternalExecution(true); @@ -92,7 +93,7 @@ public class GridHadoopExternalTaskExecutionSelfTest extends GridHadoopAbstractS job.setJarByClass(getClass()); - IgniteInternalFuture fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1), + IgniteInternalFuture fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(job.getConfiguration())); fut.get(); @@ -128,7 +129,7 @@ public class GridHadoopExternalTaskExecutionSelfTest extends GridHadoopAbstractS job.setJarByClass(getClass()); - IgniteInternalFuture fut = grid(0).hadoop().submit(new GridHadoopJobId(UUID.randomUUID(), 1), + IgniteInternalFuture fut = grid(0).hadoop().submit(new HadoopJobId(UUID.randomUUID(), 1), createJobInfo(job.getConfiguration())); try { @@ -147,7 +148,7 @@ public class GridHadoopExternalTaskExecutionSelfTest extends GridHadoopAbstractS * @throws Exception If failed. */ private void prepareTestFile(String filePath) throws Exception { - IgniteFs igfs = grid(0).fileSystem(igfsName); + IgniteFileSystem igfs = grid(0).fileSystem(igfsName); try (IgfsOutputStream out = igfs.create(new IgfsPath(filePath), true)) { PrintWriter wr = new PrintWriter(new OutputStreamWriter(out)); diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopExternalCommunicationSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java similarity index 88% rename from modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopExternalCommunicationSelfTest.java rename to modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java index a725ddc..a21633d 100644 --- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/GridHadoopExternalCommunicationSelfTest.java +++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/taskexecutor/external/communication/HadoopExternalCommunicationSelfTest.java @@ -32,7 +32,7 @@ import java.util.concurrent.*; /** * Tests Hadoop external communication component. */ -public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractTest { +public class HadoopExternalCommunicationSelfTest extends GridCommonAbstractTest { /** * @throws Exception If failed. 
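Both external-execution tests above hinge on a single configuration switch, taken directly from the hunks:

    /** {@inheritDoc} */
    @Override public HadoopConfiguration hadoopConfiguration(String gridName) {
        HadoopConfiguration cfg = super.hadoopConfiguration(gridName);

        cfg.setExternalExecution(true); // Run tasks in a separate JVM, not in the node's process.

        return cfg;
    }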
*/ @@ -57,7 +57,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT IgniteLogger log = log(); - GridHadoopExternalCommunication[] comms = new GridHadoopExternalCommunication[4]; + HadoopExternalCommunication[] comms = new HadoopExternalCommunication[4]; try { String name = "grid"; @@ -67,7 +67,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT int msgs = 10; for (int i = 0; i < comms.length; i++) { - comms[i] = new GridHadoopExternalCommunication(parentNodeId, UUID.randomUUID(), marsh, log, + comms[i] = new HadoopExternalCommunication(parentNodeId, UUID.randomUUID(), marsh, log, Executors.newFixedThreadPool(1), name + i); if (useShmem) @@ -100,7 +100,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT } } finally { - for (GridHadoopExternalCommunication comm : comms) { + for (HadoopExternalCommunication comm : comms) { if (comm != null) comm.stop(); } @@ -110,7 +110,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT /** * */ - private static class TestHadoopListener implements GridHadoopMessageListener { + private static class TestHadoopListener implements HadoopMessageListener { /** Received messages (array list is safe because executor has one thread). */ private Collection msgs = new ArrayList<>(); @@ -125,7 +125,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT } /** {@inheritDoc} */ - @Override public void onMessageReceived(GridHadoopProcessDescriptor desc, GridHadoopMessage msg) { + @Override public void onMessageReceived(HadoopProcessDescriptor desc, HadoopMessage msg) { assert msg instanceof TestMessage; msgs.add((TestMessage)msg); @@ -134,7 +134,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT } /** {@inheritDoc} */ - @Override public void onConnectionLost(GridHadoopProcessDescriptor desc) { + @Override public void onConnectionLost(HadoopProcessDescriptor desc) { // No-op. } @@ -157,7 +157,7 @@ public class GridHadoopExternalCommunicationSelfTest extends GridCommonAbstractT /** * */ - private static class TestMessage implements GridHadoopMessage { + private static class TestMessage implements HadoopMessage { /** From index. 
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
index 836cdaa..4790e63 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -50,75 +50,75 @@ public class IgniteHadoopTestSuite extends TestSuite {
         downloadHadoop();
         downloadHive();

-        GridHadoopClassLoader ldr = new GridHadoopClassLoader(null);
+        HadoopClassLoader ldr = new HadoopClassLoader(null);

         TestSuite suite = new TestSuite("Ignite Hadoop MR Test Suite");

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackExternalPrimarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackExternalSecondarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackExternalDualSyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackExternalDualAsyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackExternalDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoopbackEmbeddedDualAsyncSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemSecondaryModeSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemSecondaryModeSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemClientSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemClientSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoggerStateSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemLoggerSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoggerStateSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemLoggerSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemHandshakeSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemHandshakeSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoop20FileSystemLoopbackPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemLoopbackPrimarySelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopDualSyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfsDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfsDualAsyncSelfTest.class.getName())));

         suite.addTest(IgfsEventsTestSuite.suiteNoarchOnly());

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopFileSystemsTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopFileSystemsTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopValidationSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopValidationSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopDefaultMapReducePlannerSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopJobTrackerSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopDefaultMapReducePlannerSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopJobTrackerSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopHashMapSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopDataStreamSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopConcurrentHashMultimapSelftest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopHashMapSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopDataStreamSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopConcurrentHashMultimapSelftest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopSkipListSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSkipListSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopTaskExecutionSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTaskExecutionSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopV2JobSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopV2JobSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopSerializationWrapperSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopSplitWrapperSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSerializationWrapperSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSplitWrapperSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopTasksV1Test.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopTasksV2Test.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTasksV1Test.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopTasksV2Test.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopMapReduceTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopMapReduceEmbeddedSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceEmbeddedSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopExternalTaskExecutionSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopExternalCommunicationSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalTaskExecutionSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalCommunicationSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopSortingTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopSortingExternalTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingExternalTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopGroupingTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopGroupingTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopClientProtocolSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopClientProtocolEmbeddedSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopClientProtocolSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopClientProtocolEmbeddedSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(GridHadoopCommandLineTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopCommandLineTest.class.getName())));

         suite.addTest(new TestSuite(ldr.loadClass(HadoopSecondaryFileSystemConfigurationTest.class.getName())));
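Every test class in the suite above is re-resolved by name through the renamed HadoopClassLoader, evidently so that each test and its Hadoop dependencies load inside that dedicated classloader rather than the test JVM's default classpath. The pattern, restated as a fragment; only the constructor and calls visible in the diff are assumed:

    // Load a test class through the Hadoop-aware classloader, as the suites above do.
    // The null constructor argument follows the usage shown in the diff.
    HadoopClassLoader ldr = new HadoopClassLoader(null);

    TestSuite suite = new TestSuite("Example Hadoop Suite");

    // Passing the class name rather than the Class object forces
    // re-resolution inside the isolated classloader.
    suite.addTest(new TestSuite(ldr.loadClass(HadoopMapReduceTest.class.getName())));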
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
index 6055db9..87233fc 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
@@ -36,25 +36,25 @@ public class IgniteIgfsLinuxAndMacOSTestSuite extends TestSuite {
     public static TestSuite suite() throws Exception {
         downloadHadoop();

-        GridHadoopClassLoader ldr = new GridHadoopClassLoader(null);
+        HadoopClassLoader ldr = new HadoopClassLoader(null);

         TestSuite suite = new TestSuite("Ignite IGFS Test Suite For Linux And Mac OS");

         suite.addTest(new TestSuite(ldr.loadClass(IgfsServerManagerIpcEndpointRegistrationOnLinuxAndMacSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemExternalPrimarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemExternalSecondarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemExternalDualSyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemExternalDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemExternalDualAsyncSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemEmbeddedPrimarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemEmbeddedSecondarySelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemEmbeddedDualSyncSelfTest.class.getName())));
-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedSecondarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedDualSyncSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemShmemEmbeddedDualAsyncSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoopFileSystemIpcCacheSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgniteHadoopFileSystemIpcCacheSelfTest.class.getName())));

-        suite.addTest(new TestSuite(ldr.loadClass(IgfsHadoop20FileSystemShmemPrimarySelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemShmemPrimarySelfTest.class.getName())));

         suite.addTest(IgfsEventsTestSuite.suite());
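Both suites keep the JUnit 3-style static suite() entry point, so either can still be driven by the classic text runner. A hypothetical launcher (the launcher class itself is not part of the patch):

    // Hypothetical launcher for the renamed suite via JUnit 3's text runner.
    import junit.textui.TestRunner;

    import org.apache.ignite.testsuites.IgniteIgfsLinuxAndMacOSTestSuite;

    public class RunIgfsSuites {
        public static void main(String[] args) throws Exception {
            // suite() first downloads Hadoop, as the diff above shows.
            TestRunner.run(IgniteIgfsLinuxAndMacOSTestSuite.suite());
        }
    }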
diff --git a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
index 31aa9e5..0f0a93f 100644
--- a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
+++ b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java
@@ -254,14 +254,14 @@ public class IgniteSpringBean implements Ignite, DisposableBean, InitializingBea
     }

     /** {@inheritDoc} */
-    @Override public IgniteFs fileSystem(String name) {
+    @Override public IgniteFileSystem fileSystem(String name) {
         assert g != null;

         return g.fileSystem(name);
     }

     /** {@inheritDoc} */
-    @Override public Collection<IgniteFs> fileSystems() {
+    @Override public Collection<IgniteFileSystem> fileSystems() {
         assert g != null;

         return g.fileSystems();
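With IgniteSpringBean now exposing the renamed IgniteFileSystem type, callers go through the new API name. A minimal usage sketch; the bean definition file and the "igfs" file system name are assumptions, only the fileSystem(String) signature comes from the diff:

    // Sketch: obtain the renamed IgniteFileSystem from a Spring-managed Ignite.
    import org.apache.ignite.IgniteFileSystem;
    import org.apache.ignite.IgniteSpringBean;
    import org.apache.ignite.igfs.IgfsPath;
    import org.springframework.context.ApplicationContext;
    import org.springframework.context.support.ClassPathXmlApplicationContext;

    public class SpringIgfsSketch {
        public static void main(String[] args) {
            // "ignite-beans.xml" and the "igfs" name are assumed to be configured elsewhere.
            ApplicationContext ctx = new ClassPathXmlApplicationContext("ignite-beans.xml");

            IgniteSpringBean ignite = ctx.getBean(IgniteSpringBean.class);

            // Renamed API: fileSystem(...) now returns IgniteFileSystem.
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            fs.mkdirs(new IgfsPath("/examples/fs"));
        }
    }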
diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala
index 964de1d..f0e5eba 100644
--- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala
+++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala
@@ -1520,8 +1520,8 @@ object visor extends VisorTag {
         val cfgs =
             try
                 // Cache, IGFS, streamer and DR configurations should be excluded from daemon node config.
-                spring.loadConfigurations(url, "cacheConfiguration", "igfsConfiguration", "streamerConfiguration",
-                    "drSenderHubConfiguration", "drReceiverHubConfiguration").get1()
+                spring.loadConfigurations(url, "cacheConfiguration", "fileSystemConfiguration",
+                    "streamerConfiguration", "drSenderHubConfiguration", "drReceiverHubConfiguration").get1()
             finally {
                 if (log4jTup != null)
                     U.removeLog4jNoOpLogger(log4jTup)
diff --git a/pom.xml b/pom.xml
index b561aab..9c6ef75 100644
--- a/pom.xml
+++ b/pom.xml
@@ -731,12 +731,12 @@
                             <packages>org.apache.ignite.services</packages>
                         </group>
                         <group>
-                            <title>Ignite File System</title>
-                            <packages>org.apache.ignite.igfs</packages>
+                            <title>File System APIs</title>
+                            <packages>org.apache.ignite.igfs:org.apache.ignite.igfs.mapreduce:org.apache.ignite.igfs.mapreduce.records:org.apache.ignite.igfs.secondary</packages>
                         </group>
                         <group>
                             <title>Hadoop Accelerator APIs</title>
-                            <packages>org.apache.ignite.igfs.hadoop:org.apache.ignite.igfs.hadoop.v1:org.apache.ignite.igfs.hadoop.v2:org.apache.ignite.igfs.mapreduce:org.apache.ignite.igfs.mapreduce.records:org.apache.ignite.hadoop</packages>
+                            <packages>org.apache.ignite.hadoop:org.apache.ignite.hadoop.fs:org.apache.ignite.hadoop.fs.v1:org.apache.ignite.hadoop.fs.v2:org.apache.ignite.hadoop.mapreduce</packages>
                         </group>
                         <group>
                             <title>Streaming APIs</title>
@@ -926,12 +926,12 @@
                             <packages>org.apache.ignite.services</packages>
                         </group>
                         <group>
-                            <title>Ignite File System</title>
-                            <packages>org.apache.ignite.igfs</packages>
+                            <title>File System APIs</title>
+                            <packages>org.apache.ignite.igfs:org.apache.ignite.igfs.mapreduce:org.apache.ignite.igfs.mapreduce.records:org.apache.ignite.igfs.secondary</packages>
                         </group>
                         <group>
                             <title>Hadoop Accelerator APIs</title>
-                            <packages>org.apache.ignite.igfs.hadoop:org.apache.ignite.igfs.hadoop.v1:org.apache.ignite.igfs.hadoop.v2:org.apache.ignite.igfs.mapreduce:org.apache.ignite.igfs.mapreduce.records:org.apache.ignite.hadoop</packages>
+                            <packages>org.apache.ignite.hadoop:org.apache.ignite.hadoop.fs:org.apache.ignite.hadoop.fs.v1:org.apache.ignite.hadoop.fs.v2:org.apache.ignite.hadoop.mapreduce</packages>
                         </group>
                         <group>
                             <title>Streaming APIs</title>
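The regrouped javadoc packages track the relocation of the accelerator classes themselves: the Hadoop 1.x file system adapter now lives in org.apache.ignite.hadoop.fs.v1 (previously under org.apache.ignite.igfs.hadoop.v1). As a sketch of a plain Hadoop client wired to the relocated adapter; the igfs://igfs@localhost/ endpoint URI is an assumption:

    // Sketch: point a stock Hadoop client at the relocated IGFS adapter.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.net.URI;

    public class IgfsClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();

            // Relocated adapter class under the new package layout listed above.
            conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");

            // The authority and port in this URI are illustrative only.
            FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost/"), conf);

            fs.mkdirs(new Path("/tmp/example"));
        }
    }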