From bd381e27b92e3e3b884de0bb7b0c09a9b201b332 Mon Sep 17 00:00:00 2001 From: Craig Condit Date: Mon, 2 Nov 2020 13:48:23 -0600 Subject: [PATCH] YARN-10494 CLI tool for docker-to-squashfs conversion (pure Java). --- hadoop-tools/hadoop-runc/pom.xml | 104 ++ .../runc/docker/DefaultManifestChooser.java | 31 + .../hadoop/runc/docker/DockerClient.java | 394 +++++++ .../hadoop/runc/docker/DockerContext.java | 52 + .../hadoop/runc/docker/DockerCoordinates.java | 143 +++ .../hadoop/runc/docker/DockerException.java | 40 + .../DockerResourceNotFoundException.java | 40 + .../hadoop/runc/docker/ManifestChooser.java | 28 + .../hadoop/runc/docker/auth/AuthToken.java | 80 ++ .../docker/auth/BearerAuthSchemeProvider.java | 42 + .../runc/docker/auth/BearerCredentials.java | 72 ++ .../auth/BearerCredentialsProvider.java | 102 ++ .../hadoop/runc/docker/auth/BearerScheme.java | 169 +++ .../runc/docker/json/InstantDeserializer.java | 48 + .../runc/docker/json/InstantSerializer.java | 44 + .../hadoop/runc/docker/model/BlobV2.java | 46 + .../runc/docker/model/ManifestListV2.java | 66 ++ .../runc/docker/model/ManifestRefV2.java | 59 + .../hadoop/runc/docker/model/ManifestV2.java | 76 ++ .../hadoop/runc/docker/model/PlatformV2.java | 85 ++ .../runc/squashfs/AbstractSquashFsReader.java | 246 ++++ .../runc/squashfs/FileSquashFsReader.java | 291 +++++ .../runc/squashfs/MappedSquashFsReader.java | 299 +++++ .../runc/squashfs/SquashFsConverter.java | 128 ++ .../hadoop/runc/squashfs/SquashFsEntry.java | 495 ++++++++ .../runc/squashfs/SquashFsEntryBuilder.java | 287 +++++ .../runc/squashfs/SquashFsException.java | 42 + .../hadoop/runc/squashfs/SquashFsReader.java | 115 ++ .../hadoop/runc/squashfs/SquashFsTree.java | 197 ++++ .../hadoop/runc/squashfs/SquashFsWriter.java | 243 ++++ .../hadoop/runc/squashfs/data/DataBlock.java | 49 + .../runc/squashfs/data/DataBlockCache.java | 157 +++ .../runc/squashfs/data/DataBlockReader.java | 238 ++++ .../runc/squashfs/data/DataBlockRef.java | 69 ++ .../runc/squashfs/data/DataBlockWriter.java | 90 ++ .../runc/squashfs/data/FragmentRef.java | 53 + .../runc/squashfs/data/FragmentWriter.java | 149 +++ .../squashfs/data/MappedDataBlockReader.java | 241 ++++ .../squashfs/directory/DirectoryBuilder.java | 129 ++ .../squashfs/directory/DirectoryElement.java | 30 + .../squashfs/directory/DirectoryEntry.java | 127 ++ .../squashfs/directory/DirectoryHeader.java | 92 ++ .../inode/AbstractBasicDeviceINode.java | 105 ++ .../squashfs/inode/AbstractBasicIpcINode.java | 93 ++ .../inode/AbstractExtendedDeviceINode.java | 106 ++ .../inode/AbstractExtendedIpcINode.java | 92 ++ .../runc/squashfs/inode/AbstractINode.java | 164 +++ .../squashfs/inode/BasicBlockDeviceINode.java | 57 + .../squashfs/inode/BasicCharDeviceINode.java | 57 + .../squashfs/inode/BasicDirectoryINode.java | 213 ++++ .../runc/squashfs/inode/BasicFifoINode.java | 54 + .../runc/squashfs/inode/BasicFileINode.java | 270 +++++ .../runc/squashfs/inode/BasicSocketINode.java | 55 + .../squashfs/inode/BasicSymlinkINode.java | 141 +++ .../runc/squashfs/inode/BlockDeviceINode.java | 25 + .../runc/squashfs/inode/CharDeviceINode.java | 25 + .../runc/squashfs/inode/DeviceINode.java | 39 + .../runc/squashfs/inode/DirectoryINode.java | 57 + .../inode/ExtendedBlockDeviceINode.java | 39 + .../inode/ExtendedCharDeviceINode.java | 39 + .../squashfs/inode/ExtendedDeviceINode.java | 29 + .../inode/ExtendedDirectoryINode.java | 180 +++ .../squashfs/inode/ExtendedFifoINode.java | 39 + .../squashfs/inode/ExtendedFileINode.java | 220 ++++ 
.../runc/squashfs/inode/ExtendedIpcINode.java | 29 + .../squashfs/inode/ExtendedSocketINode.java | 39 + .../squashfs/inode/ExtendedSymlinkINode.java | 125 ++ .../hadoop/runc/squashfs/inode/FifoINode.java | 25 + .../hadoop/runc/squashfs/inode/FileINode.java | 65 ++ .../hadoop/runc/squashfs/inode/INode.java | 75 ++ .../hadoop/runc/squashfs/inode/INodeRef.java | 54 + .../hadoop/runc/squashfs/inode/INodeType.java | 132 +++ .../hadoop/runc/squashfs/inode/IpcINode.java | 35 + .../runc/squashfs/inode/Permission.java | 155 +++ .../runc/squashfs/inode/SocketINode.java | 25 + .../runc/squashfs/inode/SymlinkINode.java | 39 + .../runc/squashfs/io/ByteBufferDataInput.java | 137 +++ .../hadoop/runc/squashfs/io/MappedFile.java | 107 ++ .../metadata/FileMetadataBlockReader.java | 85 ++ .../MappedFileMetadataBlockReader.java | 66 ++ .../metadata/MemoryMetadataBlockReader.java | 81 ++ .../runc/squashfs/metadata/MetadataBlock.java | 180 +++ .../squashfs/metadata/MetadataBlockCache.java | 179 +++ .../metadata/MetadataBlockReader.java | 63 + .../squashfs/metadata/MetadataBlockRef.java | 58 + .../squashfs/metadata/MetadataReader.java | 287 +++++ .../squashfs/metadata/MetadataReference.java | 143 +++ .../squashfs/metadata/MetadataWriter.java | 202 ++++ .../metadata/TaggedMetadataBlockReader.java | 74 ++ .../squashfs/superblock/CompressionId.java | 51 + .../runc/squashfs/superblock/SuperBlock.java | 333 ++++++ .../squashfs/superblock/SuperBlockFlag.java | 77 ++ .../runc/squashfs/table/ExportTable.java | 134 +++ .../runc/squashfs/table/FileTableReader.java | 75 ++ .../runc/squashfs/table/FragmentTable.java | 138 +++ .../squashfs/table/FragmentTableEntry.java | 65 ++ .../hadoop/runc/squashfs/table/IdTable.java | 129 ++ .../runc/squashfs/table/IdTableGenerator.java | 86 ++ .../squashfs/table/MappedFileTableReader.java | 66 ++ .../squashfs/table/MemoryTableReader.java | 67 ++ .../runc/squashfs/table/TableReader.java | 33 + .../hadoop/runc/squashfs/util/BinUtils.java | 239 ++++ .../runc/squashfs/util/SquashDebug.java | 253 ++++ .../hadoop/runc/tools/ImportDockerImage.java | 579 +++++++++ .../hadoop/runc/docker/TestDockerClient.java | 283 +++++ .../runc/squashfs/TestSquashFsConverter.java | 80 ++ .../runc/squashfs/TestSquashFsException.java | 83 ++ .../runc/squashfs/TestSquashFsInterop.java | 1037 +++++++++++++++++ .../runc/squashfs/data/TestDataBlock.java | 73 ++ .../squashfs/data/TestDataBlockCache.java | 122 ++ .../squashfs/data/TestDataBlockReader.java | 352 ++++++ .../runc/squashfs/data/TestDataBlockRef.java | 76 ++ .../squashfs/data/TestDataBlockWriter.java | 133 +++ .../runc/squashfs/data/TestFragmentRef.java | 71 ++ .../squashfs/data/TestFragmentWriter.java | 246 ++++ .../directory/TestDirectoryBuilder.java | 204 ++++ .../directory/TestDirectoryEntry.java | 178 +++ .../directory/TestDirectoryHeader.java | 117 ++ .../inode/TestAbstractBasicDeviceINode.java | 97 ++ .../inode/TestAbstractBasicIpcINode.java | 88 ++ .../TestAbstractExtendedDeviceINode.java | 94 ++ .../inode/TestAbstractExtendedIpcINode.java | 85 ++ .../squashfs/inode/TestAbstractINode.java | 116 ++ .../inode/TestBasicBlockDeviceINode.java | 85 ++ .../inode/TestBasicCharDeviceINode.java | 85 ++ .../inode/TestBasicDirectoryINode.java | 232 ++++ .../squashfs/inode/TestBasicFifoINode.java | 81 ++ .../squashfs/inode/TestBasicFileINode.java | 327 ++++++ .../squashfs/inode/TestBasicSocketINode.java | 81 ++ .../squashfs/inode/TestBasicSymlinkINode.java | 151 +++ .../inode/TestExtendedBlockDeviceINode.java | 76 ++ .../inode/TestExtendedCharDeviceINode.java | 
76 ++ .../inode/TestExtendedDirectoryINode.java | 210 ++++ .../squashfs/inode/TestExtendedFifoINode.java | 71 ++ .../squashfs/inode/TestExtendedFileINode.java | 293 +++++ .../inode/TestExtendedSocketINode.java | 71 ++ .../inode/TestExtendedSymlinkINode.java | 138 +++ .../runc/squashfs/inode/TestINodeRef.java | 67 ++ .../runc/squashfs/inode/TestINodeType.java | 211 ++++ .../runc/squashfs/inode/TestPermission.java | 100 ++ .../squashfs/io/TestByteBufferDataInput.java | 198 ++++ .../metadata/TestFileMetadataBlockReader.java | 137 +++ .../TestMappedFileMetadataBlockReader.java | 112 ++ .../TestMemoryMetadataBlockReader.java | 100 ++ .../squashfs/metadata/TestMetadataBlock.java | 214 ++++ .../metadata/TestMetadataBlockCache.java | 262 +++++ .../metadata/TestMetadataBlockReader.java | 143 +++ .../metadata/TestMetadataBlockRef.java | 67 ++ .../squashfs/metadata/TestMetadataReader.java | 353 ++++++ .../metadata/TestMetadataReference.java | 150 +++ .../squashfs/metadata/TestMetadataWriter.java | 364 ++++++ .../metadata/TestTaggedMetadataBlock.java | 119 ++ .../superblock/TestCompressionId.java | 52 + .../squashfs/superblock/TestSuperBlock.java | 271 +++++ .../superblock/TestSuperBlockFlag.java | 138 +++ .../runc/squashfs/table/TestExportTable.java | 178 +++ .../squashfs/table/TestFileTableReader.java | 116 ++ .../squashfs/table/TestFragmentTable.java | 169 +++ .../table/TestFragmentTableEntry.java | 81 ++ .../runc/squashfs/table/TestIdTable.java | 140 +++ .../squashfs/table/TestIdTableGenerator.java | 109 ++ .../table/TestMappedFileTableReader.java | 102 ++ .../squashfs/table/TestMemoryTableReader.java | 80 ++ .../runc/squashfs/test/DataTestUtils.java | 127 ++ .../squashfs/test/DirectoryTestUtils.java | 149 +++ .../runc/squashfs/test/INodeTestUtils.java | 72 ++ .../squashfs/test/InMemoryFragmentTable.java | 57 + .../test/MetadataBlockReaderMock.java | 81 ++ .../runc/squashfs/test/MetadataTestUtils.java | 139 +++ .../squashfs/test/SuperBlockTestUtils.java | 51 + .../runc/tools/TestImportDockerImage.java | 370 ++++++ .../src/test/resources/log4j.properties | 24 + hadoop-tools/pom.xml | 1 + 173 files changed, 22988 insertions(+) create mode 100644 hadoop-tools/hadoop-runc/pom.xml create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DefaultManifestChooser.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerClient.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerContext.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerCoordinates.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerException.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerResourceNotFoundException.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/ManifestChooser.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/AuthToken.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerAuthSchemeProvider.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentials.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentialsProvider.java create mode 100644 
hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerScheme.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantDeserializer.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantSerializer.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/BlobV2.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestListV2.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestRefV2.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestV2.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/PlatformV2.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/AbstractSquashFsReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/FileSquashFsReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/MappedSquashFsReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsConverter.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntry.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntryBuilder.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsException.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsTree.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockCache.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockRef.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentRef.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/MappedDataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryBuilder.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryElement.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryEntry.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryHeader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicDeviceINode.java create mode 100644 
hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicIpcINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedIpcINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicBlockDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicCharDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicDirectoryINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFifoINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFileINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSocketINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSymlinkINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BlockDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/CharDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DirectoryINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedBlockDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedCharDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDirectoryINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFifoINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFileINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedIpcINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSocketINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSymlinkINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FifoINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FileINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeRef.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeType.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/IpcINode.java create mode 100644 
hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/Permission.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SocketINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SymlinkINode.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/ByteBufferDataInput.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/MappedFile.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/FileMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MappedFileMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MemoryMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockCache.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockRef.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReference.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/TaggedMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/CompressionId.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlockFlag.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/ExportTable.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FileTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTable.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTableEntry.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTable.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTableGenerator.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MappedFileTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MemoryTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/TableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/BinUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/SquashDebug.java create mode 
100644 hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/tools/ImportDockerImage.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/docker/TestDockerClient.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsConverter.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsException.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsInterop.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockCache.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockRef.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentRef.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryBuilder.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryEntry.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryHeader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicIpcINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedIpcINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicBlockDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicCharDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicDirectoryINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFifoINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFileINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSocketINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSymlinkINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedBlockDeviceINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedCharDeviceINode.java create mode 100644 
hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedDirectoryINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFifoINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFileINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSocketINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSymlinkINode.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeRef.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeType.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestPermission.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/io/TestByteBufferDataInput.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestFileMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMappedFileMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMemoryMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockCache.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockRef.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReference.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataWriter.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestTaggedMetadataBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestCompressionId.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlock.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlockFlag.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestExportTable.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFileTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTable.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTableEntry.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTable.java create mode 100644 
hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTableGenerator.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMappedFileTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMemoryTableReader.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DataTestUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DirectoryTestUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/INodeTestUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/InMemoryFragmentTable.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataBlockReaderMock.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataTestUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/SuperBlockTestUtils.java create mode 100644 hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/tools/TestImportDockerImage.java create mode 100644 hadoop-tools/hadoop-runc/src/test/resources/log4j.properties diff --git a/hadoop-tools/hadoop-runc/pom.xml b/hadoop-tools/hadoop-runc/pom.xml new file mode 100644 index 00000000000..4930e3256b3 --- /dev/null +++ b/hadoop-tools/hadoop-runc/pom.xml @@ -0,0 +1,104 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 3.3.0 + ../../hadoop-project + + hadoop-runc + 3.3.0 + Apache Hadoop RunC support tools + Apache Hadoop RunC + jar + + + UTF-8 + + + + + junit + junit + test + + + org.apache.commons + commons-compress + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-yarn-api + provided + + + org.eclipse.jetty + jetty-server + test + + + org.slf4j + slf4j-api + + + org.apache.hadoop + hadoop-hdfs-client + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + true + 600 + + + + org.apache.maven.plugins + maven-jar-plugin + + + org.apache.maven.plugins + maven-dependency-plugin + + + deplist + compile + + list + + + + ${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-builtin.txt + + + + + + + diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DefaultManifestChooser.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DefaultManifestChooser.java new file mode 100644 index 00000000000..912da7afd1c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DefaultManifestChooser.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +import org.apache.hadoop.runc.docker.model.ManifestListV2; +import org.apache.hadoop.runc.docker.model.ManifestRefV2; + +public class DefaultManifestChooser implements ManifestChooser { + + @Override + public ManifestRefV2 chooseManifest(ManifestListV2 manifestList) { + return manifestList.getManifests().stream().findFirst().orElse(null); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerClient.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerClient.java new file mode 100644 index 00000000000..9975a0a9bca --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerClient.java @@ -0,0 +1,394 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.runc.docker.auth.BearerAuthSchemeProvider; +import org.apache.hadoop.runc.docker.auth.BearerCredentialsProvider; +import org.apache.hadoop.runc.docker.auth.BearerScheme; +import org.apache.hadoop.runc.docker.model.ManifestListV2; +import org.apache.hadoop.runc.docker.model.ManifestRefV2; +import org.apache.hadoop.runc.docker.model.ManifestV2; +import org.apache.hadoop.runc.docker.model.PlatformV2; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHeaders; +import org.apache.http.auth.AuthSchemeProvider; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.config.ConnectionConfig; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.config.SocketConfig; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.sasl.AuthenticationException; +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class DockerClient implements Closeable { + + private 
static final Logger LOG = LoggerFactory.getLogger(DockerClient.class); + + private static final String DOCKER_DISTRIBUTION_API_VERSION_VALUE = + "registry/2.0"; + private static final String DOCKER_DISTRIBUTION_API_VERSION_HEADER = + "docker-distribution-api-version"; + + private static final String DOCKER_MANIFEST_V2_CT = + "application/vnd.docker.distribution.manifest.v2+json"; + private static final String DOCKER_MANIFEST_LIST_V2_CT = + "application/vnd.docker.distribution.manifest.list.v2+json"; + + private static final String MANIFEST_ACCEPT = + DOCKER_MANIFEST_LIST_V2_CT + ", " + + DOCKER_MANIFEST_V2_CT; + + private final CloseableHttpClient authClient; + private final CloseableHttpClient client; + private final BearerCredentialsProvider credsProvider; + private ManifestChooser manifestChooser + = new DefaultManifestChooser(); + + public DockerClient() { + credsProvider = new BearerCredentialsProvider(); + authClient = createHttpClient(null, null); + client = createHttpClient(credsProvider, authClient); + } + + public void setManifestChooser(ManifestChooser chooser) { + this.manifestChooser = chooser; + } + + public ManifestChooser getManifestChooser() { + return manifestChooser; + } + + static CloseableHttpClient createHttpClient( + CredentialsProvider credsProvider, + CloseableHttpClient authClient) { + + ConnectionConfig connConfig = ConnectionConfig.custom() + .setCharset(StandardCharsets.UTF_8) + .build(); + + SocketConfig socketConfig = SocketConfig.custom() + .setSoTimeout(60_000) + .build(); + + RequestConfig requestConfig = RequestConfig.custom() + .setConnectTimeout(30_000) + .setSocketTimeout(120_000) + .setRedirectsEnabled(true) + .setRelativeRedirectsAllowed(true) + .setAuthenticationEnabled(true) + .setConnectionRequestTimeout(30_000) + .setTargetPreferredAuthSchemes(Arrays.asList("Bearer")) + .build(); + + Registry<AuthSchemeProvider> authSchemeRegistry = authClient == null ? null + : RegistryBuilder.<AuthSchemeProvider>create() + .register(BearerScheme.SCHEME, new BearerAuthSchemeProvider(authClient)) + .build(); + + List<Header>
headers = new ArrayList<>(); + headers.add(new BasicHeader( + DOCKER_DISTRIBUTION_API_VERSION_HEADER, + DOCKER_DISTRIBUTION_API_VERSION_VALUE)); + + return HttpClients.custom() + .setDefaultAuthSchemeRegistry(authSchemeRegistry) + .setDefaultCredentialsProvider(credsProvider) + .setDefaultConnectionConfig(connConfig) + .setDefaultSocketConfig(socketConfig) + .setDefaultRequestConfig(requestConfig) + .setDefaultHeaders(headers) + .build(); + } + + public DockerContext createContext(String baseUrl) + throws IOException, URISyntaxException { + + DockerContext context = new DockerContext(new URL(baseUrl)); + + HttpGet get = new HttpGet(context.getBaseUrl().toURI()); + try (CloseableHttpResponse response = client.execute(get)) { + if (response.getStatusLine().getStatusCode() != 200) { + throw new AuthenticationException( + String.format("Unable to authenticate to %s: %s", baseUrl, + response.getStatusLine())); + } + } + + return context; + } + + public ManifestListV2 listManifests( + DockerContext context, + String name, + String reference) + throws IOException, URISyntaxException, DockerException { + + URL baseUrl = context.getBaseUrl(); + + URL url = new URL(baseUrl, String + .format("%s/manifests/%s", encodeName(name), encodeName(reference))); + + HttpGet get = new HttpGet(url.toURI()); + get.setHeader(HttpHeaders.ACCEPT, MANIFEST_ACCEPT); + + try (CloseableHttpResponse response = client.execute(get)) { + if (response.getStatusLine().getStatusCode() != 200) { + if (response.getStatusLine().getStatusCode() == 404) { + throw new DockerException( + String.format("Image not found: %s:%s", name, reference)); + } + throw new DockerException( + String.format("Unexpected response [%d %s] from %s", + response.getStatusLine().getStatusCode(), + response.getStatusLine().getReasonPhrase(), + url)); + } + + Header contentType = response.getFirstHeader("content-type"); + if (contentType == null) { + throw new DockerException( + String.format("No content type received from %s", url)); + } + + String ct = contentType.getValue(); + LOG.debug("Got manifest content type: {}", ct); + + if (ManifestListV2.matches(ct)) { + HttpEntity entity = response.getEntity(); + try { + try (InputStream in = entity.getContent()) { + ObjectMapper mapper = new ObjectMapper(); + return mapper + .readerFor(ManifestListV2.class) + .readValue(in); + } + } finally { + EntityUtils.consumeQuietly(entity); + } + } else if (ManifestV2.matches(ct)) { + Header digest = response.getFirstHeader("docker-content-digest"); + if (digest == null) { + throw new DockerException("Unable to determine digest for manifest"); + } + + // synthesize a manifest list + ManifestListV2 manifests = new ManifestListV2(); + manifests.setMediaType(ManifestListV2.CONTENT_TYPE); + manifests.setSchemaVersion(2); + + ManifestRefV2 mref = new ManifestRefV2(); + mref.setDigest(digest.getValue().trim()); + mref.setMediaType(ManifestRefV2.CONTENT_TYPE); + mref.setSize(-1); + mref.setPlatform(new PlatformV2()); + manifests.getManifests().add(mref); + + return manifests; + } else { + throw new DockerException(String.format( + "Unknown content-type %s received from %s", ct, url)); + } + } + } + + public byte[] readManifest( + DockerContext context, + String name, + String blob) + throws IOException, URISyntaxException, DockerException { + + URL baseUrl = context.getBaseUrl(); + + URL url = new URL(baseUrl, + String.format("%s/manifests/%s", encodeName(name), blob)); + + LOG.info("Fetching manifest from {}", url); + + HttpGet get = new HttpGet(url.toURI()); + 
get.setHeader(HttpHeaders.ACCEPT, MANIFEST_ACCEPT); + + try (CloseableHttpResponse response = client.execute(get)) { + if (response.getStatusLine().getStatusCode() != 200) { + if (response.getStatusLine().getStatusCode() == 404) { + throw new DockerException( + String.format("Manifest not found: %s:%s", name, blob)); + } + throw new DockerException( + String.format("Unexpected response [%d %s] from %s", + response.getStatusLine().getStatusCode(), + response.getStatusLine().getReasonPhrase(), + url)); + } + + Header contentType = response.getFirstHeader("content-type"); + if (contentType == null) { + throw new DockerException( + String.format("No content type received from %s", url)); + } + + String ct = contentType.getValue(); + LOG.debug("Got manifest content type: {}", ct); + + if (ManifestV2.matches(ct)) { + HttpEntity entity = response.getEntity(); + try { + try (InputStream in = entity.getContent()) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + IOUtils.copyBytes(in, out, 4096); + return out.toByteArray(); + } + } + } finally { + EntityUtils.consumeQuietly(entity); + } + } else { + throw new DockerException(String.format( + "Unknown content-type %s received from %s", ct, url)); + } + } + } + + public ManifestV2 parseManifest(byte[] data) throws IOException { + ObjectMapper mapper = new ObjectMapper(); + return mapper + .readerFor(ManifestV2.class) + .readValue(data); + } + + public byte[] readConfig( + DockerContext context, + String name, + String blob) + throws IOException, URISyntaxException, DockerException { + + URL baseUrl = context.getBaseUrl(); + + URL url = new URL(baseUrl, + String.format("%s/blobs/%s", encodeName(name), blob)); + + LOG.info("Fetching config from {}", url); + + HttpGet get = new HttpGet(url.toURI()); + + try (CloseableHttpResponse response = client.execute(get)) { + if (response.getStatusLine().getStatusCode() != 200) { + if (response.getStatusLine().getStatusCode() == 404) { + throw new DockerException( + String.format("Config not found: %s:%s", name, blob)); + } + throw new DockerException( + String.format("Unexpected response [%d %s] from %s", + response.getStatusLine().getStatusCode(), + response.getStatusLine().getReasonPhrase(), + url)); + } + + Header contentType = response.getFirstHeader("content-type"); + if (contentType == null) { + throw new DockerException( + String.format("No content type received from %s", url)); + } + + String ct = contentType.getValue(); + LOG.debug("Got config content type: {}", ct); + + HttpEntity entity = response.getEntity(); + try { + try (InputStream in = entity.getContent()) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + IOUtils.copyBytes(in, out, 4096); + return out.toByteArray(); + } + } + } finally { + EntityUtils.consumeQuietly(entity); + } + } + } + + public InputStream download( + DockerContext context, String name, String blob) + throws IOException, URISyntaxException, DockerException { + + URL baseUrl = context.getBaseUrl(); + + URL url = new URL(baseUrl, + String.format("%s/blobs/%s", encodeName(name), blob)); + + HttpGet get = new HttpGet(url.toURI()); + get.setHeader(HttpHeaders.ACCEPT, "application/json"); + CloseableHttpResponse response = client.execute(get); + if (response.getStatusLine().getStatusCode() != 200) { + response.close(); + if (response.getStatusLine().getStatusCode() == 404) { + throw new DockerException( + String.format("Layer not found: %s [%s]", name, blob)); + } + throw new DockerException( + String.format("Unexpected response [%d %s] from 
%s", + response.getStatusLine().getStatusCode(), + response.getStatusLine().getReasonPhrase(), + url)); + } + return response.getEntity().getContent(); + } + + @Override + public void close() throws IOException { + client.close(); + authClient.close(); + } + + private String encodeName(String name) throws UnsupportedEncodingException { + String[] parts = name.split("/"); + StringBuilder buf = new StringBuilder(); + for (String part : parts) { + if (buf.length() > 0) { + buf.append("/"); + } + buf.append(URLEncoder.encode(part, "UTF-8")); + } + return buf.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerContext.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerContext.java new file mode 100644 index 00000000000..90f434afdea --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerContext.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +import java.net.MalformedURLException; +import java.net.URL; + +public class DockerContext { + + private final URL baseUrl; + + public DockerContext(URL baseUrl) throws MalformedURLException { + this.baseUrl = normalize(baseUrl); + + } + + private static URL normalize(URL url) throws MalformedURLException { + if (!url.getPath().endsWith("/")) { + url = new URL( + url.getProtocol(), + url.getHost(), + url.getPort(), + url.getPath() + "/" + (url.getQuery() == null ? "" : "?" + url.getQuery())); + } + return url; + } + + public URL getBaseUrl() { + return baseUrl; + } + + @Override + public String toString() { + return String.format("docker-context { baseUrl=%s }", baseUrl); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerCoordinates.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerCoordinates.java new file mode 100644 index 00000000000..7d52bce45fd --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerCoordinates.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +import java.net.MalformedURLException; +import java.net.URL; + +public class DockerCoordinates { + private final String baseUrl; + private final String host; + private final String image; + private final String imageName; + private final String imageRef; + + public DockerCoordinates(String defaultRepo, String ref) { + boolean hasHost = hasHost(ref); + this.host = computeHost(ref, defaultRepo, hasHost); + this.baseUrl = computeBaseUrl(ref, defaultRepo, hasHost); + this.image = ref; + this.imageName = computeImageName(image, hasHost); + this.imageRef = computeImageRef(image, hasHost); + } + + private static String computeImageName(String ref, boolean hasHost) { + if (hasHost) { + ref = ref.substring(ref.indexOf("/") + 1); + } + + if (ref.contains("@")) { + return ref.substring(0, ref.indexOf('@')); + } + if (ref.contains(":")) { + return ref.substring(0, ref.indexOf(':')); + } + + return ref; + } + + private static String computeImageRef(String ref, boolean hasHost) { + if (hasHost) { + ref = ref.substring(ref.indexOf("/") + 1); + } + if (ref.contains("@")) { + return ref.substring(ref.indexOf('@') + 1); + } + if (ref.contains(":")) { + return ref.substring(ref.indexOf(':') + 1); + } + return "latest"; + } + + private static boolean hasHost(String ref) { + if (!ref.contains("/")) { + return false; + } + String[] parts = ref.split("/", 2); + String host = parts[0]; + return host.contains("."); + } + + private static String computeHost( + String ref, String defaultRepo, boolean hasHost) { + return hasHost ? ref.split("/", 2)[0] : defaultRepo; + } + + private static String computeBaseUrl( + String ref, String defaultRepo, boolean hasHost) { + + String host = hasHost ? 
ref.split("/", 2)[0] : defaultRepo; + + int port = 443; + if (host.contains(":")) { + String sPort = host.substring(host.indexOf(":") + 1); + host = host.substring(0, host.indexOf(":")); + try { + port = Integer.parseInt(sPort, 10); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + String.format("Invalid port number: %s", sPort)); + } + } + try { + String proto = "https"; + // use HTTP if IPv4 address specified + if (host.matches("^[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+$")) { + proto = "http"; + } + + if (port != 443) { + return new URL(proto, host, port, "/v2/").toExternalForm(); + } else { + return new URL(proto, host, "/v2/").toExternalForm(); + } + } catch (MalformedURLException e) { + throw new IllegalArgumentException( + String.format("Invalid docker host: %s", host)); + } + } + + public String getBaseUrl() { + return baseUrl; + } + + public String getImage() { + return image; + } + + public String getImageName() { + return imageName; + } + + public String getImageRef() { + return imageRef; + } + + public String getHost() { + return host; + } + + @Override + public String toString() { + return String.format( + "docker-coordinates { " + + "baseUrl=%s, host=%s, image=%s, imageName=%s, imageRef=%s }", + baseUrl, host, image, imageName, imageRef); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerException.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerException.java new file mode 100644 index 00000000000..d1f63df8f3d --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerException.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +public class DockerException extends Exception { + + private static final long serialVersionUID = 6989746271587645889L; + + public DockerException() { + } + + public DockerException(String message) { + super(message); + } + + public DockerException(Throwable cause) { + super(cause); + } + + public DockerException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerResourceNotFoundException.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerResourceNotFoundException.java new file mode 100644 index 00000000000..6073e93fd04 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/DockerResourceNotFoundException.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +public class DockerResourceNotFoundException extends DockerException { + + private static final long serialVersionUID = -2688356045540486787L; + + public DockerResourceNotFoundException() { + } + + public DockerResourceNotFoundException(String message) { + super(message); + } + + public DockerResourceNotFoundException(Throwable cause) { + super(cause); + } + + public DockerResourceNotFoundException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/ManifestChooser.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/ManifestChooser.java new file mode 100644 index 00000000000..52e73e74147 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/ManifestChooser.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker; + +import org.apache.hadoop.runc.docker.model.ManifestListV2; +import org.apache.hadoop.runc.docker.model.ManifestRefV2; + +public interface ManifestChooser { + + ManifestRefV2 chooseManifest(ManifestListV2 manifestList); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/AuthToken.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/AuthToken.java new file mode 100644 index 00000000000..8a4284e6b53 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/AuthToken.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker.auth; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.hadoop.runc.docker.json.InstantDeserializer; +import org.apache.hadoop.runc.docker.json.InstantSerializer; + +import java.time.Instant; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class AuthToken { + + private String token; + private int expiresIn = 60; + private Instant issuedAt = Instant.now(); + + @JsonProperty + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + + @JsonProperty("expires_in") + public int getExpiresIn() { + return expiresIn; + } + + public void setExpiresIn(int expiresIn) { + this.expiresIn = expiresIn; + } + + @JsonProperty("issued_at") + @JsonSerialize(using = InstantSerializer.class) + @JsonDeserialize(using = InstantDeserializer.class) + public Instant getIssuedAt() { + return issuedAt; + } + + public void setIssuedAt(Instant issuedAt) { + this.issuedAt = issuedAt; + } + + @JsonIgnore + public boolean isExpired() { + return issuedAt.plusSeconds(expiresIn).isBefore(Instant.now()); + } + + @Override + public String toString() { + return String.format( + "auth-token { token=%s, expiresIn=%d, issuedAt=%s, expired=%s }", + (token == null || token.isEmpty()) ? "(none)" : "*redacted*", expiresIn, + issuedAt, isExpired()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerAuthSchemeProvider.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerAuthSchemeProvider.java new file mode 100644 index 00000000000..164d713195a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerAuthSchemeProvider.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.docker.auth; + +import org.apache.http.annotation.Contract; +import org.apache.http.annotation.ThreadingBehavior; +import org.apache.http.auth.AuthScheme; +import org.apache.http.auth.AuthSchemeProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.protocol.HttpContext; + +@Contract(threading = ThreadingBehavior.IMMUTABLE) +public class BearerAuthSchemeProvider implements AuthSchemeProvider { + + private final CloseableHttpClient client; + + public BearerAuthSchemeProvider(CloseableHttpClient client) { + this.client = client; + } + + @Override + public AuthScheme create(HttpContext context) { + return new BearerScheme(client); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentials.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentials.java new file mode 100644 index 00000000000..cf01134517f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentials.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker.auth; + +import org.apache.http.annotation.Contract; +import org.apache.http.annotation.ThreadingBehavior; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.BasicUserPrincipal; +import org.apache.http.auth.Credentials; + +import java.security.Principal; + +@Contract(threading = ThreadingBehavior.SAFE) +public class BearerCredentials implements Credentials { + + private final AuthScope authScope; + private final BasicUserPrincipal principal; + private AuthToken token; + + public BearerCredentials(AuthScope authScope) { + this(authScope, null); + } + + public BearerCredentials(AuthScope authScope, AuthToken token) { + this.authScope = authScope; + this.principal = new BasicUserPrincipal("bearer"); + this.token = token; + } + + @Override + public Principal getUserPrincipal() { + return principal; + } + + @Override + public synchronized String getPassword() { + return (token == null) ? 
null : token.getToken(); + } + + public synchronized boolean isValid() { + return (token != null) && (!token.isExpired()); + } + + public AuthToken getToken() { + return token; + } + + public void setToken(AuthToken token) { + this.token = token; + } + + public AuthScope getAuthScope() { + return authScope; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentialsProvider.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentialsProvider.java new file mode 100644 index 00000000000..aa0029b6e66 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerCredentialsProvider.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker.auth; + +import org.apache.http.annotation.Contract; +import org.apache.http.annotation.ThreadingBehavior; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.Credentials; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.util.Args; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +@Contract(threading = ThreadingBehavior.SAFE) +public class BearerCredentialsProvider implements CredentialsProvider { + + private final ConcurrentHashMap credMap; + + public BearerCredentialsProvider() { + super(); + this.credMap = new ConcurrentHashMap(); + } + + /** + * Find matching {@link Credentials credentials} for the given + * authentication scope. + * + * @param map the credentials hash map + * @param authscope the {@link AuthScope authentication scope} + * @return the credentials + */ + private static Credentials matchCredentials( + final Map map, + final AuthScope authscope) { + // see if we get a direct hit + Credentials creds = map.get(authscope); + if (creds == null) { + // Nope. 
+ // Do a full scan + int bestMatchFactor = -1; + AuthScope bestMatch = null; + for (final AuthScope current : map.keySet()) { + final int factor = authscope.match(current); + if (factor > bestMatchFactor) { + bestMatchFactor = factor; + bestMatch = current; + } + } + if (bestMatch != null) { + creds = map.get(bestMatch); + } + } + return creds; + } + + @Override + public void setCredentials( + final AuthScope authscope, + final Credentials credentials) { + Args.notNull(authscope, "Authentication scope"); + credMap.put(authscope, credentials); + } + + @Override + public Credentials getCredentials(final AuthScope authscope) { + Args.notNull(authscope, "Authentication scope"); + Credentials creds = matchCredentials(this.credMap, authscope); + if (creds == null) { + creds = new BearerCredentials(authscope); + credMap.put(authscope, creds); + } + return creds; + } + + @Override + public void clear() { + this.credMap.clear(); + } + + @Override + public String toString() { + return credMap.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerScheme.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerScheme.java new file mode 100644 index 00000000000..680d2186319 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/auth/BearerScheme.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
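For illustration only (not part of the patch): the provider hands out per-scope BearerCredentials lazily, so the bearer scheme has somewhere to cache the token it later fetches. A small sketch with made-up registry and realm values:

import org.apache.hadoop.runc.docker.auth.BearerCredentials;
import org.apache.hadoop.runc.docker.auth.BearerCredentialsProvider;
import org.apache.http.auth.AuthScope;

public class BearerCredentialsDemo {
  public static void main(String[] args) {
    BearerCredentialsProvider provider = new BearerCredentialsProvider();

    // Scope keyed on registry host/port; the realm carries the token
    // service URL that BearerScheme will call on the first 401.
    AuthScope scope = new AuthScope("registry.example.com", 443,
        "https://auth.example.com/token", "bearer");

    // getCredentials() creates an empty BearerCredentials on first use;
    // it only becomes valid once BearerScheme stores a token in it.
    BearerCredentials creds =
        (BearerCredentials) provider.getCredentials(scope);
    System.out.println("valid before authentication? " + creds.isValid());
  }
}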
+ */ + +package org.apache.hadoop.runc.docker.auth; + +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHeaders; +import org.apache.http.HttpRequest; +import org.apache.http.auth.AuthenticationException; +import org.apache.http.auth.Credentials; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.impl.auth.RFC2617Scheme; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; + +@SuppressWarnings("deprecation") +public class BearerScheme extends RFC2617Scheme { + + public static final String SCHEME = "bearer"; + private static final long serialVersionUID = -8061843510470788770L; + private static final Logger LOG = LoggerFactory.getLogger(BearerScheme.class); + private final CloseableHttpClient client; + private volatile boolean complete = false; + + public BearerScheme(CloseableHttpClient client) { + this.client = client; + } + + @Override + public String getSchemeName() { + return SCHEME; + } + + @Override + public boolean isConnectionBased() { + return false; + } + + @Override + public boolean isComplete() { + return complete; + } + + @Override + public Header authenticate(Credentials credentials, HttpRequest request) + throws AuthenticationException { + + // succeed or fail, don't come around again + complete = true; + + if (!(credentials instanceof BearerCredentials)) { + return null; + } + + BearerCredentials bearerCreds = (BearerCredentials) credentials; + if (bearerCreds.isValid()) { + LOG.debug("Returning cached credentials"); + return new BasicHeader(HttpHeaders.AUTHORIZATION, + String.format("Bearer %s", bearerCreds.getToken().getToken())); + } + + authenticate(bearerCreds); + if (bearerCreds.isValid()) { + return new BasicHeader(HttpHeaders.AUTHORIZATION, + String.format("Bearer %s", bearerCreds.getToken().getToken())); + } + + return null; + } + + private void authenticate(BearerCredentials creds) + throws AuthenticationException { + + String realm = creds.getAuthScope().getRealm(); + URL url; + try { + url = new URL(realm); + } catch (MalformedURLException e) { + throw new AuthenticationException( + String.format("Invalid realm: %s", realm), e); + } + + LOG.debug("Authenticating to {}", url); + + HttpGet get = new HttpGet(url.toExternalForm()); + + try (CloseableHttpResponse response = client.execute(get)) { + + if (response.getStatusLine().getStatusCode() != 200) { + LOG.debug("Got unexpected response {} from token service {}", + response.getStatusLine(), realm); + return; + } + + Header contentType = response.getFirstHeader("content-type"); + if (contentType != null && !contentType.getValue() + .equals("application/json")) { + LOG.debug("Got unexpected content type {} from token service {}", + contentType.getValue(), realm); + return; + } + + HttpEntity entity = response.getEntity(); + if (entity == null) { + LOG.debug("No token received from token service {}", realm); + return; + } + + try { + AuthToken authToken = new ObjectMapper().readerFor(AuthToken.class) + .readValue(entity.getContent()); + creds.setToken(authToken); + LOG.debug("Authenticated successfully"); + } finally
{ + EntityUtils.consume(entity); + } + } catch (IOException e) { + throw new AuthenticationException("Unable to acquire token", e); + } + } + + @Override + public String getRealm() { + String realm = super.getRealm(); + try { + + URIBuilder ub = new URIBuilder(realm); + String service = getParameter("service"); + if (service != null) { + ub.addParameter("service", service); + } + String scope = getParameter("scope"); + if (scope != null) { + ub.addParameter("scope", scope); + } + return ub.build().toURL().toExternalForm(); + } catch (URISyntaxException | MalformedURLException e) { + return realm; + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantDeserializer.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantDeserializer.java new file mode 100644 index 00000000000..69ac7f61273 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantDeserializer.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker.json; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.time.format.DateTimeParseException; + +public class InstantDeserializer extends JsonDeserializer { + + @Override + public Instant deserialize(JsonParser p, DeserializationContext ctxt) + throws IOException { + String value = p.getValueAsString(); + if (value == null) { + return null; + } + + try { + return Instant.parse(value); + } catch (DateTimeParseException e) { + return ZonedDateTime.parse(value).toInstant(); + } + + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantSerializer.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantSerializer.java new file mode 100644 index 00000000000..d0ec903c12a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/json/InstantSerializer.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
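For illustration only (not part of the patch): the deserializer above and the serializer that follows can be registered together as a Jackson module. The round trip below assumes both are parameterized on java.time.Instant, as their signatures indicate.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import org.apache.hadoop.runc.docker.json.InstantDeserializer;
import org.apache.hadoop.runc.docker.json.InstantSerializer;

import java.time.Instant;

public class InstantJsonDemo {
  public static void main(String[] args) throws Exception {
    SimpleModule module = new SimpleModule()
        .addSerializer(Instant.class, new InstantSerializer())
        .addDeserializer(Instant.class, new InstantDeserializer());
    ObjectMapper mapper = new ObjectMapper().registerModule(module);

    Instant issued = Instant.parse("2020-11-02T19:48:23Z");
    String json = mapper.writeValueAsString(issued);   // ISO_INSTANT string
    Instant back = mapper.readValue(json, Instant.class);
    System.out.println(json + " round-trips to " + back);
  }
}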
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.docker.json; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import java.io.IOException; +import java.time.Instant; +import java.time.format.DateTimeFormatter; + +public class InstantSerializer extends JsonSerializer { + + @Override + public void serialize( + Instant value, + JsonGenerator gen, + SerializerProvider serializers) + throws IOException { + if (value == null) { + gen.writeNull(); + return; + } + gen.writeString(DateTimeFormatter.ISO_INSTANT.format(value)); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/BlobV2.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/BlobV2.java new file mode 100644 index 00000000000..72ca11dfb9b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/BlobV2.java @@ -0,0 +1,46 @@ +package org.apache.hadoop.runc.docker.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class BlobV2 { + + private String mediaType; + private long size; + private String digest; + + @JsonProperty + public String getMediaType() { + return mediaType; + } + + public void setMediaType(String mediaType) { + this.mediaType = mediaType; + } + + @JsonProperty + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + + @JsonProperty + public String getDigest() { + return digest; + } + + public void setDigest(String digest) { + this.digest = digest; + } + + @Override + public String toString() { + return String.format( + "{ mediaType=%s, size=%d, digest=%s }", mediaType, size, digest); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestListV2.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestListV2.java new file mode 100644 index 00000000000..76098887ef1 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestListV2.java @@ -0,0 +1,66 @@ +package org.apache.hadoop.runc.docker.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class ManifestListV2 { + + public static String CONTENT_TYPE = + "application/vnd.docker.distribution.manifest.list.v2+json"; + + public static boolean matches(String contentType) { + return CONTENT_TYPE.equals(contentType); + } + + private String digest; + + private int schemaVersion; + private String mediaType; + private List manifests = new ArrayList<>(); + + @JsonProperty + public int getSchemaVersion() { + return schemaVersion; + } + + public void setSchemaVersion(int schemaVersion) { + this.schemaVersion = 
schemaVersion; + } + + @JsonProperty + public String getMediaType() { + return mediaType; + } + + public void setMediaType(String mediaType) { + this.mediaType = mediaType; + } + + @JsonProperty + public List getManifests() { + return manifests; + } + + @Override + public String toString() { + return String.format( + "{%n" + + " schemaVersion=%d,%n" + + " mediaType=%s,%n" + + " manifests=%s%n" + + "}", + schemaVersion, + mediaType, + manifests + .stream() + .map(Objects::toString) + .collect(Collectors.joining(",\n ", "[\n ", "\n ]"))); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestRefV2.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestRefV2.java new file mode 100644 index 00000000000..74ddc23b95f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestRefV2.java @@ -0,0 +1,59 @@ +package org.apache.hadoop.runc.docker.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class ManifestRefV2 { + + public static final String CONTENT_TYPE + = "application/vnd.docker.distribution.manifest.v2+json"; + + private String digest; + private String mediaType; + private PlatformV2 platform; + private long size; + + @JsonProperty + public String getDigest() { + return digest; + } + + public void setDigest(String digest) { + this.digest = digest; + } + + @JsonProperty + public String getMediaType() { + return mediaType; + } + + public void setMediaType(String mediaType) { + this.mediaType = mediaType; + } + + @JsonProperty + public PlatformV2 getPlatform() { + return platform; + } + + public void setPlatform(PlatformV2 platform) { + this.platform = platform; + } + + @JsonProperty + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + + @Override public String toString() { + return String.format( + "{ digest=%s, mediaType=%s, platform=%s, size=%d }", + digest, mediaType, platform, size); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestV2.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestV2.java new file mode 100644 index 00000000000..e315c4ce7a0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/ManifestV2.java @@ -0,0 +1,76 @@ +package org.apache.hadoop.runc.docker.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class ManifestV2 { + + public static String CONTENT_TYPE = + "application/vnd.docker.distribution.manifest.v2+json"; + + public static boolean matches(String contentType) { + return CONTENT_TYPE.equals(contentType); + } + + private int schemaVersion; + private String mediaType; + private BlobV2 config; + private List layers = new ArrayList<>(); + + @JsonProperty + public int getSchemaVersion() { + return schemaVersion; + } + + public void setSchemaVersion(int schemaVersion) { + this.schemaVersion = schemaVersion; + } + + @JsonProperty + public String getMediaType() { + return mediaType; + } + + public void setMediaType(String mediaType) { + this.mediaType = mediaType; + } + + 
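For illustration only (not part of the patch): ManifestListV2 binds directly to the manifest-list media type declared by its CONTENT_TYPE, with Jackson populating the manifests collection through its getter (the mapper's default getters-as-setters behaviour for collections). An abbreviated, made-up payload:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.runc.docker.model.ManifestListV2;
import org.apache.hadoop.runc.docker.model.ManifestRefV2;

public class ManifestListDemo {
  public static void main(String[] args) throws Exception {
    String json = "{\"schemaVersion\":2,"
        + "\"mediaType\":\"" + ManifestListV2.CONTENT_TYPE + "\","
        + "\"manifests\":[{\"mediaType\":\"" + ManifestRefV2.CONTENT_TYPE
        + "\",\"size\":7143,\"digest\":\"sha256:abc123\"}]}";

    ManifestListV2 list =
        new ObjectMapper().readValue(json, ManifestListV2.class);

    // toString() pretty-prints the entries; a ManifestChooser would then
    // pick one of them, typically by platform.
    System.out.println(list);
  }
}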
@JsonProperty + public BlobV2 getConfig() { + return config; + } + + public void setConfig(BlobV2 config) { + this.config = config; + } + + @JsonProperty + public List getLayers() { + return layers; + } + + @Override + public String toString() { + return String.format( + "{%n" + + " schemaVersion=%d,%n" + + " mediaType=%s,%n" + + " config=%s,%n" + + " layers=%s%n" + + "}", + schemaVersion, + mediaType, + config, + layers + .stream() + .map(Objects::toString) + .collect(Collectors.joining(",\n ", "[\n ", "\n ]"))); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/PlatformV2.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/PlatformV2.java new file mode 100644 index 00000000000..b017f626246 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/docker/model/PlatformV2.java @@ -0,0 +1,85 @@ +package org.apache.hadoop.runc.docker.model; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class PlatformV2 { + + private String architecture; + private String os; + private String osVersion; + private List osFeatures = new ArrayList<>(); + private String variant; + private List features = new ArrayList<>(); + + @JsonProperty + public String getArchitecture() { + return architecture; + } + + public void setArchitecture(String architecture) { + this.architecture = architecture; + } + + @JsonProperty + public String getOs() { + return os; + } + + public void setOs(String os) { + this.os = os; + } + + @JsonProperty("os.version") + public String getOsVersion() { + return osVersion; + } + + public void setOsVersion(String osVersion) { + this.osVersion = osVersion; + } + + @JsonProperty("os.features") + public List getOsFeatures() { + return osFeatures; + } + + @JsonProperty + public String getVariant() { + return variant; + } + + public void setVariant(String variant) { + this.variant = variant; + } + + @JsonProperty + public List getFeatures() { + return features; + } + + @Override + public String toString() { + return String.format("{ architecture=%s, os=%s, os.version=%s, " + + "os.features=%s, variant=%s, features=%s }", + architecture, + os, + osVersion, + osFeatures + .stream() + .map(Objects::toString) + .collect(Collectors.joining(", ", "[", "]")), + variant, + features + .stream() + .map(Objects::toString) + .collect(Collectors.joining(", ", "[", "]"))); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/AbstractSquashFsReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/AbstractSquashFsReader.java new file mode 100644 index 00000000000..d19ed93d8c7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/AbstractSquashFsReader.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlock; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; + +abstract public class AbstractSquashFsReader implements SquashFsReader { + + protected static int compareBytes(byte[] left, byte[] right) { + for (int i = 0; i < left.length && i < right.length; i++) { + int a = (left[i] & 0xff); + int b = (right[i] & 0xff); + if (a != b) { + return a - b; + } + } + return left.length - right.length; + } + + protected byte[] createSparseBlock(SuperBlock sb) { + return new byte[sb.getBlockSize()]; + } + + abstract protected byte[] getSparseBlock(); + + abstract protected DataBlock readBlock( + FileINode fileInode, + int blockNumber, + boolean cache) throws IOException, SquashFsException; + + abstract protected DataBlock readFragment( + FileINode fileInode, + int fragmentSize, + boolean cache) throws IOException, SquashFsException; + + @Override + public long writeFileStream(INode inode, OutputStream out) + throws IOException, SquashFsException { + + return writeFileOut(inode, (out instanceof DataOutput) + ? 
(DataOutput) out + : new DataOutputStream(out)); + } + + @Override + public long writeFileOut(INode inode, DataOutput out) + throws IOException, SquashFsException { + + if (!(inode instanceof FileINode)) { + throw new IllegalArgumentException("Inode is not a file"); + } + + FileINode fileInode = (FileINode) inode; + + long fileSize = fileInode.getFileSize(); + int blockSize = getSuperBlock().getBlockSize(); + int blockCount = fileInode.getBlockSizes().length; + boolean hasFragment = fileInode.isFragmentPresent(); + + long bytesRead = 0L; + + for (int i = 0; i < blockCount; i++) { + DataBlock data = readBlock(fileInode, i, false); + + if (i == (blockCount - 1) && !hasFragment) { + if (data.getLogicalSize() > blockSize) { + throw new SquashFsException( + String.format( + "Error during block read: expected max %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + writeBlock(getSparseBlock(), out, data); + bytesRead += data.getLogicalSize(); + } else { + if (data.getLogicalSize() != blockSize) { + throw new SquashFsException( + String.format("Error during file read: expected %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + writeBlock(getSparseBlock(), out, data); + bytesRead += data.getLogicalSize(); + } + } + + if (hasFragment) { + DataBlock data = + readFragment(fileInode, (int) (fileSize - bytesRead), true); + + if (data.getLogicalSize() > blockSize) { + throw new SquashFsException( + String.format( + "Error during fragment read: expected max %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + writeBlock(getSparseBlock(), out, data); + bytesRead += data.getLogicalSize(); + } + if (bytesRead != fileSize) { + throw new SquashFsException( + String.format( + "Error during final block read: expected %d total bytes, got %d", + fileSize, bytesRead)); + } + + return bytesRead; + } + + @Override + public int read(INode inode, long fileOffset, byte[] buf, int off, int len) + throws IOException, SquashFsException { + + if (!(inode instanceof FileINode)) { + throw new IllegalArgumentException("Inode is not a file"); + } + + FileINode fileInode = (FileINode) inode; + + long fileSize = fileInode.getFileSize(); + int blockSize = getSuperBlock().getBlockSize(); + int blockCount = fileInode.getBlockSizes().length; + boolean hasFragment = fileInode.isFragmentPresent(); + + int blockRelative = (int) (fileOffset % (long) blockSize); + int blockNumber = (int) ((fileOffset - blockRelative) / (long) blockSize); + + int bytesToRead = Math.max(0, Math.min(len, blockSize - blockRelative)); + + if (blockNumber < blockCount) { + // read the block + DataBlock data = readBlock(fileInode, blockNumber, true); + + if (blockNumber == (blockCount - 1) && !hasFragment) { + if (data.getLogicalSize() > blockSize) { + throw new SquashFsException( + String.format( + "Error during block read: expected max %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + + int bytesCopied = + copyData(getSparseBlock(), blockRelative, buf, off, bytesToRead, + data); + if (bytesCopied == 0) { + bytesCopied = -1; + } + return bytesCopied; + + } else { + if (data.getLogicalSize() != blockSize) { + throw new SquashFsException( + String.format("Error during file read: expected %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + return copyData(getSparseBlock(), blockRelative, buf, off, bytesToRead, + data); + } + } else if (blockNumber == blockCount && hasFragment) { + int fragmentSize = (int) (fileSize % (long) blockSize); + + // read fragment + DataBlock data = readFragment(fileInode, 
fragmentSize, true); + + if (data.getLogicalSize() > blockSize) { + throw new SquashFsException( + String.format( + "Error during fragment read: expected max %d bytes, got %d", + blockSize, data.getLogicalSize())); + } + int bytesCopied = + copyData(getSparseBlock(), blockRelative, buf, off, bytesToRead, + data); + if (bytesCopied == 0) { + bytesCopied = -1; + } + return bytesCopied; + + } else { + // EOF + return -1; + } + } + + protected int copyData( + byte[] sparseBlock, + int blockOffset, + byte[] data, + int off, + int len, + DataBlock block) { + if (block.getLogicalSize() == 0) { + return 0; + } + + int bytesToCopy = + Math.max(0, Math.min(len, block.getLogicalSize() - blockOffset)); + if (bytesToCopy == 0) { + return 0; + } + + if (block.isSparse()) { + System.arraycopy(sparseBlock, 0, data, off, bytesToCopy); + return bytesToCopy; + } + System.arraycopy(block.getData(), blockOffset, data, off, bytesToCopy); + return bytesToCopy; + } + + protected void writeBlock(byte[] sparseBlock, DataOutput out, DataBlock block) + throws IOException { + if (block.getLogicalSize() == 0) { + return; + } + + if (block.isSparse()) { + out.write(sparseBlock, 0, block.getLogicalSize()); + return; + } + + out.write(block.getData(), 0, block.getLogicalSize()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/FileSquashFsReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/FileSquashFsReader.java new file mode 100644 index 00000000000..818c98d1684 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/FileSquashFsReader.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
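For illustration only (not part of the patch): the random-access read() above first splits a file offset into a block number and an offset within that block, then decides whether to serve the request from a full block or the trailing fragment. The same arithmetic in isolation, with arbitrary values:

public class BlockOffsetDemo {
  public static void main(String[] args) {
    int blockSize = 131072;      // 128 KiB, a typical squashfs block size
    long fileOffset = 300000L;   // arbitrary position within a file

    // Same split as AbstractSquashFsReader.read():
    int blockRelative = (int) (fileOffset % (long) blockSize);
    int blockNumber = (int) ((fileOffset - blockRelative) / (long) blockSize);

    System.out.println("block " + blockNumber + ", offset " + blockRelative);
    // prints: block 2, offset 37856
  }
}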
+ */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlock; +import org.apache.hadoop.runc.squashfs.data.DataBlockCache; +import org.apache.hadoop.runc.squashfs.data.DataBlockReader; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.directory.DirectoryHeader; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.metadata.FileMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockCache; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.metadata.TaggedMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.table.ExportTable; +import org.apache.hadoop.runc.squashfs.table.FileTableReader; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.IdTable; +import org.apache.hadoop.runc.squashfs.table.TableReader; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +public class FileSquashFsReader extends AbstractSquashFsReader { + + private final int tag; + private final RandomAccessFile raf; + private final SuperBlock superBlock; + private final MetadataBlockCache metaReader; + private final DataBlockCache dataCache; + private final DataBlockCache fragmentCache; + private final IdTable idTable; + private final FragmentTable fragmentTable; + private final ExportTable exportTable; + private final byte[] sparseBlock; + + FileSquashFsReader(int tag, File inputFile) + throws SquashFsException, IOException { + this(tag, inputFile, new MetadataBlockCache( + new TaggedMetadataBlockReader(false)), + DataBlockCache.NO_CACHE, + DataBlockCache.NO_CACHE); + } + + FileSquashFsReader( + int tag, + File inputFile, + MetadataBlockCache metadataCache, + DataBlockCache dataCache, + DataBlockCache fragmentCache) throws SquashFsException, IOException { + this.tag = tag; + this.dataCache = dataCache; + this.fragmentCache = fragmentCache; + raf = new RandomAccessFile(inputFile, "r"); + superBlock = readSuperBlock(raf); + sparseBlock = createSparseBlock(superBlock); + + this.metaReader = metadataCache; + metaReader + .add(tag, new FileMetadataBlockReader(tag, raf, superBlock, false)); + idTable = readIdTable(tag, raf, metaReader); + fragmentTable = readFragmentTable(tag, raf, metaReader); + exportTable = readExportTable(tag, raf, metaReader); + } + + static SuperBlock readSuperBlock(RandomAccessFile raf) + throws IOException, SquashFsException { + raf.seek(0L); + return SuperBlock.read(raf); + } + + static IdTable readIdTable( + int tag, + RandomAccessFile raf, + MetadataBlockReader metaReader) throws IOException, SquashFsException { + + TableReader tr = + new FileTableReader(raf, metaReader.getSuperBlock(tag), false); + return IdTable.read(tag, tr, metaReader); + } + + static FragmentTable readFragmentTable( + int tag, + RandomAccessFile raf, + MetadataBlockReader metaReader) throws IOException, SquashFsException { + + TableReader tr = + new FileTableReader(raf, 
metaReader.getSuperBlock(tag), false); + return FragmentTable.read(tag, tr, metaReader); + } + + static ExportTable readExportTable( + int tag, + RandomAccessFile raf, + MetadataBlockReader metaReader) throws IOException, SquashFsException { + + TableReader tr = + new FileTableReader(raf, metaReader.getSuperBlock(tag), false); + return ExportTable.read(tag, tr, metaReader); + } + + @Override + public void close() throws IOException { + raf.close(); + } + + @Override + protected byte[] getSparseBlock() { + return sparseBlock; + } + + @Override + public SuperBlock getSuperBlock() { + return superBlock; + } + + @Override + public IdTable getIdTable() { + return idTable; + } + + @Override + public FragmentTable getFragmentTable() { + return fragmentTable; + } + + @Override + public ExportTable getExportTable() { + return exportTable; + } + + @Override + public MetadataBlockReader getMetaReader() { + return metaReader; + } + + @Override + public DirectoryINode getRootInode() throws IOException, SquashFsException { + SuperBlock sb = metaReader.getSuperBlock(tag); + long rootInodeRef = sb.getRootInodeRef(); + MetadataReader rootInodeReader = metaReader.inodeReader(tag, rootInodeRef); + INode parent = INode.read(metaReader.getSuperBlock(tag), rootInodeReader); + if (!(parent instanceof DirectoryINode)) { + throw new SquashFsException( + "Archive corrupt: root inode is not a directory"); + } + DirectoryINode dirInode = (DirectoryINode) parent; + return dirInode; + } + + @Override + public INode findInodeByInodeRef(INodeRef ref) + throws IOException, SquashFsException { + MetadataReader inodeReader = metaReader.inodeReader(tag, ref.getRaw()); + return INode.read(metaReader.getSuperBlock(tag), inodeReader); + } + + @Override + public INode findInodeByDirectoryEntry(DirectoryEntry entry) + throws IOException, SquashFsException { + MetadataReader inodeReader = metaReader.inodeReader(tag, entry); + return INode.read(metaReader.getSuperBlock(tag), inodeReader); + } + + @Override + public INode findInodeByPath(String path) + throws IOException, SquashFsException, FileNotFoundException { + long rootInodeRef = superBlock.getRootInodeRef(); + MetadataReader rootInodeReader = metaReader.inodeReader(tag, rootInodeRef); + INode parent = INode.read(metaReader.getSuperBlock(tag), rootInodeReader); + + // normalize path + String[] parts = + path.replaceAll("^/+", "").replaceAll("/+$", "").split("/+"); + + for (String part : parts) { + byte[] left = part.getBytes(StandardCharsets.ISO_8859_1); + + if (!(parent instanceof DirectoryINode)) { + throw new FileNotFoundException(path); + } + DirectoryINode dirInode = (DirectoryINode) parent; + + MetadataReader dirReader = metaReader.directoryReader(tag, dirInode); + int bytesToRead = dirInode.getFileSize() - 3; + boolean found = false; + while (dirReader.position() < bytesToRead) { + DirectoryHeader header = DirectoryHeader.read(dirReader); + for (int i = 0; i <= header.getCount(); i++) { + DirectoryEntry entry = DirectoryEntry.read(header, dirReader); + byte[] right = entry.getName(); + int compare = compareBytes(left, right); + if (compare == 0) { + found = true; + parent = INode.read(superBlock, metaReader.inodeReader(tag, entry)); + break; + } else if (compare < 0) { + // went past + throw new FileNotFoundException(path); + } + } + + if (found) { + break; + } + } + if (!found) { + throw new FileNotFoundException(path); + } + } + + return parent; + } + + @Override + public List getChildren(INode parent) + throws IOException, SquashFsException { + if (!(parent 
instanceof DirectoryINode)) { + throw new IllegalArgumentException("Inode is not a directory"); + } + + DirectoryINode dirInode = (DirectoryINode) parent; + + List dirEntries = new ArrayList<>(); + + MetadataReader dirReader = metaReader.directoryReader(tag, dirInode); + + int dirSize = dirInode.getFileSize(); + if (dirSize > 0) { + int bytesToRead = dirSize - 3; + + while (dirReader.position() < bytesToRead) { + DirectoryHeader header = DirectoryHeader.read(dirReader); + for (int i = 0; i <= header.getCount(); i++) { + DirectoryEntry entry = DirectoryEntry.read(header, dirReader); + dirEntries.add(entry); + } + } + if (dirReader.position() != bytesToRead) { + throw new SquashFsException(String.format("Read %d bytes, expected %d", + dirReader.position(), bytesToRead)); + } + } + return dirEntries; + } + + protected DataBlock readBlock(FileINode fileInode, int blockNumber, + boolean cache) + throws IOException, SquashFsException { + + return DataBlockReader + .readBlock(tag, raf, superBlock, fileInode, blockNumber, + cache ? dataCache : DataBlockCache.NO_CACHE); + } + + protected DataBlock readFragment(FileINode fileInode, int fragmentSize, + boolean cache) + throws IOException, SquashFsException { + + return DataBlockReader.readFragment( + tag, raf, superBlock, fileInode, fragmentTable, fragmentSize, + cache ? fragmentCache : DataBlockCache.NO_CACHE); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/MappedSquashFsReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/MappedSquashFsReader.java new file mode 100644 index 00000000000..47663e74447 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/MappedSquashFsReader.java @@ -0,0 +1,299 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
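For illustration only (not part of the patch): listing the root directory of an existing image with the file-backed reader. The constructors above are package-private, so the sketch lives in the same package; real callers would presumably go through the SquashFsReader factory methods (not shown in this hunk), and the children list is assumed to be typed as DirectoryEntry.

package org.apache.hadoop.runc.squashfs;

import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry;
import org.apache.hadoop.runc.squashfs.inode.DirectoryINode;

import java.io.File;
import java.nio.charset.StandardCharsets;

public class ListRootDemo {
  public static void main(String[] args) throws Exception {
    // args[0]: path to an existing squashfs image
    FileSquashFsReader reader = new FileSquashFsReader(0, new File(args[0]));
    try {
      DirectoryINode root = reader.getRootInode();
      for (DirectoryEntry entry : reader.getChildren(root)) {
        // Entry names are stored as raw bytes (ISO-8859-1 in this codebase).
        System.out.println(
            new String(entry.getName(), StandardCharsets.ISO_8859_1));
      }
    } finally {
      reader.close();
    }
  }
}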
+ */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlock; +import org.apache.hadoop.runc.squashfs.data.DataBlockCache; +import org.apache.hadoop.runc.squashfs.data.MappedDataBlockReader; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.directory.DirectoryHeader; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.io.ByteBufferDataInput; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.metadata.MappedFileMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockCache; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.metadata.TaggedMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.table.ExportTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.IdTable; +import org.apache.hadoop.runc.squashfs.table.MappedFileTableReader; +import org.apache.hadoop.runc.squashfs.table.TableReader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +public class MappedSquashFsReader extends AbstractSquashFsReader { + + public static final int PREFERRED_MAP_SIZE = 128 * 1024 * 1024; // 128 MB + public static final int PREFERRED_WINDOW_SIZE = 2 * PREFERRED_MAP_SIZE; + private static final Logger LOG = + LoggerFactory.getLogger(MappedSquashFsReader.class); + private final int tag; + private final MappedFile mmap; + private final SuperBlock superBlock; + private final MetadataBlockCache metaReader; + private final DataBlockCache dataCache; + private final DataBlockCache fragmentCache; + private final IdTable idTable; + private final FragmentTable fragmentTable; + private final ExportTable exportTable; + private final byte[] sparseBlock; + + MappedSquashFsReader(int tag, MappedFile mmap) + throws SquashFsException, IOException { + this(tag, mmap, new MetadataBlockCache( + new TaggedMetadataBlockReader(false)), + DataBlockCache.NO_CACHE, + DataBlockCache.NO_CACHE); + } + + MappedSquashFsReader( + int tag, + MappedFile mmap, + MetadataBlockCache metadataCache, + DataBlockCache dataCache, + DataBlockCache fragmentCache) throws SquashFsException, IOException { + + this.tag = tag; + this.dataCache = dataCache; + this.fragmentCache = fragmentCache; + this.mmap = mmap; + superBlock = readSuperBlock(mmap); + LOG.trace("Superblock: {}", superBlock); + sparseBlock = createSparseBlock(superBlock); + + this.metaReader = metadataCache; + metaReader + .add(tag, new MappedFileMetadataBlockReader(tag, superBlock, mmap)); + idTable = readIdTable(tag, mmap, metaReader); + LOG.trace("ID table: {}", idTable); + fragmentTable = readFragmentTable(tag, mmap, metaReader); + LOG.trace("Fragment table: {}", fragmentTable); + exportTable = readExportTable(tag, mmap, metaReader); + LOG.trace("Export table: {}", exportTable); + } + + static SuperBlock readSuperBlock(MappedFile mmap) + throws IOException, SquashFsException { + 
return SuperBlock.read(new ByteBufferDataInput(mmap.from(0L))); + } + + static IdTable readIdTable(int tag, MappedFile mmap, + MetadataBlockReader metaReader) + throws IOException, SquashFsException { + + TableReader tr = + new MappedFileTableReader(mmap, metaReader.getSuperBlock(tag)); + return IdTable.read(tag, tr, metaReader); + } + + static FragmentTable readFragmentTable(int tag, MappedFile mmap, + MetadataBlockReader metaReader) + throws IOException, SquashFsException { + + TableReader tr = + new MappedFileTableReader(mmap, metaReader.getSuperBlock(tag)); + return FragmentTable.read(tag, tr, metaReader); + } + + static ExportTable readExportTable(int tag, MappedFile mmap, + MetadataBlockReader metaReader) + throws IOException, SquashFsException { + + TableReader tr = + new MappedFileTableReader(mmap, metaReader.getSuperBlock(tag)); + return ExportTable.read(tag, tr, metaReader); + } + + @Override + public void close() { + } + + @Override + public SuperBlock getSuperBlock() { + return superBlock; + } + + @Override + protected byte[] getSparseBlock() { + return sparseBlock; + } + + @Override + public IdTable getIdTable() { + return idTable; + } + + @Override + public FragmentTable getFragmentTable() { + return fragmentTable; + } + + @Override + public ExportTable getExportTable() { + return exportTable; + } + + @Override + public MetadataBlockReader getMetaReader() { + return metaReader; + } + + @Override + public DirectoryINode getRootInode() throws IOException, SquashFsException { + SuperBlock sb = metaReader.getSuperBlock(tag); + + long rootInodeRef = sb.getRootInodeRef(); + + MetadataReader rootInodeReader = metaReader.inodeReader(tag, rootInodeRef); + INode parent = INode.read(metaReader.getSuperBlock(tag), rootInodeReader); + if (!(parent instanceof DirectoryINode)) { + throw new SquashFsException( + "Archive corrupt: root inode is not a directory"); + } + DirectoryINode dirInode = (DirectoryINode) parent; + return dirInode; + } + + @Override + public INode findInodeByInodeRef(INodeRef ref) + throws IOException, SquashFsException { + MetadataReader inodeReader = metaReader.inodeReader(tag, ref.getRaw()); + return INode.read(metaReader.getSuperBlock(tag), inodeReader); + } + + @Override + public INode findInodeByDirectoryEntry(DirectoryEntry entry) + throws IOException, SquashFsException { + MetadataReader inodeReader = metaReader.inodeReader(tag, entry); + return INode.read(metaReader.getSuperBlock(tag), inodeReader); + } + + @Override + public INode findInodeByPath(String path) + throws IOException, SquashFsException, FileNotFoundException { + long rootInodeRef = superBlock.getRootInodeRef(); + MetadataReader rootInodeReader = metaReader.inodeReader(tag, rootInodeRef); + INode parent = INode.read(metaReader.getSuperBlock(tag), rootInodeReader); + + // normalize path + String[] parts = + path.replaceAll("^/+", "").replaceAll("/+$", "").split("/+"); + + for (String part : parts) { + byte[] left = part.getBytes(StandardCharsets.ISO_8859_1); + + if (!(parent instanceof DirectoryINode)) { + throw new FileNotFoundException(path); + } + DirectoryINode dirInode = (DirectoryINode) parent; + + MetadataReader dirReader = metaReader.directoryReader(tag, dirInode); + int bytesToRead = dirInode.getFileSize() - 3; + boolean found = false; + while (dirReader.position() < bytesToRead) { + DirectoryHeader header = DirectoryHeader.read(dirReader); + for (int i = 0; i <= header.getCount(); i++) { + DirectoryEntry entry = DirectoryEntry.read(header, dirReader); + byte[] right = entry.getName(); + 
int compare = compareBytes(left, right); + if (compare == 0) { + found = true; + parent = INode.read(superBlock, metaReader.inodeReader(tag, entry)); + break; + } else if (compare < 0) { + // went past + throw new FileNotFoundException(path); + } + } + + if (found) { + break; + } + } + if (!found) { + throw new FileNotFoundException(path); + } + } + + return parent; + } + + @Override + public List getChildren(INode parent) + throws IOException, SquashFsException { + if (!(parent instanceof DirectoryINode)) { + throw new IllegalArgumentException("Inode is not a directory"); + } + + DirectoryINode dirInode = (DirectoryINode) parent; + + List dirEntries = new ArrayList<>(); + + MetadataReader dirReader = metaReader.directoryReader(tag, dirInode); + + int dirSize = dirInode.getFileSize(); + if (dirSize > 0) { + int bytesToRead = dirSize - 3; + + while (dirReader.position() < bytesToRead) { + DirectoryHeader header = DirectoryHeader.read(dirReader); + for (int i = 0; i <= header.getCount(); i++) { + DirectoryEntry entry = DirectoryEntry.read(header, dirReader); + dirEntries.add(entry); + } + } + if (dirReader.position() != bytesToRead) { + throw new SquashFsException(String.format("Read %d bytes, expected %d", + dirReader.position(), bytesToRead)); + } + } + return dirEntries; + } + + protected DataBlock readBlock(FileINode fileInode, int blockNumber, + boolean cache) + throws IOException, SquashFsException { + + return MappedDataBlockReader + .readBlock(tag, mmap, superBlock, fileInode, blockNumber, + cache ? dataCache : DataBlockCache.NO_CACHE); + } + + protected DataBlock readFragment(FileINode fileInode, int fragmentSize, + boolean cache) + throws IOException, SquashFsException { + + return MappedDataBlockReader.readFragment( + tag, mmap, superBlock, fileInode, fragmentTable, fragmentSize, + cache ? fragmentCache : DataBlockCache.NO_CACHE); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsConverter.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsConverter.java new file mode 100644 index 00000000000..4d6136167fe --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsConverter.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Date; +import java.util.concurrent.atomic.AtomicReference; +import java.util.zip.GZIPInputStream; + +public class SquashFsConverter { + + private static final Logger LOG + = LoggerFactory.getLogger(SquashFsConverter.class); + + public static void convertToSquashFs(File inputFile, File outputFile) + throws IOException { + + LOG.debug("Converting {} -> {}..", + inputFile.getAbsolutePath(), outputFile.getAbsolutePath()); + + try ( + FileInputStream fis = new FileInputStream(inputFile); + GZIPInputStream gis = new GZIPInputStream(fis); + TarArchiveInputStream tis = new TarArchiveInputStream(gis)) { + + long fileCount = 0L; + try (SquashFsWriter writer = new SquashFsWriter(outputFile)) { + TarArchiveEntry entry; + AtomicReference modDate = new AtomicReference<>(new Date(0)); + + while ((entry = tis.getNextTarEntry()) != null) { + processTarEntry(tis, entry, writer, modDate); + fileCount++; + } + writer.setModificationTime((int) (modDate.get().getTime() / 1000L)); + writer.finish(); + } + + LOG.debug("Converted image containing {} files", fileCount); + } + } + + private static void processTarEntry( + TarArchiveInputStream tis, + TarArchiveEntry entry, + SquashFsWriter writer, + AtomicReference modDate) throws IOException { + + int userId = (int) entry.getLongUserId(); + int groupId = (int) entry.getLongGroupId(); + + String name = entry.getName() + .replaceAll("/+", "/") + .replaceAll("^/", "") + .replaceAll("/$", "") + .replaceAll("^", "/"); + + short permissions = (short) (entry.getMode() & 07777); + + Date lastModified = entry.getLastModifiedDate(); + if (lastModified.after(modDate.get())) { + modDate.set(lastModified); + } + + SquashFsEntryBuilder tb = writer.entry(name) + .uid(userId) + .gid(groupId) + .permissions(permissions) + .fileSize(entry.getSize()) + .lastModified(lastModified); + + if (entry.isSymbolicLink()) { + tb.symlink(entry.getLinkName()); + } else if (entry.isDirectory()) { + tb.directory(); + } else if (entry.isFile()) { + tb.file(); + } else if (entry.isBlockDevice()) { + tb.blockDev(entry.getDevMajor(), entry.getDevMinor()); + } else if (entry.isCharacterDevice()) { + tb.charDev(entry.getDevMajor(), entry.getDevMinor()); + } else if (entry.isFIFO()) { + tb.fifo(); + } else { + throw new IOException( + String.format("Unknown file type for '%s'", entry.getName())); + } + + if (entry.isLink()) { + String target = entry.getLinkName() + .replaceAll("/+", "/") + .replaceAll("^/", "") + .replaceAll("/$", "") + .replaceAll("^", "/"); + tb.hardlink(target); + } + + if (entry.isFile() && !entry.isLink()) { + tb.content(tis, entry.getSize()); + } + + tb.build(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntry.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntry.java new file mode 100644 index 00000000000..ef0982392d7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntry.java @@ -0,0 +1,495 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
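For illustration only (not part of the patch): converting a single gzipped layer tarball with the converter defined above. The paths are placeholders; the input must be a .tar.gz layer blob, since convertToSquashFs wraps the stream in a GZIPInputStream before reading tar entries.

import org.apache.hadoop.runc.squashfs.SquashFsConverter;

import java.io.File;

public class ConvertLayerDemo {
  public static void main(String[] args) throws Exception {
    File layerTarGz = new File(args[0]);   // e.g. a layer blob saved as .tar.gz
    File squashImage = new File(args[1]);  // output squashfs image
    SquashFsConverter.convertToSquashFs(layerTarGz, squashImage);
  }
}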
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlockRef; +import org.apache.hadoop.runc.squashfs.data.FragmentRef; +import org.apache.hadoop.runc.squashfs.directory.DirectoryBuilder; +import org.apache.hadoop.runc.squashfs.inode.DeviceINode; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedBlockDeviceINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedCharDeviceINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedDirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedFifoINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedFileINode; +import org.apache.hadoop.runc.squashfs.inode.ExtendedSymlinkINode; +import org.apache.hadoop.runc.squashfs.inode.FifoINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.inode.INodeType; +import org.apache.hadoop.runc.squashfs.inode.Permission; +import org.apache.hadoop.runc.squashfs.inode.SymlinkINode; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.concurrent.atomic.AtomicInteger; + +public class SquashFsEntry { + final List children = new ArrayList<>(); + int inodeNumber; + SquashFsEntry parent; + INodeType type; + INode inode; + String name; + boolean synthetic; + short uid; + short gid; + short permissions; + int major; + int minor; + int nlink; + long fileSize; + int lastModified; + String symlinkTarget; + String hardlinkTarget; + SquashFsEntry hardlinkEntry; + List dataBlocks; + FragmentRef fragment; + + SquashFsEntry() { + this.type = INodeType.BASIC_DIRECTORY; + this.name = ""; + this.uid = 0; + this.gid = 0; + this.permissions = 0755; + this.nlink = 1; + this.lastModified = 0; + } + + SquashFsEntry( + INodeType type, + String name, + short uid, + short gid, + short permissions, + int major, + int minor, + long fileSize, + int lastModified, + String symlinkTarget, + String hardlinkTarget, + List dataBlocks, + FragmentRef fragment, + boolean synthetic) { + this.type = type; + this.name = name; + this.uid = uid; + this.gid = gid; + this.permissions = permissions; + this.major = major; + this.minor = minor; + this.nlink = 1; + this.fileSize = fileSize; + this.lastModified = lastModified; + this.symlinkTarget = symlinkTarget; + this.hardlinkTarget = hardlinkTarget; + this.dataBlocks = dataBlocks; + this.fragment 
= fragment; + this.synthetic = synthetic; + } + + static int compareBytes(byte[] left, byte[] right) { + for (int i = 0; i < left.length && i < right.length; i++) { + int a = (left[i] & 0xff); + int b = (right[i] & 0xff); + if (a != b) { + return a - b; + } + } + return left.length - right.length; + } + + static int compareEntries(SquashFsEntry left, SquashFsEntry right) { + return compareBytes( + left.getShortName().getBytes(StandardCharsets.ISO_8859_1), + right.getShortName().getBytes(StandardCharsets.ISO_8859_1)); + } + + public int getInodeNumber() { + return inodeNumber; + } + + public SquashFsEntry getParent() { + return parent; + } + + public INode getInode() { + return inode; + } + + public String getName() { + return name; + } + + public boolean isSynthetic() { + return synthetic; + } + + public short getUid() { + return uid; + } + + public short getGid() { + return gid; + } + + public int getMajor() { + return major; + } + + public int getMinor() { + return minor; + } + + public int getNlink() { + return nlink; + } + + public long getFileSize() { + return fileSize; + } + + public int getLastModified() { + return lastModified; + } + + public String getSymlinkTarget() { + return symlinkTarget; + } + + public String getHardlinkTarget() { + return hardlinkTarget; + } + + public SquashFsEntry getHardlinkEntry() { + return hardlinkEntry; + } + + public List<DataBlockRef> getDataBlocks() { + return Collections.unmodifiableList(dataBlocks); + } + + public FragmentRef getFragment() { + return fragment; + } + + public String getShortName() { + return name.substring(name.lastIndexOf('/') + 1); + } + + public List<SquashFsEntry> getChildren() { + return Collections.unmodifiableList(children); + } + + void sortChildren() { + Collections.sort(children, SquashFsEntry::compareEntries); + for (SquashFsEntry child : children) { + child.sortChildren(); + } + } + + void assignInodes(Map<String, SquashFsEntry> entryMap, + AtomicInteger inodeAssignments) { + for (SquashFsEntry child : children) { + if (child.hardlinkTarget == null) { + child.inodeNumber = inodeAssignments.incrementAndGet(); + } + } + for (SquashFsEntry child : children) { + child.assignInodes(entryMap, inodeAssignments); + } + if (parent == null) { + inodeNumber = inodeAssignments.incrementAndGet(); + } + } + + void assignHardlinkInodes( + SortedMap<String, SquashFsEntry> entryMap, + SortedMap<Integer, Set<SquashFsEntry>> inodeToEntry) { + if (hardlinkTarget != null) { + SquashFsEntry target = entryMap.get(hardlinkTarget); + hardlinkEntry = target; + inodeNumber = target.inodeNumber; + Integer key = Integer.valueOf(inodeNumber); + if (!inodeToEntry.containsKey(key)) { + inodeToEntry.put(key, new LinkedHashSet<>()); + } + inodeToEntry.get(key).add(target); + inodeToEntry.get(key).add(this); + } + for (SquashFsEntry child : children) { + child.assignHardlinkInodes(entryMap, inodeToEntry); + } + } + + void updateDirectoryLinkCounts() { + if (type != null && type.directory()) { + nlink++; + } + + for (SquashFsEntry child : children) { + nlink++; + child.updateDirectoryLinkCounts(); + } + } + + void updateHardlinkInodeCounts( + SortedMap<Integer, Set<SquashFsEntry>> inodeToEntry) { + for (Set<SquashFsEntry> set : inodeToEntry.values()) { + int count = set.stream().mapToInt(e -> e.nlink).sum(); + set.stream().forEach(e -> e.nlink = count); + } + } + + void createInodes() { + for (SquashFsEntry child : children) { + if (child.hardlinkTarget == null) { + child.inode = child.createINode(); + } + } + for (SquashFsEntry child : children) { + child.createInodes(); + } + if (parent == null) { + inode = createINode(); + } + } + + void createHardlinkInodes() { + if (hardlinkEntry != null) {
this.inode = hardlinkEntry.inode; + } + for (SquashFsEntry child : children) { + child.createHardlinkInodes(); + } + } + + MetadataBlockRef writeMetadata( + MetadataWriter inodeWriter, + MetadataWriter dirWriter, + Map<Integer, MetadataBlockRef> visitedInodes) throws IOException { + + if (type != null && type.directory()) { + if (children.isEmpty()) { + DirectoryINode dirInode = (DirectoryINode) inode; + dirInode.setFileSize(3); + dirInode.setStartBlock(0); + dirInode.setOffset((short) 0); + } else { + for (SquashFsEntry child : children) { + child.writeMetadata(inodeWriter, dirWriter, visitedInodes); + } + + DirectoryBuilder db = new DirectoryBuilder(); + + for (SquashFsEntry child : children) { + Integer inodeKey = Integer.valueOf(child.inodeNumber); + + MetadataBlockRef inodeRef; + if (visitedInodes.containsKey(inodeKey)) { + inodeRef = visitedInodes.get(inodeKey); + } else { + child.inode = child.inode.simplify(); + inodeRef = inodeWriter.getCurrentReference(); + child.inode.writeData(inodeWriter); + visitedInodes.put(inodeKey, inodeRef); + } + + db.add( + child.getShortName(), + inodeRef.getLocation(), + child.inodeNumber, + inodeRef.getOffset(), + child.inode.getInodeType()); + } + + MetadataBlockRef dirRef = dirWriter.getCurrentReference(); + db.write(dirWriter); + + int size = db.getStructureSize(); + DirectoryINode dirInode = (DirectoryINode) inode; + dirInode.setFileSize(size + 3); + dirInode.setStartBlock(dirRef.getLocation()); + dirInode.setOffset(dirRef.getOffset()); + } + + if (parent == null) { + // root + MetadataBlockRef rootInodeRef = inodeWriter.getCurrentReference(); + visitedInodes.put( + Integer.valueOf(inode.getInodeNumber()), rootInodeRef); + + DirectoryINode rootInode = (DirectoryINode) inode; + rootInode.setParentInodeNumber(visitedInodes.size() + 1); + inode = rootInode.simplify(); + inode.writeData(inodeWriter); + + return rootInodeRef; + } + } + + return null; + } + + private INode createINode() { + switch (type) { + case BASIC_DIRECTORY: + return createDirectoryINode(); + case BASIC_FILE: + return createFileINode(); + case BASIC_BLOCK_DEVICE: + return createBlockDevice(); + case BASIC_CHAR_DEVICE: + return createCharDevice(); + case BASIC_FIFO: + return createFifo(); + case BASIC_SYMLINK: + return createSymlink(); + default: + throw new IllegalArgumentException( + String.format("Invalid inode type %s", type)); + } + } + + private <T extends INode> T fill(T inode) { + inode.setInodeNumber(inodeNumber); + inode.setUidIdx(uid); + inode.setGidIdx(gid); + inode.setPermissions(permissions); + inode.setModifiedTime(lastModified); + return inode; + } + + private DirectoryINode createDirectoryINode() { + ExtendedDirectoryINode dir = new ExtendedDirectoryINode(); + + dir.setParentInodeNumber(parent == null ?
-1 : parent.inodeNumber); + dir.setNlink(nlink); + return fill(dir); + } + + private FileINode createFileINode() { + ExtendedFileINode file = new ExtendedFileINode(); + file.setFileSize(fileSize); + file.setNlink(nlink); + + if (dataBlocks == null || dataBlocks.isEmpty()) { + file.setBlocksStart(0L); + file.setBlockSizes(new int[0]); + file.setSparse(0L); + } else { + long sparse = 0L; + file.setBlocksStart(dataBlocks.get(0).getLocation()); + int[] sizes = new int[dataBlocks.size()]; + for (int i = 0; i < sizes.length; i++) { + DataBlockRef dbr = dataBlocks.get(i); + if (dbr.isSparse()) { + sparse += dbr.getLogicalSize(); + sizes[i] = 0; + } else { + sizes[i] = dbr.getPhysicalSize(); + if (!dbr.isCompressed()) { + sizes[i] |= 0x1_000_000; // uncompressed bit + } + } + } + if (sparse >= fileSize) { + sparse = fileSize - 1L; + } + file.setBlockSizes(sizes); + file.setSparse(sparse); + } + + if (fragment != null) { + file.setFragmentBlockIndex(fragment.getFragmentIndex()); + file.setFragmentOffset(fragment.getOffset()); + } + + return fill(file); + } + + private DeviceINode createBlockDevice() { + ExtendedBlockDeviceINode dev = new ExtendedBlockDeviceINode(); + + dev.setNlink(nlink); + dev.setDevice(deviceNum()); + + return fill(dev); + } + + private DeviceINode createCharDevice() { + ExtendedCharDeviceINode dev = new ExtendedCharDeviceINode(); + + dev.setNlink(nlink); + dev.setDevice(deviceNum()); + + return fill(dev); + } + + private FifoINode createFifo() { + ExtendedFifoINode fifo = new ExtendedFifoINode(); + fifo.setNlink(nlink); + return fill(fifo); + } + + private SymlinkINode createSymlink() { + ExtendedSymlinkINode symlink = new ExtendedSymlinkINode(); + symlink.setNlink(nlink); + symlink.setTargetPath(symlinkTarget.getBytes(StandardCharsets.ISO_8859_1)); + return fill(symlink); + } + + private int deviceNum() { + long deviceNum = 0L; + deviceNum |= ((major & 0xfff) << 8); + deviceNum |= (minor & 0xff); + deviceNum |= ((minor & 0xfff00) << 12); + + return (int) (deviceNum & 0xffffffff); + } + + @Override + public String toString() { + return String.format("%s%s %5d %5d %5d %5d %10d %s %s%s%s%s", + type.mode(), + Permission.toDisplay(permissions), + uid, + gid, + inodeNumber, + nlink, + fileSize, + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") + .format(lastModified * 1000L), + (parent == null && inode != null) ? "/" : "", + name, + hardlinkTarget == null ? "" : " link to " + hardlinkTarget, + symlinkTarget == null ? "" : " -> " + symlinkTarget); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntryBuilder.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntryBuilder.java new file mode 100644 index 00000000000..d8fef93d68d --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsEntryBuilder.java @@ -0,0 +1,287 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
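For reference, a standalone sketch of the device-number packing performed by SquashFsEntry.deviceNum() above; the DeviceNumDemo class and the sample major/minor values are illustrative only, not part of the patch:

    // Mirrors SquashFsEntry.deviceNum(): bits 8-19 carry the major number,
    // bits 0-7 the low byte of the minor, bits 20-31 the high bits of the minor.
    public class DeviceNumDemo {

      static int pack(int major, int minor) {
        long dev = 0L;
        dev |= ((major & 0xfff) << 8);
        dev |= (minor & 0xff);
        dev |= ((minor & 0xfff00) << 12);
        return (int) (dev & 0xffffffffL);
      }

      public static void main(String[] args) {
        // e.g. a device with major 8, minor 1 packs to 0x00000801
        System.out.printf("0x%08x%n", pack(8, 1));
      }
    }
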
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlockRef; +import org.apache.hadoop.runc.squashfs.data.FragmentRef; +import org.apache.hadoop.runc.squashfs.inode.INodeType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Optional; +import java.util.function.LongConsumer; + +public class SquashFsEntryBuilder { + + private static final Logger LOG = + LoggerFactory.getLogger(SquashFsEntryBuilder.class); + + private final SquashFsWriter writer; + + private INodeType type; + private int major; + private int minor; + private String name; + private Short uid; + private Short gid; + private Short permissions; + private Long fileSize; + private Integer lastModified; + private String symlinkTarget; + private String hardlinkTarget; + private List<DataBlockRef> dataBlocks; + private FragmentRef fragment; + private boolean synthetic = false; + + public SquashFsEntryBuilder(SquashFsWriter writer, String name) { + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("filename is required"); + } + if (!name.startsWith("/")) { + throw new IllegalArgumentException( + String.format("filename '%s' must begin with a slash", name)); + } + if (name.length() > 1 && name.endsWith("/")) { + throw new IllegalArgumentException( + String.format("filename '%s' may not end with a slash", name)); + } + if ("/".equals(name)) { + name = ""; + } + this.writer = writer; + this.name = name; + } + + public SquashFsEntryBuilder uid(int uid) { + this.uid = writer.getIdGenerator().addUidGid(uid); + return this; + } + + public SquashFsEntryBuilder gid(int gid) { + this.gid = writer.getIdGenerator().addUidGid(gid); + return this; + } + + public SquashFsEntryBuilder dataBlock(DataBlockRef block) { + if (dataBlocks == null) { + dataBlocks = new ArrayList<>(); + } + LOG.debug("Wrote datablock {}", block); + dataBlocks.add(block); + return this; + } + + public SquashFsEntryBuilder fragment(FragmentRef fragment) { + this.fragment = fragment; + LOG.debug("Wrote fragment {}", fragment); + return this; + } + + public SquashFsEntryBuilder permissions(short permissions) { + this.permissions = permissions; + return this; + } + + public SquashFsEntryBuilder lastModified(Date lastModified) { + return lastModified(lastModified.getTime()); + } + + public SquashFsEntryBuilder lastModified(Instant lastModified) { + return lastModified(lastModified.toEpochMilli()); + } + + public SquashFsEntryBuilder lastModified(long lastModified) { + this.lastModified = (int) (lastModified / 1000); + return this; + } + + public SquashFsEntryBuilder fileSize(long fileSize) { + this.fileSize = fileSize; + return this; + } + + public SquashFsEntryBuilder directory() { + this.type = INodeType.BASIC_DIRECTORY; + return this; + } + + public SquashFsEntryBuilder file() { + this.type = INodeType.BASIC_FILE; + return this; + } + + public SquashFsEntryBuilder blockDev(int
major, int minor) { + this.type = INodeType.BASIC_BLOCK_DEVICE; + this.major = major; + this.minor = minor; + return this; + } + + public SquashFsEntryBuilder charDev(int major, int minor) { + this.type = INodeType.BASIC_CHAR_DEVICE; + this.major = major; + this.minor = minor; + return this; + } + + public SquashFsEntryBuilder fifo() { + this.type = INodeType.BASIC_FIFO; + return this; + } + + public SquashFsEntryBuilder symlink(String target) { + this.type = INodeType.BASIC_SYMLINK; + this.symlinkTarget = target; + return this; + } + + public SquashFsEntryBuilder hardlink(String target) { + this.hardlinkTarget = target; + return this; + } + + public SquashFsEntryBuilder synthetic() { + this.synthetic = true; + return this; + } + + public SquashFsEntryBuilder content(byte[] content) throws IOException { + try (ByteArrayInputStream bis = new ByteArrayInputStream(content)) { + return content(bis, (long) content.length); + } + } + + public SquashFsEntryBuilder content(InputStream in) throws IOException { + return content(in, Long.MAX_VALUE); + } + + public SquashFsEntryBuilder content(InputStream in, long maxSize) + throws IOException { + return content(in, maxSize, l -> { + }); + } + + public SquashFsEntryBuilder content(InputStream in, long maxSize, + LongConsumer progress) throws IOException { + long written = 0L; + int c = 0; + int off = 0; + + byte[] blockBuffer = writer.getBlockBuffer(); + + // determine how many bytes to read + int bytesToRead = + (int) Math.min(blockBuffer.length - off, maxSize - written); + while (c >= 0 && bytesToRead > 0) { + + // attempt to read full block + while (bytesToRead > 0 + && (c = in.read(blockBuffer, off, bytesToRead)) >= 0) { + off += c; + written += c; + if (off == blockBuffer.length) { + // write the block + LOG.trace("Writing block of size {}", blockBuffer.length); + DataBlockRef dataBlock = + writer.getDataWriter().write(blockBuffer, 0, blockBuffer.length); + dataBlock(dataBlock); + progress.accept(written); + off = 0; + c = 0; + break; + } + bytesToRead = + (int) Math.min(blockBuffer.length - off, maxSize - written); + } + } + + if (off > 0) { + // write final block + LOG.trace("Writing fragment of size {}", off); + FragmentRef fragment = + writer.getFragmentWriter().write(blockBuffer, 0, off); + fragment(fragment); + progress.accept(written); + off = 0; + } + + LOG.debug("Wrote {} bytes to {}", written, name); + + // set output type to file + if (type == null) { + file(); + } + + // set output file size + if (fileSize == null) { + fileSize(written); + } + + return this; + } + + public SquashFsEntry build() { + if (type == null && hardlinkTarget == null) { + throw new IllegalArgumentException("type not set"); + } + if (uid == null && hardlinkTarget == null) { + throw new IllegalArgumentException("uid not set"); + } + if (gid == null && hardlinkTarget == null) { + throw new IllegalArgumentException("gid not set"); + } + if (permissions == null && hardlinkTarget == null) { + throw new IllegalArgumentException("permissions not set"); + } + if (fileSize == null && type == INodeType.BASIC_FILE) { + throw new IllegalArgumentException("fileSize not set"); + } + if (lastModified == null && hardlinkTarget == null) { + lastModified = (int) (System.currentTimeMillis() / 1000L); + } + + SquashFsEntry entry = new SquashFsEntry( + type, + name, + Optional.ofNullable(uid).orElse((short) 0), + Optional.ofNullable(gid).orElse((short) 0), + Optional.ofNullable(permissions).orElse((short) 0), + major, + minor, + Optional.ofNullable(fileSize).orElse(0L), + 
Optional.ofNullable(lastModified).orElse(0), + symlinkTarget, + hardlinkTarget, + dataBlocks, + fragment, + synthetic); + + writer.getFsTree().add(entry); + + return entry; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsException.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsException.java new file mode 100644 index 00000000000..58a2d4c6824 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsException.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import java.io.IOException; + +public class SquashFsException extends IOException { + + private static final long serialVersionUID = 3728141657958154845L; + + public SquashFsException() { + } + + public SquashFsException(String message) { + super(message); + } + + public SquashFsException(Throwable cause) { + super(cause); + } + + public SquashFsException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsReader.java new file mode 100644 index 00000000000..c0b8113030b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsReader.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
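For illustration, a minimal usage sketch of the entry builder API above; the output file name, ownership and contents are arbitrary example values, and the sketch assumes only the methods shown in this patch:

    import org.apache.hadoop.runc.squashfs.SquashFsWriter;

    import java.io.File;
    import java.nio.charset.StandardCharsets;

    public class EntryBuilderDemo {
      public static void main(String[] args) throws Exception {
        try (SquashFsWriter writer = new SquashFsWriter(new File("image.sqsh"))) {
          // directory entry: type, uid/gid, permissions and mtime are required
          writer.entry("/etc")
              .directory()
              .uid(0).gid(0)
              .permissions((short) 0755)
              .lastModified(System.currentTimeMillis())
              .build();
          // file entry: content() writes the data blocks/fragment and records the size
          writer.entry("/etc/hostname")
              .file()
              .uid(0).gid(0)
              .permissions((short) 0644)
              .lastModified(System.currentTimeMillis())
              .content("demo\n".getBytes(StandardCharsets.UTF_8))
              .build();
          writer.finish();
        }
      }
    }
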
+ */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlockCache; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockCache; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.table.ExportTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.IdTable; + +import java.io.Closeable; +import java.io.DataOutput; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; + +public interface SquashFsReader extends Closeable { + + static SquashFsReader fromFile(File inputFile) + throws SquashFsException, IOException { + return new FileSquashFsReader(0, inputFile); + } + + static SquashFsReader fromFile(int tag, File inputFile) + throws SquashFsException, IOException { + return new FileSquashFsReader(tag, inputFile); + } + + static SquashFsReader fromFile( + int tag, File inputFile, + MetadataBlockCache metadataCache, + DataBlockCache dataCache, + DataBlockCache fragmentCache) throws SquashFsException, IOException { + + return new FileSquashFsReader(tag, inputFile, metadataCache, dataCache, + fragmentCache); + } + + static SquashFsReader fromMappedFile(MappedFile mmap) + throws SquashFsException, IOException { + return new MappedSquashFsReader(0, mmap); + } + + static SquashFsReader fromMappedFile(int tag, MappedFile mmap) + throws SquashFsException, IOException { + return new MappedSquashFsReader(tag, mmap); + } + + static SquashFsReader fromMappedFile(int tag, MappedFile mmap, + MetadataBlockCache metadataCache, + DataBlockCache dataCache, + DataBlockCache fragmentCache) throws SquashFsException, IOException { + return new MappedSquashFsReader(tag, mmap, metadataCache, dataCache, + fragmentCache); + } + + SuperBlock getSuperBlock(); + + IdTable getIdTable(); + + FragmentTable getFragmentTable(); + + ExportTable getExportTable(); + + MetadataBlockReader getMetaReader(); + + DirectoryINode getRootInode() throws IOException, SquashFsException; + + INode findInodeByInodeRef(INodeRef ref) + throws IOException, SquashFsException; + + INode findInodeByDirectoryEntry(DirectoryEntry entry) + throws IOException, SquashFsException; + + INode findInodeByPath(String path) + throws IOException, SquashFsException, FileNotFoundException; + + List<DirectoryEntry> getChildren(INode parent) + throws IOException, SquashFsException; + + long writeFileStream(INode inode, OutputStream out) + throws IOException, SquashFsException; + + long writeFileOut(INode inode, DataOutput out) + throws IOException, SquashFsException; + + int read(INode inode, long fileOffset, byte[] buf, int off, int len) + throws IOException, SquashFsException; + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsTree.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsTree.java new file mode 100644 index 00000000000..76996ba5ece --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsTree.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.inode.INodeType; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.table.ExportTable; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +public class SquashFsTree { + + private final SortedMap<String, SquashFsEntry> map = new TreeMap<>(); + + private final AtomicInteger inodeAssignments = new AtomicInteger(0); + private final SortedMap<Integer, Set<SquashFsEntry>> inodeToEntry = + new TreeMap<>(); + private final MetadataWriter inodeWriter = new MetadataWriter(); + private final MetadataWriter dirWriter = new MetadataWriter(); + private final SortedMap<Integer, MetadataBlockRef> visitedInodes = + new TreeMap<>(); + + private SquashFsEntry root = new SquashFsEntry(); + private MetadataBlockRef rootInodeRef; + + SquashFsTree() { + + } + + void add(SquashFsEntry squashFsEntry) { + if (squashFsEntry.name.isEmpty()) { + if (squashFsEntry.type != INodeType.BASIC_DIRECTORY) { + throw new IllegalArgumentException("Root entry must be a directory"); + } + this.root = squashFsEntry; + } else { + map.put(squashFsEntry.name, squashFsEntry); + } + } + + public SquashFsEntry getRoot() { + return root; + } + + void build() throws SquashFsException, IOException { + + // synthesize missing parents + SortedMap<String, SquashFsEntry> existing = new TreeMap<>(map); + for (Map.Entry<String, SquashFsEntry> squashFsEntry : existing.entrySet()) { + String name = squashFsEntry.getKey(); + String parent = name; + while ((parent = parentName(parent)) != null) { + SquashFsEntry p = map.get(parent); + if (p == null) { + // synthesize an entry + p = new SquashFsEntry(); + p.name = parent; + map.put(parent, p); + } else if (p.type != INodeType.BASIC_DIRECTORY) { + throw new IllegalArgumentException(String.format( + "Parent '%s' of entry '%s' is not a directory", + parent, name)); + } + } + } + + for (Map.Entry<String, SquashFsEntry> squashFsEntry : map.entrySet()) { + String name = squashFsEntry.getKey(); + String hardLinkTarget = squashFsEntry.getValue().hardlinkTarget; + if (hardLinkTarget != null && !map.containsKey(hardLinkTarget)) { + throw new IllegalArgumentException( + String.format("Hardlink target '%s' not found for entry '%s'", + hardLinkTarget, name)); + } + + // assign parent + String parent = parentName(name); + if (parent == null) { + root.children.add(squashFsEntry.getValue()); + squashFsEntry.getValue().parent = root; + } else { + SquashFsEntry parentEntry = map.get(parent); + parentEntry.children.add(squashFsEntry.getValue()); +
squashFsEntry.getValue().parent = parentEntry; + } + } + + // walk tree, sort entries and assign inodes + root.sortChildren(); + + root.assignInodes(map, inodeAssignments); + root.assignHardlinkInodes(map, inodeToEntry); + + root.updateDirectoryLinkCounts(); + root.updateHardlinkInodeCounts(inodeToEntry); + + root.createInodes(); + root.createHardlinkInodes(); + + rootInodeRef = root.writeMetadata(inodeWriter, dirWriter, visitedInodes); + + // make sure all inodes were visited + if (visitedInodes.size() != root.inode.getInodeNumber()) { + throw new SquashFsException( + String.format("BUG: Visited inode count %d != actual inode count %d", + visitedInodes.size(), root.inode.getInodeNumber())); + } + + // make sure all inode numbers exist, from 1 to n + List<Integer> allInodes = + visitedInodes.keySet().stream().collect(Collectors.toList()); + if (allInodes.get(0).intValue() != 1) { + throw new SquashFsException( + String.format("BUG: First inode number %d != 1", + allInodes.get(0).intValue())); + } + if (allInodes.get(allInodes.size() - 1).intValue() != allInodes.size()) { + throw new SquashFsException( + String.format("BUG: Last inode number %d != %d", + allInodes.get(allInodes.size() - 1).intValue(), + allInodes.size())); + } + } + + int getInodeCount() { + return visitedInodes.size(); + } + + List<MetadataBlockRef> saveExportTable(MetadataWriter writer) + throws IOException { + + List<MetadataBlockRef> exportRefs = new ArrayList<>(); + + int index = 0; + for (Map.Entry<Integer, MetadataBlockRef> entry : visitedInodes + .entrySet()) { + if (index % ExportTable.ENTRIES_PER_BLOCK == 0) { + exportRefs.add(writer.getCurrentReference()); + } + MetadataBlockRef metaRef = entry.getValue(); + + long inodeRef = (((long) (metaRef.getLocation() & 0xffffffffL)) << 16) | + (((long) metaRef.getOffset()) & 0xffffL); + + writer.writeLong(inodeRef); + index++; + } + + return exportRefs; + } + + MetadataBlockRef getRootInodeRef() { + return rootInodeRef; + } + + MetadataWriter getINodeWriter() { + return inodeWriter; + } + + MetadataWriter getDirWriter() { + return dirWriter; + } + + private String parentName(String name) { + int slash = name.lastIndexOf('/'); + if (slash <= 0) { + return null; + } + return name.substring(0, slash); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsWriter.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsWriter.java new file mode 100644 index 00000000000..36b77b64e58 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/SquashFsWriter.java @@ -0,0 +1,243 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
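A small worked sketch of the inode-reference packing used by saveExportTable() above (and by the root inode reference stored in the superblock): the metadata block location lands in the upper 48 bits, the offset inside the uncompressed block in the lower 16 bits. The class name and sample values are illustrative only:

    public class INodeRefPackingDemo {

      // same shift/mask arithmetic as SquashFsTree.saveExportTable()
      static long pack(int blockLocation, short offset) {
        return ((blockLocation & 0xffffffffL) << 16) | (offset & 0xffffL);
      }

      public static void main(String[] args) {
        // an inode starting 42 bytes into the metadata block at byte 8192
        System.out.printf("0x%016x%n", pack(8192, (short) 42)); // 0x000000002000002a
      }
    }
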
+ */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlockWriter; +import org.apache.hadoop.runc.squashfs.data.FragmentWriter; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.table.IdTableGenerator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.List; + +public class SquashFsWriter implements Closeable { + + private static final Logger LOG = + LoggerFactory.getLogger(SquashFsWriter.class); + + private final RandomAccessFile raf; + private final IdTableGenerator idGenerator; + private final SuperBlock superBlock; + private final SquashFsTree fsTree; + private final DataBlockWriter dataWriter; + private final FragmentWriter fragmentWriter; + private final byte[] blockBuffer; + + private Integer modificationTime = null; + + public SquashFsWriter(File outputFile) throws SquashFsException, IOException { + raf = new RandomAccessFile(outputFile, "rw"); + writeDummySuperblock(raf); + superBlock = createSuperBlock(); + blockBuffer = createBlockBuffer(superBlock); + idGenerator = createIdTableGenerator(); + fsTree = createSquashFsTree(); + dataWriter = createDataWriter(superBlock, raf); + fragmentWriter = createFragmentWriter(superBlock, raf); + } + + public void setModificationTime(int modificationTime) { + this.modificationTime = modificationTime; + } + + static void writeDummySuperblock(RandomAccessFile raf) throws IOException { + raf.seek(0L); + raf.write(new byte[SuperBlock.SIZE]); + } + + static SuperBlock createSuperBlock() { + return new SuperBlock(); + } + + static byte[] createBlockBuffer(SuperBlock sb) { + return new byte[sb.getBlockSize()]; + } + + static IdTableGenerator createIdTableGenerator() { + IdTableGenerator idGenerator = new IdTableGenerator(); + idGenerator.addUidGid(0); + return idGenerator; + } + + static SquashFsTree createSquashFsTree() { + return new SquashFsTree(); + } + + static DataBlockWriter createDataWriter(SuperBlock sb, RandomAccessFile raf) { + return new DataBlockWriter(raf, sb.getBlockSize()); + } + + static FragmentWriter createFragmentWriter(SuperBlock sb, + RandomAccessFile raf) { + return new FragmentWriter(raf, sb.getBlockSize()); + } + + SuperBlock getSuperBlock() { + return superBlock; + } + + IdTableGenerator getIdGenerator() { + return idGenerator; + } + + DataBlockWriter getDataWriter() { + return dataWriter; + } + + FragmentWriter getFragmentWriter() { + return fragmentWriter; + } + + byte[] getBlockBuffer() { + return blockBuffer; + } + + public SquashFsTree getFsTree() { + return fsTree; + } + + public SquashFsEntryBuilder entry(String name) { + return new SquashFsEntryBuilder(this, name); + } + + public void finish() throws SquashFsException, IOException { + // flush any remaining fragments + fragmentWriter.flush(); + + // build the directory tree + fsTree.build(); + + // save inode table + long inodeTableStart = raf.getFilePointer(); + LOG.debug("Inode table start: {}", inodeTableStart); + fsTree.getINodeWriter().save(raf); + + // save directory table + long dirTableStart = raf.getFilePointer(); + LOG.debug("Directory table start: {}", dirTableStart); + fsTree.getDirWriter().save(raf); + + // build fragment table + long 
fragMetaStart = raf.getFilePointer(); + MetadataWriter fragMetaWriter = new MetadataWriter(); + List<MetadataBlockRef> fragRefs = fragmentWriter.save(fragMetaWriter); + fragMetaWriter.save(raf); + + // save fragment table + long fragTableStart = raf.getFilePointer(); + LOG.debug("Fragment table start: {}", fragTableStart); + for (MetadataBlockRef fragRef : fragRefs) { + long fragTableFileOffset = fragMetaStart + fragRef.getLocation(); + byte[] buf = new byte[8]; + ByteBuffer bb = ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); + bb.putLong(fragTableFileOffset); + raf.write(buf); + } + + // build export table + long exportMetaStart = raf.getFilePointer(); + MetadataWriter exportMetaWriter = new MetadataWriter(); + List<MetadataBlockRef> exportRefs = + fsTree.saveExportTable(exportMetaWriter); + exportMetaWriter.save(raf); + + // write export table + long exportTableStart = raf.getFilePointer(); + LOG.debug("Export table start: {}", exportTableStart); + for (MetadataBlockRef exportRef : exportRefs) { + long exportFileOffset = exportMetaStart + exportRef.getLocation(); + byte[] buf = new byte[8]; + ByteBuffer bb = ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); + bb.putLong(exportFileOffset); + raf.write(buf); + } + + // build ID table + long idMetaStart = raf.getFilePointer(); + MetadataWriter idMetaWriter = new MetadataWriter(); + List<MetadataBlockRef> idRefs = idGenerator.save(idMetaWriter); + idMetaWriter.save(raf); + + MetadataBlockRef rootInodeRef = fsTree.getRootInodeRef(); + LOG.debug("Root inode ref: {}", rootInodeRef); + + // write ID table + long idTableStart = raf.getFilePointer(); + LOG.debug("ID table start: {}", idTableStart); + + for (MetadataBlockRef idRef : idRefs) { + long idFileOffset = idMetaStart + idRef.getLocation(); + byte[] buf = new byte[8]; + ByteBuffer bb = ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); + bb.putLong(idFileOffset); + raf.write(buf); + } + + long archiveSize = raf.getFilePointer(); + LOG.debug("Archive size: {}", archiveSize); + + // pad to 4096 bytes + int padding = (4096 - ((int) (archiveSize % 4096L))) % 4096; + for (int i = 0; i < padding; i++) { + raf.write(0); + } + + long fileSize = raf.getFilePointer(); + LOG.debug("File size: {}", fileSize); + + if (modificationTime == null) { + modificationTime = (int) (System.currentTimeMillis() / 1000L); + } + + // update superblock + superBlock.setInodeCount(fsTree.getInodeCount()); + superBlock.setModificationTime(modificationTime); + superBlock.setFragmentEntryCount(fragmentWriter.getFragmentEntryCount()); + superBlock.setIdCount((short) idGenerator.getIdCount()); + superBlock.setRootInodeRef(rootInodeRef.toINodeRefRaw()); + superBlock.setBytesUsed(archiveSize); + superBlock.setIdTableStart(idTableStart); + superBlock.setInodeTableStart(inodeTableStart); + superBlock.setDirectoryTableStart(dirTableStart); + superBlock.setFragmentTableStart(fragTableStart); + superBlock.setExportTableStart(exportTableStart); + + LOG.debug("Superblock: {}", superBlock); + + // write superblock + raf.seek(0L); + superBlock.writeData(raf); + raf.seek(fileSize); + } + + @Override + public void close() throws IOException { + raf.close(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlock.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlock.java new file mode 100644 index 00000000000..2551e10a69b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlock.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +public class DataBlock { + + byte[] data; + int logicalSize; + int physicalSize; + + public byte[] getData() { + return data; + } + + public int getLogicalSize() { + return logicalSize; + } + + public int getPhysicalSize() { + return physicalSize; + } + + public boolean isSparse() { + return logicalSize > 0 && physicalSize == 0; + } + + public DataBlock(byte[] data, int logicalSize, int physicalSize) { + this.data = data; + this.logicalSize = logicalSize; + this.physicalSize = physicalSize; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockCache.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockCache.java new file mode 100644 index 00000000000..bc7d1e8efeb --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockCache.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + +public class DataBlockCache { + + public static final DataBlockCache NO_CACHE = new DataBlockCache(0); + + private final LruBlockCache cache; + private final int cacheSize; + private volatile long cacheHits = 0L; + private volatile long cacheMisses = 0L; + + public DataBlockCache(int cacheSize) { + this.cache = cacheSize < 1 ? 
null : new LruBlockCache(cacheSize); + this.cacheSize = cacheSize; + } + + public synchronized void put(Key key, DataBlock block) { + if (cache != null) { + cache.put(key, block); + } + } + + public synchronized DataBlock get(Key key) { + if (cache == null) { + cacheMisses++; + return null; + } + + DataBlock block = cache.get(key); + if (block != null) { + cacheHits++; + } else { + cacheMisses++; + } + return block; + } + + public long getCacheHits() { + return cacheHits; + } + + public long getCacheMisses() { + return cacheMisses; + } + + public synchronized int getCacheLoad() { + return cache == null ? 0 : cache.size(); + } + + public void resetStatistics() { + cacheHits = 0L; + cacheMisses = 0L; + } + + public synchronized void clearCache() { + if (cache != null) { + cache.clear(); + } + resetStatistics(); + } + + public static final class Key { + + private final int tag; + private final boolean compressed; + private final long fileOffset; + private final long dataSize; + private final int expectedSize; + + public Key( + int tag, + boolean compressed, + long fileOffset, + int dataSize, + int expectedSize) { + this.tag = tag; + this.compressed = compressed; + this.fileOffset = fileOffset; + this.dataSize = dataSize; + this.expectedSize = expectedSize; + } + + @Override + public int hashCode() { + return Objects.hash(tag, compressed, fileOffset, dataSize, expectedSize); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Key)) { + return false; + } + Key o = (Key) obj; + + return (tag == o.tag) && + (compressed == o.compressed) && + (fileOffset == o.fileOffset) && + (dataSize == o.dataSize) && + (expectedSize == o.expectedSize); + } + + @Override + public String toString() { + return String + .format("%d-%s-%d-%d-%d", tag, compressed, fileOffset, dataSize, + expectedSize); + } + } + + private static class LruBlockCache extends LinkedHashMap<Key, DataBlock> { + + private static final long serialVersionUID = -156607843124781789L; + + private final int cacheSize; + + public LruBlockCache(int cacheSize) { + super(16, 0.75f, true); + this.cacheSize = cacheSize; + } + + @Override + protected boolean removeEldestEntry(Map.Entry<Key, DataBlock> eldest) { + return size() > cacheSize; + } + + } + + @Override + public String toString() { + return String + .format("data-block-cache { capacity=%d, size=%d, hits=%d, misses=%d }", + cacheSize, getCacheLoad(), getCacheHits(), getCacheMisses()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockReader.java new file mode 100644 index 00000000000..a6e562a8749 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockReader.java @@ -0,0 +1,238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
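A minimal sketch of exercising the cache above on its own; the tag, offsets and sizes are arbitrary example values:

    import org.apache.hadoop.runc.squashfs.data.DataBlock;
    import org.apache.hadoop.runc.squashfs.data.DataBlockCache;

    public class DataBlockCacheDemo {
      public static void main(String[] args) {
        DataBlockCache cache = new DataBlockCache(2);   // retain at most 2 blocks (LRU)

        DataBlockCache.Key key =
            new DataBlockCache.Key(0, true, 4096L, 512, 4096);

        cache.get(key);                                  // miss: nothing cached yet
        cache.put(key, new DataBlock(new byte[4096], 4096, 512));
        cache.get(key);                                  // hit

        // prints: data-block-cache { capacity=2, size=1, hits=1, misses=1 }
        System.out.println(cache);
      }
    }
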
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.data.DataBlockCache.Key; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +public class DataBlockReader { + + private static final byte[] EMPTY = new byte[0]; + + public static DataBlock readBlock( + int tag, + RandomAccessFile raf, + SuperBlock sb, + FileINode inode, + int blockNum) throws IOException, SquashFsException { + + return readBlock(tag, raf, sb, inode, blockNum, DataBlockCache.NO_CACHE); + } + + public static DataBlock readBlock( + int tag, + RandomAccessFile raf, + SuperBlock sb, + FileINode inode, + int blockNum, + DataBlockCache cache) throws IOException, SquashFsException { + + int blockCount = inode.getBlockSizes().length; + if (blockNum >= blockCount) { + throw new SquashFsException( + String.format("Attempted to read out of bounds block %d (count = %d)", + blockNum, blockCount)); + } + int blockSize = sb.getBlockSize(); + + int[] blockSizes = inode.getBlockSizes(); + + long blocksStart = inode.getBlocksStart(); + long fileSize = inode.getFileSize(); + long fileOffset = getFileOffset(blocksStart, blockNum, blockSizes); + + int dataSize = blockSizes[blockNum]; + boolean compressed = (dataSize & 0x1000000) == 0; + int actualSize = (dataSize & 0xfffff); + + long expectedSize = blockSize; + + if (blockNum == blockCount - 1 && !inode.isFragmentPresent()) { + expectedSize = fileSize - (blockSize * (blockCount - 1L)); + } + + if (actualSize == 0) { + // sparse block + return new DataBlock(EMPTY, (int) expectedSize, 0); + } + + DataBlockCache.Key key = + new Key(tag, compressed, fileOffset, actualSize, (int) expectedSize); + DataBlock block = cache.get(key); + if (block == null) { + block = readData(sb, raf, compressed, fileOffset, actualSize, + (int) expectedSize); + cache.put(key, block); + } + return block; + } + + public static DataBlock readFragment( + int tag, + RandomAccessFile raf, + SuperBlock sb, + FileINode inode, + FragmentTable fragTable, + int length) throws IOException, SquashFsException { + + return readFragment(tag, raf, sb, inode, fragTable, length, + DataBlockCache.NO_CACHE); + } + + public static DataBlock readFragment( + int tag, + RandomAccessFile raf, + SuperBlock sb, + FileINode inode, + FragmentTable fragTable, + int length, + DataBlockCache cache) throws IOException, SquashFsException { + + FragmentTableEntry fragEntry = + fragTable.getEntry(inode.getFragmentBlockIndex()); + + boolean compressed = fragEntry.isCompressed(); + int dataSize = fragEntry.getDiskSize(); + + long fileOffset = fragEntry.getStart(); + + DataBlockCache.Key key = + new Key(tag, compressed, fileOffset, dataSize, dataSize); + DataBlock fragment = cache.get(key); + if (fragment == null) { + fragment = readData(sb, raf, compressed, fileOffset, dataSize, dataSize); + cache.put(key, fragment); + } + + int offset = inode.getFragmentOffset(); + if (offset + 
length > fragment.getPhysicalSize()) { + throw new SquashFsException(String.format( + "Attempted to read %d bytes from a fragment with only %d bytes remaining", + length, + fragment.getLogicalSize() - offset)); + } + + byte[] data = new byte[length]; + System.arraycopy(fragment.getData(), offset, data, 0, length); + return new DataBlock(data, data.length, data.length); + } + + private static DataBlock readData( + SuperBlock sb, + RandomAccessFile raf, + boolean compressed, + long fileOffset, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + + raf.seek(fileOffset); + + DataBlock data = compressed + ? readCompressed(sb, raf, dataSize, expectedSize) + : readUncompressed(sb, raf, dataSize, expectedSize); + + return data; + } + + private static DataBlock readUncompressed( + SuperBlock sb, + RandomAccessFile raf, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + byte[] data = new byte[dataSize]; + raf.readFully(data); + return new DataBlock(data, expectedSize, data.length); + } + + private static DataBlock readCompressed( + SuperBlock sb, + RandomAccessFile raf, int dataSize, + int expectedSize) throws IOException, SquashFsException { + switch (sb.getCompressionId()) { + case NONE: + throw new SquashFsException( + "Archive claims no compression, but found compressed data"); + case ZLIB: + return readCompressedZlib(sb, raf, dataSize, expectedSize); + default: + throw new UnsupportedOperationException( + String.format("Reading compressed data of type %s not yet supported", + sb.getCompressionId())); + } + } + + private static DataBlock readCompressedZlib( + SuperBlock sb, + RandomAccessFile raf, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + // see if there are compression flags + if (sb.hasFlag(SuperBlockFlag.COMPRESSOR_OPTIONS)) { + throw new UnsupportedOperationException( + "Reading ZLIB compressed data with non-standard options not yet supported"); + } + + byte[] buf = new byte[dataSize]; + raf.readFully(buf); + byte[] data; + + byte[] xfer = new byte[4096]; + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (InflaterInputStream iis = + new InflaterInputStream(bis, new Inflater(), 4096)) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(4096)) { + int c = 0; + while ((c = iis.read(xfer, 0, 4096)) >= 0) { + if (c > 0) { + bos.write(xfer, 0, c); + } + } + data = bos.toByteArray(); + if (data.length > sb.getBlockSize()) { + throw new SquashFsException(String.format( + "Corrupt metadata block: Got size %d (max = %d)", data.length, + sb.getBlockSize())); + } + } + } + } + + return new DataBlock(data, expectedSize, data.length); + } + + static long getFileOffset(long blockStart, int blockNum, int[] blockSizes) { + for (int i = 0; i < blockNum; i++) { + blockStart += (blockSizes[i] & 0xfffff); + } + return blockStart; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockRef.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockRef.java new file mode 100644 index 00000000000..1c0755d83ac --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockRef.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
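A standalone sketch of the block-size word as readBlock() above interprets it: bit 0x1000000 set means the block is stored uncompressed, and the low bits carry the on-disk byte count. The sample value is illustrative:

    public class BlockSizeWordDemo {
      public static void main(String[] args) {
        int sizeField = 0x1000000 | 4096;   // 4096 bytes stored uncompressed

        boolean compressed = (sizeField & 0x1000000) == 0;
        int storedBytes = sizeField & 0xfffff;

        System.out.println(compressed);     // false
        System.out.println(storedBytes);    // 4096
      }
    }
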
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +public class DataBlockRef { + + private final long location; + private final int logicalSize; + private final int physicalSize; + private final boolean compressed; + private final boolean sparse; + + public DataBlockRef(long location, int logicalSize, int physicalSize, + boolean compressed, boolean sparse) { + this.location = location; + this.logicalSize = logicalSize; + this.physicalSize = physicalSize; + this.compressed = compressed; + this.sparse = sparse; + } + + public long getLocation() { + return location; + } + + public int getLogicalSize() { + return logicalSize; + } + + public int getPhysicalSize() { + return physicalSize; + } + + public boolean isCompressed() { + return compressed; + } + + public boolean isSparse() { + return sparse; + } + + public int getInodeSize() { + return (physicalSize & 0xfffff) | (compressed ? 0 : 0x1000000); + } + + @Override + public String toString() { + return String.format( + "data-block-ref { location=%d, logicalSize=%d, physicalSize=%d, compressed=%s, sparse=%s }", + location, logicalSize, physicalSize, compressed, sparse); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockWriter.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockWriter.java new file mode 100644 index 00000000000..44a3f3c5d2e --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/DataBlockWriter.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; + +public class DataBlockWriter { + + private final RandomAccessFile raf; + private final int blockSize; + + public DataBlockWriter(RandomAccessFile raf, int blockSize) { + this.raf = raf; + this.blockSize = blockSize; + } + + public DataBlockRef write( + byte[] data, int offset, int length) throws IOException { + if (length != blockSize) { + throw new IllegalArgumentException( + String.format("Invalid block length %d (expected %d)", + length, blockSize)); + } + + long fileOffset = raf.getFilePointer(); + + if (isSparse(data, offset, length)) { + return new DataBlockRef(fileOffset, length, 0, false, true); + } + + byte[] compressed = compress(data, offset, length); + if (compressed != null) { + raf.write(compressed); + return new DataBlockRef(fileOffset, length, compressed.length, true, + false); + } + + raf.write(data, offset, length); + return new DataBlockRef(fileOffset, length, length, false, false); + } + + private boolean isSparse(byte[] data, int offset, int length) { + int end = offset + length; + for (int i = offset; i < end; i++) { + if (data[i] != 0) { + return false; + } + } + return true; + } + + private byte[] compress( + byte[] data, int offset, int length) throws IOException { + Deflater def = new Deflater(Deflater.BEST_COMPRESSION); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DeflaterOutputStream dos = new DeflaterOutputStream( + bos, def, 4096)) { + dos.write(data, offset, length); + } + byte[] result = bos.toByteArray(); + if (result.length > blockSize) { + return null; + } + return result; + } finally { + def.end(); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentRef.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentRef.java new file mode 100644 index 00000000000..fbba0ec52b1 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentRef.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
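A minimal sketch of the data block writer above in isolation, showing its sparse-block and compress-or-store decisions; the 4096-byte block size and the temp file are arbitrary choices for the example:

    import org.apache.hadoop.runc.squashfs.data.DataBlockRef;
    import org.apache.hadoop.runc.squashfs.data.DataBlockWriter;

    import java.io.File;
    import java.io.RandomAccessFile;
    import java.util.Arrays;

    public class DataBlockWriterDemo {
      public static void main(String[] args) throws Exception {
        File f = File.createTempFile("blocks", ".bin");
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
          DataBlockWriter writer = new DataBlockWriter(raf, 4096);

          byte[] zeros = new byte[4096];      // all zero: recorded as sparse, nothing written
          byte[] text = new byte[4096];
          Arrays.fill(text, (byte) 'a');      // highly repetitive: stored deflate-compressed

          DataBlockRef sparse = writer.write(zeros, 0, zeros.length);
          DataBlockRef packed = writer.write(text, 0, text.length);

          System.out.println(sparse);         // physicalSize=0, sparse=true
          System.out.println(packed);         // physicalSize far below 4096, compressed=true
        }
      }
    }
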
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +public class FragmentRef { + + private int fragmentIndex = -1; + private final int offset; + + public FragmentRef(int offset) { + this.offset = offset; + } + + public int getFragmentIndex() { + return fragmentIndex; + } + + public int getOffset() { + return offset; + } + + public boolean isValid() { + return fragmentIndex >= 0; + } + + void commit(int fragmentIndex) { + this.fragmentIndex = fragmentIndex; + } + + @Override + public String toString() { + return String.format( + "fragment-ref { fragmentIndex=%d, offset=%d }", + fragmentIndex, offset); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentWriter.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentWriter.java new file mode 100644 index 00000000000..2d2f5214d6b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/FragmentWriter.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.runc.squashfs.data;
+
+import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter;
+import org.apache.hadoop.runc.squashfs.table.FragmentTable;
+import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+public class FragmentWriter {
+
+  private final RandomAccessFile raf;
+  private final int blockSize;
+  private final byte[] currentBlock;
+  private final List<FragmentRef> currentFragments = new ArrayList<>();
+  private int currentOffset = 0;
+  private final List<FragmentTableEntry> fragmentEntries = new ArrayList<>();
+
+  public FragmentWriter(RandomAccessFile raf, int blockSize) {
+    this.raf = raf;
+    this.blockSize = blockSize;
+    this.currentBlock = new byte[blockSize];
+  }
+
+  public FragmentRef write(
+      byte[] data, int offset, int length) throws IOException {
+    if (length > blockSize || length <= 0) {
+      throw new IllegalArgumentException(
+          String.format("Invalid fragment length %d (min 1, max %d)",
+              length, blockSize));
+    }
+
+    // not enough room left in the current fragment block; write it out first
+    if (currentOffset + length > currentBlock.length) {
+      flush();
+    }
+
+    System.arraycopy(data, offset, currentBlock, currentOffset, length);
+
+    FragmentRef frag = new FragmentRef(currentOffset);
+    currentFragments.add(frag);
+    currentOffset += length;
+    return frag;
+  }
+
+  public List<FragmentTableEntry> getFragmentEntries() {
+    return fragmentEntries;
+  }
+
+  public List<MetadataBlockRef> save(MetadataWriter writer)
+      throws IOException {
+
+    List<MetadataBlockRef> fragmentRefs = new ArrayList<>();
+
+    for (int i = 0; i < fragmentEntries.size(); i++) {
+      FragmentTableEntry fragment = fragmentEntries.get(i);
+
+      if (i % FragmentTable.ENTRIES_PER_BLOCK == 0) {
+        fragmentRefs.add(writer.getCurrentReference());
+      }
+
+      writer.writeLong(fragment.getStart());
+      writer.writeInt(fragment.getSize());
+      writer.writeInt(0); // placeholder
+    }
+
+    return fragmentRefs;
+  }
+
+  public int getFragmentEntryCount() {
+    return fragmentEntries.size();
+  }
+
+  // number of metadata block references needed to cover the fragment table,
+  // i.e. the entry count divided by ENTRIES_PER_BLOCK, rounded up
+  public int getFragmentTableRefSize() {
+    int entryCount = fragmentEntries.size();
+    return (entryCount / FragmentTable.ENTRIES_PER_BLOCK) +
+        ((entryCount % FragmentTable.ENTRIES_PER_BLOCK == 0) ?
0 : 1); + } + + public void flush() throws IOException { + long fileOffset = raf.getFilePointer(); + + byte[] compressed = null; + int size = 0; + if (currentOffset <= 0) { + return; + } + + compressed = compressData(); + if (compressed == null) { + raf.write(currentBlock, 0, currentOffset); + size = currentOffset; + } else { + raf.write(compressed); + size = compressed.length; + } + + FragmentTableEntry fragEntry = + new FragmentTableEntry(fileOffset, size, compressed != null); + fragmentEntries.add(fragEntry); + + for (FragmentRef frag : currentFragments) { + frag.commit(fragmentEntries.size() - 1); + } + + currentFragments.clear(); + currentOffset = 0; + } + + private byte[] compressData() throws IOException { + Deflater def = new Deflater(Deflater.BEST_COMPRESSION); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DeflaterOutputStream dos = new DeflaterOutputStream( + bos, def, 4096)) { + dos.write(currentBlock, 0, currentOffset); + } + byte[] result = bos.toByteArray(); + if (result.length > currentOffset) { + return null; + } + return result; + } finally { + def.end(); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/MappedDataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/MappedDataBlockReader.java new file mode 100644 index 00000000000..7bd6ffe1f48 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/data/MappedDataBlockReader.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.data.DataBlockCache.Key; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.io.ByteBufferDataInput; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.IOException; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +public class MappedDataBlockReader { + + private static final byte[] EMPTY = new byte[0]; + + public static DataBlock readBlock( + int tag, + MappedFile mmap, + SuperBlock sb, + FileINode inode, + int blockNum) throws IOException, SquashFsException { + + return readBlock(tag, mmap, sb, inode, blockNum, DataBlockCache.NO_CACHE); + } + + public static DataBlock readBlock( + int tag, + MappedFile mmap, + SuperBlock sb, + FileINode inode, + int blockNum, + DataBlockCache cache) throws IOException, SquashFsException { + + int blockCount = inode.getBlockSizes().length; + if (blockNum >= blockCount) { + throw new SquashFsException( + String.format("Attempted to read out of bounds block %d (count = %d)", + blockNum, blockCount)); + } + int blockSize = sb.getBlockSize(); + + int[] blockSizes = inode.getBlockSizes(); + + long blocksStart = inode.getBlocksStart(); + long fileSize = inode.getFileSize(); + long fileOffset = getFileOffset(blocksStart, blockNum, blockSizes); + + int dataSize = blockSizes[blockNum]; + boolean compressed = (dataSize & 0x1000000) == 0; + int actualSize = (dataSize & 0xfffff); + + long expectedSize = blockSize; + + if (blockNum == blockCount - 1 && !inode.isFragmentPresent()) { + expectedSize = fileSize - (blockSize * (blockCount - 1L)); + } + + if (actualSize == 0) { + // sparse block + return new DataBlock(EMPTY, (int) expectedSize, 0); + } + + DataBlockCache.Key key = + new Key(tag, compressed, fileOffset, actualSize, (int) expectedSize); + DataBlock block = cache.get(key); + if (block == null) { + block = readData(sb, mmap, compressed, fileOffset, actualSize, + (int) expectedSize); + cache.put(key, block); + } + return block; + } + + public static DataBlock readFragment( + int tag, + MappedFile mmap, + SuperBlock sb, + FileINode inode, + FragmentTable fragTable, + int length) throws IOException, SquashFsException { + + return readFragment(tag, mmap, sb, inode, fragTable, length, + DataBlockCache.NO_CACHE); + } + + public static DataBlock readFragment( + int tag, + MappedFile mmap, + SuperBlock sb, + FileINode inode, + FragmentTable fragTable, + int length, + DataBlockCache cache) throws IOException, SquashFsException { + + FragmentTableEntry fragEntry = + fragTable.getEntry(inode.getFragmentBlockIndex()); + + boolean compressed = fragEntry.isCompressed(); + int dataSize = fragEntry.getDiskSize(); + + long fileOffset = fragEntry.getStart(); + + DataBlockCache.Key key = + new Key(tag, compressed, fileOffset, dataSize, dataSize); + DataBlock fragment = cache.get(key); + if (fragment == null) { + fragment = readData(sb, mmap, compressed, fileOffset, dataSize, dataSize); + cache.put(key, fragment); + } + + int offset = inode.getFragmentOffset(); + if (offset + 
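+        // bounds check: the requested slice must lie entirely within the
+        // decompressed fragment block before it is copied out below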
length > fragment.getPhysicalSize()) { + throw new SquashFsException( + String.format( + "Attempted to read %d bytes from a fragment with only %d bytes remaining", + length, fragment.getLogicalSize() - offset)); + } + + byte[] data = new byte[length]; + System.arraycopy(fragment.getData(), offset, data, 0, length); + return new DataBlock(data, data.length, data.length); + } + + private static DataBlock readData( + SuperBlock sb, + MappedFile mmap, + boolean compressed, + long fileOffset, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + + DataInput in = new ByteBufferDataInput(mmap.from(fileOffset)); + + DataBlock data = compressed + ? readCompressed(sb, in, dataSize, expectedSize) + : readUncompressed(sb, in, dataSize, expectedSize); + + return data; + } + + private static DataBlock readUncompressed( + SuperBlock sb, + DataInput in, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + byte[] data = new byte[dataSize]; + in.readFully(data); + return new DataBlock(data, expectedSize, data.length); + } + + private static DataBlock readCompressed( + SuperBlock sb, + DataInput in, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + switch (sb.getCompressionId()) { + case NONE: + throw new SquashFsException( + "Archive claims no compression, but found compressed data"); + case ZLIB: + return readCompressedZlib(sb, in, dataSize, expectedSize); + default: + throw new UnsupportedOperationException( + String.format("Reading compressed data of type %s not yet supported", + sb.getCompressionId())); + } + } + + private static DataBlock readCompressedZlib( + SuperBlock sb, + DataInput in, + int dataSize, + int expectedSize) throws IOException, SquashFsException { + // see if there are compression flags + if (sb.hasFlag(SuperBlockFlag.COMPRESSOR_OPTIONS)) { + throw new UnsupportedOperationException( + "Reading ZLIB compressed data with non-standard options not yet supported"); + } + + byte[] buf = new byte[dataSize]; + in.readFully(buf); + byte[] data; + + byte[] xfer = new byte[4096]; + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (InflaterInputStream iis = + new InflaterInputStream(bis, new Inflater(), 4096)) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(4096)) { + int c = 0; + while ((c = iis.read(xfer, 0, 4096)) >= 0) { + if (c > 0) { + bos.write(xfer, 0, c); + } + } + data = bos.toByteArray(); + if (data.length > sb.getBlockSize()) { + throw new SquashFsException(String.format( + "Corrupt metadata block: Got size %d (max = %d)", data.length, + sb.getBlockSize())); + } + } + } + } + + return new DataBlock(data, expectedSize, data.length); + } + + static long getFileOffset(long blockStart, int blockNum, int[] blockSizes) { + for (int i = 0; i < blockNum; i++) { + blockStart += (blockSizes[i] & 0xfffff); + } + return blockStart; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryBuilder.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryBuilder.java new file mode 100644 index 00000000000..f322f6a90e3 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryBuilder.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.runc.squashfs.directory;
+
+import org.apache.hadoop.runc.squashfs.inode.INodeType;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+
+public class DirectoryBuilder {
+
+  boolean dirty = false;
+  final List<Entry> entries = new ArrayList<>();
+  final List<DirectoryElement> elements = new ArrayList<>();
+
+  public void add(
+      String name,
+      int startBlock,
+      int inodeNumber,
+      short offset,
+      INodeType type) {
+    dirty = true;
+    byte[] nameBytes = name.getBytes(StandardCharsets.ISO_8859_1);
+    if (nameBytes.length < 1) {
+      throw new IllegalArgumentException("Filename is empty");
+    }
+    if (nameBytes.length > ((int) DirectoryEntry.MAX_FILENAME_LENGTH)) {
+      throw new IllegalArgumentException(String.format(
+          "Filename '%s' too long (%d bytes, max %d)",
+          name, nameBytes.length, DirectoryEntry.MAX_FILENAME_LENGTH));
+    }
+    entries.add(new Entry(
+        startBlock, inodeNumber, offset, type.dirValue(), nameBytes));
+  }
+
+  public int getStructureSize() {
+    build();
+    int size = 0;
+    for (DirectoryElement element : elements) {
+      size += element.getStructureSize();
+    }
+    return size;
+  }
+
+  void build() {
+    if (!dirty) {
+      return;
+    }
+    elements.clear();
+
+    DirectoryHeader header = null;
+    for (Entry entry : entries) {
+      header = advance(header, entry);
+      header.count++;
+
+      DirectoryEntry dent = new DirectoryEntry();
+      dent.header = header;
+      dent.offset = entry.offset;
+      dent.inodeNumberDelta = (short) (entry.inodeNumber - header.inodeNumber);
+      dent.name = entry.name;
+      dent.type = entry.type;
+      dent.size = (short) (entry.name.length - 1);
+
+      elements.add(dent);
+    }
+    dirty = false;
+  }
+
+  public void write(MetadataWriter out) throws IOException {
+    build();
+    for (DirectoryElement element : elements) {
+      element.writeData(out);
+    }
+  }
+
+  // Reuse the current directory header only while successive entries share the
+  // same inode start block, keep a non-negative inode-number delta of at most
+  // 0x7fff, and the header still has room; otherwise start a new header.
+  private DirectoryHeader advance(DirectoryHeader header, Entry entry) {
+    if ((header != null) &&
+        (header.startBlock == entry.startBlock) &&
+        (entry.inodeNumber >= header.inodeNumber) &&
+        (entry.inodeNumber <= (header.inodeNumber + 0x7fff)) &&
+        (header.count < (DirectoryHeader.MAX_DIR_ENTRIES - 1))) {
+      return header;
+    }
+
+    header = new DirectoryHeader();
+    header.count = -1; // stored count is one less than the number of entries
+    header.startBlock = entry.startBlock;
+    header.inodeNumber = entry.inodeNumber;
+    elements.add(header);
+    return header;
+  }
+
+  static class Entry {
+    int startBlock;
+    int inodeNumber;
+    byte[] name;
+    short offset;
+    short type;
+
+    Entry(int startBlock, int inodeNumber, short offset, short type,
+        byte[] name) {
+      this.startBlock = startBlock;
+      this.inodeNumber = inodeNumber;
+      this.offset = offset;
+      this.type = type;
+      this.name = name;
+    }
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryElement.java
b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryElement.java new file mode 100644 index 00000000000..24a848621ac --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryElement.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.directory; + +import java.io.DataOutput; +import java.io.IOException; + +public interface DirectoryElement { + + int getStructureSize(); + + void writeData(DataOutput out) throws IOException; + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryEntry.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryEntry.java new file mode 100644 index 00000000000..efb30be1712 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryEntry.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.directory; + +import org.apache.hadoop.runc.squashfs.SquashFsException; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class DirectoryEntry implements DirectoryElement { + + private static final byte[] EMPTY = new byte[0]; + + public static final short MAX_FILENAME_LENGTH = 256; + + protected DirectoryHeader header; + + protected short offset; // offset into inode block where data starts + protected short inodeNumberDelta; // amount to add to header inodeNumber + protected short type; // inode type + protected short size; // size of name (1 less than actual size) + protected byte[] name = EMPTY; // filename (not null terminated) + + public DirectoryHeader getHeader() { + return header; + } + + public short getOffset() { + return offset; + } + + public short getInodeNumberDelta() { + return inodeNumberDelta; + } + + public short getType() { + return type; + } + + public short getSize() { + return size; + } + + public byte[] getName() { + return name; + } + + public String getNameAsString() { + return new String(name, StandardCharsets.ISO_8859_1); + } + + public int getStructureSize() { + return 8 + name.length; + } + + public static DirectoryEntry read(DirectoryHeader header, DataInput in) + throws SquashFsException, IOException { + DirectoryEntry entry = new DirectoryEntry(); + entry.readData(header, in); + return entry; + } + + public void readData(DirectoryHeader header, DataInput in) + throws SquashFsException, IOException { + this.header = header; + offset = in.readShort(); + inodeNumberDelta = in.readShort(); + type = in.readShort(); + size = (short) (in.readShort() & 0x7fff); + if (size + 1 > MAX_FILENAME_LENGTH) { + throw new SquashFsException(String.format( + "Invalid directory entry: Found filename of length %d (max = %d)%n%s", + size + 1, + MAX_FILENAME_LENGTH, + this)); + } + name = new byte[size + 1]; + in.readFully(name); + } + + @Override + public void writeData(DataOutput out) throws IOException { + out.writeShort(offset); + out.writeShort(inodeNumberDelta); + out.writeShort(type); + out.writeShort(size); + out.write(name); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("directory-entry {%n")); + int width = 18; + dumpBin(buf, width, "offset", offset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "inodeNumberDelta", inodeNumberDelta, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "type", type, DECIMAL, UNSIGNED); + dumpBin(buf, width, "size", size, DECIMAL, UNSIGNED); + dumpBin(buf, width, "name", name, 0, name.length, 16, 2); + buf.append("}"); + return buf.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryHeader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryHeader.java new file mode 100644 index 00000000000..279cf0efb52 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/directory/DirectoryHeader.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.directory; + +import org.apache.hadoop.runc.squashfs.SquashFsException; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class DirectoryHeader implements DirectoryElement { + + public static final short MAX_DIR_ENTRIES = 256; + + protected int count; // number of entries (1 less than actual length) + protected int startBlock; // starting inode block + protected int inodeNumber; // starting inode number + + public int getCount() { + return count; + } + + public int getStartBlock() { + return startBlock; + } + + public int getInodeNumber() { + return inodeNumber; + } + + public int getStructureSize() { + return 12; + } + + public static DirectoryHeader read(DataInput in) + throws SquashFsException, IOException { + DirectoryHeader entry = new DirectoryHeader(); + entry.readData(in); + return entry; + } + + public void readData(DataInput in) throws SquashFsException, IOException { + count = in.readInt(); + startBlock = in.readInt(); + inodeNumber = in.readInt(); + if (count + 1 > MAX_DIR_ENTRIES) { + throw new SquashFsException(String + .format("Invalid directory header: found %d entries (max = %d)%n%s", + count + 1, MAX_DIR_ENTRIES, this)); + } + } + + @Override + public void writeData(DataOutput out) throws IOException { + out.writeInt(count); + out.writeInt(startBlock); + out.writeInt(inodeNumber); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("directory-header {%n")); + int width = 13; + dumpBin(buf, width, "count", count, DECIMAL, UNSIGNED); + dumpBin(buf, width, "startBlock", startBlock, DECIMAL, UNSIGNED); + dumpBin(buf, width, "inodeNumber", inodeNumber, DECIMAL, UNSIGNED); + buf.append("}"); + return buf.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicDeviceINode.java new file mode 100644 index 00000000000..6c9ac137bee --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicDeviceINode.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +abstract public class AbstractBasicDeviceINode extends AbstractINode + implements DeviceINode { + + int nlink = 1; + int device; + + @Override + public final int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public final int getDevice() { + return device; + } + + @Override + public void setDevice(int device) { + this.device = device; + } + + @Override + public int getXattrIndex() { + return XATTR_NOT_PRESENT; + } + + @Override + public void setXattrIndex(int xattrIndex) { + if (xattrIndex != XATTR_NOT_PRESENT) { + throw new IllegalArgumentException( + "Basic device inodes do not support extended attributes"); + } + } + + @Override + public boolean isXattrPresent() { + return false; + } + + @Override + protected final int getChildSerializedSize() { + return 8; + } + + @Override + protected final void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + device = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(device); + } + + @Override + protected final int getPreferredDumpWidth() { + return 8; + } + + @Override + protected final void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "device", device, DECIMAL, UNSIGNED); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicIpcINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicIpcINode.java new file mode 100644 index 00000000000..d365b9779a0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractBasicIpcINode.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +abstract public class AbstractBasicIpcINode extends AbstractINode + implements IpcINode { + + public static final int XATTR_NOT_PRESENT = 0xffff_ffff; + + int nlink = 1; + + @Override + public final int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public int getXattrIndex() { + return XATTR_NOT_PRESENT; + } + + @Override + public void setXattrIndex(int xattrIndex) { + if (xattrIndex != XATTR_NOT_PRESENT) { + throw new IllegalArgumentException( + "Basic ipc inodes do not support extended attributes"); + } + } + + @Override + public boolean isXattrPresent() { + return false; + } + + @Override + protected final int getChildSerializedSize() { + return 4; + } + + @Override + protected final void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + } + + @Override + protected final int getPreferredDumpWidth() { + return 8; + } + + @Override + protected final void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedDeviceINode.java new file mode 100644 index 00000000000..d577d81fa70 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedDeviceINode.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +abstract public class AbstractExtendedDeviceINode extends AbstractINode + implements ExtendedDeviceINode { + + int nlink = 1; + int device; + int xattrIndex = XATTR_NOT_PRESENT; + + @Override + public final int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public final int getDevice() { + return device; + } + + @Override + public void setDevice(int device) { + this.device = device; + } + + @Override + public int getXattrIndex() { + return xattrIndex; + } + + @Override + public void setXattrIndex(int xattrIndex) { + this.xattrIndex = xattrIndex; + } + + @Override + public boolean isXattrPresent() { + return xattrIndex != XATTR_NOT_PRESENT; + } + + @Override + protected final int getChildSerializedSize() { + return 12; + } + + @Override + protected final void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + device = in.readInt(); + xattrIndex = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(device); + out.writeInt(xattrIndex); + } + + @Override + protected final int getPreferredDumpWidth() { + return 12; + } + + @Override + protected final void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "device", device, DECIMAL, UNSIGNED); + dumpBin(buf, width, "xattrIndex", xattrIndex, DECIMAL, UNSIGNED); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedIpcINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedIpcINode.java new file mode 100644 index 00000000000..364181aad44 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractExtendedIpcINode.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +abstract public class AbstractExtendedIpcINode extends AbstractINode + implements ExtendedIpcINode { + + int nlink = 1; + int xattrIndex = XATTR_NOT_PRESENT; + + @Override + public final int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public int getXattrIndex() { + return xattrIndex; + } + + @Override + public void setXattrIndex(int xattrIndex) { + this.xattrIndex = xattrIndex; + } + + @Override + public boolean isXattrPresent() { + return xattrIndex != XATTR_NOT_PRESENT; + } + + @Override + protected final int getChildSerializedSize() { + return 8; + } + + @Override + protected final void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + xattrIndex = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(xattrIndex); + } + + @Override + protected final int getPreferredDumpWidth() { + return 12; + } + + @Override + protected final void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "xattrIndex", xattrIndex, DECIMAL, UNSIGNED); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractINode.java new file mode 100644 index 00000000000..ee140fa9096 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/AbstractINode.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.OCTAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNIX_TIMESTAMP; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +abstract public class AbstractINode implements INode { + + short permissions; + short uidIdx; + short gidIdx; + int modifiedTime; + int inodeNumber; + + @Override + public final void copyTo(INode dest) { + dest.setPermissions(permissions); + dest.setUidIdx(uidIdx); + dest.setGidIdx(gidIdx); + dest.setModifiedTime(modifiedTime); + dest.setInodeNumber(inodeNumber); + } + + @Override + public final int getSerializedSize() { + return 16 + getChildSerializedSize(); + } + + @Override + public short getPermissions() { + return permissions; + } + + @Override + public void setPermissions(short permissions) { + this.permissions = permissions; + } + + @Override + public short getUidIdx() { + return uidIdx; + } + + @Override + public void setUidIdx(short uidIdx) { + this.uidIdx = uidIdx; + } + + @Override + public short getGidIdx() { + return gidIdx; + } + + @Override + public void setGidIdx(short gidIdx) { + this.gidIdx = gidIdx; + } + + @Override + public int getModifiedTime() { + return modifiedTime; + } + + @Override + public void setModifiedTime(int modifiedTime) { + this.modifiedTime = modifiedTime; + } + + @Override + public int getInodeNumber() { + return inodeNumber; + } + + @Override + public void setInodeNumber(int inodeNumber) { + this.inodeNumber = inodeNumber; + } + + abstract protected int getChildSerializedSize(); + + @Override + public final void readData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + permissions = in.readShort(); + uidIdx = in.readShort(); + gidIdx = in.readShort(); + modifiedTime = in.readInt(); + inodeNumber = in.readInt(); + + readExtraData(sb, in); + } + + @Override + public final void writeData(MetadataWriter out) throws IOException { + out.writeShort(getInodeType().value()); + out.writeShort(permissions); + out.writeShort(uidIdx); + out.writeShort(gidIdx); + out.writeInt(modifiedTime); + out.writeInt(inodeNumber); + + writeExtraData(out); + } + + abstract protected String getName(); + + abstract public INodeType getInodeType(); + + abstract protected void writeExtraData(MetadataWriter out) throws IOException; + + abstract protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException; + + abstract protected int getPreferredDumpWidth(); + + abstract protected void dumpProperties(StringBuilder buf, int width); + + @Override + public String toString() { + int width = Math.max(21, getPreferredDumpWidth()); + INodeType it = getInodeType(); + + StringBuilder buf = new StringBuilder(); + buf.append(String.format("%s {%n", getName())); + dumpBin(buf, width, "inodeType", it.value(), DECIMAL, UNSIGNED); + dumpBin(buf, width, "inodeType (decoded)", it.name()); + dumpBin(buf, width, "permissions", permissions, OCTAL, DECIMAL, UNSIGNED); + dumpBin(buf, width, "uidIdx", uidIdx, DECIMAL, UNSIGNED); + dumpBin(buf, 
width, "gidIdx", gidIdx, DECIMAL, UNSIGNED); + dumpBin(buf, width, "modifiedTime", modifiedTime, DECIMAL, UNSIGNED, + UNIX_TIMESTAMP); + dumpBin(buf, width, "inodeNumber", inodeNumber, DECIMAL, UNSIGNED); + dumpProperties(buf, width); + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicBlockDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicBlockDeviceINode.java new file mode 100644 index 00000000000..92aa9c6667f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicBlockDeviceINode.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class BasicBlockDeviceINode extends AbstractBasicDeviceINode + implements BlockDeviceINode { + + static BlockDeviceINode simplify(BlockDeviceINode src) { + if (src instanceof BasicBlockDeviceINode) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicBlockDeviceINode dest = new BasicBlockDeviceINode(); + src.copyTo(dest); + + dest.setNlink(src.getNlink()); + dest.setDevice(src.getDevice()); + + return dest; + } + + @Override + protected String getName() { + return "basic-block-dev-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_BLOCK_DEVICE; + } + + @Override + public BlockDeviceINode simplify() { + return this; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicCharDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicCharDeviceINode.java new file mode 100644 index 00000000000..267c9dda830 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicCharDeviceINode.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class BasicCharDeviceINode extends AbstractBasicDeviceINode + implements CharDeviceINode { + + static CharDeviceINode simplify(CharDeviceINode src) { + if (src instanceof BasicCharDeviceINode) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicCharDeviceINode dest = new BasicCharDeviceINode(); + src.copyTo(dest); + + dest.setNlink(src.getNlink()); + dest.setDevice(src.getDevice()); + + return dest; + } + + @Override + protected String getName() { + return "basic-char-dev-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_CHAR_DEVICE; + } + + @Override + public CharDeviceINode simplify() { + return this; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicDirectoryINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicDirectoryINode.java new file mode 100644 index 00000000000..7311408c0b7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicDirectoryINode.java @@ -0,0 +1,213 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class BasicDirectoryINode extends AbstractINode + implements DirectoryINode { + + int startBlock; + int nlink = 1; + short fileSize; // 3 + # of uncompressed bytes in directory table + short offset; + int parentInodeNumber; + + static DirectoryINode simplify(DirectoryINode src) { + if (src instanceof BasicDirectoryINode) { + return src; + } + + if (src.getFileSize() > 0xffff) { + return src; + } + + if (src.isIndexPresent()) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicDirectoryINode dest = new BasicDirectoryINode(); + src.copyTo(dest); + + dest.setStartBlock(src.getStartBlock()); + dest.setNlink(src.getNlink()); + dest.setFileSize(src.getFileSize()); + dest.setOffset(src.getOffset()); + dest.setParentInodeNumber(src.getParentInodeNumber()); + + return dest; + } + + @Override + public int getStartBlock() { + return startBlock; + } + + @Override + public void setStartBlock(int startBlock) { + this.startBlock = startBlock; + } + + @Override + public int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public int getFileSize() { + return fileSize & 0xffff; + } + + @Override + public void setFileSize(int fileSize) { + if (fileSize >= 0xffff) { + throw new IllegalArgumentException( + "Basic directory inodes do not support filesizes > 64K"); + } + this.fileSize = (short) (fileSize & 0xffff); + } + + @Override + public short getOffset() { + return offset; + } + + @Override + public void setOffset(short offset) { + this.offset = offset; + } + + @Override + public int getParentInodeNumber() { + return parentInodeNumber; + } + + @Override + public void setParentInodeNumber(int parentInodeNumber) { + this.parentInodeNumber = parentInodeNumber; + } + + @Override + public short getIndexCount() { + return 0; + } + + @Override + public void setIndexCount(short indexCount) { + if (indexCount != (short) 0) { + throw new IllegalArgumentException( + "Basic directory inodes do not support indexes"); + } + } + + @Override + public boolean isIndexPresent() { + return false; + } + + @Override + public int getXattrIndex() { + return XATTR_NOT_PRESENT; + } + + @Override + public void setXattrIndex(int xattrIndex) { + if (xattrIndex != XATTR_NOT_PRESENT) { + throw new IllegalArgumentException( + "Basic directory inodes do not support extended attributes"); + } + } + + @Override + public boolean isXattrPresent() { + return false; + } + + @Override + protected int getChildSerializedSize() { + return 16; + } + + @Override + protected String getName() { + return "basic-directory-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_DIRECTORY; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + startBlock = in.readInt(); + nlink = in.readInt(); + fileSize = in.readShort(); + offset = in.readShort(); + parentInodeNumber = in.readInt(); + } + + @Override + protected void 
writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(startBlock); + out.writeInt(nlink); + out.writeShort(fileSize); + out.writeShort(offset); + out.writeInt(parentInodeNumber); + } + + @Override + protected int getPreferredDumpWidth() { + return 19; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "startBlock", startBlock, DECIMAL, UNSIGNED); + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fileSize", fileSize, DECIMAL, UNSIGNED); + dumpBin(buf, width, "offset", offset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "parentInodeNumber", parentInodeNumber, DECIMAL, + UNSIGNED); + } + + @Override + public DirectoryINode simplify() { + return this; + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFifoINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFifoINode.java new file mode 100644 index 00000000000..5f91ca5927b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFifoINode.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class BasicFifoINode extends AbstractBasicIpcINode implements FifoINode { + + static FifoINode simplify(FifoINode src) { + if (src instanceof BasicFifoINode) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicFifoINode dest = new BasicFifoINode(); + src.copyTo(dest); + + dest.setNlink(src.getNlink()); + return dest; + } + + @Override + protected String getName() { + return "basic-fifo-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_FIFO; + } + + @Override + public FifoINode simplify() { + return this; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFileINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFileINode.java new file mode 100644 index 00000000000..13b06aa5a02 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicFileINode.java @@ -0,0 +1,270 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class BasicFileINode extends AbstractINode implements FileINode { + + public static final long MAX_BLOCKS_START = 0xffff_ffffL; + public static final long MAX_FILE_SIZE = 0xffff_ffffL; + + private static final int[] EMPTY = new int[0]; + + int blocksStart; + int fragmentBlockIndex = FRAGMENT_BLOCK_INDEX_NONE; + int fragmentOffset = 0; + int fileSize; + int[] blockSizes = EMPTY; + + static FileINode simplify(FileINode src) { + + if (src instanceof BasicFileINode) { + return src; + } + + if (src.getBlocksStart() > MAX_BLOCKS_START) { + return src; + } + + if (src.getFileSize() > MAX_FILE_SIZE) { + return src; + } + + if (src.getNlink() > 1) { + return src; + } + + if (src.isSparseBlockPresent()) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicFileINode dest = new BasicFileINode(); + src.copyTo(dest); + + dest.setBlocksStart(src.getBlocksStart()); + dest.setFragmentBlockIndex(src.getFragmentBlockIndex()); + dest.setFragmentOffset(src.getFragmentOffset()); + dest.setFileSize(src.getFileSize()); + dest.setBlockSizes(src.getBlockSizes()); + + return dest; + } + + @Override + public long getBlocksStart() { + return blocksStart & MAX_BLOCKS_START; + } + + @Override + public void setBlocksStart(long blocksStart) { + if (blocksStart > MAX_BLOCKS_START) { + throw new IllegalArgumentException( + "Basic file inodes do not support blocks starting > 4G"); + } + this.blocksStart = (int) (blocksStart & MAX_BLOCKS_START); + } + + @Override + public int getFragmentBlockIndex() { + return fragmentBlockIndex; + } + + @Override + public void setFragmentBlockIndex(int fragmentBlockIndex) { + this.fragmentBlockIndex = fragmentBlockIndex; + } + + @Override + public int getFragmentOffset() { + return fragmentOffset; + } + + @Override + public void setFragmentOffset(int fragmentOffset) { + this.fragmentOffset = fragmentOffset; + } + + ; + + @Override + public long getFileSize() { + return fileSize & MAX_FILE_SIZE; + } + + @Override + public void setFileSize(long fileSize) { + if (fileSize > MAX_FILE_SIZE) { + throw new IllegalArgumentException( + "Basic file inodes do not support size > 4G"); + } + + this.fileSize = (int) (fileSize & MAX_FILE_SIZE); + } + + @Override + public boolean isFragmentPresent() { + return fragmentBlockIndex != FRAGMENT_BLOCK_INDEX_NONE; + } + + @Override + public long getSparse() { + return 0L; + } + + @Override + public void setSparse(long sparse) { + if (sparse != 0L) { + throw new IllegalArgumentException( + "Basic file inodes do not support sparse blocks"); + } + } + + @Override + public int getNlink() { 
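+    // Basic file inodes carry no link-count field; this implementation always
+    // reports a single link, and simplify() keeps multi-link files in their
+    // extended form.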
+ return 1; + } + + @Override + public void setNlink(int nlink) { + if (nlink > 1) { + throw new IllegalArgumentException( + "Basic file inodes do not support multiple links"); + } + } + + @Override + public int getXattrIndex() { + return XATTR_NOT_PRESENT; + } + + @Override + public void setXattrIndex(int xattrIndex) { + if (xattrIndex != XATTR_NOT_PRESENT) { + throw new IllegalArgumentException( + "Basic file inodes do not support extended attributes"); + } + } + + @Override + public boolean isXattrPresent() { + return false; + } + + @Override + public boolean isSparseBlockPresent() { + return false; + } + + @Override + public int[] getBlockSizes() { + return blockSizes; + } + + @Override + public void setBlockSizes(int[] blockSizes) { + this.blockSizes = blockSizes; + } + + @Override + protected int getChildSerializedSize() { + return 16 + (blockSizes.length * 4); + } + + private int fullBlockCount(SuperBlock sb) { + int blockSize = sb.getBlockSize(); + // treat the stored 32-bit size as unsigned so files between 2 GB and 4 GB + // still yield a valid block count + long size = getFileSize(); + int blockCount = (int) (size / blockSize); + if (!isFragmentPresent() && (size % blockSize != 0)) { + blockCount++; + } + return blockCount; + } + + @Override + protected String getName() { + return "basic-file-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_FILE; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + blocksStart = in.readInt(); + fragmentBlockIndex = in.readInt(); + fragmentOffset = in.readInt(); + fileSize = in.readInt(); + int blockCount = fullBlockCount(sb); + blockSizes = new int[blockCount]; + for (int i = 0; i < blockCount; i++) { + blockSizes[i] = in.readInt(); + } + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(blocksStart); + out.writeInt(fragmentBlockIndex); + out.writeInt(fragmentOffset); + out.writeInt(fileSize); + for (int bs : blockSizes) { + out.writeInt(bs); + } + } + + @Override + protected int getPreferredDumpWidth() { + return 20; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "blocksStart", blocksStart, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fragmentBlockIndex", fragmentBlockIndex, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "fragmentOffset", fragmentOffset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fileSize", fileSize, DECIMAL, UNSIGNED); + dumpBin(buf, width, "blocks", blockSizes.length, DECIMAL, UNSIGNED); + for (int i = 0; i < blockSizes.length; i++) { + dumpBin(buf, width, String.format("blockSizes[%d]", i), blockSizes[i], + DECIMAL, UNSIGNED); + } + } + + @Override + public FileINode simplify() { + return this; + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSocketINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSocketINode.java new file mode 100644 index 00000000000..dd09740736c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSocketINode.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class BasicSocketINode extends AbstractBasicIpcINode + implements SocketINode { + + static SocketINode simplify(SocketINode src) { + if (src instanceof BasicSocketINode) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicSocketINode dest = new BasicSocketINode(); + src.copyTo(dest); + + dest.setNlink(src.getNlink()); + return dest; + } + + @Override + protected String getName() { + return "basic-socket-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_SOCKET; + } + + @Override + public SocketINode simplify() { + return this; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSymlinkINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSymlinkINode.java new file mode 100644 index 00000000000..d978c28db42 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BasicSymlinkINode.java @@ -0,0 +1,141 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class BasicSymlinkINode extends AbstractINode implements SymlinkINode { + + private static final byte[] EMPTY = new byte[0]; + + int nlink = 1; + byte[] targetPath = EMPTY; + + static SymlinkINode simplify(SymlinkINode src) { + if (src instanceof BasicSymlinkINode) { + return src; + } + + if (src.isXattrPresent()) { + return src; + } + + BasicSymlinkINode dest = new BasicSymlinkINode(); + src.copyTo(dest); + dest.setNlink(src.getNlink()); + dest.setTargetPath(src.getTargetPath()); + + return dest; + } + + @Override + public int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public byte[] getTargetPath() { + return targetPath; + } + + @Override + public void setTargetPath(byte[] targetPath) { + this.targetPath = (targetPath == null) ? EMPTY : targetPath; + } + + @Override + public int getXattrIndex() { + return XATTR_NOT_PRESENT; + } + + @Override + public void setXattrIndex(int xattrIndex) { + if (xattrIndex != XATTR_NOT_PRESENT) { + throw new IllegalArgumentException( + "Basic symlink inodes do not support extended attributes"); + } + } + + @Override + public boolean isXattrPresent() { + return false; + } + + @Override + protected int getChildSerializedSize() { + return 8 + targetPath.length; + } + + @Override + protected String getName() { + return "basic-symlink-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.BASIC_SYMLINK; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + int targetSize = in.readInt(); + targetPath = new byte[targetSize]; + in.readFully(targetPath); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(targetPath.length); + out.write(targetPath); + } + + @Override + protected int getPreferredDumpWidth() { + return 12; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "targetSize", targetPath.length, DECIMAL, UNSIGNED); + dumpBin(buf, width, "targetPath", targetPath, 0, targetPath.length, 16, 2); + } + + @Override + public SymlinkINode simplify() { + return this; + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BlockDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BlockDeviceINode.java new file mode 100644 index 00000000000..97af0129d24 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/BlockDeviceINode.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface BlockDeviceINode extends DeviceINode { + + BlockDeviceINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/CharDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/CharDeviceINode.java new file mode 100644 index 00000000000..326afb54513 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/CharDeviceINode.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface CharDeviceINode extends DeviceINode { + + CharDeviceINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DeviceINode.java new file mode 100644 index 00000000000..85f0df95c1d --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DeviceINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface DeviceINode extends INode { + + int getNlink(); + + void setNlink(int nlink); + + int getDevice(); + + void setDevice(int device); + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + + DeviceINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DirectoryINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DirectoryINode.java new file mode 100644 index 00000000000..c31e76966b8 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/DirectoryINode.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface DirectoryINode extends INode { + + int getStartBlock(); + + void setStartBlock(int startBlock); + + int getNlink(); + + void setNlink(int nlik); + + int getFileSize(); + + void setFileSize(int value); + + short getOffset(); + + void setOffset(short offset); + + int getParentInodeNumber(); + + void setParentInodeNumber(int parentInodeNumber); + + short getIndexCount(); + + void setIndexCount(short indexCount); + + boolean isIndexPresent(); + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + + DirectoryINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedBlockDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedBlockDeviceINode.java new file mode 100644 index 00000000000..ca0aa49b149 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedBlockDeviceINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class ExtendedBlockDeviceINode extends AbstractExtendedDeviceINode + implements BlockDeviceINode { + + @Override + protected String getName() { + return "extended-block-dev-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_BLOCK_DEVICE; + } + + @Override + public BlockDeviceINode simplify() { + return BasicBlockDeviceINode.simplify(this); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedCharDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedCharDeviceINode.java new file mode 100644 index 00000000000..e8d082c0506 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedCharDeviceINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class ExtendedCharDeviceINode extends AbstractExtendedDeviceINode + implements CharDeviceINode { + + @Override + protected String getName() { + return "extended-char-dev-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_CHAR_DEVICE; + } + + @Override + public CharDeviceINode simplify() { + return BasicCharDeviceINode.simplify(this); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDeviceINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDeviceINode.java new file mode 100644 index 00000000000..7917b871309 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDeviceINode.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface ExtendedDeviceINode extends DeviceINode { + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDirectoryINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDirectoryINode.java new file mode 100644 index 00000000000..65a8d995aba --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedDirectoryINode.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class ExtendedDirectoryINode extends AbstractINode + implements DirectoryINode { + + int nlink = 1; + int fileSize; // 3 + # of uncompressed bytes in directory table + int startBlock; + int parentInodeNumber; + short indexCount; + short offset; + int xattrIndex = XATTR_NOT_PRESENT; + + @Override + public int getStartBlock() { + return startBlock; + } + + @Override + public void setStartBlock(int startBlock) { + this.startBlock = startBlock; + } + + @Override + public int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public int getFileSize() { + return fileSize; + } + + @Override + public void setFileSize(int fileSize) { + this.fileSize = fileSize; + } + + @Override + public short getOffset() { + return offset; + } + + @Override + public void setOffset(short offset) { + this.offset = offset; + } + + @Override + public int getParentInodeNumber() { + return parentInodeNumber; + } + + @Override + public void setParentInodeNumber(int parentInodeNumber) { + this.parentInodeNumber = parentInodeNumber; + } + + @Override + public short getIndexCount() { + return indexCount; + } + + @Override + public void setIndexCount(short indexCount) { + this.indexCount = indexCount; + } + + @Override + public boolean isIndexPresent() { + return indexCount != (short) 0; + } + + public int getXattrIndex() { + return xattrIndex; + } + + public void setXattrIndex(int xattrIndex) { + this.xattrIndex = xattrIndex; + } + + @Override + public boolean isXattrPresent() { + return 
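+        // XATTR_NOT_PRESENT (0xffff_ffff, defined in INode) is the sentinel for "no extended attributes"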
xattrIndex != XATTR_NOT_PRESENT; + } + + @Override + protected int getChildSerializedSize() { + return 24; + } + + @Override + protected String getName() { + return "extended-directory-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_DIRECTORY; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + fileSize = in.readInt(); + startBlock = in.readInt(); + parentInodeNumber = in.readInt(); + indexCount = in.readShort(); + offset = in.readShort(); + xattrIndex = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(fileSize); + out.writeInt(startBlock); + out.writeInt(parentInodeNumber); + out.writeShort(indexCount); + out.writeShort(offset); + out.writeInt(xattrIndex); + } + + @Override + protected int getPreferredDumpWidth() { + return 19; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fileSize", fileSize, DECIMAL, UNSIGNED); + dumpBin(buf, width, "startBlock", startBlock, DECIMAL, UNSIGNED); + dumpBin(buf, width, "parentInodeNumber", parentInodeNumber, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "indexCount", indexCount, DECIMAL, UNSIGNED); + dumpBin(buf, width, "offset", offset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "xattrIndex", xattrIndex, DECIMAL, UNSIGNED); + } + + @Override + public DirectoryINode simplify() { + return BasicDirectoryINode.simplify(this); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFifoINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFifoINode.java new file mode 100644 index 00000000000..8a90721fde9 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFifoINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class ExtendedFifoINode extends AbstractExtendedIpcINode + implements FifoINode { + + @Override + protected String getName() { + return "extended-fifo-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_FIFO; + } + + @Override + public FifoINode simplify() { + return BasicFifoINode.simplify(this); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFileINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFileINode.java new file mode 100644 index 00000000000..4c21c548bc7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedFileINode.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class ExtendedFileINode extends AbstractINode implements FileINode { + + long blocksStart; + long fileSize; + long sparse; + int nlink = 1; + int fragmentBlockIndex = FRAGMENT_BLOCK_INDEX_NONE; + int fragmentOffset = 0; + int xattrIndex = XATTR_NOT_PRESENT; + int[] blockSizes; + + @Override + public long getBlocksStart() { + return blocksStart; + } + + @Override + public void setBlocksStart(long blocksStart) { + this.blocksStart = blocksStart; + } + + @Override + public int getFragmentBlockIndex() { + return fragmentBlockIndex; + } + + @Override + public void setFragmentBlockIndex(int fragmentBlockIndex) { + this.fragmentBlockIndex = fragmentBlockIndex; + } + + @Override + public int getFragmentOffset() { + return fragmentOffset; + } + + @Override + public void setFragmentOffset(int fragmentOffset) { + this.fragmentOffset = fragmentOffset; + } + + @Override + public long getFileSize() { + return fileSize; + } + + @Override + public void setFileSize(long fileSize) { + this.fileSize = fileSize; + } + + @Override + public int[] getBlockSizes() { + return blockSizes; + } + + @Override + public void setBlockSizes(int[] blockSizes) { + this.blockSizes = blockSizes; + } + + @Override + public long getSparse() { + return sparse; + } + + @Override + public void setSparse(long sparse) { + this.sparse = sparse; + } + + @Override + public int getNlink() { + return 
nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public int getXattrIndex() { + return xattrIndex; + } + + @Override + public void setXattrIndex(int xattrIndex) { + this.xattrIndex = xattrIndex; + } + + @Override + public boolean isXattrPresent() { + return xattrIndex != XATTR_NOT_PRESENT; + } + + @Override + protected int getChildSerializedSize() { + return 40 + (blockSizes.length * 4); + } + + @Override + public boolean isFragmentPresent() { + return fragmentBlockIndex != FRAGMENT_BLOCK_INDEX_NONE; + } + + @Override + public boolean isSparseBlockPresent() { + return sparse != 0L; + } + + private int fullBlockCount(SuperBlock sb) { + int blockSize = sb.getBlockSize(); + int blockCount = (int) (fileSize / blockSize); + if (!isFragmentPresent() && (fileSize % blockSize != 0)) { + blockCount++; + } + return blockCount; + } + + @Override + protected String getName() { + return "extended-file-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_FILE; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + blocksStart = in.readLong(); + fileSize = in.readLong(); + sparse = in.readLong(); + nlink = in.readInt(); + fragmentBlockIndex = in.readInt(); + fragmentOffset = in.readInt(); + xattrIndex = in.readInt(); + + int blockCount = fullBlockCount(sb); + blockSizes = new int[blockCount]; + for (int i = 0; i < blockCount; i++) { + blockSizes[i] = in.readInt(); + } + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeLong(blocksStart); + out.writeLong(fileSize); + out.writeLong(sparse); + out.writeInt(nlink); + out.writeInt(fragmentBlockIndex); + out.writeInt(fragmentOffset); + out.writeInt(xattrIndex); + for (int bs : blockSizes) { + out.writeInt(bs); + } + } + + @Override + protected int getPreferredDumpWidth() { + return 20; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "blocksStart", blocksStart, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fileSize", fileSize, DECIMAL, UNSIGNED); + dumpBin(buf, width, "sparse", sparse, DECIMAL, UNSIGNED); + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fragmentBlockIndex", fragmentBlockIndex, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "fragmentOffset", fragmentOffset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "xattrIndex", xattrIndex, DECIMAL, UNSIGNED); + dumpBin(buf, width, "blocks", blockSizes.length, DECIMAL, UNSIGNED); + for (int i = 0; i < blockSizes.length; i++) { + dumpBin(buf, width, String.format("blockSizes[%d]", i), blockSizes[i], + DECIMAL, UNSIGNED); + } + } + + @Override + public FileINode simplify() { + return BasicFileINode.simplify(this); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedIpcINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedIpcINode.java new file mode 100644 index 00000000000..c069ff9b196 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedIpcINode.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface ExtendedIpcINode extends IpcINode { + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSocketINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSocketINode.java new file mode 100644 index 00000000000..77be518ea13 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSocketINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class ExtendedSocketINode extends AbstractExtendedIpcINode + implements SocketINode { + + @Override + protected String getName() { + return "extended-socket-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_SOCKET; + } + + @Override + public SocketINode simplify() { + return BasicSocketINode.simplify(this); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSymlinkINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSymlinkINode.java new file mode 100644 index 00000000000..b3aca39c4e5 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/ExtendedSymlinkINode.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class ExtendedSymlinkINode extends AbstractINode + implements SymlinkINode { + + private static final byte[] EMPTY = new byte[0]; + + int nlink = 1; + byte[] targetPath = EMPTY; + int xattrIndex = XATTR_NOT_PRESENT; + + @Override + public int getNlink() { + return nlink; + } + + @Override + public void setNlink(int nlink) { + this.nlink = nlink; + } + + @Override + public byte[] getTargetPath() { + return targetPath; + } + + @Override + public void setTargetPath(byte[] targetPath) { + this.targetPath = (targetPath == null) ? EMPTY : targetPath; + } + + @Override + public int getXattrIndex() { + return xattrIndex; + } + + @Override + public void setXattrIndex(int xattrIndex) { + this.xattrIndex = xattrIndex; + } + + @Override + public boolean isXattrPresent() { + return xattrIndex != XATTR_NOT_PRESENT; + } + + @Override + protected int getChildSerializedSize() { + return 12 + targetPath.length; + } + + @Override + protected String getName() { + return "extended-symlink-inode"; + } + + @Override + public INodeType getInodeType() { + return INodeType.EXTENDED_SYMLINK; + } + + @Override + protected void readExtraData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + nlink = in.readInt(); + int targetSize = in.readInt(); + targetPath = new byte[targetSize]; + in.readFully(targetPath); + xattrIndex = in.readInt(); + } + + @Override + protected void writeExtraData(MetadataWriter out) throws IOException { + out.writeInt(nlink); + out.writeInt(targetPath.length); + out.write(targetPath); + out.writeInt(xattrIndex); + } + + @Override + protected int getPreferredDumpWidth() { + return 12; + } + + @Override + protected void dumpProperties(StringBuilder buf, int width) { + dumpBin(buf, width, "nlink", nlink, DECIMAL, UNSIGNED); + dumpBin(buf, width, "targetSize", targetPath.length, DECIMAL, UNSIGNED); + dumpBin(buf, width, "targetPath", targetPath, 0, targetPath.length, 16, 2); + dumpBin(buf, width, "xattrIndex", xattrIndex, DECIMAL, UNSIGNED); + } + + public SymlinkINode simplify() { + return BasicSymlinkINode.simplify(this); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FifoINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FifoINode.java new file mode 100644 index 00000000000..88bd77ae4f3 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FifoINode.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface FifoINode extends IpcINode { + + FifoINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FileINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FileINode.java new file mode 100644 index 00000000000..9af3082d6b6 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/FileINode.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface FileINode extends INode { + + static final int FRAGMENT_BLOCK_INDEX_NONE = 0xffff_ffff; + + long getBlocksStart(); + + void setBlocksStart(long blocksStart); + + int getFragmentBlockIndex(); + + void setFragmentBlockIndex(int fragmentBlockIndex); + + int getFragmentOffset(); + + void setFragmentOffset(int fragmentOffset); + + long getFileSize(); + + void setFileSize(long fileSize); + + int[] getBlockSizes(); + + void setBlockSizes(int[] blockSizes); + + long getSparse(); + + void setSparse(long sparse); + + int getNlink(); + + void setNlink(int nlink); + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isSparseBlockPresent(); + + boolean isFragmentPresent(); + + boolean isXattrPresent(); + + FileINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INode.java new file mode 100644 index 00000000000..af77ed2e2f5 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INode.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.DataInput; +import java.io.IOException; + +public interface INode { + + int XATTR_NOT_PRESENT = 0xffff_ffff; + + static INode read(SuperBlock sb, DataInput in) + throws SquashFsException, IOException { + INodeType inodeType = INodeType.fromValue(in.readShort()); + INode inode = inodeType.create(); + inode.readData(sb, in); + return inode; + } + + INodeType getInodeType(); + + int getSerializedSize(); + + short getPermissions(); + + void setPermissions(short permissions); + + short getUidIdx(); + + void setUidIdx(short uidIdx); + + short getGidIdx(); + + void setGidIdx(short uidIdx); + + int getModifiedTime(); + + void setModifiedTime(int modifiedTime); + + int getInodeNumber(); + + void setInodeNumber(int inodeNumber); + + int getNlink(); + + void copyTo(INode dest); + + INode simplify(); + + void readData(SuperBlock sb, DataInput in) + throws SquashFsException, IOException; + + void writeData(MetadataWriter out) throws IOException; + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeRef.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeRef.java new file mode 100644 index 00000000000..0d68d8a9f07 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeRef.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public class INodeRef { + + private final int location; + private final short offset; + + public INodeRef(long value) { + this.location = (int) ((value >> 16) & 0xffffffffL); + this.offset = (short) (value & 0xffff); + } + + public INodeRef(int location, short offset) { + this.location = location; + this.offset = offset; + } + + public int getLocation() { + return location; + } + + public short getOffset() { + return offset; + } + + public long getRaw() { + return ((((long) location) & 0xffffffffL) << 16) | + (((long) (offset)) & 0xffffL); + } + + @Override + public String toString() { + return String.format("{ location=%d, offset=%d }", location, offset); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeType.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeType.java new file mode 100644 index 00000000000..d0a68926816 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/INodeType.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + package org.apache.hadoop.runc.squashfs.inode; + + import org.apache.hadoop.runc.squashfs.SquashFsException; + + import java.util.function.Supplier; + + public enum INodeType { + + BASIC_DIRECTORY(1, 1, 'd', BasicDirectoryINode::new), + BASIC_FILE(2, 2, '-', BasicFileINode::new), + BASIC_SYMLINK(3, 3, 'l', BasicSymlinkINode::new), + BASIC_BLOCK_DEVICE(4, 4, 'b', BasicBlockDeviceINode::new), + BASIC_CHAR_DEVICE(5, 5, 'c', BasicCharDeviceINode::new), + BASIC_FIFO(6, 6, 'p', BasicFifoINode::new), + BASIC_SOCKET(7, 7, 's', BasicSocketINode::new), + EXTENDED_DIRECTORY(8, 1, 'd', ExtendedDirectoryINode::new), + EXTENDED_FILE(9, 2, '-', ExtendedFileINode::new), + EXTENDED_SYMLINK(10, 3, 'l', ExtendedSymlinkINode::new), + EXTENDED_BLOCK_DEVICE(11, 4, 'b', ExtendedBlockDeviceINode::new), + EXTENDED_CHAR_DEVICE(12, 5, 'c', ExtendedCharDeviceINode::new), + EXTENDED_FIFO(13, 6, 'p', ExtendedFifoINode::new), + EXTENDED_SOCKET(14, 7, 's', ExtendedSocketINode::new); + + private final short value; + private final short dirValue; + private final char mode; + private final Supplier<INode> creator; + + private INodeType( + int value, int dirValue, char mode, Supplier<INode> creator) { + this.value = (short) value; + this.dirValue = (short) dirValue; + this.mode = mode; + this.creator = creator; + } + + public static INodeType fromValue(short value) throws SquashFsException { + for (INodeType nt : values()) { + if (nt.value == value) { + return nt; + } + } + throw new SquashFsException( + String.format("Unknown inode type 0x%x (%d)", value, value)); + } + + public static INodeType fromDirectoryValue(short value) + throws SquashFsException { + for (INodeType nt : values()) { + if (nt.value == value && nt.basic()) { + return nt; + } + } + throw new SquashFsException( + String.format("Unknown inode type 0x%x (%d)", value, value)); + } + + public boolean basic() { + return value <= (short) 7; + } + + public boolean directory() { + return mode == 'd'; + } + + public boolean file() { + return mode == '-'; + } + + public boolean symlink() { + return mode == 'l'; + } + + public boolean blockDevice() { + return mode == 'b'; + } + + public boolean charDevice() { + return mode == 'c'; + } + + public boolean device() { + return blockDevice() || charDevice(); + } + + public boolean fifo() { + return mode == 'p'; + } + + public boolean socket() { + return mode == 's'; + } + + public boolean ipc() { + return fifo() || socket(); + } + + public short value() { + return value; + } + + public short dirValue() { + return dirValue; + } + + public char mode() { + return mode; + } + + public INode create() { + return creator.get(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/IpcINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/IpcINode.java new file mode 100644 index 00000000000..2a0d28fec86 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/IpcINode.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface IpcINode extends INode { + + int getNlink(); + + void setNlink(int nlink); + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + + IpcINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/Permission.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/Permission.java new file mode 100644 index 00000000000..50c4779a6c7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/Permission.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + package org.apache.hadoop.runc.squashfs.inode; + + import java.util.EnumSet; + + public enum Permission { + S_IXOTH(0, "execute (other)"), + S_IWOTH(1, "write (other)"), + S_IROTH(2, "read (other)"), + S_IXGRP(3, "execute (group)"), + S_IWGRP(4, "write (group)"), + S_IRGRP(5, "read (group)"), + S_IXUSR(6, "execute (user)"), + S_IWUSR(7, "write (user)"), + S_IRUSR(8, "read (user)"), + S_ISVTX(9, "restricted delete"), + S_ISGID(10, "set gid"), + S_ISUID(11, "set uid"); + + private final String description; + private final int mask; + private final int bit; + + private Permission(int bit, String description) { + this.bit = bit; + this.mask = (1 << bit); + this.description = description; + } + + public static EnumSet<Permission> from(int value) { + EnumSet<Permission> result = EnumSet.noneOf(Permission.class); + for (Permission perm : values()) { + if ((value & perm.mask) == perm.mask) { + result.add(perm); + } + } + return result; + } + + public static EnumSet<Permission> from(String display) + throws IllegalArgumentException { + return from(toValue(display)); + } + + public static int toValue(EnumSet<Permission> perms) { + int result = 0; + for (Permission perm : perms) { + result |= perm.mask; + } + return result; + } + + public static String toDisplay(int value) { + char[] c = new char[9]; + + c[0] = toDisplay(value, S_IRUSR.mask, 'r', '-'); + c[1] = toDisplay(value, S_IWUSR.mask, 'w', '-'); + c[2] = toDisplay(value, S_IXUSR.mask, S_ISUID.mask, 'x', '-', 's', 'S'); + c[3] = toDisplay(value, S_IRGRP.mask, 'r', '-'); + c[4] = toDisplay(value, S_IWGRP.mask, 'w', '-'); + c[5] = toDisplay(value, S_IXGRP.mask, S_ISGID.mask, 'x', '-', 's', 'S'); + c[6] = toDisplay(value, S_IROTH.mask, 'r', '-'); + c[7] = toDisplay(value, S_IWOTH.mask, 'w', '-'); + c[8] = toDisplay(value, S_IXOTH.mask, S_ISVTX.mask, 'x', '-', 't', 'T'); + + return new String(c); + } + + public static int toValue(String display) { + if (display.length() < 9) { + throw new IllegalArgumentException( + String.format("Invalid permission string %s", display)); + } + int result = 0; + char[] c = display.toCharArray(); + + // standard bits + result |= setBitIf(c[0], 'r', S_IRUSR.mask); + result |= setBitIf(c[1], 'w', S_IWUSR.mask); + result |= setBitIf(c[2], 'x', S_IXUSR.mask); + result |= setBitIf(c[3], 'r', S_IRGRP.mask); + result |= setBitIf(c[4], 'w', S_IWGRP.mask); + result |= setBitIf(c[5], 'x', S_IXGRP.mask); + result |= setBitIf(c[6], 'r', S_IROTH.mask); + result |= setBitIf(c[7], 'w', S_IWOTH.mask); + result |= setBitIf(c[8], 'x', S_IXOTH.mask); + + // extended bits + result |= setBitIf(c[2], 's', S_IXUSR.mask | S_ISUID.mask); + result |= setBitIf(c[2], 'S', S_ISUID.mask); + result |= setBitIf(c[5], 's', S_IXGRP.mask | S_ISGID.mask); + result |= setBitIf(c[5], 'S', S_ISGID.mask); + result |= setBitIf(c[8], 't', S_IXOTH.mask | S_ISVTX.mask); + result |= setBitIf(c[8], 'T', S_ISVTX.mask); + + return result; + } + + public static String toDisplay(EnumSet<Permission> perms) { + return toDisplay(toValue(perms)); + } + + private static char toDisplay(int value, int mask, char yes, char no) { + return ((value & mask) == mask) ? yes : no; + } + + private static char toDisplay(int value, int mask1, int mask2, char c1, + char c0, char c12, char c2) { + int both = (mask1 | mask2); + + if ((value & both) == mask1) { + return c1; + } else if ((value & both) == mask2) { + return c2; + } else if ((value & both) == both) { + return c12; + } else { + return c0; + } + } + + private static int setBitIf(char c, char t, int value) { + return (c == t) ?
value : 0; + } + + public String description() { + return description; + } + + public int mask() { + return mask; + } + + public int bit() { + return bit; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SocketINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SocketINode.java new file mode 100644 index 00000000000..c3d69c65712 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SocketINode.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface SocketINode extends IpcINode { + + SocketINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SymlinkINode.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SymlinkINode.java new file mode 100644 index 00000000000..80fcb316801 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/inode/SymlinkINode.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +public interface SymlinkINode extends INode { + + int getNlink(); + + void setNlink(int nlink); + + byte[] getTargetPath(); + + void setTargetPath(byte[] targetPath); + + int getXattrIndex(); + + void setXattrIndex(int xattrIndex); + + boolean isXattrPresent(); + + SymlinkINode simplify(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/ByteBufferDataInput.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/ByteBufferDataInput.java new file mode 100644 index 00000000000..e882f308ba8 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/ByteBufferDataInput.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.io; + +import java.io.DataInput; +import java.io.EOFException; +import java.io.IOException; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; + +public class ByteBufferDataInput implements DataInput { + private final ByteBuffer bb; + private final byte[] tb = new byte[8]; + + public ByteBufferDataInput(ByteBuffer buffer) { + this.bb = buffer; + } + + @Override + public void readFully(byte[] b) throws IOException { + try { + bb.get(b); + } catch (BufferUnderflowException e) { + throw new EOFException(); + } + } + + @Override + public void readFully(byte[] b, int off, int len) throws IOException { + try { + bb.get(b, off, len); + } catch (BufferUnderflowException e) { + throw new EOFException(); + } + } + + @Override + public byte readByte() throws IOException { + try { + return bb.get(); + } catch (BufferUnderflowException e) { + throw new EOFException(); + } + } + + @Override + public int skipBytes(int n) throws IOException { + int bytesToSkip = Math.max(0, Math.min(n, bb.remaining())); + bb.position(bb.position() + bytesToSkip); + return bytesToSkip; + } + + @Override + public boolean readBoolean() throws IOException { + byte in = readByte(); + return in != (byte) 0; + } + + @Override + public int readUnsignedByte() throws IOException { + return readByte() & 0xff; + } + + @Override + public short readShort() throws IOException { + readFully(tb, 0, 2); + return (short) ((tb[0] << 8) | (tb[1] & 0xff)); + } + + @Override + public int readUnsignedShort() throws IOException { + return readShort() & 0xffff; + } + + @Override + public char readChar() throws IOException { + readFully(tb, 0, 2); + return (char) ((tb[0] << 8) | (tb[1] & 0xff)); + } + + @Override + public int readInt() throws IOException { + readFully(tb, 0, 4); + return (((tb[0] & 0xff) << 24) | ((tb[1] & 0xff) << 16) | + ((tb[2] & 0xff) << 8) | (tb[3] & 0xff)); + } + + @Override + public long readLong() throws IOException { 
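+    // Assemble the 8 bytes in big-endian order, as the java.io.DataInput
+    // contract requires; little-endian squashfs fields are byte-reversed
+    // by the callers that need it.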
+ readFully(tb, 0, 8); + return (((long) (tb[0] & 0xff) << 56) | + ((long) (tb[1] & 0xff) << 48) | + ((long) (tb[2] & 0xff) << 40) | + ((long) (tb[3] & 0xff) << 32) | + ((long) (tb[4] & 0xff) << 24) | + ((long) (tb[5] & 0xff) << 16) | + ((long) (tb[6] & 0xff) << 8) | + ((long) (tb[7] & 0xff))); + } + + @Override + public float readFloat() throws IOException { + return Float.intBitsToFloat(readInt()); + } + + @Override + public double readDouble() throws IOException { + return Double.longBitsToDouble(readLong()); + } + + @Override + public String readLine() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String readUTF() throws IOException { + throw new UnsupportedOperationException(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/MappedFile.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/MappedFile.java new file mode 100644 index 00000000000..a2a3d230f97 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/io/MappedFile.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.runc.squashfs.io;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileChannel.MapMode;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MappedFile {
+
+  private final long fileSize;
+  private final MappedByteBuffer[] buffers;
+  private final int mapSize;
+  private final int windowSize;
+
+  MappedFile(long fileSize, MappedByteBuffer[] buffers, int mapSize,
+      int windowSize) {
+    this.fileSize = fileSize;
+    this.buffers = buffers;
+    this.mapSize = mapSize;
+    this.windowSize = windowSize;
+  }
+
+  public static MappedFile mmap(FileChannel channel, int bufferSize,
+      int windowSize) throws IOException {
+    long size = channel.size();
+
+    List<MappedByteBuffer> buffers = new ArrayList<>();
+
+    long offset = 0L;
+    long remain = size;
+
+    while (offset < size) {
+      long mapSize = Math.min(windowSize, remain);
+
+      buffers.add(channel.map(MapMode.READ_ONLY, offset, mapSize));
+      offset += bufferSize;
+      remain -= bufferSize;
+    }
+
+    MappedByteBuffer[] bufferArray =
+        buffers.toArray(new MappedByteBuffer[buffers.size()]);
+
+    return new MappedFile(size, bufferArray, bufferSize, windowSize);
+  }
+
+  public long getFileSize() {
+    return fileSize;
+  }
+
+  public int getMapSize() {
+    return mapSize;
+  }
+
+  public int getWindowSize() {
+    return windowSize;
+  }
+
+  MappedByteBuffer buffer(long offset) {
+    long remain = offset % mapSize;
+    int block = (int) ((offset - remain) / mapSize);
+    return buffers[block];
+  }
+
+  int bufferOffset(long offset) {
+    long remain = offset % mapSize;
+    return (int) remain;
+  }
+
+  public ByteBuffer from(long offset) {
+    MappedByteBuffer src = buffer(offset);
+    int bufOffset = bufferOffset(offset);
+
+    ByteBuffer copy = src.duplicate();
+    copy.position(copy.position() + bufOffset);
+    ByteBuffer slice = copy.slice();
+    return slice;
+  }
+
+  @Override
+  public String toString() {
+    return String.format(
+        "mapped-file: { size=%d, buffers=%d, mapSize=%d, windowSize=%d }",
+        fileSize, buffers.length, mapSize, windowSize);
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/FileMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/FileMetadataBlockReader.java
new file mode 100644
index 00000000000..4cfd9f3a086
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/FileMetadataBlockReader.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; + +public class FileMetadataBlockReader implements MetadataBlockReader { + + private final int tag; + private final RandomAccessFile raf; + private final SuperBlock sb; + private final boolean shouldClose; + + public FileMetadataBlockReader(int tag, File file) + throws IOException, SquashFsException { + this.tag = tag; + this.raf = new RandomAccessFile(file, "r"); + this.sb = SuperBlock.read(raf); + this.shouldClose = true; + } + + public FileMetadataBlockReader( + int tag, + RandomAccessFile raf, + SuperBlock sb, + boolean shouldClose) throws SquashFsException, IOException { + this.tag = tag; + this.raf = raf; + this.sb = sb; + this.shouldClose = shouldClose; + } + + @Override + public SuperBlock getSuperBlock(int tag) { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + return sb; + } + + @Override + public MetadataBlock read(int tag, long fileOffset) + throws IOException, SquashFsException { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + long prevOffset = raf.getFilePointer(); + try { + raf.seek(fileOffset); + MetadataBlock block = MetadataBlock.read(raf, sb); + return block; + } finally { + raf.seek(prevOffset); + } + } + + @Override + public void close() throws IOException { + if (shouldClose) { + raf.close(); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MappedFileMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MappedFileMetadataBlockReader.java new file mode 100644 index 00000000000..a53b278153e --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MappedFileMetadataBlockReader.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.io.ByteBufferDataInput; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.IOException; + +public class MappedFileMetadataBlockReader implements MetadataBlockReader { + + private final int tag; + private final SuperBlock sb; + private final MappedFile mmap; + + public MappedFileMetadataBlockReader(int tag, SuperBlock sb, MappedFile mmap) + throws IOException, SquashFsException { + + this.tag = tag; + this.sb = sb; + this.mmap = mmap; + } + + @Override + public SuperBlock getSuperBlock(int tag) { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + return sb; + } + + @Override + public MetadataBlock read(int tag, long fileOffset) + throws IOException, SquashFsException { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + + return MetadataBlock + .read(new ByteBufferDataInput(mmap.from(fileOffset)), sb); + } + + @Override + public void close() { + + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MemoryMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MemoryMetadataBlockReader.java new file mode 100644 index 00000000000..9abf436fa94 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MemoryMetadataBlockReader.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.EOFException; +import java.io.IOException; + +public class MemoryMetadataBlockReader implements MetadataBlockReader { + + private final int tag; + private final SuperBlock sb; + private final byte[] data; + private final int offset; + private final int length; + + public MemoryMetadataBlockReader(int tag, SuperBlock sb, byte[] data) { + this(tag, sb, data, 0, data.length); + } + + public MemoryMetadataBlockReader(int tag, SuperBlock sb, byte[] data, + int offset, int length) { + this.tag = tag; + this.sb = sb; + this.data = data; + this.offset = offset; + this.length = length; + } + + @Override + public SuperBlock getSuperBlock(int tag) { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + return sb; + } + + @Override + public MetadataBlock read(int tag, long fileOffset) + throws IOException, SquashFsException { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + if (fileOffset >= length) { + throw new EOFException("Read past end of buffer"); + } + int localOffset = (int) fileOffset; + + try (ByteArrayInputStream bis = new ByteArrayInputStream(data, + offset + localOffset, length - localOffset)) { + try (DataInputStream dis = new DataInputStream(bis)) { + return MetadataBlock.read(dis, sb); + } + } + } + + @Override + public void close() { + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlock.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlock.java new file mode 100644 index 00000000000..d2df1f76839 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlock.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.BINARY; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class MetadataBlock { + + public static final int MAX_SIZE = 8192; + public static final int HEADER_SIZE = 2; + + private static final byte[] EMPTY = new byte[0]; + + protected short header; + protected byte[] data = EMPTY; + protected short fileLength = 0; + + public static MetadataReader reader(MetadataBlockReader metaReader, + MetadataReference metaRef) throws IOException { + return new MetadataReader(metaReader, metaRef); + } + + public static MetadataBlock read(DataInput in, SuperBlock sb) + throws IOException, SquashFsException { + MetadataBlock block = new MetadataBlock(); + block.readData(in, sb); + return block; + } + + public byte[] getData() { + return data; + } + + public short getDataSize() { + return (short) (header & 0x7fff); + } + + public boolean isCompressed() { + return (header & 0x8000) == 0; + } + + public short getFileLength() { + return fileLength; + } + + public void readData(DataInput in, SuperBlock sb) + throws IOException, SquashFsException { + readHeader(in); + fileLength = (short) (HEADER_SIZE + readPayload(in, sb)); + } + + private void readHeader(DataInput in) throws IOException { + byte[] raw = new byte[HEADER_SIZE]; + in.readFully(raw); + ByteBuffer buffer = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN); + header = buffer.getShort(); + } + + private int readPayload(DataInput in, SuperBlock sb) + throws IOException, SquashFsException { + if (isCompressed()) { + return readCompressed(in, sb); + } else { + return readUncompressed(in); + } + } + + private int readUncompressed(DataInput in) throws IOException { + int size = getDataSize(); + if (size > MAX_SIZE) { + throw new SquashFsException( + String.format("Corrupt metadata block: Got size %d (max = %d)", size, + MAX_SIZE)); + } + data = new byte[size]; + in.readFully(data); + return data.length; + } + + private int readCompressed(DataInput in, SuperBlock sb) + throws IOException, SquashFsException { + switch (sb.getCompressionId()) { + case NONE: + throw new SquashFsException( + "Archive claims no compression, but found compressed data"); + case ZLIB: + return readCompressedZlib(in, sb); + default: + throw new UnsupportedOperationException( + String.format("Reading compressed data of type %s not yet supported", + sb.getCompressionId())); + } + } + + private int readCompressedZlib(DataInput in, SuperBlock sb) + throws IOException, SquashFsException { + // see if there are compression flags + if (sb.hasFlag(SuperBlockFlag.COMPRESSOR_OPTIONS)) { + throw new UnsupportedOperationException( + "Reading ZLIB compressed data with non-standard options not yet supported"); + } + + int dataSize = getDataSize(); + byte[] buf = new byte[dataSize]; + in.readFully(buf); + + byte[] xfer = 
new byte[MAX_SIZE]; + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try ( + InflaterInputStream iis = new InflaterInputStream(bis, new Inflater(), + MAX_SIZE)) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(MAX_SIZE)) { + int c = 0; + while ((c = iis.read(xfer, 0, MAX_SIZE)) >= 0) { + if (c > 0) { + bos.write(xfer, 0, c); + } + } + data = bos.toByteArray(); + if (data.length > MAX_SIZE) { + throw new SquashFsException(String.format( + "Corrupt metadata block: Got size %d (max = %d)", data.length, + MAX_SIZE)); + } + } + } + } + + return dataSize; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("metadata-block: {%n")); + int width = 22; + dumpBin(buf, width, "header", header, BINARY, UNSIGNED); + dumpBin(buf, width, "dataSize (decoded)", getDataSize(), DECIMAL, UNSIGNED); + dumpBin(buf, width, "fileLength", (short) fileLength, DECIMAL, UNSIGNED); + dumpBin(buf, width, "compressed (decoded)", + isCompressed() ? "true" : "false"); + dumpBin(buf, width, "blockSize", data.length, DECIMAL, UNSIGNED); + dumpBin(buf, width, "data", data, 0, data.length, 16, 2); + buf.append("}"); + return buf.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockCache.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockCache.java new file mode 100644 index 00000000000..49b5f430ac7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockCache.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.runc.squashfs.metadata;
+
+import org.apache.hadoop.runc.squashfs.SquashFsException;
+import org.apache.hadoop.runc.squashfs.superblock.SuperBlock;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class MetadataBlockCache implements MetadataBlockReader {
+
+  private static final int DEFAULT_CACHE_SIZE = 32; // 256 KB
+
+  private final int cacheSize;
+  private final TaggedMetadataBlockReader reader;
+  private final boolean shouldClose;
+  private final LruBlockCache cache;
+  private final AtomicLong cacheHits = new AtomicLong(0L);
+  private final AtomicLong cacheMisses = new AtomicLong(0L);
+
+  public MetadataBlockCache(TaggedMetadataBlockReader reader) {
+    this(reader, DEFAULT_CACHE_SIZE);
+  }
+
+  public MetadataBlockCache(TaggedMetadataBlockReader reader, int cacheSize) {
+    this(reader, cacheSize, true);
+  }
+
+  public MetadataBlockCache(TaggedMetadataBlockReader reader,
+      boolean shouldClose) {
+    this(reader, DEFAULT_CACHE_SIZE, shouldClose);
+  }
+
+  public MetadataBlockCache(TaggedMetadataBlockReader reader, int cacheSize,
+      boolean shouldClose) {
+    this.cacheSize = cacheSize;
+    this.reader = reader;
+    this.cache = new LruBlockCache(cacheSize < 1 ? 1 : cacheSize);
+    this.shouldClose = shouldClose;
+  }
+
+  public synchronized void add(int tag, MetadataBlockReader reader) {
+    this.reader.add(tag, reader);
+  }
+
+  @Override
+  public SuperBlock getSuperBlock(int tag) {
+    return reader.getSuperBlock(tag);
+  }
+
+  @Override
+  public MetadataBlock read(int tag, long fileOffset)
+      throws IOException, SquashFsException {
+    Key key = new Key(tag, fileOffset);
+
+    MetadataBlock block;
+
+    synchronized (this) {
+      block = cache.get(key);
+    }
+
+    if (block != null) {
+      cacheHits.incrementAndGet();
+    } else {
+      cacheMisses.incrementAndGet();
+      block = reader.read(tag, fileOffset);
+      synchronized (this) {
+        cache.put(key, block);
+      }
+    }
+
+    return block;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (shouldClose) {
+      reader.close();
+    }
+  }
+
+  public long getCacheHits() {
+    return cacheHits.get();
+  }
+
+  public long getCacheMisses() {
+    return cacheMisses.get();
+  }
+
+  public synchronized int getCacheLoad() {
+    return cache.size();
+  }
+
+  public void resetStatistics() {
+    cacheHits.set(0L);
+    cacheMisses.set(0L);
+  }
+
+  public synchronized void clearCache() {
+    cache.clear();
+    resetStatistics();
+  }
+
+  @Override
+  public String toString() {
+    return String.format(
+        "metadata-block-cache { capacity=%d, size=%d, hits=%d, misses=%d }",
+        cacheSize, getCacheLoad(), getCacheHits(), getCacheMisses());
+  }
+
+  public static final class Key {
+
+    private final int tag;
+    private final long fileOffset;
+
+    public Key(int tag, long fileOffset) {
+      this.tag = tag;
+      this.fileOffset = fileOffset;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(tag, fileOffset);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (!(obj instanceof Key)) {
+        return false;
+      }
+      Key o = (Key) obj;
+
+      return (tag == o.tag) && (fileOffset == o.fileOffset);
+    }
+
+    @Override
+    public String toString() {
+      return String.format("%d.%d", tag, fileOffset);
+    }
+  }
+
+  private static class LruBlockCache
+      extends LinkedHashMap<Key, MetadataBlock> {
+
+    private static final long serialVersionUID = 7509410739092012261L;
+
+    private final int cacheSize;
+
+    public LruBlockCache(int cacheSize) {
+      super(16, 0.75f, true);
+      this.cacheSize = cacheSize;
+    }
+
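+    // Evicts the least-recently-used block once the map grows past
+    // cacheSize entries; access ordering is enabled above via
+    // super(16, 0.75f, true).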
+    @Override
+    protected boolean removeEldestEntry(Map.Entry<Key, MetadataBlock> eldest) {
+      return size() > cacheSize;
+    }
+
+  }
+}
diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockReader.java
new file mode 100644
index 00000000000..b76ce0fd1c3
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockReader.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.runc.squashfs.metadata;
+
+import org.apache.hadoop.runc.squashfs.SquashFsException;
+import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry;
+import org.apache.hadoop.runc.squashfs.inode.DirectoryINode;
+import org.apache.hadoop.runc.squashfs.superblock.SuperBlock;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+public interface MetadataBlockReader extends Closeable {
+
+  MetadataBlock read(int tag, long fileOffset)
+      throws IOException, SquashFsException;
+
+  SuperBlock getSuperBlock(int tag);
+
+  default MetadataReader reader(MetadataReference metaRef)
+      throws IOException, SquashFsException {
+    return MetadataBlock.reader(this, metaRef);
+  }
+
+  default MetadataReader rawReader(
+      int tag,
+      long blockLocation,
+      short offset) throws IOException, SquashFsException {
+    return reader(MetadataReference.raw(tag, blockLocation, offset));
+  }
+
+  default MetadataReader inodeReader(int tag, long inodeRef)
+      throws IOException, SquashFsException {
+    return reader(MetadataReference.inode(tag, getSuperBlock(tag), inodeRef));
+  }
+
+  default MetadataReader inodeReader(int tag, DirectoryEntry dirEnt)
+      throws IOException, SquashFsException {
+    return reader(MetadataReference.inode(tag, getSuperBlock(tag), dirEnt));
+  }
+
+  default MetadataReader directoryReader(int tag, DirectoryINode dir)
+      throws IOException, SquashFsException {
+    return reader(MetadataReference.directory(tag, getSuperBlock(tag), dir));
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockRef.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockRef.java
new file mode 100644
index 00000000000..3ff883fd45d
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataBlockRef.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.inode.INodeRef; + +public class MetadataBlockRef { + + private final int location; + private final short offset; + + public MetadataBlockRef(int location, short offset) { + this.location = location; + this.offset = offset; + } + + public int getLocation() { + return location; + } + + public short getOffset() { + return offset; + } + + @Override + public String toString() { + return String.format( + "metadata-block-ref { location=%d, offset=%d }", + location, + offset); + } + + public INodeRef toINodeRef() { + return new INodeRef(location, offset); + } + + public long toINodeRefRaw() { + return ((long) (location & 0xffffffffL) << 16) | ((long) (offset + & 0xffffL)); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReader.java new file mode 100644 index 00000000000..64f6950f025 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReader.java @@ -0,0 +1,287 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataInput; +import java.io.EOFException; +import java.io.IOException; + +public class MetadataReader implements DataInput { + + private static final Logger LOG = + LoggerFactory.getLogger(MetadataReader.class); + + private final MetadataBlockReader blockReader; + private final int tag; + private final int maxLength; + private final int startPosition; + + private MetadataBlock block = null; + private long nextBlockLocation; + private int position = -1; + private int bytesRead = 0; + + MetadataReader(MetadataBlockReader blockReader, MetadataReference metaRef) + throws IOException { + this.blockReader = blockReader; + this.tag = metaRef.getTag(); + this.nextBlockLocation = metaRef.getBlockLocation(); + this.startPosition = (int) metaRef.getOffset(); + this.maxLength = metaRef.getMaxLength() == Integer.MAX_VALUE + ? Integer.MAX_VALUE + : metaRef.getMaxLength() + ((int) metaRef.getOffset()); + skipBytes((int) metaRef.getOffset()); + LOG.trace("Reader initialized for reference: \n{}", metaRef); + } + + public boolean isEof() throws IOException { + if (bytesRead >= maxLength) { + return true; + } + if (position >= 0 && position < block.data.length) { + return false; + } + + // unknown, read more data + try { + return bytesAvailable() <= 0; + } catch (EOFException e) { + return true; + } + } + + public int position() { + return bytesRead - startPosition; + } + + public int available() throws IOException { + if (block == null || bytesRead >= maxLength) { + return 0; + } + if (position < 0 || position >= block.data.length) { + return 0; + } + return Math + .max(0, Math.min(block.data.length - position, maxLength - bytesRead)); + } + + private int bytesAvailable() throws IOException { + ensureDataReady(); + + if (block == null || bytesRead >= maxLength) { + return -1; // EOF + } + + if (position < 0 || position >= block.data.length) { + return 0; + } + + return Math + .max(-1, Math.min(block.data.length - position, maxLength - bytesRead)); + } + + private void ensureDataReady() throws SquashFsException, IOException { + if (bytesRead >= maxLength) { + block = null; + return; // EOF + } + + if (position >= 0 && position < block.data.length) { + return; + } + + block = blockReader.read(tag, nextBlockLocation); + + position = 0; + nextBlockLocation += block.getFileLength(); + } + + @Override + public void readFully(byte[] b) throws IOException { + readFully(b, 0, b.length); + } + + @Override + public void readFully(byte[] b, int off, int len) throws IOException { + int totalRead = 0; + while (len > 0) { + int av = bytesAvailable(); + if (av <= 0) { + throw new EOFException(String + .format( + "Read past end of block list. 
Read %d bytes, caller wanted %d more", + totalRead, len)); + } + int read = Math.min(len, av); + System.arraycopy(block.data, position, b, off, read); + off += read; + len -= read; + position += read; + bytesRead += read; + totalRead += read; + } + } + + @Override + public int skipBytes(int n) throws IOException { + int bytesToSkip = n; + int bytesSkipped = 0; + + while (bytesToSkip > 0) { + int av = bytesAvailable(); + if (av <= 0) { + return bytesSkipped; + } + int skip = Math.min(bytesToSkip, av); + position += skip; + bytesRead += skip; + bytesSkipped += skip; + bytesToSkip -= skip; + } + return bytesSkipped; + } + + @Override + public boolean readBoolean() throws IOException { + byte in = readByte(); + return in != (byte) 0; + } + + @Override + public byte readByte() throws IOException { + if (bytesAvailable() <= 0) { + throw new EOFException("Read past end of blocks"); + } + try { + return block.data[position]; + } finally { + position++; + bytesRead++; + } + } + + @Override + public int readUnsignedByte() throws IOException { + return readByte() & 0xff; + } + + @Override + public short readShort() throws IOException { + byte[] buf = new byte[2]; + readFully(buf); + short value = (short) ((buf[0] << 8) | (buf[1] & 0xff)); + return Short.reverseBytes(value); + } + + @Override + public int readUnsignedShort() throws IOException { + return readShort() & 0xffff; + } + + @Override + public char readChar() throws IOException { + byte[] buf = new byte[2]; + readFully(buf); + char value = (char) ((buf[0] << 8) | (buf[1] & 0xff)); + return Character.reverseBytes(value); + } + + @Override + public int readInt() throws IOException { + byte[] buf = new byte[4]; + readFully(buf); + int value = (((buf[0] & 0xff) << 24) | ((buf[1] & 0xff) << 16) | + ((buf[2] & 0xff) << 8) | (buf[3] & 0xff)); + return Integer.reverseBytes(value); + } + + @Override + public long readLong() throws IOException { + byte[] buf = new byte[8]; + readFully(buf); + long value = (((long) (buf[0] & 0xff) << 56) | + ((long) (buf[1] & 0xff) << 48) | + ((long) (buf[2] & 0xff) << 40) | + ((long) (buf[3] & 0xff) << 32) | + ((long) (buf[4] & 0xff) << 24) | + ((long) (buf[5] & 0xff) << 16) | + ((long) (buf[6] & 0xff) << 8) | + ((long) (buf[7] & 0xff))); + return Long.reverseBytes(value); + } + + @Override + public float readFloat() throws IOException { + int intValue = readInt(); + return Float.intBitsToFloat(intValue); + } + + @Override + public double readDouble() throws IOException { + long longValue = readLong(); + return Double.longBitsToDouble(longValue); + } + + @Override + public String readLine() throws IOException { + StringBuilder input = new StringBuilder(); + int c = -1; + boolean eol = false; + + while (!eol) { + if (bytesAvailable() <= 0) { + eol = true; + break; + } + switch (c = readByte()) { + case '\n': + eol = true; + break; + case '\r': + eol = true; + if (bytesAvailable() <= 0) { + break; + } + if (((char) block.data[position]) == '\n') { + position++; + bytesRead++; + } + break; + default: + input.append((char) c); + break; + } + } + + if ((c == -1) && (input.length() == 0)) { + return null; + } + return input.toString(); + } + + @Override + public String readUTF() throws IOException { + throw new UnsupportedOperationException(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReference.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReference.java new file mode 100644 index 00000000000..710521a04ef --- 
/dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataReference.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class MetadataReference { + + private final int tag; + private final long blockLocation; + private final short offset; + private final int maxLength; + + MetadataReference(int tag, long blockLocation, short offset, int maxLength) { + this.tag = tag; + this.blockLocation = blockLocation; + this.offset = offset; + this.maxLength = maxLength; + } + + public static MetadataReference inode(int tag, SuperBlock sb, long inodeRef) + throws SquashFsException { + long inodeBlockRel = (inodeRef & 0x0000ffffffff0000L) >> 16; + long inodeBlock = sb.getInodeTableStart() + inodeBlockRel; + + short inodeOffset = (short) (inodeRef & 0x7fff); + if (inodeOffset >= MetadataBlock.MAX_SIZE) { + throw new SquashFsException( + String.format("Invalid inode reference with offset %d (max = %d", + inodeOffset, MetadataBlock.MAX_SIZE - 1)); + } + + return new MetadataReference(tag, inodeBlock, inodeOffset, + Integer.MAX_VALUE); + } + + public static MetadataReference inode(int tag, SuperBlock sb, + DirectoryEntry dirEnt) + throws SquashFsException { + + long inodeBlockRel = (dirEnt.getHeader().getStartBlock() & 0xffffffffL); + return inode(tag, sb, inodeBlockRel, dirEnt.getOffset()); + } + + private static MetadataReference inode(int tag, SuperBlock sb, + long inodeBlockRel, short inodeOffset) + throws SquashFsException { + + long inodeBlock = sb.getInodeTableStart() + inodeBlockRel; + inodeOffset = (short) (inodeOffset & 0x7fff); + + if (inodeOffset >= MetadataBlock.MAX_SIZE) { + throw new SquashFsException( + String.format("Invalid inode reference with offset %d (max = %d", + inodeOffset, MetadataBlock.MAX_SIZE - 1)); + } + + return new MetadataReference(tag, inodeBlock, inodeOffset, + Integer.MAX_VALUE); + } + + public static MetadataReference raw(int tag, long blockLocation, short offset) + throws SquashFsException { + + offset = (short) (offset & 0x7fff); + + if (offset >= MetadataBlock.MAX_SIZE) { + throw new SquashFsException( + String.format("Invalid raw reference with offset %d (max = %d", + offset, MetadataBlock.MAX_SIZE - 1)); + } 
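+    // A raw reference carries no known length bound, so reads are limited
+    // only by the metadata stream itself (maxLength = Integer.MAX_VALUE).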
+ + return new MetadataReference(tag, blockLocation, offset, Integer.MAX_VALUE); + } + + public static MetadataReference directory(int tag, SuperBlock sb, + DirectoryINode dir) throws SquashFsException { + long dirBlockRel = dir.getStartBlock() & 0xffffffffL; + long dirBlock = sb.getDirectoryTableStart() + dirBlockRel; + short dirOffset = (short) (dir.getOffset() & 0x7fff); + + if (dirOffset >= MetadataBlock.MAX_SIZE) { + throw new SquashFsException(String + .format("Invalid directory table reference with offset %d (max = %d", + dirOffset, MetadataBlock.MAX_SIZE - 1)); + } + + return new MetadataReference(tag, dirBlock, dirOffset, + dir.getFileSize() - 3); + } + + public int getTag() { + return tag; + } + + public long getBlockLocation() { + return blockLocation; + } + + public short getOffset() { + return offset; + } + + public int getMaxLength() { + return maxLength; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("metadata-reference: {%n")); + int width = 22; + dumpBin(buf, width, "tag", tag, DECIMAL, UNSIGNED); + dumpBin(buf, width, "blockLocation", blockLocation, DECIMAL, UNSIGNED); + dumpBin(buf, width, "offset", offset, DECIMAL, UNSIGNED); + dumpBin(buf, width, "maxLength", maxLength, DECIMAL, UNSIGNED); + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataWriter.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataWriter.java new file mode 100644 index 00000000000..beaf67908cf --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/MetadataWriter.java @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.runc.squashfs.metadata;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
+
+public class MetadataWriter implements DataOutput {
+
+  private final byte[] xfer = new byte[1];
+  private final byte[] currentBlock = new byte[8192];
+  private final List<byte[]> blocks = new ArrayList<>();
+  private long location = 0L;
+  private int offset = 0;
+
+  public void save(DataOutput out) throws IOException {
+    flush();
+    for (byte[] block : blocks) {
+      out.write(block);
+    }
+    location = 0L;
+    offset = 0;
+    blocks.clear();
+  }
+
+  public MetadataBlockRef getCurrentReference() {
+    return new MetadataBlockRef((int) (location & 0xffffffff), (short) offset);
+  }
+
+  @Override
+  public void write(byte[] b) throws IOException {
+    write(b, 0, b.length);
+  }
+
+  public void flush() throws IOException {
+    if (offset == 0) {
+      return;
+    }
+
+    byte[] compressed = compress(currentBlock, 0, offset);
+    byte[] encoded;
+    int size;
+
+    if (compressed != null) {
+      size = compressed.length & 0x7fff;
+      encoded = new byte[compressed.length + 2];
+      System.arraycopy(compressed, 0, encoded, 2, compressed.length);
+    } else {
+      size = (offset & 0x7fff) | 0x8000;
+      encoded = new byte[offset + 2];
+      System.arraycopy(currentBlock, 0, encoded, 2, offset);
+    }
+
+    encoded[0] = (byte) (size & 0xff);
+    encoded[1] = (byte) ((size >> 8) & 0xff);
+
+    blocks.add(encoded);
+    location += encoded.length;
+    offset = 0;
+  }
+
+  private byte[] compress(byte[] data, int offset, int length)
+      throws IOException {
+    Deflater def = new Deflater(Deflater.BEST_COMPRESSION);
+    try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+      try (DeflaterOutputStream dos = new DeflaterOutputStream(
+          bos, def, 4096)) {
+        dos.write(data, offset, length);
+      }
+      byte[] result = bos.toByteArray();
+      if (result.length > length) {
+        return null;
+      }
+      return result;
+    } finally {
+      def.end();
+    }
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    while (len > 0) {
+      int capacity = currentBlock.length - offset;
+      int bytesToWrite = Math.min(len, capacity);
+
+      System.arraycopy(b, off, currentBlock, offset, bytesToWrite);
+      offset += bytesToWrite;
+      off += bytesToWrite;
+      len -= bytesToWrite;
+      if (currentBlock.length == offset) {
+        flush();
+      }
+    }
+  }
+
+  private void writeByteInternal(byte b) throws IOException {
+    xfer[0] = b;
+    write(xfer);
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    writeByteInternal((byte) (b & 0xff));
+  }
+
+  @Override
+  public void writeBoolean(boolean v) throws IOException {
+    writeByteInternal((byte) (v ?
1 : 0)); + } + + @Override + public void writeByte(int v) throws IOException { + writeByteInternal((byte) (v & 0xff)); + } + + @Override + public void writeShort(int v) throws IOException { + writeByteInternal((byte) ((v >>> 0) & 0xff)); + writeByteInternal((byte) ((v >>> 8) & 0xff)); + } + + @Override + public void writeChar(int v) throws IOException { + writeByteInternal((byte) ((v >>> 0) & 0xff)); + writeByteInternal((byte) ((v >>> 8) & 0xff)); + } + + @Override + public void writeInt(int v) throws IOException { + writeByteInternal((byte) ((v >>> 0) & 0xff)); + writeByteInternal((byte) ((v >>> 8) & 0xff)); + writeByteInternal((byte) ((v >>> 16) & 0xff)); + writeByteInternal((byte) ((v >>> 24) & 0xff)); + } + + @Override + public void writeLong(long v) throws IOException { + writeByteInternal((byte) ((v >>> 0) & 0xff)); + writeByteInternal((byte) ((v >>> 8) & 0xff)); + writeByteInternal((byte) ((v >>> 16) & 0xff)); + writeByteInternal((byte) ((v >>> 24) & 0xff)); + writeByteInternal((byte) ((v >>> 32) & 0xff)); + writeByteInternal((byte) ((v >>> 40) & 0xff)); + writeByteInternal((byte) ((v >>> 48) & 0xff)); + writeByteInternal((byte) ((v >>> 56) & 0xff)); + } + + @Override + public void writeFloat(float v) throws IOException { + writeInt(Float.floatToIntBits(v)); + } + + @Override + public void writeDouble(double v) throws IOException { + writeLong(Double.doubleToLongBits(v)); + } + + @Override + public void writeBytes(String s) throws IOException { + int len = s.length(); + for (int i = 0; i < len; i++) { + writeByteInternal((byte) s.charAt(i)); + } + } + + @Override + public void writeChars(String s) throws IOException { + int len = s.length(); + for (int i = 0; i < len; i++) { + int v = s.charAt(i); + writeByteInternal((byte) ((v >>> 0) & 0xff)); + writeByteInternal((byte) ((v >>> 8) & 0xff)); + } + } + + @Override + public void writeUTF(String s) throws IOException { + throw new UnsupportedOperationException(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/TaggedMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/TaggedMetadataBlockReader.java new file mode 100644 index 00000000000..00e96c3fc31 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/metadata/TaggedMetadataBlockReader.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.runc.squashfs.metadata;
+
+import org.apache.hadoop.runc.squashfs.SquashFsException;
+import org.apache.hadoop.runc.squashfs.superblock.SuperBlock;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TaggedMetadataBlockReader implements MetadataBlockReader {
+
+  private final boolean close;
+  private final Map<Integer, MetadataBlockReader> readers = new HashMap<>();
+
+  public TaggedMetadataBlockReader(boolean close) {
+    this.close = close;
+  }
+
+  public synchronized void add(int tag, MetadataBlockReader reader) {
+    Integer key = Integer.valueOf(tag);
+    if (readers.containsKey(key)) {
+      throw new IllegalArgumentException(
+          String.format("Tag '%d' is already in use", tag));
+    }
+    readers.put(key, reader);
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (close) {
+      for (MetadataBlockReader reader : readers.values()) {
+        reader.close();
+      }
+    }
+  }
+
+  synchronized MetadataBlockReader readerFor(int tag) {
+    MetadataBlockReader mbr = readers.get(Integer.valueOf(tag));
+    if (mbr == null) {
+      throw new IllegalArgumentException(String.format("Invalid tag: %d", tag));
+    }
+    return mbr;
+  }
+
+  @Override
+  public MetadataBlock read(int tag, long fileOffset)
+      throws IOException, SquashFsException {
+    return readerFor(tag).read(tag, fileOffset);
+  }
+
+  @Override
+  public SuperBlock getSuperBlock(int tag) {
+    return readerFor(tag).getSuperBlock(tag);
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/CompressionId.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/CompressionId.java
new file mode 100644
index 00000000000..c45c8ae1ba2
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/CompressionId.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import org.apache.hadoop.runc.squashfs.SquashFsException; + +public enum CompressionId { + NONE(0), + ZLIB(1), + LZMA(2), + LZO(3), + XZ(4), + LZ4(5), + ZSTD(6); + + private final short value; + + private CompressionId(int value) { + this.value = (short) value; + } + + public static CompressionId fromValue(short value) throws SquashFsException { + for (CompressionId id : values()) { + if (id.value == value) { + return id; + } + } + throw new SquashFsException( + String.format("Unknown compression id %d", value)); + } + + public short value() { + return value; + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlock.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlock.java new file mode 100644 index 00000000000..0fa03d8ca0f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlock.java @@ -0,0 +1,333 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.BINARY; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNIX_TIMESTAMP; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class SuperBlock { + + public static final int SIZE = 96; + public static final int SQUASHFS_MAGIC = 0x73717368; + + public static final short DEFAULT_VERSION_MAJOR = 4; + public static final short DEFUALT_VERSION_MINOR = 0; + + public static final short DEFAULT_BLOCK_LOG = 17; + public static final int DEFAULT_BLOCK_SIZE = 1 << DEFAULT_BLOCK_LOG; + + public static final short DEFAULT_FLAGS = SuperBlockFlag.flagsFor( + SuperBlockFlag.DUPLICATES, SuperBlockFlag.EXPORTABLE); + + public static final long TABLE_NOT_PRESENT = 0xffff_ffff_ffff_ffffL; + + int inodeCount; + int modificationTime = (int) (System.currentTimeMillis() / 1000); + int blockSize = DEFAULT_BLOCK_SIZE; + int fragmentEntryCount; + CompressionId compressionId = CompressionId.ZLIB; + short blockLog = DEFAULT_BLOCK_LOG; + short flags = DEFAULT_FLAGS; + short idCount; + short versionMajor = DEFAULT_VERSION_MAJOR; + short versionMinor = DEFUALT_VERSION_MINOR; + long rootInodeRef; + long bytesUsed; + long idTableStart; + long xattrIdTableStart = TABLE_NOT_PRESENT; + long inodeTableStart; + long directoryTableStart; + long fragmentTableStart; + long exportTableStart; + + public static SuperBlock read(DataInput in) + throws IOException, SquashFsException { + SuperBlock block = new SuperBlock(); + block.readData(in); + return block; + } + + public int getInodeCount() { + return inodeCount; + } + + public void setInodeCount(int inodeCount) { + this.inodeCount = inodeCount; + } + + public int getModificationTime() { + return modificationTime; + } + + public void setModificationTime(int modificationTime) { + this.modificationTime = modificationTime; + } + + public int getBlockSize() { + return blockSize; + } + + public void setBlockSize(int blockSize) { + this.blockSize = blockSize; + } + + public int getFragmentEntryCount() { + return fragmentEntryCount; + } + + public void setFragmentEntryCount(int fragmentEntryCount) { + this.fragmentEntryCount = fragmentEntryCount; + } + + public CompressionId getCompressionId() { + return compressionId; + } + + public void setCompressionId(CompressionId compressionId) { + this.compressionId = compressionId; + } + + public short getBlockLog() { + return blockLog; + } + + public void setBlockLog(short blockLog) { + this.blockLog = blockLog; + } + + public short getFlags() { + return flags; + } + + public void setFlags(short flags) { + this.flags = flags; + } + + public short getIdCount() { + return idCount; + } + + public void setIdCount(short idCount) { + this.idCount = idCount; + } + + public short getVersionMajor() { + return versionMajor; + } + + public void setVersionMajor(short versionMajor) { + this.versionMajor = versionMajor; + } + + public short getVersionMinor() { + return versionMinor; + } + + public void setVersionMinor(short versionMinor) { + this.versionMinor = 
versionMinor; + } + + public long getRootInodeRef() { + return rootInodeRef; + } + + public void setRootInodeRef(long rootInodeRef) { + this.rootInodeRef = rootInodeRef; + } + + public long getBytesUsed() { + return bytesUsed; + } + + public void setBytesUsed(long bytesUsed) { + this.bytesUsed = bytesUsed; + } + + public long getIdTableStart() { + return idTableStart; + } + + public void setIdTableStart(long idTableStart) { + this.idTableStart = idTableStart; + } + + public long getXattrIdTableStart() { + return xattrIdTableStart; + } + + public void setXattrIdTableStart(long xattrIdTableStart) { + this.xattrIdTableStart = xattrIdTableStart; + } + + public long getInodeTableStart() { + return inodeTableStart; + } + + public void setInodeTableStart(long inodeTableStart) { + this.inodeTableStart = inodeTableStart; + } + + public long getDirectoryTableStart() { + return directoryTableStart; + } + + public void setDirectoryTableStart(long directoryTableStart) { + this.directoryTableStart = directoryTableStart; + } + + public long getFragmentTableStart() { + return fragmentTableStart; + } + + public void setFragmentTableStart(long fragmentTableStart) { + this.fragmentTableStart = fragmentTableStart; + } + + public long getExportTableStart() { + return exportTableStart; + } + + public void setExportTableStart(long exportTableStart) { + this.exportTableStart = exportTableStart; + } + + public boolean hasFlag(SuperBlockFlag flag) { + return flag.isSet(flags); + } + + public void writeData(DataOutput out) throws IOException { + byte[] raw = new byte[SIZE]; + ByteBuffer buffer = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN); + buffer.putInt(SQUASHFS_MAGIC); + buffer.putInt(inodeCount); + buffer.putInt(modificationTime); + buffer.putInt(blockSize); + buffer.putInt(fragmentEntryCount); + buffer.putShort(compressionId.value()); + buffer.putShort(blockLog); + buffer.putShort(flags); + buffer.putShort(idCount); + buffer.putShort(versionMajor); + buffer.putShort(versionMinor); + buffer.putLong(rootInodeRef); + buffer.putLong(bytesUsed); + buffer.putLong(idTableStart); + buffer.putLong(xattrIdTableStart); + buffer.putLong(inodeTableStart); + buffer.putLong(directoryTableStart); + buffer.putLong(fragmentTableStart); + buffer.putLong(exportTableStart); + out.write(raw); + } + + public void readData(DataInput in) throws IOException, SquashFsException { + byte[] raw = new byte[SIZE]; + in.readFully(raw); + ByteBuffer buffer = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN); + + int magic = buffer.getInt(); + if (magic != SQUASHFS_MAGIC) { + throw new SquashFsException( + String.format("Unknown magic %8x found", magic)); + } + + inodeCount = buffer.getInt(); + modificationTime = buffer.getInt(); + blockSize = buffer.getInt(); + fragmentEntryCount = buffer.getInt(); + compressionId = CompressionId.fromValue(buffer.getShort()); + blockLog = buffer.getShort(); + int expectedBlockSize = 1 << blockLog; + if (blockSize != expectedBlockSize) { + throw new SquashFsException( + String.format("Corrupt archive, expected block size %d, got %d", + expectedBlockSize, blockSize)); + } + flags = buffer.getShort(); + idCount = buffer.getShort(); + versionMajor = buffer.getShort(); + versionMinor = buffer.getShort(); + + String version = String.format("%d.%d", versionMajor, versionMinor); + if (!("4.0".equals(version))) { + throw new SquashFsException( + String.format("Unknown version %s found", version)); + } + + rootInodeRef = buffer.getLong(); + bytesUsed = buffer.getLong(); + idTableStart = buffer.getLong(); + 
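// The remaining fields are absolute file offsets of the on-disk tables; + // TABLE_NOT_PRESENT (all bits set) indicates that a table is absent. +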
xattrIdTableStart = buffer.getLong(); + inodeTableStart = buffer.getLong(); + directoryTableStart = buffer.getLong(); + fragmentTableStart = buffer.getLong(); + exportTableStart = buffer.getLong(); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("superblock: {%n")); + int width = 25; + dumpBin(buf, width, "inodeCount", inodeCount, DECIMAL, UNSIGNED); + dumpBin(buf, width, "modificationTime", modificationTime, DECIMAL, + UNIX_TIMESTAMP, UNSIGNED); + dumpBin(buf, width, "blockSize", blockSize, DECIMAL, UNSIGNED); + dumpBin(buf, width, "fragmentEntryCount", fragmentEntryCount, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "compressionId", compressionId.value(), BINARY, + UNSIGNED); + dumpBin(buf, width, "compressionId (decoded)", compressionId.toString()); + dumpBin(buf, width, "blockLog", blockLog, DECIMAL, UNSIGNED); + dumpBin(buf, width, "blockSize (calculated)", (int) 1 << blockLog, DECIMAL, + UNSIGNED); + dumpBin(buf, width, "flags", flags, BINARY, UNSIGNED); + dumpBin(buf, width, "flags (decoded)", + SuperBlockFlag.flagsPresent(flags).toString()); + dumpBin(buf, width, "idCount", idCount, DECIMAL, UNSIGNED); + dumpBin(buf, width, "versionMajor", versionMajor, DECIMAL, UNSIGNED); + dumpBin(buf, width, "versionMinor", versionMinor, DECIMAL, UNSIGNED); + dumpBin(buf, width, "version (decoded)", + String.format("%d.%d", versionMajor, versionMinor)); + dumpBin(buf, width, "rootInodeRef", rootInodeRef, DECIMAL, UNSIGNED); + dumpBin(buf, width, "rootInodeRef (decoded)", + new INodeRef(rootInodeRef).toString()); + dumpBin(buf, width, "bytesUsed", bytesUsed, DECIMAL, UNSIGNED); + dumpBin(buf, width, "idTableStart", idTableStart, DECIMAL); + dumpBin(buf, width, "xattrIdTableStart", xattrIdTableStart, DECIMAL); + dumpBin(buf, width, "inodeTableStart", inodeTableStart, DECIMAL); + dumpBin(buf, width, "directoryTableStart", directoryTableStart, DECIMAL); + dumpBin(buf, width, "fragmentTableStart", fragmentTableStart, DECIMAL); + dumpBin(buf, width, "exportTableStart", exportTableStart, DECIMAL); + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlockFlag.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlockFlag.java new file mode 100644 index 00000000000..af521a10edc --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/superblock/SuperBlockFlag.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import java.util.Collection; +import java.util.EnumSet; + +public enum SuperBlockFlag { + UNCOMPRESSED_INODES(0), + UNCOMPRESSED_DATA(1), + CHECK(2), + UNCOMPRESSED_FRAGMENTS(3), + NO_FRAGMENTS(4), + ALWAYS_FRAGMENTS(5), + DUPLICATES(6), + EXPORTABLE(7), + UNCOMPRESSED_XATTRS(8), + NO_XATTRS(9), + COMPRESSOR_OPTIONS(10), + UNCOMPRESSED_IDS(11); + + private final short mask; + + private SuperBlockFlag(int bit) { + mask = (short) (1 << bit); + } + + public static EnumSet<SuperBlockFlag> flagsPresent(short value) { + EnumSet<SuperBlockFlag> values = EnumSet.noneOf(SuperBlockFlag.class); + for (SuperBlockFlag flag : values()) { + if (flag.isSet(value)) { + values.add(flag); + } + } + return values; + } + + public static short flagsFor(SuperBlockFlag... flags) { + short value = 0; + for (SuperBlockFlag flag : flags) { + value |= flag.mask; + } + return value; + } + + public static short flagsFor(Collection<SuperBlockFlag> flags) { + short value = 0; + for (SuperBlockFlag flag : flags) { + value |= flag.mask; + } + return value; + } + + public short mask() { + return mask; + } + + public boolean isSet(short value) { + return (value & mask) != 0; + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/ExportTable.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/ExportTable.java new file mode 100644 index 00000000000..05f0834d2d0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/ExportTable.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlock; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class ExportTable { + + public static final int EXPORT_TABLE_RECORD_LENGTH = 8; + public static final int BYTES_PER_TABLE_ENTRY = 8; + public static final int ENTRIES_PER_BLOCK = + MetadataBlock.MAX_SIZE / BYTES_PER_TABLE_ENTRY; + + private static final long[] EMPTY = new long[0]; + + MetadataBlockReader metaBlockReader; + + boolean available = false; + int tag = -1; + int inodeCount = 0; + long[] tableRef = EMPTY; + + private static int numTables(int inodeCount) { + return (inodeCount / ENTRIES_PER_BLOCK) + ( + ((inodeCount % ENTRIES_PER_BLOCK) == 0) ? 0 : 1); + } + + public static ExportTable read(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + ExportTable table = new ExportTable(); + table.readData(tag, tableReader, metaBlockReader); + return table; + } + + public int getInodeCount() { + return inodeCount; + } + + public boolean isAvailable() { + return available; + } + + public INodeRef getInodeRef(int inode) throws IOException, SquashFsException { + return new INodeRef(getInodeRefRaw(inode)); + } + + public long getInodeRefRaw(int inode) throws IOException, SquashFsException { + if (inode < 1 || inode > inodeCount) { + throw new SquashFsException(String.format("No such inode %d", inode)); + } + + int nInode = (inode - 1); + int blockNum = nInode / ENTRIES_PER_BLOCK; + short offset = (short) (BYTES_PER_TABLE_ENTRY * (nInode - (blockNum + * ENTRIES_PER_BLOCK))); + + return metaBlockReader.rawReader(tag, tableRef[blockNum], offset) + .readLong(); + } + + public void readData(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + this.tag = tag; + + SuperBlock sb = tableReader.getSuperBlock(); + if (!sb.hasFlag(SuperBlockFlag.EXPORTABLE)) { + available = false; + inodeCount = 0; + tableRef = EMPTY; + this.metaBlockReader = null; + return; + } + + inodeCount = sb.getInodeCount(); + int tableCount = numTables(inodeCount); + tableRef = new long[tableCount]; + + ByteBuffer tableData = tableReader.read(sb.getExportTableStart(), + tableCount * EXPORT_TABLE_RECORD_LENGTH); + for (int i = 0; i < tableCount; i++) { + tableRef[i] = tableData.getLong(); + } + this.metaBlockReader = metaBlockReader; + available = true; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("export-table: {%n")); + int width = 18; + dumpBin(buf, width, "available", available ? 
"true" : "false"); + dumpBin(buf, width, "inodeCount", inodeCount, DECIMAL, UNSIGNED); + dumpBin(buf, width, "tableRefs", tableRef.length, DECIMAL); + for (int i = 0; i < tableRef.length; i++) { + dumpBin(buf, width, String.format("tableRef[%d]", i), tableRef[i], + DECIMAL, UNSIGNED); + } + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FileTableReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FileTableReader.java new file mode 100644 index 00000000000..bc38344ca41 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FileTableReader.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +public class FileTableReader implements TableReader { + + private final RandomAccessFile raf; + private final SuperBlock sb; + private final boolean shouldClose; + + public FileTableReader(File file) throws IOException, SquashFsException { + this.raf = new RandomAccessFile(file, "r"); + this.sb = SuperBlock.read(raf); + this.shouldClose = true; + } + + public FileTableReader(RandomAccessFile raf, SuperBlock sb, + boolean shouldClose) + throws SquashFsException, IOException { + this.raf = raf; + this.sb = sb; + this.shouldClose = shouldClose; + } + + @Override + public SuperBlock getSuperBlock() { + return sb; + } + + @Override + public ByteBuffer read(long fileOffset, int length) throws IOException { + long prevPosition = raf.getFilePointer(); + try { + raf.seek(fileOffset); + byte[] buf = new byte[length]; + raf.readFully(buf); + return ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN); + } finally { + raf.seek(prevPosition); + } + } + + @Override + public void close() throws IOException { + if (shouldClose) { + raf.close(); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTable.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTable.java new file mode 100644 index 00000000000..ec12299c216 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTable.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlock; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class FragmentTable { + + public static final int FRAGMENT_TABLE_RECORD_LENGTH = 8; + public static final int BYTES_PER_TABLE_ENTRY = 16; + public static final int ENTRIES_PER_BLOCK = + MetadataBlock.MAX_SIZE / BYTES_PER_TABLE_ENTRY; + + private static final long[] EMPTY = new long[0]; + + MetadataBlockReader metaBlockReader; + + boolean available = false; + int tag = -1; + int fragmentCount = 0; + long[] tableRef = EMPTY; + + private static int numTables(int inodeCount) { + return (inodeCount / ENTRIES_PER_BLOCK) + ( + ((inodeCount % ENTRIES_PER_BLOCK) == 0) ? 
0 : 1); + } + + ; + + public static FragmentTable read(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + FragmentTable table = new FragmentTable(); + table.readData(tag, tableReader, metaBlockReader); + return table; + } + + public int getFragmentCount() { + return fragmentCount; + } + + public boolean isAvailable() { + return available; + } + + public FragmentTableEntry getEntry(int id) + throws IOException, SquashFsException { + if (id < 0 || id >= fragmentCount) { + throw new SquashFsException(String.format("No such fragment %d", id)); + } + + int blockNum = id / ENTRIES_PER_BLOCK; + short offset = + (short) (BYTES_PER_TABLE_ENTRY * (id - (blockNum * ENTRIES_PER_BLOCK))); + + MetadataReader reader = + metaBlockReader.rawReader(tag, tableRef[blockNum], offset); + + long start = reader.readLong(); + int size = reader.readInt(); + reader.readInt(); // unused + + return new FragmentTableEntry(start, size); + } + + public void readData(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + this.tag = tag; + SuperBlock sb = tableReader.getSuperBlock(); + if (sb.hasFlag(SuperBlockFlag.NO_FRAGMENTS)) { + available = false; + fragmentCount = 0; + tableRef = EMPTY; + this.metaBlockReader = null; + return; + } + + fragmentCount = sb.getFragmentEntryCount(); + int tableCount = numTables(fragmentCount); + tableRef = new long[tableCount]; + + ByteBuffer tableData = tableReader.read(sb.getFragmentTableStart(), + tableCount * FRAGMENT_TABLE_RECORD_LENGTH); + for (int i = 0; i < tableCount; i++) { + tableRef[i] = tableData.getLong(); + } + this.metaBlockReader = metaBlockReader; + available = true; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("fragment-table: {%n")); + int width = 18; + dumpBin(buf, width, "tag", tag, DECIMAL, UNSIGNED); + dumpBin(buf, width, "available", available ? "true" : "false"); + dumpBin(buf, width, "fragmentCount", fragmentCount, DECIMAL, UNSIGNED); + dumpBin(buf, width, "tableRefs", tableRef.length, DECIMAL); + for (int i = 0; i < tableRef.length; i++) { + dumpBin(buf, width, String.format("tableRef[%d]", i), tableRef[i], + DECIMAL, UNSIGNED); + } + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTableEntry.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTableEntry.java new file mode 100644 index 00000000000..68c8644fcd6 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/FragmentTableEntry.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class FragmentTableEntry { + private final long start; + private final int size; + + public FragmentTableEntry(long start, int size, boolean compressed) { + this(start, compressed ? size : (size | 0x1000000)); + } + + public FragmentTableEntry(long start, int size) { + this.start = start; + this.size = size; + } + + public long getStart() { + return start; + } + + public int getSize() { + return size; + } + + public boolean isCompressed() { + return (size & 0x1000000) == 0; + } + + public int getDiskSize() { + return (size & 0xFFFFF); + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("fragment-table-entry {%n")); + int width = 10; + dumpBin(buf, width, "start", start, DECIMAL, UNSIGNED); + dumpBin(buf, width, "compressed", isCompressed() ? "true" : "false"); + dumpBin(buf, width, "diskSize", getDiskSize(), DECIMAL, UNSIGNED); + buf.append("}"); + return buf.toString(); + } +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTable.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTable.java new file mode 100644 index 00000000000..5c4366d955a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTable.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlock; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class IdTable { + + public static final int ID_TABLE_RECORD_LENGTH = 8; + public static final int BYTES_PER_TABLE_ENTRY = 4; + public static final int ENTRIES_PER_BLOCK = + MetadataBlock.MAX_SIZE / BYTES_PER_TABLE_ENTRY; + private static final int[] EMPTY = new int[0]; + final SortedMap<Long, Short> reverseMappings = new TreeMap<>(); + int[] mappings = EMPTY; + + private static int numTables(int idCount) { + return (idCount / ENTRIES_PER_BLOCK) + ( + ((idCount % ENTRIES_PER_BLOCK) == 0) ? 0 : 1); + } + + public static IdTable read(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + IdTable table = new IdTable(); + table.readData(tag, tableReader, metaBlockReader); + return table; + } + + public int getIdCount() { + return mappings.length; + } + + public int idFromIndex(short index) throws SquashFsException { + int iIndex = (index & 0xffff); + if (iIndex >= mappings.length) { + throw new SquashFsException( + String.format("No UID/GID could be found for id ref %d", iIndex)); + } + return mappings[iIndex]; + } + + public short indexFromId(int id) throws SquashFsException { + Long key = Long.valueOf(id & 0xFFFFFFFFL); + Short value = reverseMappings.get(key); + if (value == null) { + throw new SquashFsException( + String.format("No id ref could be found for UID/GID %d", key)); + } + return value.shortValue(); + } + + public void readData(int tag, TableReader tableReader, + MetadataBlockReader metaBlockReader) + throws IOException, SquashFsException { + + reverseMappings.clear(); + + SuperBlock sb = tableReader.getSuperBlock(); + int idCount = sb.getIdCount() & 0xffff; + int tableCount = numTables(idCount); + long[] tableRef = new long[tableCount]; + + mappings = new int[idCount]; + + ByteBuffer tableData = tableReader + .read(sb.getIdTableStart(), tableCount * ID_TABLE_RECORD_LENGTH); + for (int i = 0; i < tableCount; i++) { + tableRef[i] = tableData.getLong(); + } + + MetadataReader reader = null; + int table = 0; + for (int i = 0; i < idCount; i++) { + if ((i % ENTRIES_PER_BLOCK) == 0) { + reader = metaBlockReader.rawReader(tag, tableRef[table++], (short) 0); + } + int id = reader.readInt(); + mappings[i] = id; + Long key = Long.valueOf(id & 0xFFFFFFFFL); + reverseMappings.put(key, Short.valueOf((short) (i & 0xffff))); + } + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("id-table: {%n")); + int width = 17; + dumpBin(buf, width, "count", mappings.length, DECIMAL, UNSIGNED); + for (int i = 0; i < mappings.length; i++) { + dumpBin(buf, width, String.format("mappings[%d]", i), mappings[i], + DECIMAL, UNSIGNED); + } + buf.append("}"); + return buf.toString(); + } + +} diff --git
a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTableGenerator.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTableGenerator.java new file mode 100644 index 00000000000..b32117bb74d --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/IdTableGenerator.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.DECIMAL; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.DumpOptions.UNSIGNED; +import static org.apache.hadoop.runc.squashfs.util.BinUtils.dumpBin; + +public class IdTableGenerator { + + private final List<Integer> forward = new ArrayList<>(); + private final SortedMap<Long, Short> reverse = new TreeMap<>(); + + public short addUidGid(int value) { + Long key = Long.valueOf(value & 0xffffffffL); + Short result = reverse.get(key); + if (result != null) { + return result.shortValue(); + } + forward.add(value); + result = Short.valueOf((short) (forward.size() - 1)); + reverse.put(key, result); + return result.shortValue(); + } + + public int getIdCount() { + return forward.size(); + } + + public List<MetadataBlockRef> save(MetadataWriter writer) throws IOException { + + List<MetadataBlockRef> idRefs = new ArrayList<>(); + + int index = 0; + for (int i = 0; i < forward.size(); i++) { + if (index % IdTable.ENTRIES_PER_BLOCK == 0) { + idRefs.add(writer.getCurrentReference()); + } + int value = forward.get(i).intValue(); + writer.writeInt(value); + index++; + } + + return idRefs; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append(String.format("id-table-generator: {%n")); + int width = 17; + dumpBin(buf, width, "count", forward.size(), DECIMAL, UNSIGNED); + for (int i = 0; i < forward.size(); i++) { + dumpBin(buf, width, String.format("mappings[%d]", i), forward.get(i), + DECIMAL, UNSIGNED); + } + buf.append("}"); + return buf.toString(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MappedFileTableReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MappedFileTableReader.java new file mode 100644 index 00000000000..6c3ca6cc139 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MappedFileTableReader.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF)
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.io.ByteBufferDataInput; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +public class MappedFileTableReader implements TableReader { + + private final MappedFile mmap; + private final SuperBlock sb; + + public MappedFileTableReader(MappedFile mmap) + throws IOException, SquashFsException { + this.mmap = mmap; + this.sb = SuperBlock.read(new ByteBufferDataInput(mmap.from(0L))); + } + + public MappedFileTableReader(MappedFile mmap, SuperBlock sb) { + + this.mmap = mmap; + this.sb = sb; + } + + @Override + public SuperBlock getSuperBlock() { + return sb; + } + + @Override + public ByteBuffer read(long fileOffset, int length) throws EOFException { + ByteBuffer src = mmap.from(fileOffset); + if (src.remaining() < length) { + throw new EOFException(); + } + return src.order(ByteOrder.LITTLE_ENDIAN); + } + + @Override + public void close() { + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MemoryTableReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MemoryTableReader.java new file mode 100644 index 00000000000..b3bafc0960f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/MemoryTableReader.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +public class MemoryTableReader implements TableReader { + + private final SuperBlock sb; + private final byte[] data; + private final int offset; + private final int length; + + public MemoryTableReader(SuperBlock sb, byte[] data) { + this(sb, data, 0, data.length); + } + + public MemoryTableReader(SuperBlock sb, byte[] data, int offset, int length) { + this.sb = sb; + this.data = data; + this.offset = offset; + this.length = length; + } + + @Override + public SuperBlock getSuperBlock() { + return sb; + } + + @Override + public ByteBuffer read(long fileOffset, int length) throws IOException { + if ((fileOffset + length) > (long) this.length) { + throw new EOFException(String.format( + "Read past end of table (offset = %d, length = %d, available = %d)", + fileOffset, length, this.length - fileOffset)); + } + int localOffset = ((int) fileOffset) + offset; + return ByteBuffer.wrap(data, localOffset, length) + .order(ByteOrder.LITTLE_ENDIAN); + } + + @Override + public void close() { + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/TableReader.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/TableReader.java new file mode 100644 index 00000000000..97a60e1f485 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/table/TableReader.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.ByteBuffer; + +public interface TableReader extends Closeable { + + ByteBuffer read(long fileOffset, int length) throws IOException; + + SuperBlock getSuperBlock(); + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/BinUtils.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/BinUtils.java new file mode 100644 index 00000000000..f8a3acbbc4a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/BinUtils.java @@ -0,0 +1,239 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.util; + +import java.text.SimpleDateFormat; +import java.util.Date; + +public class BinUtils { + + private static final String NL = String.format("%n"); + + public enum DumpOptions { + UNSIGNED, + DECIMAL, + OCTAL, + BINARY, + UNIX_TIMESTAMP; + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + byte value, + DumpOptions... options) { + name(buf, width, name); + value(buf, value, 8, options); + buf.append(NL); + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + short value, + DumpOptions... options) { + name(buf, width, name); + value(buf, value, 16, options); + buf.append(NL); + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + int value, + DumpOptions... options) { + name(buf, width, name); + value(buf, value, 32, options); + buf.append(NL); + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + long value, + DumpOptions... options) { + name(buf, width, name); + value(buf, value, 64, options); + buf.append(NL); + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + String value) { + name(buf, width, name); + buf.append(" "); + buf.append(value); + buf.append(NL); + } + + public static void dumpBin( + StringBuilder buf, + int width, + String name, + byte[] data, + int offset, + int length, + int bytesPerLine, int bytesPerGroup) { + name(buf, width, name); + String padding = spaces(width + 7); + for (int i = 0; i < length; i += bytesPerLine) { + if (i > 0) { + buf.append(padding); + } + buf.append(String.format(" %08x ", i)); + for (int j = 0; j < bytesPerLine; j++) { + if (j > 0 && j % bytesPerGroup == 0) { + buf.append(" "); + } + if (i + j < length) { + byte b = data[offset + i + j]; + buf.append(String.format("%02x", b & 0xFF)); + } else { + // spaces + buf.append(" "); + } + } + buf.append(" "); + for (int j = 0; j < bytesPerLine; j++) { + if (i + j < length) { + char c = (char) (data[offset + i + j] & 0xff); + if (c >= 32 && c <= 127) { + buf.append(c); + } else { + buf.append("."); + } + } else { + // space + buf.append(" "); + } + } + buf.append(NL); + } + + } + + private static String spaces(int length) { + String pattern = String.format("%%%ds", length); + return String.format(pattern, ""); + } + + private static void name(StringBuilder buf, int width, String name) { + String pattern = String.format("%%%ds: ", width < 1 ? 1 : width); + buf.append(String.format(pattern, name)); + } + + private static boolean has(DumpOptions value, DumpOptions[] options) { + for (DumpOptions option : options) { + if (option == value) { + return true; + } + } + return false; + } + + private static void value( + StringBuilder buf, + Number num, + int bits, + DumpOptions... 
options) { + long raw = num.longValue(); + boolean unsigned = has(DumpOptions.UNSIGNED, options); + boolean decimal = has(DumpOptions.DECIMAL, options); + boolean binary = has(DumpOptions.BINARY, options); + boolean octal = has(DumpOptions.OCTAL, options); + boolean unixTimestamp = has(DumpOptions.UNIX_TIMESTAMP, options); + + if (bits == 8) { + buf.append( + String.format(" %02x", unsigned ? (raw & 0xFFL) : raw)); + if (decimal) { + buf.append(String.format(" %d", unsigned ? (raw & 0xFFL) : raw)); + } + if (octal) { + buf.append(String.format(" 0%o", unsigned ? (raw & 0xFFL) : raw)); + } + if (binary) { + String value = "00000000" + Long.toBinaryString(raw & 0xFFL); + buf.append(String.format(" %s", value.substring(value.length() - 8))); + } + } else if (bits == 16) { + buf.append( + String.format(" %04x", unsigned ? (raw & 0xFFFFL) : raw)); + if (decimal) { + buf.append(String.format(" %d", unsigned ? (raw & 0xFFFFL) : raw)); + } + if (octal) { + buf.append(String.format(" 0%o", unsigned ? (raw & 0xFFFFL) : raw)); + } + if (binary) { + String value = "0000000000000000" + Long.toBinaryString(raw & 0xFFFFL); + buf.append(String.format(" %s", value.substring(value.length() - 16))); + } + } else if (bits == 32) { + buf.append( + String.format(" %08x", unsigned ? (raw & 0xFFFFFFFFL) : raw)); + if (decimal) { + buf.append(String.format(" %d", unsigned ? (raw & 0xFFFFFFFFL) : raw)); + } + if (octal) { + buf.append( + String.format(" 0%o", unsigned ? (raw & 0xFFFFFFFFL) : raw)); + } + if (binary) { + String value = "00000000000000000000000000000000" + Long + .toBinaryString(raw & 0xFFFFFFFFL); + buf.append(String.format(" %s", value.substring(value.length() - 32))); + } + if (unixTimestamp) { + buf.append(" "); + Date date = new Date(raw * 1000L); + buf.append(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(date)); + } + } else if (bits == 64) { + buf.append( + String.format("%016x", unsigned ? (raw & 0xFFFFFFFFFFFFFFFFL) : raw)); + if (decimal) { + buf.append(String + .format(" %d", unsigned ? (raw & 0xFFFFFFFFFFFFFFFFL) : raw)); + } + if (octal) { + buf.append(String + .format(" 0%o", unsigned ? (raw & 0xFFFFFFFFFFFFFFFFL) : raw)); + } + if (binary) { + String value = + "0000000000000000000000000000000000000000000000000000000000000000" + + Long.toBinaryString(raw); + buf.append(String.format(" %s", value.substring(value.length() - 64))); + } + if (unixTimestamp) { + buf.append(" "); + Date date = new Date(raw); + buf.append(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(date)); + } + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/SquashDebug.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/SquashDebug.java new file mode 100644 index 00000000000..4b35342ec6a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/squashfs/util/SquashDebug.java @@ -0,0 +1,253 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.util; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.PosixParser; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.runc.squashfs.MappedSquashFsReader; +import org.apache.hadoop.runc.squashfs.SquashFsReader; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.channels.FileChannel; + +public class SquashDebug extends Configured implements Tool { + + private SquashFsReader createReader( + File file, boolean mapped) throws IOException { + if (mapped) { + System.out.println("Using memory-mapped reader"); + System.out.println(); + try (RandomAccessFile raf = new RandomAccessFile(file, "r")) { + try (FileChannel channel = raf.getChannel()) { + MappedFile mmap = MappedFile.mmap(channel, + MappedSquashFsReader.PREFERRED_MAP_SIZE, + MappedSquashFsReader.PREFERRED_WINDOW_SIZE); + + return SquashFsReader.fromMappedFile(0, mmap); + } + } + } else { + System.out.println("Using file reader"); + System.out.println(); + return SquashFsReader.fromFile(0, file); + } + } + + private void dumpTree(SquashFsReader reader, boolean readFiles) + throws IOException { + System.out.println("Directory tree:"); + System.out.println(); + DirectoryINode root = reader.getRootInode(); + dumpSubtree(reader, true, "/", root, readFiles); + } + + private void dumpFileContent(SquashFsReader reader, FileINode inode) + throws IOException { + long fileSize = inode.getFileSize(); + long readSize; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(inode, bos); + byte[] content = bos.toByteArray(); + readSize = content.length; + } + System.out.printf(" %d bytes, %d read%n", fileSize, readSize); + } + + private void dumpSubtree(SquashFsReader reader, boolean root, + String path, DirectoryINode inode, + boolean readFiles) + throws IOException { + + if (root) { + System.out.printf("/ (%d)%n", inode.getInodeNumber()); + } + + for (DirectoryEntry entry : reader.getChildren(inode)) { + INode childInode = reader.findInodeByDirectoryEntry(entry); + System.out.printf("%s%s%s (%d)%n", + path, entry.getNameAsString(), + childInode.getInodeType().directory() ? 
"/" : "", + childInode.getInodeNumber()); + + if (readFiles && childInode.getInodeType().file()) { + dumpFileContent(reader, (FileINode) childInode); + } + } + + for (DirectoryEntry entry : reader.getChildren(inode)) { + INode childInode = reader.findInodeByDirectoryEntry(entry); + if (childInode.getInodeType().directory()) { + dumpSubtree(reader, false, String.format("%s%s/", + path, entry.getNameAsString()), (DirectoryINode) childInode, + readFiles); + } + } + } + + private void dumpMetadataBlock( + SquashFsReader reader, long metaFileOffset, int metaBlockOffset) + throws IOException { + + System.out.println(); + System.out.printf("Dumping block at file offset %d, block offset %d%n", + metaFileOffset, metaBlockOffset); + System.out.println(); + + MetadataReader mr = reader.getMetaReader() + .rawReader(0, metaFileOffset, (short) metaBlockOffset); + mr.isEof(); // make sure block is read + byte[] buf = new byte[mr.available()]; + mr.readFully(buf); + + StringBuilder sb = new StringBuilder(); + BinUtils.dumpBin(sb, 0, "data", buf, 0, buf.length, 32, 2); + System.out.println(sb.toString()); + } + + @Override + public int run(String[] argv) throws Exception { + Options options = options(); + CommandLineParser parser = new PosixParser(); + CommandLine cmd; + try { + cmd = parser.parse(options, argv); + } catch (ParseException e) { + System.out.println( + "Error parsing command-line options: " + e.getMessage()); + printUsage(); + return -1; + } + + if (cmd.hasOption("h")) { + printUsage(); + return -1; + } + + boolean mapped = false; + boolean tree = false; + boolean files = false; + boolean metadata = false; + long metaFileOffset = 0L; + int metaBlockOffset = 0; + String squashFs = null; + + for (Option o : cmd.getOptions()) { + switch (o.getOpt()) { + case "p": + mapped = true; + break; + case "t": + tree = true; + break; + case "f": + files = true; + break; + case "m": + metadata = true; + break; + default: + throw new UnsupportedOperationException( + "Unknown option: " + o.getOpt()); + } + } + + String[] rem = cmd.getArgs(); + + if (metadata) { + if (rem.length != 3) { + printUsage(); + return -1; + } + metaFileOffset = Long.valueOf(rem[1]); + metaBlockOffset = Integer.valueOf(rem[2]); + } else { + if (rem.length != 1) { + printUsage(); + return -1; + } + } + + squashFs = rem[0]; + + try (SquashFsReader reader = createReader(new File(squashFs), mapped)) { + System.out.println(reader.getSuperBlock()); + System.out.println(); + System.out.println(reader.getIdTable()); + System.out.println(); + System.out.println(reader.getFragmentTable()); + System.out.println(); + System.out.println(reader.getExportTable()); + System.out.println(); + + if (tree || files) { + dumpTree(reader, files); + } + + if (metadata) { + dumpMetadataBlock(reader, metaFileOffset, metaBlockOffset); + } + } + return 0; + } + + protected void printUsage() { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp( + "squashdebug [OPTIONS] [file-offset] [block-offset]", + new Options()); + formatter.setSyntaxPrefix(""); + formatter.printHelp("Options", options()); + ToolRunner.printGenericCommandUsage(System.out); + } + + static Options options() { + Options options = new Options(); + options.addOption("p", "mmap", false, "Use mmap() for I/O"); + options.addOption("t", "tree", false, "Dump tree"); + options.addOption("f", "files", false, "Read all files (implies --tree)"); + options.addOption( + "m", "metadata", false, + "Dump metadata (requires file-offset and block-offset)"); + options.addOption("h", "help", 
false, "Print usage"); + return options; + } + + public static void main(String[] argv) throws Exception { + int ret = ToolRunner.run(new SquashDebug(), argv); + System.exit(ret); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/tools/ImportDockerImage.java b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/tools/ImportDockerImage.java new file mode 100644 index 00000000000..b8b1d5e8fcc --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/main/java/org/apache/hadoop/runc/tools/ImportDockerImage.java @@ -0,0 +1,579 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.tools; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.PosixParser; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.runc.docker.DockerClient; +import org.apache.hadoop.runc.docker.DockerContext; +import org.apache.hadoop.runc.docker.DockerCoordinates; +import org.apache.hadoop.runc.docker.DockerException; +import org.apache.hadoop.runc.docker.model.BlobV2; +import org.apache.hadoop.runc.docker.model.ManifestListV2; +import org.apache.hadoop.runc.docker.model.ManifestRefV2; +import org.apache.hadoop.runc.docker.model.ManifestV2; +import org.apache.hadoop.runc.squashfs.SquashFsConverter; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.regex.Pattern; + +public class ImportDockerImage 
extends Configured implements Tool { + + private static final String PUBLIC_DOCKER_REPO = + "registry.hub.docker.com"; + + private static final Logger LOG + = LoggerFactory.getLogger(ImportDockerImage.class); + + public static final String IMPORT_PREFIX = + YarnConfiguration.RUNC_CONTAINER_RUNTIME_PREFIX + "import."; + + public static final String DEFAULT_DOCKER_REGISTRY_KEY = + IMPORT_PREFIX + "default-docker-registry"; + + public static final String MK_RUNC_IMPORT_TYPE = "runc.import.type"; + public static final String MK_RUNC_IMPORT_SOURCE = "runc.import.source"; + public static final String MK_RUNC_IMPORT_TIME = "runc.import.time"; + public static final String MK_RUNC_MANIFEST = "runc.manifest"; + + public static final String IT_DOCKER = "docker"; + + public static final String DEFAULT_NS = "library"; + public static final String DEFAULT_TAG = "latest"; + + private static final Pattern VALID_NS_PATTERN = + Pattern.compile("^[A-Za-z0-9]+$"); + + private static final Pattern VALID_NAME_PATTERN = + Pattern.compile("^[~^+\\-._A-Za-z0-9]+$"); + + private Configuration conf; + private String defaultRegistry; + private FileSystem fs; + private FileContext fc; + private Path repoPath; + private Path lockPath; + private Path metaPath; + private Path configPath; + private Path layerPath; + private Path manifestPath; + private File tmpDir; + + private String[] imageParts(String coordinates) { + String namespace; + String nameAndTag; + String name; + String tag; + + String[] parts = coordinates.split("/", -1); + if (parts.length == 2) { + namespace = parts[0]; + nameAndTag = parts[1]; + } else if (parts.length == 1) { + namespace = DEFAULT_NS; + nameAndTag = parts[0]; + } else { + throw new IllegalArgumentException( + "Invalid image coordinates: " + coordinates); + } + if (!VALID_NS_PATTERN.matcher(namespace).matches()) { + throw new IllegalArgumentException( + "Invalid image namespace: " + namespace); + } + + String[] tagParts = nameAndTag.split(":", -1); + if (tagParts.length == 2) { + name = tagParts[0]; + tag = tagParts[1]; + } else if (tagParts.length == 1) { + name = tagParts[0]; + tag = DEFAULT_TAG; + } else { + throw new IllegalArgumentException( + "Invalid image name: " + nameAndTag); + } + + if (!VALID_NAME_PATTERN.matcher(name).matches()) { + throw new IllegalArgumentException("Invalid image name: " + name); + } + + if (!VALID_NAME_PATTERN.matcher(tag).matches()) { + throw new IllegalArgumentException("Invalid image tag: " + tag); + } + + return new String[] { namespace, name, tag }; + } + + private void importDockerImage(String source, String destCoordinates) + throws IOException, URISyntaxException, DockerException { + + String[] imageCoordinates = imageParts(destCoordinates); + + byte[] buf = new byte[32768]; + + DockerCoordinates coord = new DockerCoordinates(defaultRegistry, source); + + LOG.debug("Using Docker coordinates {}", coord); + + Instant importTime = Instant.now(); + + try (DockerClient client = new DockerClient()) { + LOG.info("Fetching image '{}' from Docker repository at {}", + coord.getImage(), coord.getBaseUrl()); + + DockerContext context = client.createContext(coord.getBaseUrl()); + + ManifestListV2 manifests = client.listManifests( + context, coord.getImageName(), coord.getImageRef()); + + for (ManifestRefV2 manifest : manifests.getManifests()) { + LOG.debug("Found manifest ref: {}", manifest); + } + + ManifestRefV2 mref = + client.getManifestChooser().chooseManifest(manifests); + + if (mref == null) { + throw new DockerException("No matching manifest found"); + }
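+ + // The chosen manifest drives the rest of the import: its config and + // layer blobs are downloaded, each layer is converted to a squashfs + // image, the resulting artifacts are uploaded to the repository, and + // the image metadata is recorded in a properties file.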
+
+      LOG.debug("Choosing manifest: {}", mref);
+
+      byte[] manifestData = client.readManifest(
+          context, coord.getImageName(), mref.getDigest());
+
+      // write manifest
+      String manifestHash = mref.getDigest().replaceAll("^sha256:", "");
+      File manifestDir = new File(tmpDir, "manifests");
+      manifestDir.mkdirs();
+      File manifestFile = new File(manifestDir, manifestHash);
+      try (FileOutputStream fos = new FileOutputStream(manifestFile)) {
+        fos.write(manifestData);
+      }
+
+      ManifestV2 manifest = client.parseManifest(manifestData);
+
+      String configDigest = manifest.getConfig().getDigest();
+
+      byte[] config = client.readConfig(
+          context, coord.getImageName(), configDigest);
+
+      // write config
+      String configHash = configDigest.replaceAll("^sha256:", "");
+
+      File configDir = new File(tmpDir, "config");
+      configDir.mkdirs();
+      File configFile = new File(configDir, configHash);
+      try (FileOutputStream fos = new FileOutputStream(configFile)) {
+        fos.write(config);
+      }
+
+      // download layers
+      File layerDir = new File(tmpDir, "layers");
+      layerDir.mkdirs();
+
+      List<String> layersDownloaded = new ArrayList<>();
+
+      int count = manifest.getLayers().size();
+      int current = 0;
+      for (BlobV2 blob : manifest.getLayers()) {
+        current++;
+        String digest = blob.getDigest();
+        String hash = digest.replaceAll("^sha256:", "");
+        String hashDir = hash.substring(0, 2);
+
+        // check for sqsh and tar.gz files
+        Path tgzPath = new Path(layerPath, hashDir + "/" + hash + ".tar.gz");
+        Path sqshPath = new Path(layerPath, hashDir + "/" + hash + ".sqsh");
+        if (fs.exists(tgzPath) && fs.exists(sqshPath)) {
+          LOG.info("Skipping up-to-date layer {} ({} of {})", digest, current,
+              count);
+          continue;
+        }
+
+        layersDownloaded.add(digest);
+
+        LOG.info("Downloading layer {} ({} of {})", digest, current, count);
+        try (InputStream is = client
+            .download(context, coord.getImageName(), digest)) {
+          File outputFile = new File(layerDir, hash + ".tar.gz");
+          try (FileOutputStream os = new FileOutputStream(outputFile)) {
+            int c;
+            while ((c = is.read(buf, 0, buf.length)) >= 0) {
+              if (c > 0) {
+                os.write(buf, 0, c);
+              }
+            }
+          }
+        }
+      }
+
+      // convert layers
+      count = layersDownloaded.size();
+      current = 0;
+      for (String digest : layersDownloaded) {
+        current++;
+        LOG.info("Converting layer {} ({} of {})", digest, current, count);
+        String hash = digest.replaceAll("^sha256:", "");
+
+        File inputFile = new File(layerDir, hash + ".tar.gz");
+        File outputFile = new File(layerDir, hash + ".sqsh");
+        SquashFsConverter.convertToSquashFs(inputFile, outputFile);
+      }
+
+      // upload layers
+      current = 0;
+      for (String digest : layersDownloaded) {
+        current++;
+        LOG.info("Uploading layer {} ({} of {})", digest, current, count);
+        String hash = digest.replaceAll("^sha256:", "");
+
+        File tgzFile = new File(layerDir, hash + ".tar.gz");
+        File sqshFile = new File(layerDir, hash + ".sqsh");
+
+        Path layerHashPath = new Path(layerPath, hash.substring(0, 2));
+
+        Path tmpTgz = new Path(layerHashPath, "._TMP." + hash + ".tar.gz");
+        Path tmpSqsh = new Path(layerHashPath, "._TMP." + hash + ".sqsh");
+
+        Path tgz = new Path(layerHashPath, hash + ".tar.gz");
+        Path sqsh = new Path(layerHashPath, hash + ".sqsh");
+
+        uploadFile(tgzFile, tgz, tmpTgz);
+        uploadFile(sqshFile, sqsh, tmpSqsh);
+      }
+
+      // upload config if needed
+      Path configHashPath = new Path(configPath, configHash.substring(0, 2));
+      Path remoteConfigFile = new Path(configHashPath, configHash);
+      if (fs.exists(remoteConfigFile)) {
+        LOG.info("Skipping up-to-date config {}", configDigest);
+      } else {
+        LOG.info("Uploading config {}", configDigest);
+        Path remoteTmp = new Path(configHashPath, "._TMP." + configHash);
+        uploadFile(configFile, remoteConfigFile, remoteTmp);
+      }
+
+      // upload manifest if needed
+      Path manifestHashPath = new Path(
+          manifestPath, manifestHash.substring(0, 2));
+      Path remoteManifestFile = new Path(manifestHashPath, manifestHash);
+      if (fs.exists(remoteManifestFile)) {
+        LOG.info("Skipping up-to-date manifest {}", mref.getDigest());
+      } else {
+        LOG.info("Uploading manifest {}", mref.getDigest());
+        Path remoteTmp = new Path(manifestHashPath, "._TMP." + manifestHash);
+        uploadFile(manifestFile, remoteManifestFile, remoteTmp);
+      }
+
+      // create/update metadata properties file
+      File metaFile = new File(tmpDir, "meta.properties");
+      File metaFileUpdated = new File(tmpDir, "meta.properties.new");
+      Path nsPath = new Path(metaPath, imageCoordinates[0]);
+      Path metadataPath = new Path(
+          nsPath, imageCoordinates[1] + "@" + imageCoordinates[2] + ".properties");
+      Path metadataPathTmp = new Path(
+          nsPath, "._TMP." + imageCoordinates[1] + "@"
+          + imageCoordinates[2] + ".properties");
+
+      Properties metadata = new Properties();
+      if (fs.exists(metadataPath)) {
+        downloadFile(metadataPath, metaFile);
+        try (FileInputStream fis = new FileInputStream(metaFile)) {
+          metadata.load(fis);
+        }
+      }
+
+      metadata.setProperty(MK_RUNC_IMPORT_TYPE, IT_DOCKER);
+      metadata.setProperty(MK_RUNC_IMPORT_SOURCE, source);
+      metadata.setProperty(MK_RUNC_MANIFEST, mref.getDigest());
+      metadata.setProperty(MK_RUNC_IMPORT_TIME, importTime.toString());
+
+      try (FileOutputStream fos = new FileOutputStream(metaFileUpdated)) {
+        metadata.store(fos, null);
+      }
+
+      LOG.info("Writing metadata properties");
+      uploadFile(metaFileUpdated, metadataPath, metadataPathTmp);
+    }
+  }
+
+  private void downloadFile(Path remoteFile, File localFile)
+      throws IOException {
+
+    try (FSDataInputStream in = fs.open(remoteFile)) {
+      try (FileOutputStream out = new FileOutputStream(localFile)) {
+        IOUtils.copyBytes(in, out, 65536);
+      }
+    }
+  }
+
+  private void uploadFile(File localFile, Path remoteFile, Path remoteTmp)
+      throws IOException {
+    boolean success = false;
+    fs.mkdirs(remoteTmp.getParent());
+    fs.mkdirs(remoteFile.getParent());
+
+    // Write to a temporary name first, then rename into place so that
+    // readers never observe a partially written file.
+    try (InputStream in = new FileInputStream(localFile)) {
+      try (FSDataOutputStream out = fs.create(remoteTmp, (short) 10)) {
+        IOUtils.copyBytes(in, out, 65536);
+      }
+      fc.rename(remoteTmp, remoteFile, Rename.OVERWRITE);
+      success = true;
+    } finally {
+      if (!success) {
+        fs.delete(remoteTmp, false);
+      }
+    }
+  }
+
+  private FSDataOutputStream createLockFile(int attempts, int sleepTimeMs)
+      throws IOException {
+    try {
+      fs.mkdirs(repoPath);
+      FSDataOutputStream out = createLockFileWithRetries(
+          FsPermission.getFileDefault(), attempts, sleepTimeMs);
+      fs.deleteOnExit(lockPath);
+      out.writeBytes(InetAddress.getLocalHost().toString());
+      out.flush();
+      out.hflush();
+      return out;
+    } catch (RemoteException e) {
+      if (e.getClassName().contains("AlreadyBeingCreatedException")) {
+        // Another import already holds the lock; signal this with null.
+        return null;
+      } else {
+        throw e;
+      }
+    }
+  }
+
+  private void unlock(FSDataOutputStream lockStream, int attempts,
+      int sleepTimeMs) {
+    int attempt = 1;
+    do {
+      try {
+        IOUtils.closeStream(lockStream);
+        fs.delete(lockPath, false);
+        return;
+      } catch (IOException ioe) {
+        LOG.info("Failed to delete " + lockPath + ", try="
+            + attempt + " of " + attempts);
+        LOG.debug("Failed to delete " + lockPath, ioe);
+        try {
+          Thread.sleep(sleepTimeMs);
+        } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
+          LOG.warn("Interrupted while deleting lock file " + lockPath);
+          return;
+        }
+        // Bound the retries so a persistent delete failure cannot loop forever.
+        attempt++;
+      }
+    } while (attempt < attempts);
+  }
+
+  private FSDataOutputStream createLockFileWithRetries(
+      FsPermission defaultPerms, int attempts, int sleepTimeMs)
+      throws IOException {
+    IOException exception = null;
+    int attempt = 1;
+    do {
+      try {
+        return fs.create(
+            lockPath,
+            defaultPerms,
+            false,
+            fs.getConf().getInt(
+                CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+                CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
+            fs.getDefaultReplication(lockPath),
+            fs.getDefaultBlockSize(lockPath),
+            null);
+      } catch (IOException ioe) {
+        LOG.info("Failed to create lock file " + lockPath
+            + ", try=" + attempt + " of " + attempts);
+        LOG.debug("Failed to create lock file " + lockPath,
+            ioe);
+        try {
+          exception = ioe;
+          attempt++;
+          if (attempt < attempts) {
+            Thread.sleep(sleepTimeMs);
+          }
+        } catch (InterruptedException ie) {
+          throw (InterruptedIOException) new InterruptedIOException(
+              "Can't create lock file " + lockPath)
+              .initCause(ie);
+        }
+      }
+    } while (attempt < attempts);
+    throw exception;
+  }
+
+  public void cleanup() {
+    if (tmpDir != null) {
+      deleteRecursive(tmpDir);
+    }
+  }
+
+  private void deleteRecursive(File file) {
+    if (file.isDirectory()) {
+      for (File sub : file.listFiles()) {
+        deleteRecursive(sub);
+      }
+    }
+    file.delete();
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+    conf = new YarnConfiguration(getConf());
+
+    Options options = options();
+    CommandLineParser parser = new PosixParser();
+    CommandLine cmd;
+    try {
+      cmd = parser.parse(options, argv);
+    } catch (ParseException e) {
+      System.out.println(
+          "Error parsing command-line options: " + e.getMessage());
+      printUsage();
+      return -1;
+    }
+
+    if (cmd.hasOption("h")) {
+      printUsage();
+      return -1;
+    }
+
+    for (Option o : cmd.getOptions()) {
+      switch (o.getOpt()) {
+      case "r":
+        conf.set(DEFAULT_DOCKER_REGISTRY_KEY, o.getValue());
+        break;
+      default:
+        throw new UnsupportedOperationException(
+            "Unknown option: " + o.getOpt());
+      }
+    }
+
+    String[] rem = cmd.getArgs();
+    if (rem.length != 2) {
+      printUsage();
+      return -1;
+    }
+
+    String source = rem[0];
+    String dest = rem[1];
+
+    String repoDir = conf.get(
+        YarnConfiguration.NM_RUNC_IMAGE_TOPLEVEL_DIR,
+        YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR);
+
+    defaultRegistry =
+        conf.get(DEFAULT_DOCKER_REGISTRY_KEY, PUBLIC_DOCKER_REPO);
+
+    Runtime.getRuntime().addShutdownHook(new Thread(this::cleanup));
+
+    repoPath = new Path(repoDir);
+    fs = repoPath.getFileSystem(conf);
+    fc = FileContext.getFileContext(conf);
+    lockPath = new Path(repoPath, ".import.lock");
+    metaPath = new Path(repoPath, "meta");
+    manifestPath = new Path(repoPath, "manifest");
+    configPath = new Path(repoPath, "config");
+    layerPath = new Path(repoPath, "layer");
+    tmpDir = Files.createTempDirectory("runc-import-").toFile();
+
+    LOG.debug("Using default docker registry: {}", defaultRegistry);
+    LOG.debug("Using top-level runc repository: {}", repoPath);
+    LOG.debug("Using
lock file: {}", lockPath); + LOG.debug("Using temporary dir: {}", tmpDir); + + FSDataOutputStream lockStream = createLockFile(10, 30000); + try { + fs.mkdirs(manifestPath); + fs.mkdirs(configPath); + fs.mkdirs(layerPath); + + importDockerImage(source, dest); + } finally { + unlock(lockStream, 10, 30000); + } + return 0; + } + + protected void printUsage() { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp( + "import-docker-image [OPTIONS] ", + new Options()); + formatter.setSyntaxPrefix(""); + formatter.printHelp("Options", options()); + ToolRunner.printGenericCommandUsage(System.out); + } + + static Options options() { + Options options = new Options(); + options.addOption("h", "help", false, "Print usage"); + options.addOption("r", "repository", true, "Default Docker repository"); + return options; + } + + public static void main(String[] argv) throws Exception { + int ret = ToolRunner.run(new ImportDockerImage(), argv); + System.exit(ret); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/docker/TestDockerClient.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/docker/TestDockerClient.java new file mode 100644 index 00000000000..a2ce50efa3c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/docker/TestDockerClient.java @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.docker; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.codec.binary.Hex; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.runc.docker.model.BlobV2; +import org.apache.hadoop.runc.docker.model.ManifestListV2; +import org.apache.hadoop.runc.docker.model.ManifestRefV2; +import org.apache.hadoop.runc.docker.model.ManifestV2; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; + +import static org.junit.Assert.assertEquals; + +public class TestDockerClient { + + static final Logger LOG = LoggerFactory.getLogger(TestDockerClient.class); + + static final String CONFIG_MEDIA_TYPE + = "application/vnd.docker.container.image.v1+json"; + + static final String LAYER_MEDIA_TYPE + = "application/vnd.docker.image.rootfs.diff.tar.gzip"; + + static final String MANIFEST_DIGEST = + "sha256:a4c8f3631a7d1091064fd7c7d2032688b0e547fc903092cc81ee1b33a77d161f"; + + static final String CONFIG_DIGEST = + "sha256:73b36cf85155a1c2fe3c1d2c7f7b3c78e1d060c4984a5bc5ad019e51e7f6460e"; + + static final String LAYER0_DIGEST = + "sha256:49d1d42c8dd5d9934484f8122259c16e60e31ce8665cb25fc2f909fa0c1b1521"; + + static final String LAYER1_DIGEST = + "sha256:0ce545d269b61f0f79bc6ba0c088309e474f4753970c66bb91be3cf55c5b392d"; + + Server jetty; + ServletContextHandler context; + String baseUrl; + + MessageDigest sha256; + + @Before + public void setUp() throws Exception { + sha256 = MessageDigest.getInstance("SHA-256"); + + jetty = createJettyServer(); + context = createServletContextHandler(jetty); + context.addServlet(new ServletHolder(new RepositoryServlet()), "/*"); + jetty.start(); + baseUrl = getBaseUrl(jetty); + } + + @After + public void tearDown() throws Exception { + jetty.stop(); + } + + protected static Server createJettyServer() { + try { + Server jetty = new Server(0); + ((ServerConnector) jetty.getConnectors()[0]).setHost("localhost"); + return jetty; + } catch (Exception ex) { + throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(), + ex); + } + } + + protected static ServletContextHandler createServletContextHandler( + Server jetty) { + + ServletContextHandler context = new ServletContextHandler(); + context.setContextPath("/"); + jetty.setHandler(context); + + return context; + } + + protected static String getBaseUrl(Server jetty) { + ServerConnector con = (ServerConnector) jetty.getConnectors()[0]; + return String.format("http://%s:%d/v2/", con.getHost(), con.getLocalPort()); + } + + class RepositoryServlet extends HttpServlet { + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws ServletException, IOException { + + LOG.debug("Request URI: {}", req.getRequestURI()); + + String requestUri = req.getRequestURI(); + switch (requestUri) { + case "/v2/": + 
resp.setStatus(200); + break; + case "/v2/test/image/manifests/latest": + ManifestListV2 manifests = createManifests(); + sendJson(resp, 200, manifests.getMediaType(), manifests); + break; + case "/v2/test/image/manifests/" + MANIFEST_DIGEST: + ManifestV2 manifest = createManifest(); + sendJson(resp, 200, manifest.getMediaType(), manifest); + break; + case "/v2/test/image/blobs/" + CONFIG_DIGEST: + Object config = createConfig(); + sendJson(resp, 200, CONFIG_MEDIA_TYPE, config); + break; + case "/v2/test/image/blobs/" + LAYER0_DIGEST: + Object layer0 = createLayer(0); + sendJson(resp, 200, LAYER_MEDIA_TYPE, layer0); + break; + case "/v2/test/image/blobs/" + LAYER1_DIGEST: + Object layer1 = createLayer(1); + sendJson(resp, 200, LAYER_MEDIA_TYPE, layer1); + break; + default: + LOG.error("Unexpected URI received: {}", requestUri); + resp.sendError(404); + } + } + } + + void sendJson(HttpServletResponse resp, int status, String contentType, + Object json) throws IOException { + byte[] data = serialize(json); + resp.setStatus(status); + resp.setContentType(contentType); + resp.setContentLength(data.length); + sha256.reset(); + sha256.digest(data); + String digest = String.format("sha256:%s", + Hex.encodeHexString(sha256.digest(data))); + sha256.reset(); + LOG.debug("Content length: {}", data.length); + LOG.debug("Digest: {}", digest); + resp.setHeader("docker-content-digest", digest); + try (OutputStream os = resp.getOutputStream()) { + os.write(data); + } + } + + byte[] serialize(Object object) { + try { + return new ObjectMapper().writer().writeValueAsBytes(object); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + ManifestListV2 createManifests() { + ManifestListV2 manifests = new ManifestListV2(); + manifests.setMediaType(ManifestListV2.CONTENT_TYPE); + manifests.setSchemaVersion(2); + + ManifestRefV2 mref = new ManifestRefV2(); + mref.setDigest(MANIFEST_DIGEST); + mref.setMediaType(ManifestRefV2.CONTENT_TYPE); + mref.setSize(580); + manifests.getManifests().add(mref); + + return manifests; + } + + ManifestV2 createManifest() { + ManifestV2 manifest = new ManifestV2(); + manifest.setMediaType(ManifestV2.CONTENT_TYPE); + manifest.setSchemaVersion(2); + + BlobV2 config = new BlobV2(); + config.setMediaType(CONFIG_MEDIA_TYPE); + config.setSize(15); + config.setDigest(CONFIG_DIGEST); + manifest.setConfig(config); + + BlobV2 layer0 = new BlobV2(); + layer0.setMediaType("application/vnd.docker.image.rootfs.diff.tar.gzip"); + layer0.setDigest(LAYER0_DIGEST); + layer0.setSize(16); + manifest.getLayers().add(layer0); + + BlobV2 layer1 = new BlobV2(); + layer1.setMediaType("application/vnd.docker.image.rootfs.diff.tar.gzip"); + layer1.setDigest(LAYER1_DIGEST); + layer1.setSize(16); + manifest.getLayers().add(layer1); + + return manifest; + } + + ObjectNode createConfig() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode root = mapper.createObjectNode(); + root.put("id", "config"); + return root; + } + + ObjectNode createLayer(int index) { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode root = mapper.createObjectNode(); + root.put("id", String.format("layer-%d", index)); + return root; + } + + @Test + public void dockerImageShouldBeDownloadable() throws Exception { + String image = "test/image"; + String tag = "latest"; + + try (DockerClient client = new DockerClient()) { + DockerContext ctx = client.createContext(baseUrl); + ManifestListV2 manifests = client.listManifests(ctx, image, tag); + + ManifestRefV2 mref = client + .getManifestChooser() + 
.chooseManifest(manifests); + + byte[] manifestData = client + .readManifest(ctx, image, mref.getDigest()); + + ManifestV2 manifest = client.parseManifest(manifestData); + + BlobV2 configRef = manifest.getConfig(); + + byte[] configData = client + .readConfig(ctx, image, configRef.getDigest()); + + assertEquals("Wrong config data", "{\"id\":\"config\"}", + new String(configData, StandardCharsets.UTF_8)); + + int layerCount = 0; + for (BlobV2 layerRef : manifest.getLayers()) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + try (InputStream in = client + .download(ctx, image, layerRef.getDigest())) { + IOUtils.copyBytes(in, out, 1024); + } + assertEquals("Wrong layer content", + String.format("{\"id\":\"layer-%d\"}", layerCount++), + new String(out.toByteArray(), StandardCharsets.UTF_8)); + } + } + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsConverter.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsConverter.java new file mode 100644 index 00000000000..c9b48b04505 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsConverter.java @@ -0,0 +1,80 @@ +package org.apache.hadoop.runc.squashfs; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.zip.GZIPOutputStream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestSquashFsConverter { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tgz; + + @Before + public void setUp() throws IOException { + // create a tar.gz to import from + tgz = temp.newFile("test.tar.gz"); + try (FileOutputStream fos = new FileOutputStream(tgz); + GZIPOutputStream gos = new GZIPOutputStream(fos); + TarArchiveOutputStream tos = new TarArchiveOutputStream(gos)) { + + // add a directory + TarArchiveEntry dir = new TarArchiveEntry("dir/"); + dir.setMode((short) 0755); + dir.setSize(0L); + tos.putArchiveEntry(dir); + tos.closeArchiveEntry(); + + // add a file + TarArchiveEntry file = new TarArchiveEntry("dir/file"); + file.setMode((short) 0644); + file.setSize(4); + tos.putArchiveEntry(file); + tos.write("test".getBytes(StandardCharsets.UTF_8)); + tos.closeArchiveEntry(); + } + } + + @Test + public void simpleArchiveShouldConvertSuccessfully() throws IOException { + File sqsh = temp.newFile("test.sqsh"); + + SquashFsConverter.convertToSquashFs(tgz, sqsh); + + try (SquashFsReader reader = SquashFsReader.fromFile(sqsh)) { + INode dir = reader.findInodeByPath("/dir"); + assertTrue("Dir is not a directory: " + dir.getClass().getName(), + dir instanceof DirectoryINode); + + INode file = reader.findInodeByPath("/dir/file"); + assertTrue("File is not a file: " + file.getClass().getName(), + file instanceof FileINode); + + FileINode fInode = (FileINode) file; + + assertEquals("Wrong file length", 4, fInode.getFileSize()); + + byte[] buf = new byte[4]; + reader.read(file, 0L, buf, 0, 4); + 
assertEquals("Wrong file data", "test", + new String(buf, StandardCharsets.UTF_8)); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsException.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsException.java new file mode 100644 index 00000000000..5dd54eaa111 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsException.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +public class TestSquashFsException { + + @Test + public void noArgConstructorShouldHaveNullMessageAndCause() { + try { + throw new SquashFsException(); + } catch (SquashFsException e) { + assertNull(e.getMessage()); + assertNull(e.getCause()); + } + } + + @Test + public void stringConstructorShouldHaveSameMessageAndNullCause() { + try { + throw new SquashFsException("test"); + } catch (SquashFsException e) { + assertEquals("test", e.getMessage()); + assertNull(e.getCause()); + } + } + + @Test + public void throwableConstructorShouldHaveGeneratedMessageAndSameCause() { + Exception cause = null; + try { + try { + throw new IOException("cause"); + } catch (IOException e2) { + cause = e2; + } + throw new SquashFsException(cause); + } catch (SquashFsException e) { + assertEquals(IOException.class.getName() + ": cause", e.getMessage()); + assertSame(cause, e.getCause()); + } + } + + @Test + public void twoArgConstructorShouldHaveSameMessageAndCause() { + Exception cause = null; + try { + try { + throw new IOException("cause"); + } catch (IOException e2) { + cause = e2; + } + throw new SquashFsException("test", cause); + } catch (SquashFsException e) { + assertEquals("test", e.getMessage()); + assertSame(cause, e.getCause()); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsInterop.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsInterop.java new file mode 100644 index 00000000000..d613c639c17 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/TestSquashFsInterop.java @@ -0,0 +1,1037 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs; + +import org.apache.hadoop.runc.squashfs.data.DataBlockCache; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.inode.DeviceINode; +import org.apache.hadoop.runc.squashfs.inode.DirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.FileINode; +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.inode.INodeType; +import org.apache.hadoop.runc.squashfs.inode.SymlinkINode; +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockCache; +import org.apache.hadoop.runc.squashfs.metadata.TaggedMetadataBlockReader; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +@RunWith(Parameterized.class) +public class TestSquashFsInterop { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + @Parameters(name = "{0}") + public static Collection data() { + return Arrays.asList(new Object[][] { + { "file", (ReaderCreator) (a -> createFileReader(a)) }, + { "file-with-cache", + (ReaderCreator) (a -> createFileReaderWithCache(a)) }, + { "mapped", (ReaderCreator) (a -> createMappedReader(a)) }, + { "mapped-with-cache", + (ReaderCreator) (a -> createMappedReaderWithCache(a)) } }); + } + + @FunctionalInterface + public interface ReaderCreator { + public SquashFsReader create(File archive) + throws SquashFsException, IOException; + } + + private ReaderCreator creator; + + public TestSquashFsInterop(String testName, ReaderCreator creator) { + this.creator = creator; + } + + public SquashFsReader createReader(File archive) + throws SquashFsException, IOException { + return creator.create(archive); + } + + public static SquashFsReader createFileReader(File archive) + throws SquashFsException, IOException { + return SquashFsReader.fromFile(archive); + } + + public static SquashFsReader createFileReaderWithCache(File archive) + throws SquashFsException, IOException { + MetadataBlockCache cache = + new MetadataBlockCache(new TaggedMetadataBlockReader(true)); + return SquashFsReader.fromFile(0, archive, cache, new 
DataBlockCache(64),
+        new DataBlockCache(64));
+  }
+
+  public static SquashFsReader createMappedReader(File archive)
+      throws SquashFsException, IOException {
+    MappedFile mmap;
+    try (RandomAccessFile raf = new RandomAccessFile(archive, "r")) {
+      mmap = MappedFile.mmap(raf.getChannel(),
+          MappedSquashFsReader.PREFERRED_MAP_SIZE,
+          MappedSquashFsReader.PREFERRED_WINDOW_SIZE);
+    }
+    return SquashFsReader.fromMappedFile(0, mmap);
+  }
+
+  public static SquashFsReader createMappedReaderWithCache(File archive)
+      throws SquashFsException, IOException {
+    MetadataBlockCache cache =
+        new MetadataBlockCache(new TaggedMetadataBlockReader(false));
+    MappedFile mmap;
+    try (RandomAccessFile raf = new RandomAccessFile(archive, "r")) {
+      mmap = MappedFile.mmap(raf.getChannel(),
+          MappedSquashFsReader.PREFERRED_MAP_SIZE,
+          MappedSquashFsReader.PREFERRED_WINDOW_SIZE);
+    }
+    return SquashFsReader.fromMappedFile(0, mmap, cache, new DataBlockCache(64),
+        new DataBlockCache(64));
+  }
+
+  @Test
+  public void emptyArchiveShouldWork() throws Exception {
+    File archive = temp.newFile();
+
+    try (SquashFsWriter writer = new SquashFsWriter(archive)) {
+      writer.finish();
+    }
+
+    try (SquashFsReader reader = createReader(archive)) {
+      assertEquals("wrong id count", (short) 1,
+          reader.getSuperBlock().getIdCount());
+      assertEquals("wrong inode count", 1,
+          reader.getSuperBlock().getInodeCount());
+
+      DirectoryINode root = reader.getRootInode();
+      assertEquals("wrong uidIdx", (short) 0, root.getUidIdx());
+      assertEquals("wrong gidIdx", (short) 0, root.getGidIdx());
+      assertEquals("wrong file size", 3, root.getFileSize());
+      assertEquals("wrong index count", (short) 0, root.getIndexCount());
+      assertEquals("wrong inode number", 1, root.getInodeNumber());
+      assertSame("wrong inode type", INodeType.BASIC_DIRECTORY,
+          root.getInodeType());
+      assertEquals("wrong nlink count", 2, root.getNlink());
+      assertEquals("wrong permissions", (short) 0755, root.getPermissions());
+      assertEquals("wrong xattr index", INode.XATTR_NOT_PRESENT,
+          root.getXattrIndex());
+    }
+  }
+
+  @Test
+  public void archiveWithDirectoriesAtRootShouldWork() throws Exception {
+    File archive = temp.newFile();
+
+    try (SquashFsWriter writer = new SquashFsWriter(archive)) {
+      for (int i = 0; i < 10; i++) {
+        SquashFsEntry entry = writer.entry(String.format("/dir-%d", i))
+            .directory()
+            .lastModified(System.currentTimeMillis())
+            .uid(0)
+            .gid(0)
+            .permissions((short) 0755)
+            .build();
+        System.out.println(entry);
+      }
+      writer.finish();
+    }
+
+    try (SquashFsReader reader = createReader(archive)) {
+      assertEquals("wrong id count", (short) 1,
+          reader.getSuperBlock().getIdCount());
+      assertEquals("wrong inode count", 11,
+          reader.getSuperBlock().getInodeCount());
+
+      for (int i = 0; i < 10; i++) {
+        INode dir = reader.findInodeByPath(String.format("/dir-%d", i));
+        assertSame(String.format("wrong type for entry %d", i),
+            INodeType.BASIC_DIRECTORY, dir.getInodeType());
+      }
+
+      // verify links
+      List<DirectoryEntry> children = reader.getChildren(reader.getRootInode());
+      assertEquals("wrong directory entry count", 10, children.size());
+
+      for (int i = 0; i < 10; i++) {
+        DirectoryEntry child = children.get(i);
+
+        assertEquals(String.format("wrong name for entry %d", i),
+            String.format("dir-%d", i),
+            child.getNameAsString());
+
+        INode dir = reader.findInodeByDirectoryEntry(child);
+        assertSame(String.format("wrong type for entry %d", i),
+            INodeType.BASIC_DIRECTORY, dir.getInodeType());
+      }
+    }
+  }
+
+  @Test
+  public void archiveWithSyntheticDirectoryShouldWork() throws
Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dir") + .directory() + .synthetic() + .uid(0) + .gid(0) + .permissions((short) 0755) + .build(); + + writer.entry("/dir") + .directory() + .uid(0) + .gid(0) + .permissions((short) 0711) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode dir = reader.findInodeByPath("/dir"); + assertSame("wrong type", INodeType.BASIC_DIRECTORY, dir.getInodeType()); + assertEquals("wrong permissions", (short) 0711, dir.getPermissions()); + } + } + + @Test + public void archiveWithMissingParentDirectoryShouldBeAutoCreated() + throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dir/dir2/dir3") + .directory() + .synthetic() + .uid(0) + .gid(0) + .permissions((short) 0711) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 4, + reader.getSuperBlock().getInodeCount()); + + INode dir = reader.findInodeByPath("/dir"); + assertSame("wrong parent type", + INodeType.BASIC_DIRECTORY, dir.getInodeType()); + assertEquals("wrong parent permissions", + (short) 0755, dir.getPermissions()); + + INode dir2 = reader.findInodeByPath("/dir/dir2"); + assertSame("wrong child type", + INodeType.BASIC_DIRECTORY, dir2.getInodeType()); + assertEquals("wrong child permissions", + (short) 0755, dir2.getPermissions()); + + INode dir3 = reader.findInodeByPath("/dir/dir2/dir3"); + assertSame("wrong grandchild type", + INodeType.BASIC_DIRECTORY, dir3.getInodeType()); + assertEquals("wrong grandchild permissions", + (short) 0711, dir3.getPermissions()); + } + } + + @Test + public void archiveWithDuplicateDirectoryShouldOverwriteContent() + throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dir") + .directory() + .uid(0) + .gid(0) + .permissions((short) 0755) + .build(); + + writer.entry("/dir") + .directory() + .synthetic() + .uid(0) + .gid(0) + .permissions((short) 0711) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode dir = reader.findInodeByPath("/dir"); + assertSame("wrong type", INodeType.BASIC_DIRECTORY, dir.getInodeType()); + assertEquals("wrong permissions", (short) 0711, dir.getPermissions()); + } + } + + @Test + public void archiveWithFilesAtRootShouldWork() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + for (int i = 0; i < 10; i++) { + writer.entry(String.format("/file-%d", i)) + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream( + String.format("file-%d", i) + .getBytes(StandardCharsets.ISO_8859_1))) + .permissions((short) 0644) + .build(); + } + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode 
count", 11,
+          reader.getSuperBlock().getInodeCount());
+
+      for (int i = 0; i < 10; i++) {
+        INode file = reader.findInodeByPath(String.format("/file-%d", i));
+        assertSame(String.format("Wrong type for entry %d", i),
+            INodeType.BASIC_FILE, file.getInodeType());
+        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+          reader.writeFileStream(file, bos);
+          String content =
+              new String(bos.toByteArray(), StandardCharsets.ISO_8859_1);
+          assertEquals(String.format("file-%d", i), content);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void archiveWithDuplicateFileShouldUseLatestContent()
+      throws Exception {
+    File archive = temp.newFile();
+
+    byte[] content1 = "test1".getBytes(StandardCharsets.UTF_8);
+    byte[] content2 = "test2".getBytes(StandardCharsets.UTF_8);
+
+    try (SquashFsWriter writer = new SquashFsWriter(archive)) {
+      // first version of the file; the duplicate entry below should win
+      writer.entry("/full.dat")
+          .lastModified(Instant.now())
+          .uid(1000)
+          .gid(2000)
+          .content(new ByteArrayInputStream(content1))
+          .permissions((short) 0644)
+          .build();
+
+      writer.entry("/full.dat")
+          .lastModified(Instant.now())
+          .uid(1000)
+          .gid(2000)
+          .content(new ByteArrayInputStream(content2))
+          .permissions((short) 0644)
+          .build();
+
+      writer.finish();
+    }
+
+    try (SquashFsReader reader = createReader(archive)) {
+      INode file = reader.findInodeByPath("/full.dat");
+      try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+        reader.writeFileStream(file, bos);
+        assertEquals(
+            "Wrong file content",
+            "test2",
+            new String(bos.toByteArray(), StandardCharsets.UTF_8));
+      }
+    }
+  }
+
+  @Test
+  public void archiveWithFileContainingFullBlockShouldWork() throws Exception {
+    File archive = temp.newFile();
+
+    byte[] content = new byte[131072];
+    Random r = new Random(0L);
+    r.nextBytes(content);
+
+    try (SquashFsWriter writer = new SquashFsWriter(archive)) {
+      SquashFsEntry entry = writer.entry("/full.dat")
+          .lastModified(Instant.now())
+          .uid(1000)
+          .gid(2000)
+          .content(new ByteArrayInputStream(content))
+          .permissions((short) 0644)
+          .build();
+
+      writer.finish();
+
+      assertEquals("wrong child size", 0, entry.getChildren().size());
+      assertEquals("wrong inode number", 1, entry.getInodeNumber());
+      assertEquals("wrong short name", "full.dat", entry.getShortName());
+      assertEquals("wrong parent", "", entry.getParent().getName());
+      assertSame("wrong inode type", INodeType.BASIC_FILE,
+          entry.getInode().getInodeType());
+      assertFalse("synthetic", entry.isSynthetic());
+      assertEquals("wrong uid", (short) 1, entry.getUid());
+      assertEquals("wrong gid", (short) 2, entry.getGid());
+      assertEquals("wrong nlink count", 1, entry.getNlink());
+      assertEquals("wrong file size", 131072L, entry.getFileSize());
+      assertEquals("wrong last modified", System.currentTimeMillis(),
+          entry.getLastModified() * 1000L, 10000L);
+      assertEquals("wrong data block count", 1, entry.getDataBlocks().size());
+      assertNull("fragment found", entry.getFragment());
+    }
+
+    try (SquashFsReader reader = createReader(archive)) {
+      assertEquals("wrong id count", (short) 3,
+          reader.getSuperBlock().getIdCount());
+      assertEquals("wrong inode count", 2,
+          reader.getSuperBlock().getInodeCount());
+      INode file = reader.findInodeByPath("/full.dat");
+      assertSame("wrong type", INodeType.BASIC_FILE, file.getInodeType());
+      assertEquals("wrong uid", 1000,
+          reader.getIdTable().idFromIndex(file.getUidIdx()));
+      assertEquals("wrong gid", 2000,
+          reader.getIdTable().idFromIndex(file.getGidIdx()));
+      assertEquals("wrong size", 131072L, ((FileINode) file).getFileSize());
+
+      // test writing entire file
+      {
+
try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + + INodeRef ref = + reader.getExportTable().getInodeRef(file.getInodeNumber()); + INode file2 = reader.findInodeByInodeRef(ref); + assertEquals("wrong inode number using write()", file.getInodeNumber(), + file2.getInodeNumber()); + } + + // test reading data + { + byte[] xfer = new byte[1024]; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + int c; + long fileOffset = 0L; + while ((c = reader.read(file, fileOffset, xfer, 0, xfer.length)) + >= 0) { + if (c >= 0) { + bos.write(xfer, 0, c); + fileOffset += c; + } + } + assertArrayEquals(content, bos.toByteArray()); + } + + INodeRef ref = + reader.getExportTable().getInodeRef(file.getInodeNumber()); + INode file2 = reader.findInodeByInodeRef(ref); + assertEquals("wrong inode number using read()", file.getInodeNumber(), + file2.getInodeNumber()); + } + } + } + + @Test + public void archiveWithFileContainingSparseBlockShouldWork() + throws Exception { + File archive = temp.newFile(); + + byte[] content = new byte[262144]; + content[262143] = 1; // not sparse second block + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/sparse.dat") + .lastModified(new Date()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(content)) + .permissions((short) 0644) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode file = reader.findInodeByPath("/sparse.dat"); + + assertSame("wrong type", INodeType.EXTENDED_FILE, file.getInodeType()); + assertEquals("wrong sparse count", 131072L, + ((FileINode) file).getSparse()); + assertEquals("wrong size", 262144L, ((FileINode) file).getFileSize()); + + // test writing data + { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + } + + // test reading data + { + byte[] xfer = new byte[1024]; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + int c; + long fileOffset = 0L; + while ((c = reader.read(file, fileOffset, xfer, 0, xfer.length)) + >= 0) { + if (c >= 0) { + bos.write(xfer, 0, c); + fileOffset += c; + } + } + assertArrayEquals(content, bos.toByteArray()); + } + } + } + } + + @Test + public void archiveWithFileContainingAllSparseBlocksShouldWork() + throws Exception { + File archive = temp.newFile(); + + byte[] content = new byte[262144]; + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/sparse.dat") + .lastModified(new Date()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(content)) + .permissions((short) 0644) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode file = reader.findInodeByPath("/sparse.dat"); + + assertSame("wrong type", INodeType.EXTENDED_FILE, file.getInodeType()); + assertEquals("wrong sparse count", 262143L, + ((FileINode) file).getSparse()); + assertEquals("wrong size", 262144L, ((FileINode) file).getFileSize()); + + // test writing data + { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) 
{ + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + } + // test reading data + { + byte[] xfer = new byte[1024]; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + int c; + long fileOffset = 0L; + while ((c = reader.read(file, fileOffset, xfer, 0, xfer.length)) + >= 0) { + if (c >= 0) { + bos.write(xfer, 0, c); + fileOffset += c; + } + } + assertArrayEquals(content, bos.toByteArray()); + } + } + + } + } + + @Test + public void archiveWithHardLinkToFileShouldWork() throws Exception { + File archive = temp.newFile(); + + byte[] content = new byte[1024]; + Random r = new Random(0L); + r.nextBytes(content); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + + // write first file + SquashFsEntry target = writer.entry("/target.dat") + .lastModified(System.currentTimeMillis()) + .uid(1) + .gid(1) + .content(new ByteArrayInputStream(content)) + .permissions((short) 0644) + .build(); + + // write second file + SquashFsEntry source = writer.entry("/source.dat") + .hardlink("/target.dat") + .build(); + + writer.finish(); + + assertEquals("wrong target name", "/target.dat", + source.getHardlinkTarget()); + assertSame("wrong target", target, source.getHardlinkEntry()); + assertSame("wrong inode", target.getInode(), source.getInode()); + assertSame("wrong link count for source", 2, source.getNlink()); + assertSame("wrong link count for target", 2, target.getNlink()); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 2, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode file = reader.findInodeByPath("/target.dat"); + assertSame("wrong type", INodeType.EXTENDED_FILE, file.getInodeType()); + assertEquals("wrong size", 1024L, ((FileINode) file).getFileSize()); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + + INode file2 = reader.findInodeByPath("/source.dat"); + assertSame("wrong type", INodeType.EXTENDED_FILE, file2.getInodeType()); + assertSame("wrong inode number", file.getInodeNumber(), + file2.getInodeNumber()); + assertEquals("wrong size", 1024L, ((FileINode) file2).getFileSize()); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + + } + } + + @Test + public void archiveWithFileContainingMultipleBlocksAndFragmentShouldWork() + throws Exception { + File archive = temp.newFile(); + + byte[] content = new byte[275000]; + Random r = new Random(0L); + r.nextBytes(content); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/multiple.dat") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(content)) + .permissions((short) 0644) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 2, + reader.getSuperBlock().getInodeCount()); + + INode file = reader.findInodeByPath("/multiple.dat"); + assertSame("wrong type", INodeType.BASIC_FILE, file.getInodeType()); + assertEquals("wrong size", 275000L, ((FileINode) file).getFileSize()); + + // test writing data + { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + 
assertArrayEquals(content, bos.toByteArray()); + } + } + + // test reading data + { + byte[] xfer = new byte[1024]; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + int c; + long fileOffset = 0L; + while ((c = reader.read(file, fileOffset, xfer, 0, xfer.length)) + >= 0) { + if (c >= 0) { + bos.write(xfer, 0, c); + fileOffset += c; + } + } + assertArrayEquals(content, bos.toByteArray()); + } + } + + } + } + + @Test + public void archiveWithCharDeviceShouldWork() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dev") + .directory() + .uid(0) + .gid(0) + .permissions((short) 0755) + .build(); + + SquashFsEntry dev = writer.entry("/dev/zero") + .uid(0) + .gid(0) + .charDev(1, 5) + .permissions((short) 0644) + .build(); + + writer.finish(); + + assertEquals("wrong major", 1, dev.getMajor()); + assertEquals("wrong minor", 5, dev.getMinor()); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 3, + reader.getSuperBlock().getInodeCount()); + + INode dev = reader.findInodeByPath("/dev/zero"); + assertSame("wrong type", INodeType.BASIC_CHAR_DEVICE, dev.getInodeType()); + assertEquals("wrong device", (1 << 8) | 5, + ((DeviceINode) dev).getDevice()); + } + } + + @Test + public void archiveWithBlockDeviceShouldWork() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dev") + .directory() + .uid(0) + .gid(0) + .permissions((short) 0755) + .build(); + + SquashFsEntry dev = writer.entry("/dev/loop0") + .uid(0) + .gid(0) + .blockDev(7, 0) + .permissions((short) 0644) + .build(); + + writer.finish(); + + assertEquals("wrong major", 7, dev.getMajor()); + assertEquals("wrong minor", 0, dev.getMinor()); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 3, + reader.getSuperBlock().getInodeCount()); + + INode dev = reader.findInodeByPath("/dev/loop0"); + assertSame("wrong type", INodeType.BASIC_BLOCK_DEVICE, + dev.getInodeType()); + assertEquals("wrong device", (7 << 8), ((DeviceINode) dev).getDevice()); + } + } + + @Test + public void archiveWithFifoShouldWork() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/dev") + .directory() + .uid(0) + .gid(0) + .permissions((short) 0755) + .build(); + + writer.entry("/dev/log") + .uid(0) + .gid(0) + .fifo() + .permissions((short) 0666) + .build(); + + writer.finish(); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 3, + reader.getSuperBlock().getInodeCount()); + + INode dev = reader.findInodeByPath("/dev/log"); + assertSame("wrong type", INodeType.BASIC_FIFO, dev.getInodeType()); + assertEquals("wrong permissions", (short) 0666, dev.getPermissions()); + } + } + + @Test + public void archiveWithSymlinkShouldWork() throws Exception { + File archive = temp.newFile(); + + byte[] content = new byte[1024]; + Random r = new Random(0L); + r.nextBytes(content); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/target.dat") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + 
.content(new ByteArrayInputStream(content)) + .permissions((short) 0644) + .build(); + + SquashFsEntry link = writer.entry("/link.dat") + .uid(0) + .gid(0) + .symlink("/target.dat") + .permissions((short) 0644) + .build(); + + writer.finish(); + + assertEquals("wrong link target", "/target.dat", link.getSymlinkTarget()); + } + + try (SquashFsReader reader = createReader(archive)) { + assertEquals("wrong id count", (short) 1, + reader.getSuperBlock().getIdCount()); + assertEquals("wrong inode count", 3, + reader.getSuperBlock().getInodeCount()); + + INode link = reader.findInodeByPath("/link.dat"); + assertSame("wrong type", INodeType.BASIC_SYMLINK, link.getInodeType()); + String target = new String(((SymlinkINode) link).getTargetPath(), + StandardCharsets.ISO_8859_1); + + INode file = reader.findInodeByPath(target); + assertSame("wrong type", INodeType.BASIC_FILE, file.getInodeType()); + assertEquals("wrong size", 1024, ((FileINode) file).getFileSize()); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + reader.writeFileStream(file, bos); + assertArrayEquals(content, bos.toByteArray()); + } + + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNullNameShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry(null) + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithEmptyNameShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNonAbsoluteNameShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("foo") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNameEndingInSlashShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/foo/") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNoTypeShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/no-type.dat") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNoUidShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/no-uid.dat") + .lastModified(System.currentTimeMillis()) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void 
fileWithNoGidShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/no-gid.dat") + .lastModified(System.currentTimeMillis()) + .uid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNoPermissionsShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/no-permissions.dat") + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .build(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void fileWithNoFileSizeShouldFail() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + writer.entry("/no-size.dat") + .file() + .lastModified(System.currentTimeMillis()) + .uid(0) + .gid(0) + .permissions((short) 0644) + .build(); + } + } + + @Test + public void fileWithNoTimestampShouldUseDefault() throws Exception { + File archive = temp.newFile(); + + try (SquashFsWriter writer = new SquashFsWriter(archive)) { + SquashFsEntry entry = writer.entry("/no-timestamp.dat") + .uid(0) + .gid(0) + .content(new ByteArrayInputStream(new byte[0])) + .permissions((short) 0644) + .build(); + + assertEquals("wrong timestamp", System.currentTimeMillis(), + entry.lastModified * 1000L, 10000L); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlock.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlock.java new file mode 100644 index 00000000000..e5010e35245 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlock.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestDataBlock { + + DataBlock full; + DataBlock sparse; + DataBlock partial; + DataBlock empty; + + @Before + public void setUp() { + full = new DataBlock(new byte[1024], 1024, 1024); + full.getData()[0] = (byte) 0xff; + sparse = new DataBlock(new byte[1024], 1024, 0); + partial = new DataBlock(new byte[1024], 512, 1024); + empty = new DataBlock(new byte[1024], 0, 0); + } + + @Test + public void getDataShouldReturnCorrectValues() { + assertEquals((byte) 0xff, full.getData()[0]); + } + + @Test + public void getLogicalSizeShouldReturnCorrectValuesForAllCases() { + assertEquals("wrong size for full", 1024, full.getLogicalSize()); + assertEquals("wrong size for sparse", 1024, sparse.getLogicalSize()); + assertEquals("wrong size for partial", 512, partial.getLogicalSize()); + assertEquals("wrong size for empty", 0, empty.getLogicalSize()); + } + + @Test + public void getPhysicalSizeShouldReturnCorrectValuesForAllCases() { + assertEquals("wrong size for full", 1024, full.getPhysicalSize()); + assertEquals("wrong size for sparse", 0, sparse.getPhysicalSize()); + assertEquals("wrong size for partial", 1024, partial.getPhysicalSize()); + assertEquals("wrong size for empty", 0, empty.getPhysicalSize()); + } + + @Test + public void isSparseShouldReturnTrueOnlyForSparseBlock() { + assertFalse("full is sparse", full.isSparse()); + assertFalse("empty is sparse", empty.isSparse()); + assertFalse("partial is sparse", partial.isSparse()); + assertTrue("sparse is not sparse", sparse.isSparse()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockCache.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockCache.java new file mode 100644 index 00000000000..a5bb74103f8 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockCache.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; + +public class TestDataBlockCache { + + DataBlockCache cache; + DataBlockCache.Key[] keys; + DataBlock[] blocks; + DataBlockCache.Key extraKey; + DataBlock extraBlock; + + @Before + public void setUp() throws Exception { + int size = 64; + keys = new DataBlockCache.Key[size]; + blocks = new DataBlock[size]; + + cache = new DataBlockCache(size); + for (int i = 0; i < size; i++) { + keys[i] = new DataBlockCache.Key(1, false, i, 32, 32); + blocks[i] = new DataBlock(new byte[32], 32, 32); + cache.put(keys[i], blocks[i]); + } + extraKey = new DataBlockCache.Key(1, false, size, 32, 32); + extraBlock = new DataBlock(new byte[32], 32, 32); + } + + @After + public void tearDown() { + cache = null; + keys = null; + blocks = null; + extraKey = null; + extraBlock = null; + } + + @Test + public void nullCacheShouldAlwaysMiss() throws Exception { + cache = DataBlockCache.NO_CACHE; + cache.clearCache(); + + for (int i = 0; i < keys.length; i++) { + assertNull("not null block", cache.get(keys[i])); + cache.put(keys[i], blocks[i]); + assertNull("not null block", cache.get(keys[i])); + } + assertEquals("wrong hit count", 0L, cache.getCacheHits()); + assertEquals("wrong miss count", (long) keys.length * 2L, + cache.getCacheMisses()); + assertEquals("wrong cache load", 0, cache.getCacheLoad()); + } + + @Test + public void readingAllBlocksShouldResultInAllCacheHits() throws Exception { + for (int i = 0; i < keys.length; i++) { + assertSame("wrong block", blocks[i], cache.get(keys[i])); + } + assertEquals("wrong hit count", (long) keys.length, cache.getCacheHits()); + assertEquals("wrong miss count", 0L, cache.getCacheMisses()); + assertEquals("wrong cache load", keys.length, cache.getCacheLoad()); + } + + @Test + public void readingMoreThanCapacityShouldResultInOneCacheMiss() + throws Exception { + cache.put(extraKey, extraBlock); + for (int i = 0; i < keys.length; i++) { + if (i == 0) { + assertNull("not null block", cache.get(keys[i])); + } else { + assertSame("wrong block", blocks[i], cache.get(keys[i])); + + } + } + assertEquals("wrong hit count", (long) (keys.length - 1), + cache.getCacheHits()); + assertEquals("wrong miss count", 1L, cache.getCacheMisses()); + assertEquals("wrong cache load", keys.length, cache.getCacheLoad()); + } + + @Test + public void readingAndWritingAllBlocksShouldResultInHalfCacheMisses() + throws Exception { + cache.clearCache(); + + for (int i = 0; i < keys.length; i++) { + assertNull("not null block", cache.get(keys[i])); + cache.put(keys[i], blocks[i]); + assertSame("wrong block", blocks[i], cache.get(keys[i])); + } + assertEquals("wrong hit count", (long) keys.length, cache.getCacheHits()); + assertEquals("wrong miss count", (long) keys.length, + cache.getCacheMisses()); + assertEquals("wrong cache load", keys.length, cache.getCacheLoad()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockReader.java new file mode 100644 index 00000000000..369c49168a4 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockReader.java @@ -0,0 +1,352 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.inode.BasicFileINode; +import org.apache.hadoop.runc.squashfs.superblock.CompressionId; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry; +import org.apache.hadoop.runc.squashfs.test.InMemoryFragmentTable; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +public class TestDataBlockReader { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + File tempFile; + RandomAccessFile raf; + SuperBlock sb; + int tag; + + @Before + public void setUp() throws Exception { + tempFile = temp.newFile(); + tag = 10101; + raf = new RandomAccessFile(tempFile, "rw"); + sb = new SuperBlock(); + sb.writeData(raf); + } + + DataBlockRef writeBlock(byte[] data, int offset, int length) + throws IOException { + DataBlockWriter writer = + new DataBlockWriter(raf, SuperBlock.DEFAULT_BLOCK_SIZE); + return writer.write(data, offset, length); + } + + FragmentRef writeFragment(FragmentWriter writer, byte[] data, int offset, + int length) throws IOException { + FragmentRef ref = writer.write(data, offset, length); + return ref; + } + + @Test + public void readOfSingleFragmentShouldSucceed() throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE - 1]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + + FragmentWriter fw = new FragmentWriter(raf, SuperBlock.DEFAULT_BLOCK_SIZE); + FragmentRef ref = writeFragment(fw, data, 0, data.length); + fw.flush(); + System.out.println(ref); + FragmentTableEntry entry = fw.getFragmentEntries().get(0); + System.out.println(entry); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setFragmentOffset(ref.getOffset()); + inode.setFragmentBlockIndex(ref.getFragmentIndex()); + + FragmentTable ft = new InMemoryFragmentTable(entry); + + DataBlock block = + DataBlockReader.readFragment(tag, raf, sb, inode, ft, data.length); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test(expected = SquashFsException.class) + public void 
readOfSingleFragmentShouldFailIfReadTooManyBytes() + throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE - 2]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + + FragmentWriter fw = new FragmentWriter(raf, SuperBlock.DEFAULT_BLOCK_SIZE); + FragmentRef ref = writeFragment(fw, data, 0, data.length); + fw.flush(); + System.out.println(ref); + FragmentTableEntry entry = fw.getFragmentEntries().get(0); + System.out.println(entry); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setFragmentOffset(ref.getOffset()); + inode.setFragmentBlockIndex(ref.getFragmentIndex()); + + FragmentTable ft = new InMemoryFragmentTable(entry); + + DataBlock block = + DataBlockReader.readFragment(tag, raf, sb, inode, ft, data.length + 1); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test + public void readOfSingleCompressedBlockShouldSucceed() throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test(expected = SquashFsException.class) + public void readOfCompressedBlockShouldFailIfCompressionIdIsNotSet() + throws Exception { + sb.setCompressionId(CompressionId.NONE); + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlockReader.readBlock(tag, raf, sb, inode, 0); + } + + @Test(expected = UnsupportedOperationException.class) + public void readOfCompressedBlockShouldFailIfCompressionOptionsSet() + throws Exception { + sb.setFlags( + (short) (sb.getFlags() | SuperBlockFlag.COMPRESSOR_OPTIONS.mask())); + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlockReader.readBlock(tag, raf, sb, inode, 0); + } + + @Test(expected = SquashFsException.class) + public void readOfCompressedBlockShouldFailIfDecompressedTooLarge() + throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + sb.setBlockSize(SuperBlock.DEFAULT_BLOCK_SIZE / 2); + BasicFileINode inode = new 
BasicFileINode(); + inode.setFileSize(data.length / 2); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlockReader.readBlock(tag, raf, sb, inode, 0); + } + + @Test(expected = UnsupportedOperationException.class) + public void readOfCompressedBlockShouldFailIfCompressionIdIsSetToAnUnsupportedAlgorithm() + throws Exception { + sb.setCompressionId(CompressionId.XZ); + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlockReader.readBlock(tag, raf, sb, inode, 0); + } + + @Test + public void readOfMultipleBlocksShouldSucceed() throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + DataBlockRef ref2 = writeBlock(data, 0, data.length); + System.out.println(ref2); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length * 2); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize(), ref2.getInodeSize() }); + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + + block = DataBlockReader.readBlock(tag, raf, sb, inode, 1); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test + public void readOfSingleCompressedBlockShouldSucceedWhenFragmentPresent() + throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length + 1); + inode.setFragmentBlockIndex(1); + inode.setFragmentOffset(1); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test + public void readOfPartialBlockShouldSucceed() throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) 0xff; + } + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length - 1); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length - 1, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, 
block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test + public void readOfSparseBlockShouldSucceed() throws Exception { + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", 0, block.getPhysicalSize()); + } + + @Test + public void readOfSingleUncompressedBlockShouldSucceed() throws Exception { + Random r = new Random(0L); + + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + r.nextBytes(data); + + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] { ref.getInodeSize() }); + + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } + + @Test(expected = SquashFsException.class) + public void readOfOutOfBoundsBlockShouldFail() throws Exception { + Random r = new Random(0L); + + byte[] data = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + r.nextBytes(data); + + DataBlockRef ref = writeBlock(data, 0, data.length); + System.out.println(ref); + + BasicFileINode inode = new BasicFileINode(); + inode.setFileSize(data.length); + inode.setBlocksStart(ref.getLocation()); + inode.setBlockSizes(new int[] {}); + + DataBlock block = DataBlockReader.readBlock(tag, raf, sb, inode, 0); + assertEquals("wrong logical size", data.length, block.getLogicalSize()); + assertEquals("wrong physical size", data.length, block.getPhysicalSize()); + assertArrayEquals("wrong data", data, block.getData()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockRef.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockRef.java new file mode 100644 index 00000000000..03a299b83a4 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockRef.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestDataBlockRef { + + DataBlockRef ref; + + @Before + public void setUp() { + ref = new DataBlockRef(1L, 2, 3, false, false); + } + + @Test + public void locationPropertyShouldWorkAsExpected() { + assertEquals(1L, ref.getLocation()); + } + + @Test + public void logicalSizePropertyShouldWorkAsExpected() { + assertEquals(2, ref.getLogicalSize()); + } + + @Test + public void physicalSizePropertyShouldWorkAsExpected() { + assertEquals(3, ref.getPhysicalSize()); + } + + @Test + public void compressedPropertyShouldWorkAsExpected() { + assertFalse(ref.isCompressed()); + assertTrue(new DataBlockRef(1L, 2, 3, true, false).isCompressed()); + } + + @Test + public void getInodeSizeShouldReturnSizeAndCompressedValues() { + assertEquals(0x1_000_003, ref.getInodeSize()); + ref = new DataBlockRef(1L, 2, 3, true, false); + assertEquals(3, ref.getInodeSize()); + } + + @Test + public void sparsePropertyShouldWorkAsExpected() { + assertFalse(ref.isSparse()); + assertTrue(new DataBlockRef(1L, 2, 3, false, true).isSparse()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(ref.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockWriter.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockWriter.java new file mode 100644 index 00000000000..752f2597cda --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestDataBlockWriter.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.DataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.RandomAccessFile; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestDataBlockWriter { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + File tempFile; + RandomAccessFile raf; + DataBlockWriter writer; + + @Before + public void setUp() throws Exception { + tempFile = temp.newFile(); + raf = new RandomAccessFile(tempFile, "rw"); + writer = new DataBlockWriter(raf, SuperBlock.DEFAULT_BLOCK_SIZE); + } + + @After + public void tearDown() throws Exception { + writer = null; + raf.close(); + raf = null; + } + + @Test + public void writerMustSaveCompressibleBlockProperly() throws Exception { + byte[] buf = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; // all ones + } + + DataBlockRef ref = writer.write(buf, 0, buf.length); + System.out.println(ref); + assertEquals("wrong location", 0L, ref.getLocation()); + assertEquals("wrong logical size", SuperBlock.DEFAULT_BLOCK_SIZE, + ref.getLogicalSize()); + assertTrue("not compressed", ref.isCompressed()); + assertFalse("sparse", ref.isSparse()); + + byte[] compressed = new byte[ref.getPhysicalSize()]; + raf.seek(0L); + raf.readFully(compressed, 0, compressed.length); + + byte[] decompressed = DataTestUtils.decompress(compressed); + assertEquals("Wrong length", buf.length, decompressed.length); + assertArrayEquals("Wrong buffer", buf, decompressed); + } + + @Test + public void writerMustSaveUncompressibleBlockProperly() throws Exception { + Random random = new Random(0L); + + byte[] buf = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + random.nextBytes(buf); + + DataBlockRef ref = writer.write(buf, 0, buf.length); + System.out.println(ref); + assertEquals("wrong location", 0L, ref.getLocation()); + assertEquals("wrong logical size", SuperBlock.DEFAULT_BLOCK_SIZE, + ref.getLogicalSize()); + assertFalse("compressed", ref.isCompressed()); + assertFalse("sparse", ref.isSparse()); + + byte[] buf2 = new byte[ref.getPhysicalSize()]; + raf.seek(0L); + raf.readFully(buf2, 0, buf2.length); + + assertEquals("Wrong length", buf.length, buf2.length); + assertArrayEquals("Wrong buffer", buf, buf2); + } + + @Test + public void writerMustSaveSparseBlockProperly() throws Exception { + byte[] buf = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + + DataBlockRef ref = writer.write(buf, 0, buf.length); + System.out.println(ref); + assertEquals("wrong location", 0L, ref.getLocation()); + assertEquals("wrong logical size", SuperBlock.DEFAULT_BLOCK_SIZE, + ref.getLogicalSize()); + assertEquals("wrong physical size", 0L, ref.getPhysicalSize()); + assertFalse("compressed", ref.isCompressed()); + assertTrue("sparse", ref.isSparse()); + } + + @Test(expected = IllegalArgumentException.class) + public void writeOfShortBlockMustFail() throws Exception { + writer.write(new byte[SuperBlock.DEFAULT_BLOCK_SIZE - 1], 0, + SuperBlock.DEFAULT_BLOCK_SIZE - 1); + } + + @Test(expected = IllegalArgumentException.class) + public void writeOfEmptyBlockMustFail() throws Exception { + writer.write(new 
byte[SuperBlock.DEFAULT_BLOCK_SIZE], 0, 0); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentRef.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentRef.java new file mode 100644 index 00000000000..fec5f01bb4f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentRef.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestFragmentRef { + + FragmentRef ref; + + @Before + public void setUp() { + ref = new FragmentRef(1); + } + + @Test + public void getOffsetShouldReturnConstructedValue() { + assertEquals(1, ref.getOffset()); + } + + @Test + public void getFragmentIndexShouldInitiallyReturnInvalidValue() { + assertEquals(-1, ref.getFragmentIndex()); + } + + @Test + public void isValidShouldReturnFalseUntilCommitIsCalled() { + assertFalse(ref.isValid()); + ref.commit(2); + assertTrue(ref.isValid()); + } + + @Test + public void commitShouldUpdateFragmentIndex() { + ref.commit(2); + assertEquals(2, ref.getFragmentIndex()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(ref.toString()); + } + + @Test + public void toStringShouldNotFailAfterCommit() { + ref.commit(2); + System.out.println(ref.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentWriter.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentWriter.java new file mode 100644 index 00000000000..a2350bae723 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/data/TestFragmentWriter.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.data; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry; +import org.apache.hadoop.runc.squashfs.test.DataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestFragmentWriter { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + File tempFile; + RandomAccessFile raf; + FragmentWriter writer; + + @Before + public void setUp() throws Exception { + tempFile = temp.newFile(); + raf = new RandomAccessFile(tempFile, "rw"); + writer = new FragmentWriter(raf, SuperBlock.DEFAULT_BLOCK_SIZE); + } + + @After + public void tearDown() throws Exception { + writer = null; + raf.close(); + raf = null; + } + + @Test + public void writerMustSaveCompressibleBlockProperly() throws Exception { + byte[] buf = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; // all ones + } + + writer.write(buf, 0, buf.length); + writer.flush(); + + assertEquals("wrong fragment entry count", 1, + writer.getFragmentEntryCount()); + FragmentTableEntry fte = writer.getFragmentEntries().get(0); + assertTrue("Not compressed", fte.isCompressed()); + + byte[] compressed = new byte[fte.getDiskSize()]; + raf.seek(0L); + raf.readFully(compressed, 0, compressed.length); + + byte[] decompressed = DataTestUtils.decompress(compressed); + assertEquals("Wrong length", buf.length, decompressed.length); + assertArrayEquals("Wrong buffer", buf, decompressed); + } + + @Test + public void writerMustSaveUncompressibleBlockProperly() throws Exception { + Random random = new Random(0L); + + byte[] buf = new byte[SuperBlock.DEFAULT_BLOCK_SIZE]; + random.nextBytes(buf); + + writer.write(buf, 0, buf.length); + writer.flush(); + + assertEquals("wrong fragment entry count", 1, + writer.getFragmentEntryCount()); + FragmentTableEntry fte = writer.getFragmentEntries().get(0); + assertFalse("Compressed", fte.isCompressed()); + + byte[] buf2 = new byte[fte.getDiskSize()]; + raf.seek(0L); + raf.readFully(buf2, 0, buf2.length); + + assertEquals("Wrong length", buf.length, buf2.length); + assertArrayEquals("Wrong buffer", buf, buf2); + } + + @Test(expected = IllegalArgumentException.class) + public void attemptToWriteZeroBytesShouldFail() throws Exception { + writer.write(new byte[0], 0, 0); + } + + @Test(expected = IllegalArgumentException.class) + public void attemptToWriteMoreThanBlockSizeShouldFail() throws Exception { + writer.write(new byte[SuperBlock.DEFAULT_BLOCK_SIZE + 1], 0, + SuperBlock.DEFAULT_BLOCK_SIZE + 1); + } + + @Test + public void writingBeyondBlockSizeShouldTriggerFlush() throws Exception { + writer.write(new byte[SuperBlock.DEFAULT_BLOCK_SIZE], 0, + SuperBlock.DEFAULT_BLOCK_SIZE); + assertEquals("wrong fragment entry count (before)", 0, + writer.getFragmentEntryCount()); + writer.write(new byte[1], 0, 1); + assertEquals("wrong fragment entry count (after)", 1, + writer.getFragmentEntryCount()); + } + + 
@Test + public void flushWithNoDataShouldNotTriggerFragmentEntryCreation() + throws Exception { + writer.flush(); + assertEquals("wrong fragment entry count", 0, + writer.getFragmentEntryCount()); + } + + @Test + public void flushWithDataShouldTriggerFragmentEntryCreation() + throws Exception { + writer.write(new byte[1], 0, 1); + writer.flush(); + assertEquals("wrong fragment entry count", 1, + writer.getFragmentEntryCount()); + } + + @Test + public void doubleFlushShouldNotTriggerAdditionalFragmentEntryCreation() + throws Exception { + writer.write(new byte[1], 0, 1); + writer.flush(); + assertEquals("wrong fragment entry count (before)", 1, + writer.getFragmentEntryCount()); + writer.flush(); + assertEquals("wrong fragment entry count (after)", 1, + writer.getFragmentEntryCount()); + } + + @Test + public void fragmentTableRefSizeShouldBeZeroIfNoDataWritten() + throws Exception { + writer.flush(); + assertEquals("wrong entry count", 0, writer.getFragmentEntryCount()); + assertEquals("wrong table size", 0, writer.getFragmentTableRefSize()); + } + + @Test + public void fragmentTableRefSizeShouldBeOneIfDataWritten() throws Exception { + writer.write(new byte[1], 0, 1); + writer.flush(); + assertEquals("wrong entry count", 1, writer.getFragmentEntryCount()); + assertEquals("wrong table size", 1, writer.getFragmentTableRefSize()); + } + + @Test + public void fragmentTableRefSizeShouldBeOneIfFullBlockWritten() + throws Exception { + for (int i = 0; i < 512; i++) { + writer.write(new byte[1], 0, 1); + writer.flush(); + } + assertEquals("wrong entry count", 512, writer.getFragmentEntryCount()); + assertEquals("wrong table size", 1, writer.getFragmentTableRefSize()); + } + + @Test + public void fragmentTableRefSizeShouldBeTwoIfFullBlockPlusOneWritten() + throws Exception { + for (int i = 0; i < 513; i++) { + writer.write(new byte[1], 0, 1); + writer.flush(); + } + assertEquals("wrong entry count", 513, writer.getFragmentEntryCount()); + assertEquals("wrong table size", 2, writer.getFragmentTableRefSize()); + } + + @Test + public void saveShouldSerializeEmptyMetadataIfNoFragmentsPresent() + throws Exception { + byte[] data = DataTestUtils.saveFragmentMetadata(writer); + assertEquals("wrong length", 0, data.length); + } + + @Test + public void saveShouldSerializeOneEntryIfOneFragmentPresent() + throws Exception { + writer.write(new byte[1], 0, 1); + writer.flush(); + + byte[] data = DataTestUtils.saveFragmentMetadata(writer); + byte[] decoded = DataTestUtils.decodeMetadataBlock(data); + assertEquals("wrong data length", 16, decoded.length); + ByteBuffer bb = ByteBuffer.wrap(decoded).order(ByteOrder.LITTLE_ENDIAN); + long start = bb.getLong(); + int size = bb.getInt(); + int unused = bb.getInt(); + assertEquals("wrong start", 0L, start); + assertEquals("wrong size", 0x1000001, size); + assertEquals("wrong unused value", 0, unused); + } + + @Test + public void saveShouldSerializeOneEntryIfTwoFragmentsPresent() + throws Exception { + writer.write(new byte[1], 0, 1); + writer.flush(); + writer.write(new byte[1], 0, 1); + writer.flush(); + + byte[] data = DataTestUtils.saveFragmentMetadata(writer); + byte[] decoded = DataTestUtils.decodeMetadataBlock(data); + assertEquals("wrong data length", 32, decoded.length); + ByteBuffer bb = ByteBuffer.wrap(decoded).order(ByteOrder.LITTLE_ENDIAN); + + assertEquals("wrong start 0", 0L, bb.getLong()); + assertEquals("wrong size 0", 0x1000001, bb.getInt()); + assertEquals("wrong unused value 0", 0, bb.getInt()); + + assertEquals("wrong start 1", 1L, bb.getLong()); + 
assertEquals("wrong size 1", 0x1000001, bb.getInt()); + assertEquals("wrong unused value 1", 0, bb.getInt()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryBuilder.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryBuilder.java new file mode 100644 index 00000000000..f40a9c91ecd --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryBuilder.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.directory; + +import org.apache.hadoop.runc.squashfs.inode.INodeType; +import org.apache.hadoop.runc.squashfs.test.DirectoryTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestDirectoryBuilder { + + DirectoryBuilder db; + + @Before + public void setUp() { + db = new DirectoryBuilder(); + } + + @Test + public void addShouldCreateEntry() { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + assertEquals("wrong entry count", 1, db.entries.size()); + DirectoryBuilder.Entry entry = db.entries.get(0); + assertEquals("wrong start block", 1, entry.startBlock); + assertEquals("wrong inode number", 2, entry.inodeNumber); + assertEquals("wrong offset", (short) 3, entry.offset); + assertEquals("wrong type", INodeType.BASIC_FILE.value(), entry.type); + assertEquals("wrong name", "test", + new String(entry.name, StandardCharsets.ISO_8859_1)); + assertEquals(24, db.getStructureSize()); + } + + @Test + public void addMultipleShouldCreateOnlyOneDirectoryHeader() { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + db.add("test2", 1, 4, (short) 3, INodeType.EXTENDED_FILE); + db.build(); + assertEquals("wrong element count", 3, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + assertSame("wrong class for entry 1", DirectoryEntry.class, + db.elements.get(1).getClass()); + assertSame("wrong class for entry 2", DirectoryEntry.class, + db.elements.get(2).getClass()); + } + + @Test + public void addMultipleShouldCreateMultipleDirectoryHeadersIfStartBlockChanges() { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + db.add("test2", 4, 5, (short) 3, INodeType.EXTENDED_FILE); + db.build(); + assertEquals("wrong element count", 4, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + assertSame("wrong class for entry 1", DirectoryEntry.class, + db.elements.get(1).getClass()); + 
assertSame("wrong class for entry 2", DirectoryHeader.class, + db.elements.get(2).getClass()); + assertSame("wrong class for entry 3", DirectoryEntry.class, + db.elements.get(3).getClass()); + } + + @Test + public void addMultipleShouldCreateMultipleDirectoryHeadersIfInodeNumberGoesBackwards() { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + db.add("test2", 1, 1, (short) 3, INodeType.EXTENDED_FILE); + db.build(); + assertEquals("wrong element count", 4, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + assertSame("wrong class for entry 1", DirectoryEntry.class, + db.elements.get(1).getClass()); + assertSame("wrong class for entry 2", DirectoryHeader.class, + db.elements.get(2).getClass()); + assertSame("wrong class for entry 3", DirectoryEntry.class, + db.elements.get(3).getClass()); + } + + @Test + public void addMultipleShouldCreateMultipleDirectoryHeadersIfInodeNumberIsTooLarge() { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + db.add("test2", 1, 32770, (short) 3, INodeType.EXTENDED_FILE); + db.build(); + assertEquals("wrong element count", 4, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + assertSame("wrong class for entry 1", DirectoryEntry.class, + db.elements.get(1).getClass()); + assertSame("wrong class for entry 2", DirectoryHeader.class, + db.elements.get(2).getClass()); + assertSame("wrong class for entry 3", DirectoryEntry.class, + db.elements.get(3).getClass()); + } + + @Test + public void addMultipleShouldCreateOnlyOneDirectoryHeaderIf256Entries() { + for (int i = 1; i <= 256; i++) { + db.add("test" + i, 1, i, (short) 3, INodeType.EXTENDED_FILE); + } + db.build(); + assertEquals("wrong element count", 257, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + for (int i = 1; i <= 256; i++) { + assertSame("wrong class for entry " + i, DirectoryEntry.class, + db.elements.get(i).getClass()); + } + } + + @Test + public void addMultipleShouldCreateMultipleDirectoryHeadersIf257Entries() { + for (int i = 1; i <= 257; i++) { + db.add("test" + i, 1, i, (short) 3, INodeType.EXTENDED_FILE); + } + db.build(); + assertEquals("wrong element count", 259, db.elements.size()); + assertSame("wrong class for entry 0", DirectoryHeader.class, + db.elements.get(0).getClass()); + for (int i = 1; i <= 256; i++) { + assertSame("wrong class for entry " + i, DirectoryEntry.class, + db.elements.get(i).getClass()); + } + assertSame("wrong class for entry 257", DirectoryHeader.class, + db.elements.get(257).getClass()); + assertSame("wrong class for entry 258", DirectoryEntry.class, + db.elements.get(258).getClass()); + } + + @Test(expected = IllegalArgumentException.class) + public void addWithEmptyFilenameShouldFail() { + db.add("", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + } + + @Test(expected = IllegalArgumentException.class) + public void addWithTooLongFilenameShouldFail() { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < 257; i++) { + buf.append("x"); + } + db.add(buf.toString(), 1, 2, (short) 3, INodeType.EXTENDED_FILE); + } + + @Test + public void getStructureSizeShouldReturnZeroWithNoEntries() { + assertEquals(0, db.getStructureSize()); + } + + @Test + public void writeShouldSerializeZeroBytes() throws Exception { + byte[] data = DirectoryTestUtils.serializeDirectoryBuilder(db); + assertEquals("wrong length", 0, data.length); + } + + @Test + 
public void writeShouldSerializeData() throws Exception { + db.add("test", 1, 2, (short) 3, INodeType.EXTENDED_FILE); + byte[] data = DirectoryTestUtils.serializeDirectoryBuilder(db); + + List elements = + DirectoryTestUtils.deserializeDirectory(data); + assertEquals("wrong length", 2, elements.size()); + assertEquals("wrong type for element 0", DirectoryHeader.class, + elements.get(0).getClass()); + assertEquals("wrong type for element 1", DirectoryEntry.class, + elements.get(1).getClass()); + + DirectoryHeader hdr = (DirectoryHeader) elements.get(0); + DirectoryEntry entry = (DirectoryEntry) elements.get(1); + + assertEquals("wrong count", 0, hdr.count); + assertEquals("wrong start block", 1, hdr.startBlock); + assertEquals("wrong inode number", 2, hdr.inodeNumber); + assertSame("wrong header", hdr, entry.header); + assertEquals("wrong offset", (short) 3, entry.offset); + assertEquals("wrong inode number delta", (short) 0, entry.inodeNumberDelta); + assertEquals("wrong type", INodeType.BASIC_FILE.value(), entry.type); + assertEquals("wrong size", (short) 3, entry.size); + assertEquals("wrong name", "test", + new String(entry.getName(), StandardCharsets.ISO_8859_1)); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryEntry.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryEntry.java new file mode 100644 index 00000000000..4dfb159a1f7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryEntry.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.directory; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.test.DirectoryTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; + +public class TestDirectoryEntry { + + DirectoryEntry entry; + + @Before + public void setUp() { + DirectoryHeader hdr = new DirectoryHeader(); + hdr.count = 0; + hdr.startBlock = 1; + hdr.inodeNumber = 2; + + entry = new DirectoryEntry(); + entry.header = hdr; + entry.offset = (short) 3; + entry.inodeNumberDelta = (short) 4; + entry.type = (short) 5; + entry.name = "test".getBytes(StandardCharsets.ISO_8859_1); + entry.size = (short) (entry.name.length - 1); + } + + @Test + public void headerPropertyWorksAsExpected() { + assertNotNull(entry.getHeader()); + DirectoryHeader hdr2 = new DirectoryHeader(); + entry.header = hdr2; + assertSame(hdr2, entry.getHeader()); + } + + @Test + public void offsetPropertyWorksAsExpected() { + assertEquals((short) 3, entry.getOffset()); + entry.offset = (short) 4; + assertEquals((short) 4, entry.getOffset()); + } + + @Test + public void inodeNumberDeltaPropertyWorksAsExpected() { + assertEquals(4, entry.getInodeNumberDelta()); + entry.inodeNumberDelta = 5; + assertEquals(5, entry.getInodeNumberDelta()); + } + + @Test + public void typePropertyWorksAsExpected() { + assertEquals((short) 5, entry.getType()); + entry.type = (short) 6; + assertEquals((short) 6, entry.getType()); + } + + @Test + public void sizePropertyWorksAsExpected() { + assertEquals((short) 3, entry.getSize()); + entry.size = (short) 4; + assertEquals((short) 4, entry.getSize()); + } + + @Test + public void namePropertyWorksAsExpected() { + assertEquals("test", + new String(entry.getName(), StandardCharsets.ISO_8859_1)); + entry.name = "test2".getBytes(StandardCharsets.ISO_8859_1); + assertEquals("test2", + new String(entry.getName(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void nameAsStringPropertyWorksAsExpected() { + assertEquals("test", entry.getNameAsString()); + entry.name = "test2".getBytes(StandardCharsets.ISO_8859_1); + assertEquals("test2", entry.getNameAsString()); + } + + @Test + public void getStructureSizeReturnsCorrectValue() { + assertEquals(12, entry.getStructureSize()); + entry.name = "test2".getBytes(StandardCharsets.ISO_8859_1); + assertEquals(13, entry.getStructureSize()); + } + + @Test + public void readShouldSucceed() throws Exception { + byte[] buf = new byte[12]; + ByteBuffer bb = ByteBuffer.wrap(buf); + bb.putShort((short) 3); // offset + bb.putShort((short) 4); // inode number delta + bb.putShort((short) 5); // type + bb.putShort((short) 3); // size + bb.put("test".getBytes(StandardCharsets.ISO_8859_1)); + + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (DataInputStream dis = new DataInputStream(bis)) { + DirectoryEntry dest = DirectoryEntry.read(entry.header, dis); + assertSame("wrong header", entry.header, dest.header); + assertEquals("wrong offset", (short) 3, dest.getOffset()); + assertEquals("wrong inode number delta", (short) 4, + dest.getInodeNumberDelta()); + assertEquals("wrong type", (short) 5, dest.getType()); + assertEquals("wrong size", (short) 3, dest.getSize()); + 
assertEquals("wrong name", "test", + new String(dest.getName(), StandardCharsets.ISO_8859_1)); + } + } + } + + @Test(expected = SquashFsException.class) + public void readShouldFailIfSizeIsTooLarge() throws Exception { + byte[] buf = new byte[261]; + ByteBuffer bb = ByteBuffer.wrap(buf); + bb.putShort((short) 3); // offset + bb.putShort((short) 4); // inode number delta + bb.putShort((short) 5); // type + bb.putShort((short) 256); // size + bb.put("test".getBytes(StandardCharsets.ISO_8859_1)); + + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (DataInputStream dis = new DataInputStream(bis)) { + DirectoryEntry.read(entry.header, dis); + } + } + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = DirectoryTestUtils.serializeDirectoryElement(entry); + DirectoryEntry dest = + DirectoryTestUtils.deserializeDirectoryEntry(entry.header, data); + + assertSame("wrong header", entry.header, dest.header); + assertEquals("wrong offset", (short) 3, dest.getOffset()); + assertEquals("wrong inode number delta", (short) 4, + dest.getInodeNumberDelta()); + assertEquals("wrong type", (short) 5, dest.getType()); + assertEquals("wrong size", (short) 3, dest.getSize()); + assertEquals("wrong name", "test", + new String(dest.getName(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(entry.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryHeader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryHeader.java new file mode 100644 index 00000000000..d27612f035f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/directory/TestDirectoryHeader.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.directory; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.test.DirectoryTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.junit.Assert.assertEquals; + +public class TestDirectoryHeader { + + DirectoryHeader hdr; + + @Before + public void setUp() { + hdr = new DirectoryHeader(); + hdr.count = 0; + hdr.startBlock = 1; + hdr.inodeNumber = 2; + } + + @Test + public void countPropertyWorksAsExpected() { + assertEquals(0, hdr.getCount()); + hdr.count = 1; + assertEquals(1, hdr.getCount()); + } + + @Test + public void startBlockPropertyWorksAsExpected() { + assertEquals(1, hdr.getStartBlock()); + hdr.startBlock = 2; + assertEquals(2, hdr.getStartBlock()); + } + + @Test + public void inodeNumberPropertyWorksAsExpected() { + assertEquals(2, hdr.getInodeNumber()); + hdr.inodeNumber = 3; + assertEquals(3, hdr.getInodeNumber()); + } + + @Test + public void getStructureSizeReturnsCorrectValue() { + assertEquals(12, hdr.getStructureSize()); + } + + @Test + public void readShouldSucceed() throws Exception { + byte[] buf = new byte[12]; + ByteBuffer bb = ByteBuffer.wrap(buf); + bb.putInt(0); + bb.putInt(1); + bb.putInt(2); + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (DataInputStream dis = new DataInputStream(bis)) { + DirectoryHeader dest = DirectoryHeader.read(dis); + assertEquals("wrong count", 0, dest.getCount()); + assertEquals("wrong start block", 1, dest.getStartBlock()); + assertEquals("wrong inode number", 2, dest.getInodeNumber()); + } + } + } + + @Test(expected = SquashFsException.class) + public void readShouldFailIfCountIsTooLarge() throws Exception { + byte[] buf = new byte[12]; + ByteBuffer bb = ByteBuffer.wrap(buf); + bb.putInt(256); + bb.putInt(1); + bb.putInt(2); + try (ByteArrayInputStream bis = new ByteArrayInputStream(buf)) { + try (DataInputStream dis = new DataInputStream(bis)) { + DirectoryHeader.read(dis); + } + } + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = DirectoryTestUtils.serializeDirectoryElement(hdr); + DirectoryHeader dest = DirectoryTestUtils.deserializeDirectoryHeader(data); + + assertEquals("wrong count", 0, dest.getCount()); + assertEquals("wrong start block", 1, dest.getStartBlock()); + assertEquals("wrong inode number", 2, dest.getInodeNumber()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(hdr.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicDeviceINode.java new file mode 100644 index 00000000000..83d29c5db4f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicDeviceINode.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; + +public class TestAbstractBasicDeviceINode { + + AbstractBasicDeviceINode inode; + + @Before + public void setUp() { + inode = new BasicBlockDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + } + + @Test + public void devicePropertyShouldWorkAsExpected() { + assertEquals(1, inode.getDevice()); + inode.setDevice(2); + assertEquals(2, inode.getDevice()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void getXattrIndexShouldReturnNotPresent() { + assertEquals(-1, inode.getXattrIndex()); + } + + @Test + public void setXattrIndexWithNotPresentValueShouldSucceed() { + inode.setXattrIndex(-1); + } + + @Test(expected = IllegalArgumentException.class) + public void setXattrIndexWithInvalidValueShouldFail() { + inode.setXattrIndex(1); + } + + @Test + public void isXattrPresentShouldReturnFalse() { + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(8, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + AbstractBasicDeviceINode bDest = (AbstractBasicDeviceINode) dest; + + assertEquals("Wrong device", 1, bDest.getDevice()); + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicIpcINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicIpcINode.java new file mode 100644 index 00000000000..43a3c23c4c2 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractBasicIpcINode.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; + +public class TestAbstractBasicIpcINode { + + AbstractBasicIpcINode inode; + + @Before + public void setUp() { + inode = new BasicSocketINode(); + inode.setNlink(2); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void getXattrIndexShouldReturnNotPresent() { + assertEquals(-1, inode.getXattrIndex()); + } + + @Test + public void setXattrIndexWithNotPresentValueShouldSucceed() { + inode.setXattrIndex(-1); + } + + @Test(expected = IllegalArgumentException.class) + public void setXattrIndexWithInvalidValueShouldFail() { + inode.setXattrIndex(1); + } + + @Test + public void isXattrPresentShouldReturnFalse() { + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(4, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + AbstractBasicIpcINode bDest = (AbstractBasicIpcINode) dest; + + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedDeviceINode.java new file mode 100644 index 00000000000..9fc054ae57c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedDeviceINode.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestAbstractExtendedDeviceINode { + + AbstractExtendedDeviceINode inode; + + @Before + public void setUp() { + inode = new ExtendedBlockDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + inode.setXattrIndex(3); + } + + @Test + public void devicePropertyShouldWorkAsExpected() { + assertEquals(1, inode.getDevice()); + inode.setDevice(2); + assertEquals(2, inode.getDevice()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void xattrIndexPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getXattrIndex()); + inode.setXattrIndex(4); + assertEquals(4, inode.getXattrIndex()); + } + + @Test + public void isXattrPresentShouldReturnTrueIfPresent() { + assertTrue(inode.isXattrPresent()); + inode.setXattrIndex(-1); + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(12, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + AbstractExtendedDeviceINode bDest = (AbstractExtendedDeviceINode) dest; + + assertEquals("Wrong device", 1, bDest.getDevice()); + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + assertEquals("Wrong xattr index", 3, bDest.getXattrIndex()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedIpcINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedIpcINode.java new file mode 100644 index 00000000000..de90f4bfc0c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractExtendedIpcINode.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestAbstractExtendedIpcINode { + + AbstractExtendedIpcINode inode; + + @Before + public void setUp() { + inode = new ExtendedSocketINode(); + inode.setNlink(2); + inode.setXattrIndex(3); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void xattrIndexPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getXattrIndex()); + inode.setXattrIndex(4); + assertEquals(4, inode.getXattrIndex()); + } + + @Test + public void isXattrPresentShouldReturnTrueIfPresent() { + assertTrue(inode.isXattrPresent()); + inode.setXattrIndex(-1); + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(8, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + AbstractExtendedIpcINode bDest = (AbstractExtendedIpcINode) dest; + + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + assertEquals("Wrong xattr index", 3, bDest.getXattrIndex()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractINode.java new file mode 100644 index 00000000000..c2cd289caa0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestAbstractINode.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestAbstractINode { + + AbstractINode inode; + int modifiedTime; + + @Before + public void setUp() { + inode = new BasicFifoINode(); // pick a simple one + inode.setUidIdx((short) 1); + inode.setGidIdx((short) 2); + inode.setInodeNumber(3); + inode.setPermissions((short) 0755); + modifiedTime = (int) (System.currentTimeMillis() / 1000L); + inode.setModifiedTime(modifiedTime); + } + + @Test + public void copyToShouldDuplicateCommonINodeProperties() { + INode dest = new BasicSymlinkINode(); + inode.copyTo(dest); + assertEquals("Wrong uidIdx", (short) 1, dest.getUidIdx()); + assertEquals("Wrong gidIdx", (short) 2, dest.getGidIdx()); + assertEquals("Wrong inodeNumber", 3, dest.getInodeNumber()); + assertEquals("Wrong permissions", (short) 0755, dest.getPermissions()); + assertEquals("Wrong modifiedTime", modifiedTime, dest.getModifiedTime()); + } + + @Test + public void getSerializedSizeShouldReturnSixteenPlusWhateverChildNeeds() { + assertEquals(16 + inode.getChildSerializedSize(), + inode.getSerializedSize()); + } + + @Test + public void uidIdxPropertyShouldWorkAsExpected() { + assertEquals((short) 1, inode.getUidIdx()); + inode.setUidIdx((short) 2); + assertEquals((short) 2, inode.getUidIdx()); + } + + @Test + public void gidIdxPropertyShouldWorkAsExpected() { + assertEquals((short) 2, inode.getGidIdx()); + inode.setGidIdx((short) 3); + assertEquals((short) 3, inode.getGidIdx()); + } + + @Test + public void inodeNumberPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getInodeNumber()); + inode.setInodeNumber(4); + assertEquals(4, inode.getInodeNumber()); + } + + @Test + public void permissionsPropertyShouldWorkAsExpected() { + assertEquals((short) 0755, inode.getPermissions()); + inode.setPermissions((short) 0644); + assertEquals((short) 0644, inode.getPermissions()); + } + + @Test + public void modifiedTimePropertyShouldWorkAsExpected() { + assertEquals(modifiedTime, inode.getModifiedTime()); + inode.setModifiedTime(modifiedTime + 1); + assertEquals(modifiedTime + 1, inode.getModifiedTime()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + assertEquals("Wrong uidIdx", (short) 1, dest.getUidIdx()); + assertEquals("Wrong gidIdx", (short) 2, dest.getGidIdx()); + assertEquals("Wrong inodeNumber", 3, dest.getInodeNumber()); + assertEquals("Wrong permissions", (short) 0755, dest.getPermissions()); + assertEquals("Wrong modifiedTime", modifiedTime, dest.getModifiedTime()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicBlockDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicBlockDeviceINode.java new file mode 100644 index 00000000000..9298e237900 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicBlockDeviceINode.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestBasicBlockDeviceINode { + + BasicBlockDeviceINode inode; + + @Before + public void setUp() { + inode = new BasicBlockDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-block-dev-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_BLOCK_DEVICE, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicBlockDeviceINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + BlockDeviceINode inode2 = new ExtendedBlockDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, BasicBlockDeviceINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + BlockDeviceINode inode2 = new ExtendedBlockDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + BlockDeviceINode result = BasicBlockDeviceINode.simplify(inode2); + assertSame("wrong class", BasicBlockDeviceINode.class, result.getClass()); + assertEquals("wrong device", 1, result.getDevice()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicCharDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicCharDeviceINode.java new file mode 100644 index 00000000000..e4e89f69f6b --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicCharDeviceINode.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestBasicCharDeviceINode { + + BasicCharDeviceINode inode; + + @Before + public void setUp() { + inode = new BasicCharDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-char-dev-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_CHAR_DEVICE, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicCharDeviceINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + CharDeviceINode inode2 = new ExtendedCharDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, BasicCharDeviceINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + CharDeviceINode inode2 = new ExtendedCharDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + CharDeviceINode result = BasicCharDeviceINode.simplify(inode2); + assertSame("wrong class", BasicCharDeviceINode.class, result.getClass()); + assertEquals("wrong device", 1, result.getDevice()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicDirectoryINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicDirectoryINode.java new file mode 100644 index 00000000000..5a24d2dc9d5 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicDirectoryINode.java @@ -0,0 +1,232 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; + +public class TestBasicDirectoryINode { + + BasicDirectoryINode inode; + + @Before + public void setUp() { + inode = new BasicDirectoryINode(); + inode.setStartBlock(1); + inode.setNlink(2); + inode.setFileSize(3); + inode.setOffset((short) 4); + inode.setParentInodeNumber(5); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-directory-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_DIRECTORY, inode.getInodeType()); + } + + @Test + public void startBlockPropertyShouldWorkAsExpected() { + assertEquals(1, inode.getStartBlock()); + inode.setStartBlock(2); + assertEquals(2, inode.getStartBlock()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void fileSizePropertyShouldWorkAsExpected() { + assertEquals(3, inode.getFileSize()); + inode.setFileSize(4); + assertEquals(4, inode.getFileSize()); + } + + @Test(expected = IllegalArgumentException.class) + public void fileSizePropertyShouldNotAllowMoreThanSixtyFourKilobytes() { + inode.setFileSize(65536); + } + + @Test + public void offsetPropertyShouldWorkAsExpected() { + assertEquals((short) 4, inode.getOffset()); + inode.setOffset((short) 5); + assertEquals((short) 5, inode.getOffset()); + } + + @Test + public void parentInodeNumberPropertyShouldWorkAsExpected() { + assertEquals(5, inode.getParentInodeNumber()); + inode.setParentInodeNumber(6); + assertEquals(6, inode.getParentInodeNumber()); + } + + @Test + public void getIndexCountShouldReturnZero() { + assertEquals((short) 0, inode.getIndexCount()); + } + + @Test + public void setIndexCountWithValidValueShouldSucceed() { + inode.setIndexCount((short) 0); + } + + @Test(expected = IllegalArgumentException.class) + public void setIndexCountWithInvalidValueShouldFail() { + inode.setIndexCount((short) 1); + } + + @Test + public void isIndexPresentShouldReturnFalse() { + assertFalse(inode.isIndexPresent()); + } + + @Test + public void getXattrIndexShouldReturnNotPresent() { + assertEquals(-1, inode.getXattrIndex()); + } + + @Test + public void setXattrIndexWithNotPresentValueShouldSucceed() { + inode.setXattrIndex(-1); + } + + @Test(expected = IllegalArgumentException.class) + public void setXattrIndexWithInvalidValueShouldFail() { + inode.setXattrIndex(1); + } + + @Test + public void isXattrPresentShouldReturnFalse() { + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(16, inode.getChildSerializedSize()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + BasicDirectoryINode bDest = (BasicDirectoryINode) dest; + + assertEquals("wrong start block", 1, bDest.getStartBlock()); + assertEquals("wrong nlink count", 2, bDest.getNlink()); +
assertEquals("wrong file size", 3, bDest.getFileSize()); + assertEquals("wrong offset", (short) 4, bDest.getOffset()); + assertEquals("wrong parent inode number", 5, bDest.getParentInodeNumber()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicDirectoryINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotNeeded() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(-1); + + DirectoryINode result = BasicDirectoryINode.simplify(inode2); + assertSame("wrong class", BasicDirectoryINode.class, result.getClass()); + + assertEquals("wrong start block", inode2.getStartBlock(), + result.getStartBlock()); + assertEquals("wrong nlink count", inode2.getNlink(), result.getNlink()); + assertEquals("wrong file size", inode2.getFileSize(), result.getFileSize()); + assertEquals("wrong offset", inode2.getOffset(), result.getOffset()); + assertEquals("wrong parent inode number", inode2.getParentInodeNumber(), + result.getParentInodeNumber()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfFileSizeTooLarge() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(65536); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(-1); + assertSame(inode2, BasicDirectoryINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfIndexPresent() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 1); + inode2.setXattrIndex(-1); + assertSame(inode2, BasicDirectoryINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(1); + assertSame(inode2, BasicDirectoryINode.simplify(inode2)); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFifoINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFifoINode.java new file mode 100644 index 00000000000..3ee7f37a6d0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFifoINode.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestBasicFifoINode { + + BasicFifoINode inode; + + @Before + public void setUp() { + inode = new BasicFifoINode(); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-fifo-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_FIFO, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicFifoINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + FifoINode inode2 = new ExtendedFifoINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, BasicFifoINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + FifoINode inode2 = new ExtendedFifoINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + FifoINode result = BasicFifoINode.simplify(inode2); + assertSame("wrong class", BasicFifoINode.class, result.getClass()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFileINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFileINode.java new file mode 100644 index 00000000000..45eb6f6b43f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicFileINode.java @@ -0,0 +1,327 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestBasicFileINode { + + BasicFileINode inode; + + @Before + public void setUp() { + inode = new BasicFileINode(); + inode.setBlocksStart(1L); + inode.setFragmentBlockIndex(2); + inode.setFragmentOffset(3); + inode.setFileSize(131073L); + inode.setBlockSizes(new int[] { 5 }); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-file-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_FILE, inode.getInodeType()); + } + + @Test + public void blocksStartPropertyShouldWorkAsExpected() { + assertEquals(1L, inode.getBlocksStart()); + inode.setBlocksStart(2L); + assertEquals(2L, inode.getBlocksStart()); + } + + @Test(expected = IllegalArgumentException.class) + public void blocksStartPropertyShouldNotAllowMoreThanFourGigabytes() { + inode.setBlocksStart(0x1_0000_0000L); + } + + @Test + public void fragmentBlockIndexPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getFragmentBlockIndex()); + inode.setFragmentBlockIndex(3); + assertEquals(3, inode.getFragmentBlockIndex()); + } + + @Test + public void isFragmentPresentShouldReturnTrueIfFragmentBlockIndexSet() { + assertTrue(inode.isFragmentPresent()); + inode.setFragmentBlockIndex(-1); + assertFalse(inode.isFragmentPresent()); + } + + @Test + public void getSparseShouldReturnZero() { + assertEquals(0L, inode.getSparse()); + } + + @Test + public void setSparseShouldAllowSettingZero() { + inode.setSparse(0L); + } + + @Test(expected = IllegalArgumentException.class) + public void setSparseShouldNotAllowSettingMoreThanZero() { + inode.setSparse(1L); + } + + @Test + public void isSparseBlockPresentShouldReturnFalse() { + assertFalse(inode.isSparseBlockPresent()); + } + + @Test + public void getNlinkShouldReturnOne() { + assertEquals(1, inode.getNlink()); + } + + @Test + public void setNlinkShouldAllowSettingOne() { + inode.setNlink(1); + } + + @Test(expected = IllegalArgumentException.class) + public void setNlinkShouldNotAllowSettingMoreThanOne() { + inode.setNlink(2); + } + + @Test + public void fragmentOffsetPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getFragmentOffset()); + inode.setFragmentOffset(4); + assertEquals(4, inode.getFragmentOffset()); + } + + @Test + public void fileSizePropertyShouldWorkAsExpected() { + assertEquals(131073L, inode.getFileSize()); + inode.setFileSize(131074L); + assertEquals(131074L, inode.getFileSize()); + } + + @Test(expected = IllegalArgumentException.class) + public void fileSizePropertyShouldNotAllowMoreThanFourGigabytes() { + inode.setFileSize(0x1_0000_0000L); + } + + @Test + public void getXattrIndexShouldReturnNotPresent() { + assertEquals(-1, inode.getXattrIndex()); + } + + @Test + public void setXattrIndexWithNotPresentValueShouldSucceed() { + inode.setXattrIndex(-1); + } + + @Test(expected = IllegalArgumentException.class) + public void setXattrIndexWithInvalidValueShouldFail() { + inode.setXattrIndex(1); + } + + @Test + public void isXattrPresentShouldReturnFalse() { + assertFalse(inode.isXattrPresent()); + } + + @Test + public void 
getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(20, inode.getChildSerializedSize()); + inode.setBlockSizes(new int[] { 1, 2, 3 }); + assertEquals(28, inode.getChildSerializedSize()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void writeDataAndReadDataWithFragmentsShouldBeReflexive() + throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + BasicFileINode bDest = (BasicFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", 2, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 3, bDest.getFragmentOffset()); + assertEquals("wrong file size", 131073L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + } + + @Test + public void writeDataAndReadDataWithoutFragmentsShouldBeReflexive() + throws IOException { + inode.setFragmentOffset(0); + inode.setFragmentBlockIndex(-1); + inode.setFileSize(131072L); + + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + BasicFileINode bDest = (BasicFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", -1, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 0, bDest.getFragmentOffset()); + assertEquals("wrong file size", 131072L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + } + + @Test + public void writeDataAndReadDataWithShortEndBlockShouldBeReflexive() + throws IOException { + inode.setFragmentOffset(0); + inode.setFragmentBlockIndex(-1); + inode.setFileSize(131071L); + + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + BasicFileINode bDest = (BasicFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", -1, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 0, bDest.getFragmentOffset()); + assertEquals("wrong file size", 131071L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicFileINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotNeeded() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + + FileINode result = BasicFileINode.simplify(inode2); + assertSame("wrong class", BasicFileINode.class, result.getClass()); + + assertEquals("wrong block start", inode2.getBlocksStart(), + result.getBlocksStart()); + assertEquals("wrong fragment block index", inode2.getFragmentBlockIndex(), + result.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", inode2.getFragmentOffset(), + result.getFragmentOffset()); + assertEquals("wrong file size", inode2.getFileSize(), 
result.getFileSize()); + assertArrayEquals("wrong block sizes", inode2.getBlockSizes(), + result.getBlockSizes()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfLinkCountGreaterThanOne() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(2); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, BasicFileINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributes() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setXattrIndex(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, BasicFileINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfSparse() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setSparse(3L); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, BasicFileINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBlocksStartTooLarge() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(0x1_0000_0000L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, BasicFileINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfFileSizeTooLarge() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(0x1_0000_0000L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, BasicFileINode.simplify(inode2)); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSocketINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSocketINode.java new file mode 100644 index 00000000000..a92baea24f6 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSocketINode.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestBasicSocketINode { + + BasicSocketINode inode; + + @Before + public void setUp() { + inode = new BasicSocketINode(); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-socket-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_SOCKET, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicSocketINode.simplify(inode)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + SocketINode inode2 = new ExtendedSocketINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, BasicSocketINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + SocketINode inode2 = new ExtendedSocketINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + SocketINode result = BasicSocketINode.simplify(inode2); + assertSame("wrong class", BasicSocketINode.class, result.getClass()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSymlinkINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSymlinkINode.java new file mode 100644 index 00000000000..7ae3902f063 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestBasicSymlinkINode.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; + +public class TestBasicSymlinkINode { + + BasicSymlinkINode inode; + + @Before + public void setUp() { + inode = new BasicSymlinkINode(); + inode.setNlink(2); + inode.setTargetPath("/test".getBytes(StandardCharsets.ISO_8859_1)); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("basic-symlink-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.BASIC_SYMLINK, inode.getInodeType()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void targetPathPropertyShouldWorkAsExpected() { + assertEquals("/test", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + inode.setTargetPath("/test2".getBytes(StandardCharsets.ISO_8859_1)); + assertEquals("/test2", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void targetPathPropertyShouldConvertNullToEmptyString() { + inode.setTargetPath(null); + assertEquals("", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void getXattrIndexShouldReturnNotPresent() { + assertEquals(-1, inode.getXattrIndex()); + } + + @Test + public void setXattrIndexWithNotPresentValueShouldSucceed() { + inode.setXattrIndex(-1); + } + + @Test(expected = IllegalArgumentException.class) + public void setXattrIndexWithInvalidValueShouldFail() { + inode.setXattrIndex(1); + } + + @Test + public void isXattrPresentShouldReturnFalse() { + assertFalse(inode.isXattrPresent()); + } + + @Test + public void simplifyShouldReturnSelf() { + assertSame(inode, inode.simplify()); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfBasic() { + assertSame(inode, BasicSymlinkINode.simplify(inode)); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(13, inode.getChildSerializedSize()); + inode.setTargetPath("/test2".getBytes(StandardCharsets.ISO_8859_1)); + assertEquals(14, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + BasicSymlinkINode bDest = (BasicSymlinkINode) dest; + + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + assertEquals("Wrong target path", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1), + new String(bDest.getTargetPath(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void staticSimplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + SymlinkINode inode2 = new ExtendedSymlinkINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, BasicSymlinkINode.simplify(inode2)); + } + + @Test + public void staticSimplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + SymlinkINode inode2 = new ExtendedSymlinkINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + SymlinkINode result = 
BasicSymlinkINode.simplify(inode2); + assertSame("wrong class", BasicSymlinkINode.class, result.getClass()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedBlockDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedBlockDeviceINode.java new file mode 100644 index 00000000000..ff2e16eb3bd --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedBlockDeviceINode.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestExtendedBlockDeviceINode { + + ExtendedBlockDeviceINode inode; + + @Before + public void setUp() { + inode = new ExtendedBlockDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + inode.setXattrIndex(3); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-block-dev-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_BLOCK_DEVICE, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributesPresent() { + BlockDeviceINode inode2 = new ExtendedBlockDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotPresent() { + BlockDeviceINode inode2 = new ExtendedBlockDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + BlockDeviceINode result = inode2.simplify(); + assertSame("wrong class", BasicBlockDeviceINode.class, result.getClass()); + assertEquals("wrong device", 1, result.getDevice()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedCharDeviceINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedCharDeviceINode.java new file mode 100644 index 00000000000..1fd21a35387 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedCharDeviceINode.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestExtendedCharDeviceINode { + + ExtendedCharDeviceINode inode; + + @Before + public void setUp() { + inode = new ExtendedCharDeviceINode(); + inode.setDevice(1); + inode.setNlink(2); + inode.setXattrIndex(3); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-char-dev-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_CHAR_DEVICE, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributesPresent() { + CharDeviceINode inode2 = new ExtendedCharDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotPresent() { + CharDeviceINode inode2 = new ExtendedCharDeviceINode(); + inode2.setDevice(1); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + CharDeviceINode result = inode2.simplify(); + assertSame("wrong class", BasicCharDeviceINode.class, result.getClass()); + assertEquals("wrong device", 1, result.getDevice()); + assertEquals("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedDirectoryINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedDirectoryINode.java new file mode 100644 index 00000000000..a2d855832c3 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedDirectoryINode.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestExtendedDirectoryINode { + + ExtendedDirectoryINode inode; + + @Before + public void setUp() { + inode = new ExtendedDirectoryINode(); + inode.setStartBlock(1); + inode.setNlink(2); + inode.setFileSize(3); + inode.setOffset((short) 4); + inode.setParentInodeNumber(5); + inode.setIndexCount((short) 6); + inode.setXattrIndex(7); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-directory-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_DIRECTORY, inode.getInodeType()); + } + + @Test + public void startBlockPropertyShouldWorkAsExpected() { + assertEquals(1, inode.getStartBlock()); + inode.setStartBlock(2); + assertEquals(2, inode.getStartBlock()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void fileSizePropertyShouldWorkAsExpected() { + assertEquals(3, inode.getFileSize()); + inode.setFileSize(4); + assertEquals(4, inode.getFileSize()); + } + + @Test + public void offsetPropertyShouldWorkAsExpected() { + assertEquals((short) 4, inode.getOffset()); + inode.setOffset((short) 5); + assertEquals((short) 5, inode.getOffset()); + } + + @Test + public void parentInodeNumberPropertyShouldWorkAsExpected() { + assertEquals(5, inode.getParentInodeNumber()); + inode.setParentInodeNumber(6); + assertEquals(6, inode.getParentInodeNumber()); + } + + @Test + public void indexCountPropertyShouldWorkAsExpected() { + assertEquals((short) 6, inode.getIndexCount()); + inode.setIndexCount((short) 7); + assertEquals((short) 7, inode.getIndexCount()); + } + + @Test + public void isIndexPresentShouldReturnCorrectValue() { + assertTrue(inode.isIndexPresent()); + inode.setIndexCount((short) 0); + assertFalse(inode.isIndexPresent()); + } + + @Test + public void xattrIndexPropertyShouldWorkAsExpected() { + assertEquals(7, inode.getXattrIndex()); + inode.setXattrIndex(8); + assertEquals(8, inode.getXattrIndex()); + } + + @Test + public void isXattrPresentShouldReturnTrueIfPresent() { + assertTrue(inode.isXattrPresent()); + inode.setXattrIndex(-1); + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(24, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + ExtendedDirectoryINode bDest = (ExtendedDirectoryINode) dest; + + assertEquals("wrong start block", 1, bDest.getStartBlock()); + assertEquals("wrong nlink count", 2, bDest.getNlink()); + assertEquals("wrong file size", 3, bDest.getFileSize()); + assertEquals("wrong offset", (short) 4, bDest.getOffset()); + assertEquals("wrong parent inode number", 5, bDest.getParentInodeNumber()); + assertEquals("wrong index count", (short) 6, bDest.getIndexCount()); + assertEquals("wrong xattr index", 7, 
bDest.getXattrIndex()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotNeeded() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(-1); + + DirectoryINode result = inode2.simplify(); + assertSame("wrong class", BasicDirectoryINode.class, result.getClass()); + + assertEquals("wrong start block", inode2.getStartBlock(), + result.getStartBlock()); + assertEquals("wrong nlink count", inode2.getNlink(), result.getNlink()); + assertEquals("wrong file size", inode2.getFileSize(), result.getFileSize()); + assertEquals("wrong offset", inode2.getOffset(), result.getOffset()); + assertEquals("wrong parent inode number", inode2.getParentInodeNumber(), + result.getParentInodeNumber()); + } + + @Test + public void simplifyShouldReturnOriginalIfFileSizeTooLarge() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(65536); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(-1); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfIndexPresent() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 1); + inode2.setXattrIndex(-1); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributesPresent() { + DirectoryINode inode2 = new ExtendedDirectoryINode(); + inode2.setStartBlock(1); + inode2.setNlink(2); + inode2.setFileSize(3); + inode2.setOffset((short) 4); + inode2.setParentInodeNumber(5); + inode2.setIndexCount((short) 0); + inode2.setXattrIndex(1); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFifoINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFifoINode.java new file mode 100644 index 00000000000..04bba4f5930 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFifoINode.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestExtendedFifoINode { + + ExtendedFifoINode inode; + + @Before + public void setUp() { + inode = new ExtendedFifoINode(); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-fifo-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_FIFO, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributesPresent() { + FifoINode inode2 = new ExtendedFifoINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotPresent() { + FifoINode inode2 = new ExtendedFifoINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + FifoINode result = inode2.simplify(); + assertSame("wrong class", BasicFifoINode.class, result.getClass()); + assertSame("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFileINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFileINode.java new file mode 100644 index 00000000000..b75fd2a1797 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedFileINode.java @@ -0,0 +1,293 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestExtendedFileINode { + + ExtendedFileINode inode; + + @Before + public void setUp() { + inode = new ExtendedFileINode(); + inode.setBlocksStart(1L); + inode.setFragmentBlockIndex(2); + inode.setFragmentOffset(3); + inode.setFileSize(131073L); + inode.setBlockSizes(new int[] { 5 }); + inode.setSparse(6L); + inode.setNlink(7); + inode.setXattrIndex(8); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-file-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_FILE, inode.getInodeType()); + } + + @Test + public void blocksStartPropertyShouldWorkAsExpected() { + assertEquals(1L, inode.getBlocksStart()); + inode.setBlocksStart(2L); + assertEquals(2L, inode.getBlocksStart()); + } + + @Test + public void fragmentBlockIndexPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getFragmentBlockIndex()); + inode.setFragmentBlockIndex(3); + assertEquals(3, inode.getFragmentBlockIndex()); + } + + @Test + public void isFragmentPresentShouldReturnTrueIfFragmentBlockIndexSet() { + assertTrue(inode.isFragmentPresent()); + inode.setFragmentBlockIndex(-1); + assertFalse(inode.isFragmentPresent()); + } + + @Test + public void fragmentOffsetPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getFragmentOffset()); + inode.setFragmentOffset(4); + assertEquals(4, inode.getFragmentOffset()); + } + + @Test + public void fileSizePropertyShouldWorkAsExpected() { + assertEquals(131073L, inode.getFileSize()); + inode.setFileSize(131074L); + assertEquals(131074L, inode.getFileSize()); + } + + @Test + public void sparsePropertyShouldWorkAsExpected() { + assertEquals(6L, inode.getSparse()); + inode.setSparse(7L); + assertEquals(7L, inode.getSparse()); + } + + @Test + public void isSparseBlockPresentShouldReturnTrueIfSparseNonZero() { + assertTrue(inode.isSparseBlockPresent()); + inode.setSparse(0L); + assertFalse(inode.isSparseBlockPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(44, inode.getChildSerializedSize()); + inode.setBlockSizes(new int[] { 1, 2, 3 }); + assertEquals(52, inode.getChildSerializedSize()); + } + + @Test + public void xattrIndexPropertyShouldWorkAsExpected() { + assertEquals(8, inode.getXattrIndex()); + inode.setXattrIndex(9); + assertEquals(9, inode.getXattrIndex()); + } + + @Test + public void isXattrPresentShouldReturnTrueIfPresent() { + assertTrue(inode.isXattrPresent()); + inode.setXattrIndex(-1); + assertFalse(inode.isXattrPresent()); + } + + @Test + public void writeDataAndReadDataWithFragmentsShouldBeReflexive() + throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + ExtendedFileINode bDest = (ExtendedFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", 2, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 3, bDest.getFragmentOffset()); + 
assertEquals("wrong file size", 131073L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + assertEquals("wrong sparse", 6L, bDest.getSparse()); + assertEquals("wrong nlink count", 7, bDest.getNlink()); + assertEquals("wrong xattr index", 8, bDest.getXattrIndex()); + } + + @Test + public void writeDataAndReadDataWithoutFragmentsShouldBeReflexive() + throws IOException { + inode.setFragmentOffset(0); + inode.setFragmentBlockIndex(-1); + inode.setFileSize(131072L); + + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + ExtendedFileINode bDest = (ExtendedFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", -1, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 0, bDest.getFragmentOffset()); + assertEquals("wrong file size", 131072L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + assertEquals("wrong sparse", 6L, bDest.getSparse()); + assertEquals("wrong nlink count", 7, bDest.getNlink()); + assertEquals("wrong xattr index", 8, bDest.getXattrIndex()); + } + + @Test + public void writeDataAndReadDataWithShortEndBlockShouldBeReflexive() + throws IOException { + inode.setFragmentOffset(0); + inode.setFragmentBlockIndex(-1); + inode.setFileSize(131071L); + + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + ExtendedFileINode bDest = (ExtendedFileINode) dest; + + assertEquals("wrong blocks start", 1L, bDest.getBlocksStart()); + assertEquals("wrong fragment block index", -1, + bDest.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", 0, bDest.getFragmentOffset()); + assertEquals("wrong file size", 131071L, bDest.getFileSize()); + assertArrayEquals("wrong block sizes", new int[] { 5 }, + bDest.getBlockSizes()); + assertEquals("wrong file size", 131071L, bDest.getFileSize()); + assertEquals("wrong sparse", 6L, bDest.getSparse()); + assertEquals("wrong nlink count", 7, bDest.getNlink()); + assertEquals("wrong xattr index", 8, bDest.getXattrIndex()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotNeeded() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + + FileINode result = inode2.simplify(); + assertSame("wrong class", BasicFileINode.class, result.getClass()); + + assertEquals("wrong block start", inode2.getBlocksStart(), + result.getBlocksStart()); + assertEquals("wrong fragment block index", inode2.getFragmentBlockIndex(), + result.getFragmentBlockIndex()); + assertEquals("wrong fragment offset", inode2.getFragmentOffset(), + result.getFragmentOffset()); + assertEquals("wrong file size", inode2.getFileSize(), result.getFileSize()); + assertArrayEquals("wrong block sizes", inode2.getBlockSizes(), + result.getBlockSizes()); + } + + @Test + public void simplifyShouldReturnOriginalIfLinkCountGreaterThanOne() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(2); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + 
inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributes() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setXattrIndex(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfSparse() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setSparse(3L); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfBlocksStartTooLarge() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(0x1_0000_0000L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(131073L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnOriginalIfFileSizeTooLarge() { + FileINode inode2 = new ExtendedFileINode(); + inode2.setBlocksStart(1L); + inode2.setNlink(1); + inode2.setFragmentBlockIndex(2); + inode2.setFragmentOffset(3); + inode2.setFileSize(0x1_0000_0000L); + inode2.setBlockSizes(new int[] { 5 }); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSocketINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSocketINode.java new file mode 100644 index 00000000000..c5dc343a1b9 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSocketINode.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestExtendedSocketINode { + + ExtendedSocketINode inode; + + @Before + public void setUp() { + inode = new ExtendedSocketINode(); + inode.setNlink(2); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-socket-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_SOCKET, inode.getInodeType()); + } + + @Test + public void simplifyShouldReturnOriginalIfExtendedAttributesPresent() { + SocketINode inode2 = new ExtendedSocketINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyShouldReturnBasicIfExtendedAttributesNotPresent() { + SocketINode inode2 = new ExtendedSocketINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + SocketINode result = inode2.simplify(); + assertSame("wrong class", BasicSocketINode.class, result.getClass()); + assertSame("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSymlinkINode.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSymlinkINode.java new file mode 100644 index 00000000000..17e96a699a7 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestExtendedSymlinkINode.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.test.INodeTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestExtendedSymlinkINode { + + ExtendedSymlinkINode inode; + + @Before + public void setUp() { + inode = new ExtendedSymlinkINode(); + inode.setNlink(2); + inode.setTargetPath("/test".getBytes(StandardCharsets.ISO_8859_1)); + inode.setXattrIndex(3); + } + + @Test + public void getNameShouldReturnCorrectValue() { + assertEquals("extended-symlink-inode", inode.getName()); + } + + @Test + public void getInodeTypeShouldReturnCorrectValue() { + assertSame(INodeType.EXTENDED_SYMLINK, inode.getInodeType()); + } + + @Test + public void nlinkPropertyShouldWorkAsExpected() { + assertEquals(2, inode.getNlink()); + inode.setNlink(3); + assertEquals(3, inode.getNlink()); + } + + @Test + public void targetPathPropertyShouldWorkAsExpected() { + assertEquals("/test", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + inode.setTargetPath("/test2".getBytes(StandardCharsets.ISO_8859_1)); + assertEquals("/test2", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void targetPathPropertyShouldConvertNullToEmptyString() { + inode.setTargetPath(null); + assertEquals("", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1)); + } + + @Test + public void xattrIndexPropertyShouldWorkAsExpected() { + assertEquals(3, inode.getXattrIndex()); + inode.setXattrIndex(4); + assertEquals(4, inode.getXattrIndex()); + } + + @Test + public void isXattrPresentShouldReturnTrueIfPresent() { + assertTrue(inode.isXattrPresent()); + inode.setXattrIndex(-1); + assertFalse(inode.isXattrPresent()); + } + + @Test + public void getChildSerializedSizeShouldReturnCorrectValue() { + assertEquals(17, inode.getChildSerializedSize()); + inode.setTargetPath("/test2".getBytes(StandardCharsets.ISO_8859_1)); + assertEquals(18, inode.getChildSerializedSize()); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws IOException { + byte[] data = INodeTestUtils.serializeINode(inode); + INode dest = INodeTestUtils.deserializeINode(data); + + assertSame("Wrong class", inode.getClass(), dest.getClass()); + ExtendedSymlinkINode bDest = (ExtendedSymlinkINode) dest; + + assertEquals("Wrong nlink count", 2, bDest.getNlink()); + assertEquals("Wrong target path", + new String(inode.getTargetPath(), StandardCharsets.ISO_8859_1), + new String(bDest.getTargetPath(), StandardCharsets.ISO_8859_1)); + assertEquals("Wrong xattr index", 3, bDest.getXattrIndex()); + } + + @Test + public void simplifyMethodShouldReturnOriginalIfExtendedAttributesPresent() { + SymlinkINode inode2 = new ExtendedSymlinkINode(); + inode2.setNlink(2); + inode2.setXattrIndex(3); + assertSame(inode2, inode2.simplify()); + } + + @Test + public void simplifyMethodShouldReturnBasicIfExtendedAttributesNotPresent() { + SymlinkINode inode2 = new ExtendedSymlinkINode(); + inode2.setNlink(2); + inode2.setXattrIndex(-1); + + SymlinkINode result = inode2.simplify(); + assertSame("wrong class", BasicSymlinkINode.class, result.getClass()); + assertSame("wrong nlink count", 2, result.getNlink()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(inode.toString()); 
+ } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeRef.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeRef.java new file mode 100644 index 00000000000..f6b3afe7bcf --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeRef.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestINodeRef { + + INodeRef ref1; + INodeRef ref2; + + @Before + public void setUp() { + ref1 = new INodeRef(0x0000_1234_5678_0123L); + ref2 = new INodeRef(0x1234_5678, (short) 0x123); + } + + @Test + public void getLocationShouldReturnSameValueForBothConstructors() { + assertEquals("wrong location for 1-arg constructor", 0x1234_5678, + ref1.getLocation()); + assertEquals("wrong location for 2-arg constructor", 0x1234_5678, + ref2.getLocation()); + } + + @Test + public void getOffsetShouldReturnSameValueForBothConstructors() { + assertEquals("wrong offset for 1-arg constructor", (short) 0x123, + ref1.getOffset()); + assertEquals("wrong offset for 2-arg constructor", (short) 0x123, + ref2.getOffset()); + } + + @Test + public void getRawShouldReturnSameValueForBothConstructors() { + assertEquals("wrong raw value for 1-arg constructor", + 0x0000_1234_5678_0123L, ref1.getRaw()); + assertEquals("wrong raw value for 2-arg constructor", + 0x0000_1234_5678_0123L, ref2.getRaw()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(ref1.toString()); + System.out.println(ref2.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeType.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeType.java new file mode 100644 index 00000000000..052f09cb5ba --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestINodeType.java @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.junit.Test; + +import java.util.EnumSet; + +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_BLOCK_DEVICE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_CHAR_DEVICE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_DIRECTORY; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_FIFO; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_FILE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_SOCKET; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.BASIC_SYMLINK; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_BLOCK_DEVICE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_CHAR_DEVICE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_DIRECTORY; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_FIFO; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_FILE; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_SOCKET; +import static org.apache.hadoop.runc.squashfs.inode.INodeType.EXTENDED_SYMLINK; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestINodeType { + + @Test + public void basicShouldReturnTrueOnlyForBasicSubtypes() { + EnumSet<INodeType> basics = EnumSet.of( + BASIC_BLOCK_DEVICE, + BASIC_CHAR_DEVICE, + BASIC_DIRECTORY, + BASIC_FIFO, + BASIC_FILE, + BASIC_SOCKET, + BASIC_SYMLINK); + + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong basic() result for %s", type), + basics.contains(type), type.basic()); + } + } + + @Test + public void directoryShouldReturnTrueOnlyForDirectorySubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong directory() result for %s", type), + type == BASIC_DIRECTORY || type == EXTENDED_DIRECTORY, + type.directory()); + } + } + + @Test + public void fileShouldReturnTrueOnlyForFileSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong file() result for %s", type), + type == BASIC_FILE || type == EXTENDED_FILE, type.file()); + } + } + + @Test + public void symlinkShouldReturnTrueOnlyForSymlinkSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong symlink() result for %s", type), + type == BASIC_SYMLINK || type == EXTENDED_SYMLINK, type.symlink()); + } + } + + @Test + public void blockDeviceShouldReturnTrueOnlyForBlockDeviceSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong blockDevice() result for %s", type), + type == BASIC_BLOCK_DEVICE || type == EXTENDED_BLOCK_DEVICE, + type.blockDevice()); + } + } + + @Test + public void charDeviceShouldReturnTrueOnlyForCharDeviceSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong charDevice() result for %s", 
type), + type == BASIC_CHAR_DEVICE || type == EXTENDED_CHAR_DEVICE, + type.charDevice()); + } + } + + @Test + public void deviceShouldReturnTrueOnlyForDeviceSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong device() result for %s", type), + type.charDevice() || type.blockDevice(), type.device()); + } + } + + @Test + public void fifoShouldReturnTrueOnlyForFifoSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong fifo() result for %s", type), + type == BASIC_FIFO || type == EXTENDED_FIFO, type.fifo()); + } + } + + @Test + public void socketShouldReturnTrueOnlyForSocketSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong socket() result for %s", type), + type == BASIC_SOCKET || type == EXTENDED_SOCKET, type.socket()); + } + } + + @Test + public void ipcShouldReturnTrueOnlyForIpcSubtypes() { + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong ipc() result for %s", type), + type.fifo() || type.socket(), type.ipc()); + } + } + + @Test + public void valueShouldReturnIncrementingValues() { + short value = 1; + for (INodeType type : INodeType.values()) { + assertEquals(String.format("Wrong value() result for %s", type), value, + type.value()); + value++; + } + } + + @Test + public void dirValueShouldAlwaysResolveToABasicTypeWithSameMode() + throws SquashFsException { + for (INodeType type : INodeType.values()) { + INodeType other = INodeType.fromValue(type.dirValue()); + assertEquals(String.format("Wrong mode() result for %s", type), + other.mode(), type.mode()); + } + } + + @Test + public void createShouldReturnAnINodeWithProperType() { + for (INodeType type : INodeType.values()) { + INode inode = type.create(); + assertSame(String.format("Wrong create() result for %s", type), + inode.getInodeType(), type); + } + } + + @Test + public void fromValueShouldProperlyFindAllValuesFromOneToFourteen() + throws SquashFsException { + EnumSet<INodeType> pending = EnumSet.allOf(INodeType.class); + for (short i = 1; i <= 14; i++) { + INodeType it = INodeType.fromValue(i); + assertEquals(String.format("Wrong fromValue() result for %d", i), i, + it.value()); + assertTrue(String.format("Duplicate entry found for %s", it), + pending.remove(it)); + } + assertEquals(String.format("Didn't find all values: %s", pending), 0, + pending.size()); + } + + @Test(expected = SquashFsException.class) + public void fromValueShouldThrowExceptionForOutOfRangeValue() + throws SquashFsException { + INodeType.fromValue((short) 15); + } + + @Test + public void fromDirectoryValueShouldProperlyFindAllValuesFromOneToSeven() + throws SquashFsException { + EnumSet<INodeType> pending = EnumSet.allOf(INodeType.class); + pending.removeIf(it -> !it.basic()); + + for (short i = 1; i <= 7; i++) { + INodeType it = INodeType.fromDirectoryValue(i); + assertEquals(String.format("Wrong fromDirectoryValue() result for %d", i), + i, it.value()); + assertTrue(String.format("Duplicate entry found for %s", it), + pending.remove(it)); + } + assertEquals(String.format("Didn't find all values: %s", pending), 0, + pending.size()); + } + + @Test(expected = SquashFsException.class) + public void fromDirectoryValueShouldThrowExceptionForOutOfRangeValue() + throws SquashFsException { + INodeType.fromDirectoryValue((short) 8); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestPermission.java 
b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestPermission.java new file mode 100644 index 00000000000..5cdd3f5f649 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/inode/TestPermission.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.inode; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +public class TestPermission { + + @Test + public void descriptionShouldReturnSomethingForAllValues() { + for (Permission p : Permission.values()) { + assertNotNull(String.format("Null description found for %s", p), + p.description()); + } + } + + @Test + public void bitShouldBeInAscendingOrderForAllValues() { + int bit = 0; + for (Permission p : Permission.values()) { + assertEquals(String.format("Wrong bit value found for %s", p), bit, + p.bit()); + bit++; + } + } + + @Test + public void maskShouldIncreaseByPowersOfTwoForAllValues() { + int mask = 1; + for (Permission p : Permission.values()) { + assertEquals(String.format("Wrong mask value found for %s", p), mask, + p.mask()); + mask = (mask << 1); + } + } + + @Test + public void toDisplayShouldSucceedForCommonValues() { + assertEquals("rwxr-xr-x", Permission.toDisplay(Permission.from(0755))); + assertEquals("rw-r--r--", Permission.toDisplay(Permission.from(0644))); + assertEquals("---------", Permission.toDisplay(Permission.from(0000))); + assertEquals("rwxrwxrwx", Permission.toDisplay(Permission.from(0777))); + assertEquals("rwxrwxrwt", Permission.toDisplay(Permission.from(01777))); + assertEquals("rwxrwsrwx", Permission.toDisplay(Permission.from(02777))); + assertEquals("rwsrwxrwx", Permission.toDisplay(Permission.from(04777))); + assertEquals("rw-r--r-T", Permission.toDisplay(Permission.from(01644))); + assertEquals("rw-r-Sr--", Permission.toDisplay(Permission.from(02644))); + assertEquals("rwSr--r--", Permission.toDisplay(Permission.from(04644))); + } + + @Test + public void toValueShouldSucceedForCommonValues() { + assertEquals(0755, Permission.toValue(Permission.from("rwxr-xr-x"))); + assertEquals(0644, Permission.toValue(Permission.from("rw-r--r--"))); + assertEquals(0000, Permission.toValue(Permission.from("---------"))); + assertEquals(0777, Permission.toValue(Permission.from("rwxrwxrwx"))); + assertEquals(01777, Permission.toValue(Permission.from("rwxrwxrwt"))); + assertEquals(02777, Permission.toValue(Permission.from("rwxrwsrwx"))); + assertEquals(04777, Permission.toValue(Permission.from("rwsrwxrwx"))); + assertEquals(01644, Permission.toValue(Permission.from("rw-r--r-T"))); + assertEquals(02644, Permission.toValue(Permission.from("rw-r-Sr--"))); + assertEquals(04644, 
Permission.toValue(Permission.from("rwSr--r--"))); + } + + @Test(expected = IllegalArgumentException.class) + public void toValueShouldFailForStringsLessThan9Chars() { + Permission.toValue("rwxrwxr-"); + } + + @Test + public void toDisplayAndToValueShouldBeReflexive() { + for (int i = 0; i < 07777; i++) { + String display = Permission.toDisplay(i); + int value = Permission.toValue(display); + assertEquals(String + .format("Mismatch between toDisplay('%s') and toValue(%o)", display, + i), i, value); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/io/TestByteBufferDataInput.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/io/TestByteBufferDataInput.java new file mode 100644 index 00000000000..ba77d616c9e --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/io/TestByteBufferDataInput.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.io; + +import org.junit.Test; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestByteBufferDataInput { + + @Test + public void readFullyShouldWorkIfNotEof() throws Exception { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + ByteBufferDataInput r = input(data); + byte[] copy = new byte[1024]; + r.readFully(copy); + assertArrayEquals(data, copy); + } + + @Test(expected = EOFException.class) + public void readFullyShouldThrowExceptionIfTooLong() throws Exception { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + ByteBufferDataInput r = input(data); + byte[] copy = new byte[1025]; + r.readFully(copy); + } + + @Test + public void readFullyShouldWorkWithPartials() throws Exception { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + ByteBufferDataInput r = input(data); + byte[] copy = new byte[1024]; + r.readFully(copy, 0, 512); + r.readFully(copy, 512, 512); + assertArrayEquals(data, copy); + } + + @Test + public void skipBytesShouldDoSo() throws Exception { + byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBufferDataInput r = input(data); + assertEquals("wrong bytes skipped", 5, r.skipBytes(5)); + assertEquals("wrong next byte", (byte) 5, r.readByte()); + } + + @Test + public void skipBytesShouldDoPartialSkipIfEof() throws 
Exception { + byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + ByteBufferDataInput r = input(data); + assertEquals("wrong bytes skipped", 10, r.skipBytes(15)); + } + + @Test + public void readBooleanShouldWork() throws Exception { + byte[] data = new byte[] { 0, 1 }; + ByteBufferDataInput r = input(data); + assertFalse("first value true", r.readBoolean()); + assertTrue("second value false", r.readBoolean()); + } + + @Test + public void readByteShouldWork() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + ByteBufferDataInput r = input(data); + assertEquals((byte) 0xff, r.readByte()); + } + + @Test(expected = EOFException.class) + public void readByteShouldThrowEOFExceptionIfEndOfStream() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + ByteBufferDataInput r = input(data); + assertEquals((byte) 0xff, r.readByte()); + r.readByte(); + } + + @Test + public void readUnsignedByteShouldWork() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + ByteBufferDataInput r = input(data); + assertEquals(0xff, r.readUnsignedByte()); + } + + @Test + public void readShortShouldWork() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).asShortBuffer().put((short) 0x1234); + ByteBufferDataInput r = input(data); + assertEquals((short) 0x1234, r.readShort()); + } + + @Test + public void readUnsignedShortShouldWork() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).asShortBuffer().put((short) 0xfedc); + ByteBufferDataInput r = input(data); + assertEquals(0xfedc, r.readUnsignedShort()); + } + + @Test + public void readCharShouldWork() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).asCharBuffer().put((char) 0x1234); + ByteBufferDataInput r = input(data); + assertEquals((char) 0x1234, r.readChar()); + } + + @Test + public void readIntShouldWork() throws Exception { + byte[] data = new byte[4]; + ByteBuffer.wrap(data).asIntBuffer().put(0x12345678); + ByteBufferDataInput r = input(data); + assertEquals(0x12345678, r.readInt()); + } + + @Test + public void readFloatShouldWork() throws Exception { + float value = new Random(0L).nextFloat(); + + byte[] data = new byte[4]; + ByteBuffer.wrap(data).asFloatBuffer().put(value); + ByteBufferDataInput r = input(data); + assertEquals(value, r.readFloat(), 0.0000001f); + } + + @Test + public void readLongShouldWork() throws Exception { + byte[] data = new byte[8]; + ByteBuffer.wrap(data).asLongBuffer().put(0x12345678_90abcdefL); + ByteBufferDataInput r = input(data); + assertEquals(0x12345678_90abcdefL, r.readLong()); + } + + @Test + public void readDoubleShouldWork() throws Exception { + double value = new Random(0L).nextDouble(); + + byte[] data = new byte[8]; + ByteBuffer.wrap(data).asDoubleBuffer().put(value); + ByteBufferDataInput r = input(data); + assertEquals(value, r.readDouble(), 0.0000001d); + } + + @Test(expected = UnsupportedOperationException.class) + public void readLineShouldThrowUnsupportedOperationException() + throws Exception { + String value = "test\r\n"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + ByteBufferDataInput r = input(data); + r.readLine(); + } + + @Test(expected = UnsupportedOperationException.class) + public void readUTFShouldThrowUnsupportedOperationException() + throws Exception { + byte[] data = new byte[1]; + ByteBufferDataInput r = input(data); + r.readUTF(); + } + + ByteBufferDataInput input(byte[] data) throws IOException { + return new ByteBufferDataInput(ByteBuffer.wrap(data)); + } + +} diff --git 
a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestFileMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestFileMetadataBlockReader.java new file mode 100644 index 00000000000..ca10dbdcc0c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestFileMetadataBlockReader.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +public class TestFileMetadataBlockReader { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + int tag; + File tempFile; + FileMetadataBlockReader reader; + SuperBlock sb; + byte[] block; + byte[] encoded; + + @Before + public void setUp() throws Exception { + tag = 1; + tempFile = temp.newFile(); + sb = new SuperBlock(); + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) { + sb.writeData(raf); + + // write a block + block = new byte[1024]; + for (int i = 0; i < block.length; i++) { + block[i] = (byte) (i & 0xff); + } + encoded = MetadataTestUtils.saveMetadataBlock(block); + raf.write(encoded); + } + + reader = new FileMetadataBlockReader(tag, tempFile); + } + + @After + public void tearDown() throws Exception { + reader.close(); + reader = null; + encoded = null; + block = null; + sb = null; + } + + @Test + public void getSuperBlockShouldReturnVersionReadFromFile() { + assertEquals(sb.getModificationTime(), + reader.getSuperBlock(tag).getModificationTime()); + } + + @Test + public void getSuperBlockShouldReturnConstructedVersionIfApplicable() + throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new FileMetadataBlockReader(tag, raf, sb, true); + } + assertSame(sb, reader.getSuperBlock(tag)); + } + + @Test + public void readFromFileOffsetShouldSucceed() throws Exception { + MetadataBlock mb = reader.read(tag, SuperBlock.SIZE); + assertEquals(1024, mb.data.length); + assertArrayEquals(block, mb.data); + } + + @Test + public void readFromFileOffsetOnRandomAccessFileBackedReaderShouldSucceed() + throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new 
FileMetadataBlockReader(tag, raf, sb, true); + MetadataBlock mb = reader.read(tag, SuperBlock.SIZE); + assertEquals(1024, mb.data.length); + assertArrayEquals(block, mb.data); + } + } + + @Test + public void closeShouldCloseUnderlyingReaderIfRequested() throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new FileMetadataBlockReader(tag, raf, sb, true); + reader.close(); + + try { + raf.seek(0L); + fail("exception not thrown"); + } catch (IOException e) { + System.out.println("EOF"); + } + } + } + + @Test + public void closeShouldNotCloseUnderlyingReaderIfNotRequested() + throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new FileMetadataBlockReader(tag, raf, sb, false); + reader.close(); + raf.seek(0L); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMappedFileMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMappedFileMetadataBlockReader.java new file mode 100644 index 00000000000..21abe2dd896 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMappedFileMetadataBlockReader.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.RandomAccessFile; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestMappedFileMetadataBlockReader { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + int tag; + File tempFile; + MappedFileMetadataBlockReader reader; + SuperBlock sb; + byte[] block; + byte[] encoded; + + @Before + public void setUp() throws Exception { + tag = 1; + tempFile = temp.newFile(); + sb = new SuperBlock(); + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) { + sb.writeData(raf); + + // write a block + block = new byte[1024]; + for (int i = 0; i < block.length; i++) { + block[i] = (byte) (i & 0xff); + } + encoded = MetadataTestUtils.saveMetadataBlock(block); + raf.write(encoded); + } + + int mapSize = 512; + int windowSize = 1024; + MappedFile mmap; + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + mmap = MappedFile.mmap(raf.getChannel(), mapSize, windowSize); + } + reader = new MappedFileMetadataBlockReader(tag, sb, mmap); + } + + @After + public void tearDown() throws Exception { + reader.close(); + reader = null; + encoded = null; + block = null; + sb = null; + } + + @Test + public void getSuperBlockShouldReturnVersionReadFromFile() { + assertEquals(sb.getModificationTime(), + reader.getSuperBlock(tag).getModificationTime()); + } + + @Test(expected = IllegalArgumentException.class) + public void getSuperBlockShouldFailWhenTagIsInvalid() { + reader.getSuperBlock(tag + 1); + } + + @Test + public void getSuperBlockShouldReturnConstructedVersion() throws Exception { + assertSame(sb, reader.getSuperBlock(tag)); + } + + @Test + public void readFromFileOffsetShouldSucceed() throws Exception { + MetadataBlock mb = reader.read(tag, SuperBlock.SIZE); + assertEquals(1024, mb.data.length); + assertArrayEquals(block, mb.data); + } + + @Test(expected = IllegalArgumentException.class) + public void readFromFileOffsetShouldFailWhenTagIsInvalid() throws Exception { + reader.read(tag + 1, SuperBlock.SIZE); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMemoryMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMemoryMetadataBlockReader.java new file mode 100644 index 00000000000..af3c49a2b3f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMemoryMetadataBlockReader.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.EOFException; +import java.io.File; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestMemoryMetadataBlockReader { + + File tempFile; + MemoryMetadataBlockReader reader; + SuperBlock sb; + byte[] block; + byte[] block2; + byte[] encoded; + int offset2; + + @Before + public void setUp() throws Exception { + sb = new SuperBlock(); + // write a block + block = new byte[1024]; + for (int i = 0; i < block.length; i++) { + block[i] = (byte) (i & 0xff); + } + block2 = new byte[1024]; + for (int i = 0; i < block2.length; i++) { + block2[i] = (byte) ((i + 128) & 0xff); + } + byte[] data1 = MetadataTestUtils.saveMetadataBlock(block); + byte[] data2 = MetadataTestUtils.saveMetadataBlock(block2); + offset2 = data1.length; + encoded = new byte[data1.length + data2.length]; + System.arraycopy(data1, 0, encoded, 0, data1.length); + System.arraycopy(data2, 0, encoded, data1.length, data2.length); + reader = + new MemoryMetadataBlockReader(10101, sb, encoded, 0, encoded.length); + } + + @After + public void tearDown() { + reader.close(); + reader = null; + encoded = null; + block = null; + block2 = null; + sb = null; + } + + @Test + public void getSuperBlockShouldReturnConstructedInstance() { + assertSame(sb, reader.getSuperBlock(10101)); + } + + @Test + public void readFirstBlockShouldSucceed() throws Exception { + MetadataBlock mb = reader.read(10101, 0L); + assertEquals(1024, mb.data.length); + assertArrayEquals(block, mb.data); + } + + @Test + public void readSecondBlockShouldSucceed() throws Exception { + MetadataBlock mb = reader.read(10101, offset2); + assertEquals(1024, mb.data.length); + assertArrayEquals(block2, mb.data); + } + + @Test(expected = EOFException.class) + public void readPastEofShouldFail() throws Exception { + reader.read(10101, encoded.length); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlock.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlock.java new file mode 100644 index 00000000000..c721381f569 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlock.java @@ -0,0 +1,214 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.superblock.CompressionId; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.apache.hadoop.runc.squashfs.test.DataTestUtils; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ShortBuffer; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestMetadataBlock { + + @Before + public void setUp() throws Exception { + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void readingCompressedBlockShouldSucceed() throws Exception { + + byte[] buf = new byte[8192]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock block = MetadataBlock.read(dis, sb); + assertTrue("not compressed", block.isCompressed()); + assertArrayEquals(buf, block.getData()); + } + + } + + @Test(expected = SquashFsException.class) + public void readingCompressedBlockShouldFailIfSuperblockHasCompressionDisabled() + throws Exception { + + byte[] buf = new byte[8192]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.NONE); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock.read(dis, sb); + } + + } + + @Test(expected = UnsupportedOperationException.class) + public void readingCompressedBlockShouldFailIfSuperblockHasUnsupportedCompressionAlgorithm() + throws Exception { + + byte[] buf = new byte[8192]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.XZ); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock.read(dis, sb); + } + + } + + @Test(expected = UnsupportedOperationException.class) + public void readingCompressedBlockShouldFailIfSuperblockHasCompressionOptionFlag() + throws Exception { + + byte[] buf = new byte[8192]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + sb.setFlags( + (short) (sb.getFlags() | SuperBlockFlag.COMPRESSOR_OPTIONS.mask())); + try (DataInputStream dis = new DataInputStream( + new 
ByteArrayInputStream(blockData))) { + MetadataBlock.read(dis, sb); + } + + } + + @Test + public void toStringShouldNotFailWhenCompressed() throws Exception { + byte[] buf = new byte[64]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock block = MetadataBlock.read(dis, sb); + System.out.println(block); + } + } + + @Test + public void toStringShouldNotFailWhenUncompressed() throws Exception { + Random r = new Random(0L); + + byte[] buf = new byte[64]; + r.nextBytes(buf); + + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock block = MetadataBlock.read(dis, sb); + System.out.println(block); + } + } + + @Test + public void readingUncompressedBlockShouldSucceed() throws Exception { + Random r = new Random(0L); + + byte[] buf = new byte[8192]; + r.nextBytes(buf); + + byte[] blockData = MetadataTestUtils.saveMetadataBlock(buf); + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(blockData))) { + MetadataBlock block = MetadataBlock.read(dis, sb); + assertFalse("compressed", block.isCompressed()); + assertArrayEquals(buf, block.getData()); + } + + } + + @Test(expected = SquashFsException.class) + public void readingUncompressedDataThatIsTooLargeShouldFail() + throws Exception { + byte[] buf = new byte[8196]; + ShortBuffer sbuf = + ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer(); + sbuf.put((short) (8194 | 0x8000)); // 8194 bytes, uncompressed + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(buf))) { + MetadataBlock.read(dis, sb); + } + } + + @Test(expected = SquashFsException.class) + public void readingCompressedDataThatIsTooLargeShouldFail() throws Exception { + + byte[] buf = new byte[8194]; + byte[] compressed = DataTestUtils.compress(buf); + byte[] encoded = new byte[compressed.length + 2]; + System.arraycopy(compressed, 0, encoded, 2, compressed.length); + ShortBuffer sbuf = + ByteBuffer.wrap(encoded).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer(); + sbuf.put((short) (compressed.length & 0x7fff)); + + SuperBlock sb = new SuperBlock(); + try (DataInputStream dis = new DataInputStream( + new ByteArrayInputStream(encoded))) { + MetadataBlock.read(dis, sb); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockCache.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockCache.java new file mode 100644 index 00000000000..ec0a0674b8c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockCache.java @@ -0,0 +1,262 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataBlockReaderMock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestMetadataBlockCache { + + SuperBlock sb; + MetadataBlockReaderMock mbr; + TaggedMetadataBlockReader tmbr; + MetadataBlockCache cache; + SortedMap<Long, MetadataBlock> blockMap; + + @Before + public void setUp() { + sb = new SuperBlock(); + blockMap = new TreeMap<>(); + for (int i = 0; i < 10000; i++) { + blockMap.put( + Long.valueOf(i * 1000L), + MetadataTestUtils.block(new byte[] { (byte) (i % 100) })); + } + mbr = new MetadataBlockReaderMock(10101, sb, blockMap); + tmbr = new TaggedMetadataBlockReader(true); + cache = new MetadataBlockCache(tmbr); + cache.add(10101, mbr); + } + + @After + public void tearDown() { + cache = null; + mbr = null; + blockMap = null; + sb = null; + } + + @Test + public void superBlockShouldReturnUnderlyingValue() { + assertSame(sb, cache.getSuperBlock(10101)); + } + + @Test + public void readingAllBlocksTwiceShouldResultInNoCacheHits() + throws Exception { + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + assertEquals("wrong hit count", 0L, cache.getCacheHits()); + assertEquals("wrong miss count", 20000L, cache.getCacheMisses()); + } + + @Test + public void readingSameTenBlocksTwiceShouldResultInEqualCacheHits() + throws Exception { + int count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 10) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 10) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + assertEquals("wrong hit count", 10L, cache.getCacheHits()); + assertEquals("wrong miss count", 10L, cache.getCacheMisses()); + } + + @Test + public void resetStatisticsShouldResetHitsAndMisses() throws Exception { + MetadataBlock block = blockMap.get(Long.valueOf(0L)); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + + assertEquals("wrong hit count", 1L, cache.getCacheHits());
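+    // the first read of offset 0 missed and populated the cache; the second was a hit, so both counters read 1 before the reset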
+ assertEquals("wrong miss count", 1L, cache.getCacheMisses()); + + cache.resetStatistics(); + + assertEquals("wrong hit count", 0L, cache.getCacheHits()); + assertEquals("wrong miss count", 0L, cache.getCacheMisses()); + } + + @Test + public void clearCacheShouldResetHitsAndMisses() throws Exception { + MetadataBlock block = blockMap.get(Long.valueOf(0L)); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + + assertEquals("wrong hit count", 1L, cache.getCacheHits()); + assertEquals("wrong miss count", 1L, cache.getCacheMisses()); + + cache.clearCache(); + + assertEquals("wrong hit count", 0L, cache.getCacheHits()); + assertEquals("wrong miss count", 0L, cache.getCacheMisses()); + } + + @Test + public void cacheSizeOfZeroShouldBeInterpretedAsOne() throws Exception { + cache = new MetadataBlockCache(tmbr, 0); + MetadataBlock block = blockMap.get(Long.valueOf(0L)); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + assertSame(block, cache.read(10101, Long.valueOf(0L))); + + assertEquals("wrong hit count", 1L, cache.getCacheHits()); + assertEquals("wrong miss count", 1L, cache.getCacheMisses()); + } + + @Test + public void explicitCacheSizeShouldHitIfQueryingLessThanCacheSize() + throws Exception { + cache = new MetadataBlockCache(tmbr, 10); + + int count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 10) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 10) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + assertEquals("wrong hit count", 10L, cache.getCacheHits()); + assertEquals("wrong miss count", 10L, cache.getCacheMisses()); + } + + @Test + public void explicitCacheSizeShouldMissIfQueryingMoreThanCacheSize() + throws Exception { + cache = new MetadataBlockCache(tmbr, 10); + + int count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 11) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + count = 0; + for (Map.Entry<Long, MetadataBlock> entry : blockMap.entrySet()) { + count++; + if (count > 11) { + break; + } + assertSame(String + .format("wrong block for offset %d", entry.getKey().longValue()), + entry.getValue(), + cache.read(10101, entry.getKey().longValue())); + } + assertEquals("wrong hit count", 0L, cache.getCacheHits()); + assertEquals("wrong miss count", 22L, cache.getCacheMisses()); + } + + @Test + public void closeShouldCloseUnderlyingBlockReaderByDefault() + throws Exception { + cache.close(); + assertTrue("not closed", mbr.isClosed()); + } + + @Test + public void closeShouldCloseUnderlyingBlockReaderIfExplicitlySet() + throws Exception { + cache = new MetadataBlockCache(tmbr, true); + cache.close(); + assertTrue("not closed", mbr.isClosed()); + } + + @Test + public void closeShouldNotCloseUnderlyingBlockReaderIfExplicitlyUnset() + throws Exception { + cache = new MetadataBlockCache(tmbr, false); + cache.close(); + assertFalse("closed", mbr.isClosed()); + } + + @Test + public void closeShouldCloseUnderlyingBlockReaderIfExplicitlySetAndSizeSpecified() + throws Exception { + cache = new
MetadataBlockCache(tmbr, 1, true); + cache.close(); + assertTrue("not closed", mbr.isClosed()); + } + + @Test + public void closeShouldNotCloseUnderlyingBlockReaderIfExplicitlyUnsetAndSizeSpecified() + throws Exception { + cache = new MetadataBlockCache(tmbr, 1, false); + cache.close(); + assertFalse("closed", mbr.isClosed()); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockReader.java new file mode 100644 index 00000000000..29140338bbd --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockReader.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.directory.DirectoryHeader; +import org.apache.hadoop.runc.squashfs.inode.BasicDirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataBlockReaderMock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestMetadataBlockReader { + + @Test + public void readerShouldBeAbleToLocateDataFromMetadataReference() + throws Exception { + SuperBlock sb = new SuperBlock(); + + MetadataReference meta = + new MetadataReference(10101, 12345L, (short) 6789, Integer.MAX_VALUE); + + byte[] buf = new byte[8192]; + buf[6789] = (byte) 0xff; + + MetadataBlock block = MetadataTestUtils.block(buf); + try (MetadataBlockReaderMock br = new MetadataBlockReaderMock(10101, sb, + 12345L, block)) { + MetadataReader mr = br.reader(meta); + byte result = mr.readByte(); + assertEquals((byte) 0xff, result); + } + } + + @Test + public void rawReaderShouldBeAbleToLocateDataFromLocationAndOffset() + throws Exception { + SuperBlock sb = new SuperBlock(); + + byte[] buf = new byte[8192]; + buf[6789] = (byte) 0xff; + + MetadataBlock block = MetadataTestUtils.block(buf); + try (MetadataBlockReaderMock br = new MetadataBlockReaderMock(10101, sb, + 12345L, block)) { + MetadataReader mr = br.rawReader(10101, 12345L, (short) 6789); + byte result = mr.readByte(); + assertEquals((byte) 0xff, result); + } + } + + @Test + public void inodeReaderShouldBeAbleToLocateDataFromInodeRef() + throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + byte[] buf = new byte[8192]; + buf[1234] = (byte) 0xff; + + MetadataBlock block = MetadataTestUtils.block(buf); + try 
(MetadataBlockReaderMock br = new MetadataBlockReaderMock(10101, sb, + 66666L, block)) { + MetadataReader mr = + br.inodeReader(10101, new INodeRef(54321, (short) 1234).getRaw()); + byte result = mr.readByte(); + assertEquals((byte) 0xff, result); + } + } + + @Test + public void inodeReaderShouldBeAbleToLocateDataFromDirectoryEntry() + throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + byte[] buf = new byte[8192]; + buf[1234] = (byte) 0xff; + + DirectoryHeader dh = new DirectoryHeader() { + { + this.startBlock = 54321; + } + }; + DirectoryEntry de = new DirectoryEntry() { + { + this.header = dh; + this.offset = (short) 1234; + } + }; + + MetadataBlock block = MetadataTestUtils.block(buf); + try (MetadataBlockReaderMock br = new MetadataBlockReaderMock(10101, sb, + 66666L, block)) { + MetadataReader mr = br.inodeReader(10101, de); + byte result = mr.readByte(); + assertEquals((byte) 0xff, result); + } + } + + @Test + public void directoryReaderShouldBeAbleToLocateDataFromINode() + throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setDirectoryTableStart(12345L); + + byte[] buf = new byte[8192]; + buf[1234] = (byte) 0xff; + + BasicDirectoryINode inode = new BasicDirectoryINode(); + inode.setFileSize(50); + inode.setStartBlock(54321); + inode.setOffset((short) 1234); + + MetadataBlock block = MetadataTestUtils.block(buf); + try (MetadataBlockReaderMock br = new MetadataBlockReaderMock(10101, sb, + 66666L, block)) { + MetadataReader mr = br.directoryReader(10101, inode); + byte result = mr.readByte(); + assertEquals((byte) 0xff, result); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockRef.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockRef.java new file mode 100644 index 00000000000..a222faefd3c --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataBlockRef.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestMetadataBlockRef { + + MetadataBlockRef ref; + + @Before + public void setUp() { + ref = new MetadataBlockRef(1, (short) 2); + } + + @Test + public void getLocationShouldReturnConstructedValue() { + assertEquals("wrong value", 1, ref.getLocation()); + } + + @Test + public void getOffsetShouldReturnConstructedValue() { + assertEquals("wrong value", (short) 2, ref.getOffset()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(ref.toString()); + } + + @Test + public void testToINodeRef() { + INodeRef inodeRef = ref.toINodeRef(); + assertEquals(1, inodeRef.getLocation()); + assertEquals((short) 2, inodeRef.getOffset()); + } + + @Test + public void testToINodeRefRaw() { + long raw = ref.toINodeRefRaw(); + + INodeRef inodeRef = new INodeRef(raw); + assertEquals(1, inodeRef.getLocation()); + assertEquals((short) 2, inodeRef.getOffset()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReader.java new file mode 100644 index 00000000000..f6ea341703a --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReader.java @@ -0,0 +1,353 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataBlockReaderMock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.Test; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class TestMetadataReader { + + @Test + public void isEofShouldReturnTrueOnEmptyStream() throws Exception { + MetadataReader r = reader(new byte[0], ref(10101, 0L, (short) 0)); + assertTrue(r.isEof()); + } + + @Test + public void isEofShouldReturnFalseOnSingleByteStream() throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0)); + assertFalse(r.isEof()); + } + + @Test + public void isEofShouldReturnTrueIfReadPastMaxLength() throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0, 1)); + r.skipBytes(1); + assertTrue(r.isEof()); + } + + @Test + public void isEofShouldReturnFalseIfPartwayThroughBlock() throws Exception { + MetadataReader r = reader(new byte[10], ref(10101, 0L, (short) 0, 10)); + r.skipBytes(5); + assertFalse(r.isEof()); + } + + @Test + public void positionShouldReturnZeroOnEmptyStream() throws Exception { + MetadataReader r = reader(new byte[0], ref(10101, 0L, (short) 0)); + assertEquals(0, r.position()); + } + + @Test + public void availableShouldReturnZeroOnEmptyStream() throws Exception { + MetadataReader r = reader(new byte[0], ref(10101, 0L, (short) 0)); + assertEquals(0, r.available()); + } + + @Test + public void availableShouldInitiallyReturnZeroOnSingleByteStream() + throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0)); + assertEquals(0, r.available()); + } + + @Test + public void availableShouldReturnOneOnSingleByteStreamAfterCheckingEof() + throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0)); + r.isEof(); + assertEquals(1, r.available()); + } + + @Test + public void availableShouldReturneZeroOnSingleByteStreamAfterConsumingByte() + throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0)); + r.readByte(); + assertEquals(0, r.available()); + } + + @Test + public void isEofShouldReturnTrueAfterConsumingSingleByteStream() + throws Exception { + MetadataReader r = reader(new byte[1], ref(10101, 0L, (short) 0)); + r.readByte(); + assertTrue(r.isEof()); + } + + @Test + public void readFullyShouldWorkIfNotEof() throws Exception { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + MetadataReader r = reader(data, ref(10101, 0L, (short) 0)); + byte[] copy = new byte[1024]; + r.readFully(copy); + assertArrayEquals(data, copy); + } + + @Test(expected = EOFException.class) + public void readFullyShouldThrowExceptionIfTooLong() throws Exception { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 1024)); + byte[] copy = new byte[1025]; + r.readFully(copy); + } + + @Test + public void readFullyShouldWorkWithPartials() throws Exception { + byte[] data = new 
byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + MetadataReader r = reader(data, ref(10101, 0L, (short) 0)); + byte[] copy = new byte[1024]; + r.readFully(copy, 0, 512); + r.readFully(copy, 512, 512); + assertArrayEquals(data, copy); + } + + @Test + public void skipBytesShouldDoSo() throws Exception { + byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0)); + assertEquals("wrong bytes skipped", 5, r.skipBytes(5)); + assertEquals("wrong next byte", (byte) 5, r.readByte()); + } + + @Test + public void skipBytesShouldDoPartialSkipIfEof() throws Exception { + byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 10)); + assertEquals("wrong bytes skipped", 10, r.skipBytes(15)); + } + + @Test + public void readBooleanShouldWork() throws Exception { + byte[] data = new byte[] { 0, 1 }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 2)); + assertFalse("first value true", r.readBoolean()); + assertTrue("second value false", r.readBoolean()); + } + + @Test + public void readByteShouldWork() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 1)); + assertEquals((byte) 0xff, r.readByte()); + } + + @Test(expected = EOFException.class) + public void readByteShouldThrowEOFExceptionIfEndOfStream() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 1)); + assertEquals((byte) 0xff, r.readByte()); + r.readByte(); + } + + @Test + public void readUnsignedByteShouldWork() throws Exception { + byte[] data = new byte[] { (byte) 0xff }; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 1)); + assertEquals(0xff, r.readUnsignedByte()); + } + + @Test + public void readShortShouldSwapBytes() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer() + .put((short) 0x1234); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 2)); + assertEquals((short) 0x1234, r.readShort()); + } + + @Test + public void readUnsignedShortShouldSwapBytes() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer() + .put((short) 0xfedc); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 2)); + assertEquals(0xfedc, r.readUnsignedShort()); + } + + @Test + public void readCharShouldSwapBytes() throws Exception { + byte[] data = new byte[2]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer() + .put((char) 0x1234); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 2)); + assertEquals((char) 0x1234, r.readChar()); + } + + @Test + public void readIntShouldSwapBytes() throws Exception { + byte[] data = new byte[4]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer() + .put(0x12345678); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 4)); + assertEquals(0x12345678, r.readInt()); + } + + @Test + public void readFloatShouldSwapBytes() throws Exception { + float value = new Random(0L).nextFloat(); + + byte[] data = new byte[4]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer() + .put(value); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 4)); + assertEquals(value, r.readFloat(), 0.0000001f); + } + + @Test + public void readLongShouldSwapBytes() throws Exception { + byte[] data = new 
byte[8]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asLongBuffer() + .put(0x12345678_90abcdefL); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 8)); + assertEquals(0x12345678_90abcdefL, r.readLong()); + } + + @Test + public void readDoubleShouldSwapBytes() throws Exception { + double value = new Random(0L).nextDouble(); + + byte[] data = new byte[8]; + ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN).asDoubleBuffer() + .put(value); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, 8)); + assertEquals(value, r.readDouble(), 0.0000001d); + } + + @Test + public void readLineShouldReturnNullOnEof() throws Exception { + String value = ""; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertNull(r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringHitsEof() throws Exception { + String value = "test"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithLF() throws Exception { + String value = "test\ntest2"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithLFAndEof() throws Exception { + String value = "test\n"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithCR() throws Exception { + String value = "test\rtest2"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithCRAndEof() throws Exception { + String value = "test\r"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithCRLF() throws Exception { + String value = "test\r\ntest2"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test + public void readLineShouldWorkWhenStringEndsWithCRLFAndEof() + throws Exception { + String value = "test\r\n"; + byte[] data = value.getBytes(StandardCharsets.ISO_8859_1); + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + assertEquals("test", r.readLine()); + } + + @Test(expected = UnsupportedOperationException.class) + public void readUTFShouldThrowUnsupportedOperationException() + throws Exception { + byte[] data = new byte[1]; + MetadataReader r = reader(data, ref(10101, 0L, (short) 0, data.length)); + r.readUTF(); + } + + MetadataReference ref(int tag, long blockLocation, short offset) { + return ref(tag, blockLocation, offset, Integer.MAX_VALUE); + } + + MetadataReference ref(int tag, long blockLocation, short offset, + int maxLength) { + return new MetadataReference(tag, blockLocation, offset, maxLength); + } + + MetadataReader reader(byte[] data, MetadataReference ref) throws 
IOException { + return reader(new SuperBlock(), data, ref); + } + + MetadataReader reader(SuperBlock sb, byte[] data, MetadataReference ref) + throws IOException { + MetadataBlock block = MetadataTestUtils.block(data); + MetadataBlockReaderMock mbr = + new MetadataBlockReaderMock(ref.getTag(), sb, ref.getBlockLocation(), + block); + return new MetadataReader(mbr, ref); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReference.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReference.java new file mode 100644 index 00000000000..0197e928699 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataReference.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.directory.DirectoryHeader; +import org.apache.hadoop.runc.squashfs.inode.BasicDirectoryINode; +import org.apache.hadoop.runc.squashfs.inode.INodeRef; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestMetadataReference { + + @Test + public void inodeWithInodeRefShouldBeRelativeToInodeTable() throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + INodeRef inodeRef = new INodeRef(54321, (short) 1234); + MetadataReference ref = + MetadataReference.inode(10101, sb, inodeRef.getRaw()); + System.out.println(ref); + assertEquals(10101, ref.getTag()); + assertEquals(66666L, ref.getBlockLocation()); + assertEquals((short) 1234, ref.getOffset()); + assertEquals(Integer.MAX_VALUE, ref.getMaxLength()); + } + + @Test(expected = SquashFsException.class) + public void inodeWithInodeRefShouldFailIfOffsetIsTooLarge() throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + INodeRef inodeRef = new INodeRef(54321, (short) 8192); + MetadataReference.inode(1234, sb, inodeRef.getRaw()); + } + + @Test + public void inodeWithDirectoryEntryShouldBeRelativeToInodeTable() + throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + DirectoryHeader dh = new DirectoryHeader() { + { + this.startBlock = 54321; + } + }; + DirectoryEntry de = new DirectoryEntry() { + { + this.header = dh; + this.offset = (short) 1234; + } + }; + + MetadataReference ref = MetadataReference.inode(10101, sb, de); + System.out.println(ref); + assertEquals(10101, ref.getTag()); + assertEquals(66666L, ref.getBlockLocation()); + 
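+    // 66666 = inode table start (12345) + directory entry start block (54321), i.e. the reference resolves relative to the inode table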
assertEquals((short) 1234, ref.getOffset()); + assertEquals(Integer.MAX_VALUE, ref.getMaxLength()); + } + + @Test(expected = SquashFsException.class) + public void inodeWithDirectoryEntryShouldFailIfOffsetIsTooLarge() + throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setInodeTableStart(12345L); + + DirectoryHeader dh = new DirectoryHeader() { + { + this.startBlock = 54321; + } + }; + DirectoryEntry de = new DirectoryEntry() { + { + this.header = dh; + this.offset = (short) 8192; + } + }; + + MetadataReference.inode(10101, sb, de); + } + + @Test + public void rawShouldBeRelativeToInodeTable() throws Exception { + MetadataReference ref = MetadataReference.raw(10101, 12345L, (short) 6789); + System.out.println(ref); + assertEquals(10101, ref.getTag()); + assertEquals(12345L, ref.getBlockLocation()); + assertEquals((short) 6789, ref.getOffset()); + assertEquals(Integer.MAX_VALUE, ref.getMaxLength()); + } + + @Test(expected = SquashFsException.class) + public void rawShouldFailIfOffsetIsTooLarge() throws Exception { + MetadataReference.raw(10101, 12345L, (short) 8192); + } + + @Test + public void directoryShouldBeRelativeToDirectoryTable() throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setDirectoryTableStart(12345L); + + BasicDirectoryINode inode = new BasicDirectoryINode(); + inode.setFileSize(50); + inode.setStartBlock(54321); + inode.setOffset((short) 1234); + + MetadataReference ref = MetadataReference.directory(10101, sb, inode); + System.out.println(ref); + assertEquals(10101, ref.getTag()); + assertEquals(66666L, ref.getBlockLocation()); + assertEquals((short) 1234, ref.getOffset()); + assertEquals(47, ref.getMaxLength()); + } + + @Test(expected = SquashFsException.class) + public void directoryShouldFailIfOffsetIsTooLarge() throws Exception { + SuperBlock sb = new SuperBlock(); + sb.setDirectoryTableStart(12345L); + + BasicDirectoryINode inode = new BasicDirectoryINode(); + inode.setFileSize(50); + inode.setStartBlock(54321); + inode.setOffset((short) 8192); + + MetadataReference.directory(10101, sb, inode); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataWriter.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataWriter.java new file mode 100644 index 00000000000..56714c9d027 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestMetadataWriter.java @@ -0,0 +1,364 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.CharBuffer; +import java.nio.DoubleBuffer; +import java.nio.FloatBuffer; +import java.nio.IntBuffer; +import java.nio.LongBuffer; +import java.nio.ShortBuffer; +import java.util.Arrays; +import java.util.Random; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +public class TestMetadataWriter { + + MetadataWriter writer; + + @Before + public void setUp() { + writer = new MetadataWriter(); + } + + @Test + public void getCurrentReferenceShouldReturnZeroBeforeBlocksWritten() { + MetadataBlockRef ref = writer.getCurrentReference(); + assertEquals("wrong location", 0, ref.getLocation()); + assertEquals("wrong offset", (short) 0, ref.getOffset()); + } + + byte[] save() throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + writer.save(dos); + } + return bos.toByteArray(); + } + } + + @Test + public void writeByteArrayShouldWriteAsIs() throws Exception { + byte[] buf = new byte[] { 0, 1, 2, 3, 4 }; + writer.write(buf); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlock(out); + assertArrayEquals(buf, buf2); + } + + @Test + public void writeLongByteArrayShouldSucceed() throws Exception { + byte[] buf = new byte[16384]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) (i % 32); + } + writer.write(buf); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertArrayEquals(buf, buf2); + } + + @Test + public void writeEmptyByteArrayShouldDoNothing() throws Exception { + byte[] buf = new byte[0]; + writer.write(buf); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + assertEquals("wrong length", 0, out.length); + } + + @Test + public void flushShouldDoNothingIfNoData() throws Exception { + writer.flush(); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + assertEquals("wrong length", 0, out.length); + } + + @Test + public void flushShouldWriteMultipleBlocksIfNecessary() throws Exception { + byte[] buf0 = new byte[] { 0, 1, 2, 3, 4 }; + byte[] buf1 = new byte[] { 5, 6, 7, 8, 9 }; + + MetadataBlockRef ref1 = writer.getCurrentReference(); + System.out.println(ref1); + writer.write(buf0); + writer.flush(); + + MetadataBlockRef ref2 = writer.getCurrentReference(); + System.out.println(ref2); + writer.write(buf1); + writer.flush(); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = + MetadataTestUtils.decodeMetadataBlock(out, ref1.getLocation()); + byte[] buf3 = + MetadataTestUtils.decodeMetadataBlock(out, ref2.getLocation()); + assertArrayEquals(buf0, buf2); + assertArrayEquals(buf1, buf3); + } + + @Test + public void flushShouldHandleWritingCompressedData() throws Exception { + byte[] buf = new byte[8192]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) 0xff; + } + + MetadataBlockRef ref = writer.getCurrentReference(); + System.out.println(ref); + writer.write(buf); + writer.flush(); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlock(out, 
ref.getLocation()); + assertArrayEquals(buf, buf2); + } + + @Test + public void writeByteArrayWithOffsetAndLengthShouldSucceed() + throws Exception { + byte[] buf = new byte[1024]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) (i % 256); + } + writer.write(buf, 10, 100); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + byte[] bufPrime = Arrays.copyOfRange(buf, 10, 110); + assertArrayEquals(bufPrime, buf2); + } + + @Test + public void writeIntAsByteShouldStripHighBits() throws Exception { + for (int i = 0; i < 512; i++) { + writer.write(i); + } + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 512, buf2.length); + for (int i = 0; i < buf2.length; i++) { + assertEquals(String.format("wrong value at index %d", i), + (byte) (i % 256), buf2[i]); + } + } + + @Test + public void writeByteShouldStripHighBits() throws Exception { + for (int i = 0; i < 512; i++) { + writer.writeByte(i); + } + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 512, buf2.length); + for (int i = 0; i < buf2.length; i++) { + assertEquals(String.format("wrong value at index %d", i), + (byte) (i % 256), buf2[i]); + } + } + + @Test + public void writeBooleanShouldWorkAsExpected() throws Exception { + writer.writeBoolean(false); + writer.writeBoolean(true); + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 2, buf2.length); + assertEquals("wrong value for false", (byte) 0, buf2[0]); + assertEquals("wrong value for true", (byte) 1, buf2[1]); + } + + @Test + public void writeShortShouldByteSwapOutput() throws Exception { + for (int i = 0; i <= 0xffff; i++) { + writer.writeShort(i); + } + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 0x20000, buf2.length); + ShortBuffer sb = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer(); + for (int i = 0; i <= 0xffff; i++) { + assertEquals(String.format("wrong value at index %d", i), + (short) (i & 0xffff), sb.get(i)); + } + } + + @Test + public void writeCharShouldByteSwapOutput() throws Exception { + for (int i = 0; i <= 0xffff; i++) { + writer.writeChar(i); + } + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 0x20000, buf2.length); + CharBuffer cb = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(); + for (int i = 0; i <= 0xffff; i++) { + assertEquals(String.format("wrong value at index %d", i), + (char) (i & 0xffff), cb.get(i)); + } + } + + @Test + public void writIntShouldByteSwapOutput() throws Exception { + writer.writeInt(0x000000ff); + writer.writeInt(0x0000ff00); + writer.writeInt(0x00ff0000); + writer.writeInt(0xff000000); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 16, buf2.length); + IntBuffer ib = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); + assertEquals(0x000000ff, ib.get(0)); + assertEquals(0x0000ff00, ib.get(1)); + assertEquals(0x00ff0000, ib.get(2)); + assertEquals(0xff000000, ib.get(3)); 
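+    // reading the buffer back through a little-endian IntBuffer confirms that writeInt() byte-swapped each value on output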
+ } + + @Test + public void writLongShouldByteSwapOutput() throws Exception { + writer.writeLong(0x0000_0000_0000_00ffL); + writer.writeLong(0x0000_0000_0000_ff00L); + writer.writeLong(0x0000_0000_00ff_0000L); + writer.writeLong(0x0000_0000_ff00_0000L); + writer.writeLong(0x0000_00ff_0000_0000L); + writer.writeLong(0x0000_ff00_0000_0000L); + writer.writeLong(0x00ff_0000_0000_0000L); + writer.writeLong(0xff00_0000_0000_0000L); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 64, buf2.length); + + LongBuffer lb = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asLongBuffer(); + assertEquals(0x0000_0000_0000_00ffL, lb.get(0)); + assertEquals(0x0000_0000_0000_ff00L, lb.get(1)); + assertEquals(0x0000_0000_00ff_0000L, lb.get(2)); + assertEquals(0x0000_0000_ff00_0000L, lb.get(3)); + assertEquals(0x0000_00ff_0000_0000L, lb.get(4)); + assertEquals(0x0000_ff00_0000_0000L, lb.get(5)); + assertEquals(0x00ff_0000_0000_0000L, lb.get(6)); + assertEquals(0xff00_0000_0000_0000L, lb.get(7)); + } + + @Test + public void writeFloatShouldByteSwapOutput() throws Exception { + Random r = new Random(0L); + + float[] data = new float[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = r.nextFloat(); + writer.writeFloat(data[i]); + } + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 4096, buf2.length); + + FloatBuffer fb = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer(); + for (int i = 0; i < data.length; i++) { + assertEquals(String.format("wrong value at index %d", i), data[i], + fb.get(i), 0.0000001f); + } + } + + @Test + public void writeDoubleShouldByteSwapOutput() throws Exception { + Random r = new Random(0L); + + double[] data = new double[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = r.nextDouble(); + writer.writeDouble(data[i]); + } + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 8192, buf2.length); + + DoubleBuffer db = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asDoubleBuffer(); + for (int i = 0; i < data.length; i++) { + assertEquals(String.format("wrong value at index %d", i), data[i], + db.get(i), 0.0000001d); + } + } + + @Test + public void writeCharsStringShouldByteSwapOutput() throws Exception { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i <= 0xffff; i++) { + buf.append((char) i); + } + String str = buf.toString(); + writer.writeChars(str); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 0x20000, buf2.length); + CharBuffer cb = + ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN).asCharBuffer(); + for (int i = 0; i <= 0xffff; i++) { + assertEquals(String.format("wrong value at index %d", i), str.charAt(i), + cb.get(i)); + } + } + + @Test + public void writeBytesStringShouldWorkAsExpected() throws Exception { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < 512; i++) { + buf.append((char) i); + } + String str = buf.toString(); + writer.writeBytes(str); + + byte[] out = MetadataTestUtils.saveMetadataBlock(writer); + byte[] buf2 = MetadataTestUtils.decodeMetadataBlocks(out, 0); + assertEquals("wrong length", 512, buf2.length); + ByteBuffer bb = 
ByteBuffer.wrap(buf2).order(ByteOrder.LITTLE_ENDIAN); + for (int i = 0; i < 512; i++) { + assertEquals(String.format("wrong value at index %d", i), + (byte) (i & 0xff), bb.get(i)); + } + } + + @Test(expected = UnsupportedOperationException.class) + public void writeUTFShouldThrowsExcpetion() throws Exception { + writer.writeUTF("test"); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestTaggedMetadataBlock.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestTaggedMetadataBlock.java new file mode 100644 index 00000000000..bec11d007ba --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/metadata/TestTaggedMetadataBlock.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.metadata; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.EOFException; +import java.io.File; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestTaggedMetadataBlock { + + File tempFile; + TaggedMetadataBlockReader taggedReader; + MemoryMetadataBlockReader reader; + SuperBlock sb; + byte[] block; + byte[] block2; + byte[] encoded; + int offset2; + + @Before + public void setUp() throws Exception { + sb = new SuperBlock(); + // write a block + block = new byte[1024]; + for (int i = 0; i < block.length; i++) { + block[i] = (byte) (i & 0xff); + } + block2 = new byte[1024]; + for (int i = 0; i < block2.length; i++) { + block2[i] = (byte) ((i + 128) & 0xff); + } + byte[] data1 = MetadataTestUtils.saveMetadataBlock(block); + byte[] data2 = MetadataTestUtils.saveMetadataBlock(block2); + offset2 = data1.length; + encoded = new byte[data1.length + data2.length]; + System.arraycopy(data1, 0, encoded, 0, data1.length); + System.arraycopy(data2, 0, encoded, data1.length, data2.length); + reader = + new MemoryMetadataBlockReader(10101, sb, encoded, 0, encoded.length); + taggedReader = new TaggedMetadataBlockReader(true); + taggedReader.add(10101, reader); + } + + @After + public void tearDown() throws Exception { + taggedReader.close(); + taggedReader = null; + reader = null; + encoded = null; + block = null; + block2 = null; + sb = null; + } + + @Test(expected = IllegalArgumentException.class) + public void addOfExistingTagShouldFail() { + taggedReader.add(10101, reader); + } + + @Test + public void addOfNewTagShouldSucceed() { + taggedReader.add(10102, reader); + } + + @Test + public void 
getSuperBlockShouldReturnConstructedInstance() { + assertSame(sb, taggedReader.getSuperBlock(10101)); + } + + @Test(expected = IllegalArgumentException.class) + public void getSuperBlockShouldFailOnIncorrectTag() { + taggedReader.getSuperBlock(10102); + } + + @Test + public void readFirstBlockShouldSucceed() throws Exception { + MetadataBlock mb = taggedReader.read(10101, 0L); + assertEquals(1024, mb.data.length); + assertArrayEquals(block, mb.data); + } + + @Test + public void readSecondBlockShouldSucceed() throws Exception { + MetadataBlock mb = taggedReader.read(10101, offset2); + assertEquals(1024, mb.data.length); + assertArrayEquals(block2, mb.data); + } + + @Test(expected = EOFException.class) + public void readPastEofShouldFail() throws Exception { + taggedReader.read(10101, encoded.length); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestCompressionId.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestCompressionId.java new file mode 100644 index 00000000000..f1a3d33124f --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestCompressionId.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestCompressionId { + + @Test + public void valueShouldReturnIncrementingValues() { + CompressionId[] values = CompressionId.values(); + + for (int i = 0; i < values.length; i++) { + assertEquals(String.format("Wrong value for %s", values[i]), (short) i, + values[i].value()); + } + } + + @Test + public void fromValueShouldReturnCorrectItem() throws Exception { + CompressionId[] values = CompressionId.values(); + + for (int i = 0; i < values.length; i++) { + assertSame(values[i], CompressionId.fromValue((short) i)); + } + } + + @Test(expected = SquashFsException.class) + public void fromValueShouldThrowExceptionOnInvalidValue() throws Exception { + CompressionId.fromValue((short) CompressionId.values().length); + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlock.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlock.java new file mode 100644 index 00000000000..b15bb84cbc9 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlock.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.test.SuperBlockTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +public class TestSuperBlock { + + SuperBlock sb; + + @Before + public void setUp() { + sb = new SuperBlock(); + } + + @Test + public void inodeCountPropertyShouldWorkAsExpected() { + assertEquals(0, sb.getInodeCount()); + sb.setInodeCount(1); + assertEquals(1, sb.getInodeCount()); + } + + @Test + public void modificationTimePropertyShouldWorkAsExpected() { + assertEquals((double) System.currentTimeMillis(), + (double) (sb.getModificationTime() * 1000L), 5000d); + sb.setModificationTime(1); + assertEquals(1, sb.getModificationTime()); + } + + @Test + public void blockSizePropertyShouldWorkAsExpected() { + assertEquals(131072, sb.getBlockSize()); + sb.setBlockSize(262144); + assertEquals(262144, sb.getBlockSize()); + } + + @Test + public void fragmentEntryCountPropertyShouldWorkAsExpected() { + assertEquals(0, sb.getFragmentEntryCount()); + sb.setFragmentEntryCount(1); + assertEquals(1, sb.getFragmentEntryCount()); + } + + @Test + public void compressionIdPropertyShouldWorkAsExpected() { + assertSame(CompressionId.ZLIB, sb.getCompressionId()); + sb.setCompressionId(CompressionId.NONE); + assertSame(CompressionId.NONE, sb.getCompressionId()); + } + + @Test + public void blockLogPropertyShouldWorkAsExpected() { + assertEquals((short) 17, sb.getBlockLog()); + sb.setBlockLog((short) 18); + assertEquals((short) 18, sb.getBlockLog()); + } + + @Test + public void flagsPropertyShouldWorkAsExpected() { + assertEquals(SuperBlockFlag + .flagsFor(SuperBlockFlag.EXPORTABLE, SuperBlockFlag.DUPLICATES), + sb.getFlags()); + sb.setFlags(SuperBlockFlag.CHECK.mask()); + assertEquals(SuperBlockFlag.CHECK.mask(), sb.getFlags()); + } + + @Test + public void idCountPropertyShouldWorkAsExpected() { + assertEquals((short) 0, sb.getIdCount()); + sb.setIdCount((short) 1); + assertEquals((short) 1, sb.getIdCount()); + } + + @Test + public void versionMajorPropertyShouldWorkAsExpected() { + assertEquals((short) 4, sb.getVersionMajor()); + sb.setVersionMajor((short) 5); + assertEquals((short) 5, sb.getVersionMajor()); + } + + @Test + public void versionMinorPropertyShouldWorkAsExpected() { + assertEquals((short) 0, sb.getVersionMinor()); + sb.setVersionMinor((short) 1); + assertEquals((short) 1, sb.getVersionMinor()); + } + + @Test + public void rootInodeRefPropertyShouldWorkAsExpected() { + 
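+    // a freshly constructed SuperBlock defaults the root inode ref to 0; the setter/getter round-trip is checked below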
assertEquals(0L, sb.getRootInodeRef()); + sb.setRootInodeRef(1L); + assertEquals(1L, sb.getRootInodeRef()); + } + + @Test + public void bytesUsedPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getBytesUsed()); + sb.setBytesUsed(1L); + assertEquals(1L, sb.getBytesUsed()); + } + + @Test + public void idTableStartPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getIdTableStart()); + sb.setIdTableStart(1L); + assertEquals(1L, sb.getIdTableStart()); + } + + @Test + public void xattrIdTableStartPropertyShouldWorkAsExpected() { + assertEquals(-1L, sb.getXattrIdTableStart()); + sb.setXattrIdTableStart(1L); + assertEquals(1L, sb.getXattrIdTableStart()); + } + + @Test + public void inodeTableStartPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getInodeTableStart()); + sb.setInodeTableStart(1L); + assertEquals(1L, sb.getInodeTableStart()); + } + + @Test + public void directoryTableStartPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getDirectoryTableStart()); + sb.setDirectoryTableStart(1L); + assertEquals(1L, sb.getDirectoryTableStart()); + } + + @Test + public void fragmentTableStartPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getFragmentTableStart()); + sb.setFragmentTableStart(1L); + assertEquals(1L, sb.getFragmentTableStart()); + } + + @Test + public void exportTableStartPropertyShouldWorkAsExpected() { + assertEquals(0L, sb.getExportTableStart()); + sb.setExportTableStart(1L); + assertEquals(1L, sb.getExportTableStart()); + } + + @Test + public void hasFlagShouldCorrectlyDetectFlags() { + assertTrue("Missing exportable flag", + sb.hasFlag(SuperBlockFlag.EXPORTABLE)); + assertTrue("Missing duplicates flag", + sb.hasFlag(SuperBlockFlag.DUPLICATES)); + assertFalse("Has always fragments flag", + sb.hasFlag(SuperBlockFlag.ALWAYS_FRAGMENTS)); + } + + @Test + public void readShouldSucceedNormally() throws Exception { + byte[] data = SuperBlockTestUtils.serializeSuperBlock(sb); + + try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) { + try (DataInputStream dis = new DataInputStream(bis)) { + assertNotNull(SuperBlock.read(dis)); + } + } + } + + @Test(expected = SquashFsException.class) + public void readDataShouldFailOnInvalidMagic() throws Exception { + byte[] data = SuperBlockTestUtils.serializeSuperBlock(sb); + data[0] = 0; // corrupt + SuperBlockTestUtils.deserializeSuperBlock(data); + } + + @Test(expected = SquashFsException.class) + public void readDataShouldFailOnMismatchedBlockSize() throws Exception { + byte[] data = SuperBlockTestUtils.serializeSuperBlock(sb); + data[22] = 0; // corrupt + SuperBlockTestUtils.deserializeSuperBlock(data); + } + + @Test(expected = SquashFsException.class) + public void readDataShouldFailOnUnknownVersion() throws Exception { + byte[] data = SuperBlockTestUtils.serializeSuperBlock(sb); + data[30] = 1; // corrupt + SuperBlockTestUtils.deserializeSuperBlock(data); + } + + @Test + public void writeDataAndReadDataShouldBeReflexive() throws Exception { + sb.setInodeCount(1); + sb.setBlockLog((short) 18); + sb.setBlockSize(262144); + sb.setFragmentEntryCount(2); + sb.setCompressionId(CompressionId.LZO); + sb.setFlags((short) 3); + sb.setIdCount((short) 4); + sb.setRootInodeRef(5L); + sb.setBytesUsed(6L); + sb.setIdTableStart(7L); + sb.setXattrIdTableStart(8L); + sb.setInodeTableStart(9L); + sb.setDirectoryTableStart(10L); + sb.setFragmentTableStart(11L); + sb.setExportTableStart(12L); + + byte[] data = SuperBlockTestUtils.serializeSuperBlock(sb); + + SuperBlock sb2 = SuperBlockTestUtils.deserializeSuperBlock(data); + 
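+ // Field-by-field comparison of the original and the re-read superblock follows.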
System.out.println(sb2); + + assertEquals("Wrong inode count", sb.getInodeCount(), sb2.getInodeCount()); + assertEquals("Wrong modification time", sb.getModificationTime(), + sb2.getModificationTime()); + assertEquals("Wrong block size", sb.getBlockSize(), sb2.getBlockSize()); + assertEquals("Wrong fragment entry count", sb.getFragmentEntryCount(), + sb2.getFragmentEntryCount()); + assertSame("Wrong compression ID", sb.getCompressionId(), + sb2.getCompressionId()); + assertEquals("Wrong block log", sb.getBlockLog(), sb2.getBlockLog()); + assertEquals("Wrong flags", sb.getFlags(), sb2.getFlags()); + assertEquals("Wrong id count", sb.getIdCount(), sb2.getIdCount()); + assertEquals("Wrong root inode ref", sb.getRootInodeRef(), + sb2.getRootInodeRef()); + assertEquals("Wrong bytes used", sb.getBytesUsed(), sb2.getBytesUsed()); + assertEquals("Wrong id table start", sb.getIdTableStart(), + sb2.getIdTableStart()); + assertEquals("Wrong xattr id table start", sb.getXattrIdTableStart(), + sb2.getXattrIdTableStart()); + assertEquals("Wrong inode table start", sb.getInodeTableStart(), + sb2.getInodeTableStart()); + assertEquals("Wrong directory table start", sb.getDirectoryTableStart(), + sb2.getDirectoryTableStart()); + assertEquals("Wrong fragment table start", sb.getFragmentTableStart(), + sb2.getFragmentTableStart()); + assertEquals("Wrong export table start", sb.getExportTableStart(), + sb2.getExportTableStart()); + } + + @Test + public void toStringShouldNotFail() { + System.out.println(sb.toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlockFlag.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlockFlag.java new file mode 100644 index 00000000000..c1b17763a7e --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/superblock/TestSuperBlockFlag.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.superblock; + +import org.junit.Test; + +import java.util.EnumSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestSuperBlockFlag { + + @Test + public void maskShouldReturnSuccessivePowersOfTwo() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + int mask = 1; + for (int i = 0; i < values.length; i++) { + assertEquals(String.format("Wrong mask for %s", values[i]), mask, + values[i].mask()); + mask = mask << 1; + } + } + + @Test + public void isSetShouldReturnTrueIfMaskIsPresent() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + assertTrue(String.format("Wrong isSet() for %s", values[i]), + values[i].isSet(mask)); + mask = (short) (mask << 1); + } + } + + @Test + public void isSetShouldReturnFalseIfMaskIsNotPresent() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + assertFalse(String.format("Wrong isSet() for %s", values[i]), + values[i].isSet((short) (~mask & 0xffff))); + mask = (short) (mask << 1); + } + } + + @Test + public void flagsPresentShouldIncludeValueContainingMask() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + EnumSet flags = SuperBlockFlag.flagsPresent(mask); + assertTrue(String.format("Flag not present for %s", values[i]), + flags.contains(values[i])); + mask = (short) (mask << 1); + } + } + + @Test + public void flagsPresentShouldNotIncludeValueNotContainingMask() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + EnumSet flags = + SuperBlockFlag.flagsPresent((short) (~mask & 0xffff)); + assertFalse(String.format("Flag present for %s", values[i]), + flags.contains(values[i])); + mask = (short) (mask << 1); + } + } + + @Test + public void flagsForCollectionShouldIncludePassedInValue() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + short value = SuperBlockFlag.flagsFor(EnumSet.of(values[i])); + assertEquals(String.format("Wrong result for %s", values[i]), mask, + value); + mask = (short) (mask << 1); + } + } + + @Test + public void flagsForVarArgsShouldIncludePassedInValue() { + SuperBlockFlag[] values = SuperBlockFlag.values(); + + short mask = 1; + for (int i = 0; i < values.length; i++) { + short value = SuperBlockFlag.flagsFor(values[i]); + assertEquals(String.format("Wrong result for %s", values[i]), mask, + value); + mask = (short) (mask << 1); + } + } + + @Test + public void flagsForCollectionShouldIncludeMultipleFlags() { + short mask = SuperBlockFlag.flagsFor( + EnumSet.of(SuperBlockFlag.EXPORTABLE, SuperBlockFlag.DUPLICATES)); + assertEquals("Wrong mask", + SuperBlockFlag.EXPORTABLE.mask() | SuperBlockFlag.DUPLICATES.mask(), + mask); + } + + @Test + public void flagsForVarargsShouldIncludeMultipleFlags() { + short mask = SuperBlockFlag + .flagsFor(SuperBlockFlag.EXPORTABLE, SuperBlockFlag.DUPLICATES); + assertEquals("Wrong mask", + SuperBlockFlag.EXPORTABLE.mask() | SuperBlockFlag.DUPLICATES.mask(), + mask); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestExportTable.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestExportTable.java new file mode 100644 
index 00000000000..d40f2d68c48 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestExportTable.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class TestExportTable { + + @Test + public void readShouldHandleReadingEmptyTable() throws Exception { + verify(0, true); + } + + @Test + public void readShouldHandleReadingSingleEntry() throws Exception { + verify(1, true); + } + + @Test + public void readShouldHandleExportTableBeingDisabled() throws Exception { + verify(1, false); + } + + @Test + public void readShouldHandleReadingSinglePage() throws Exception { + verify(1024, true); + } + + @Test + public void readShouldHandleReadingMultiplePages() throws Exception { + verify(2048, true); + } + + @Test + public void toStringShouldNotFail() throws Exception { + ExportTable table = verify(10, true); + System.out.println(table.toString()); + } + + @Test + public void toStringShouldNotFailOnNotAvailable() throws Exception { + ExportTable table = verify(10, false); + System.out.println(table.toString()); + } + + @Test(expected = SquashFsException.class) + public void getInodeRefRawShouldFailOnTooLargeValue() throws Exception { + ExportTable table = verify(100, true); + table.getInodeRefRaw(101); + } + + @Test(expected = SquashFsException.class) + public void getInodeRefRawShouldFailOnTooSmallValue() throws Exception { + ExportTable table = verify(100, true); + table.getInodeRefRaw(0); + } + + @Test(expected = SquashFsException.class) + public void getInodeRefShouldFailOnTooLargeValue() throws Exception { + ExportTable table = verify(100, true); + table.getInodeRef(101); + } + + @Test(expected = SquashFsException.class) + public void getInodeRefShouldFailOnTooSmallValue() throws Exception { + ExportTable table = verify(100, true); + table.getInodeRef(0); + } + + @Test + public void getInodeRefShouldReturnSameValueAsRaw() throws Exception { + 
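+ // getInodeRef() should decode the same 64-bit value that getInodeRefRaw() returns.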
ExportTable table = verify(100, true); + assertEquals("wrong ref value", table.getInodeRefRaw(1), + table.getInodeRef(1).getRaw()); + } + + ExportTable verify(int count, boolean available) throws Exception { + byte[] tableData; + + List<MetadataBlockRef> refs; + byte[] metadata; + + SuperBlock sb = new SuperBlock(); + sb.setInodeCount(count); + sb.setExportTableStart(SuperBlock.SIZE); + if (!available) { + sb.setFlags(SuperBlockFlag.DUPLICATES.mask()); + } + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + refs = createEntries(count, dos); + } + metadata = bos.toByteArray(); + } + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + sb.writeData(dos); + for (MetadataBlockRef ref : refs) { + byte[] buf = new byte[8]; + ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN) + .putLong(ref.getLocation()); + dos.write(buf); + } + } + tableData = bos.toByteArray(); + } + + int tag = 0; + TableReader tr = new MemoryTableReader(sb, tableData); + MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, metadata); + + ExportTable et = ExportTable.read(tag, tr, mbr); + assertEquals("available status", available, et.isAvailable()); + if (available) { + assertEquals("wrong inode count", count, et.getInodeCount()); + for (int i = 0; i < count; i++) { + assertEquals(String.format("wrong value of id %d", i + 1), + (long) (100000 + i), et.getInodeRefRaw(i + 1)); + } + } else { + assertEquals("wrong count", 0, et.getInodeCount()); + } + return et; + } + + List<MetadataBlockRef> createEntries(int count, DataOutput out) + throws IOException { + List<MetadataBlockRef> refs = new ArrayList<>(); + + MetadataWriter writer = new MetadataWriter(); + for (int i = 0; i < count; i++) { + if (i % 1024 == 0) { + refs.add(writer.getCurrentReference()); + } + writer.writeLong(100_000 + i); + } + writer.save(out); + + return refs; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFileTableReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFileTableReader.java new file mode 100644 index 00000000000..eca0bc3c68d --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFileTableReader.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.EOFException; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class TestFileTableReader { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + File tempFile; + + SuperBlock sb; + byte[] data; + FileTableReader reader; + + @Before + public void setUp() throws Exception { + tempFile = temp.newFile(); + sb = new SuperBlock(); + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) { + sb.writeData(raf); + data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + raf.write(data); + } + + reader = new FileTableReader(tempFile); + } + + @Test + public void readShouldExposeByteBuffer() throws Exception { + ByteBuffer bb = reader.read(SuperBlock.SIZE, data.length); + for (int i = 0; i < data.length; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) (i & 0xff), bb.get()); + } + } + + @Test + public void readShouldExposeByteBufferAtOffset() throws Exception { + ByteBuffer bb = reader.read(SuperBlock.SIZE + 1L, data.length - 1); + for (int i = 0; i < data.length - 1; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) ((i + 1) & 0xff), bb.get()); + } + } + + @Test(expected = EOFException.class) + public void readShouldThrowExceptionOnEof() throws Exception { + reader.read(SuperBlock.SIZE + 1023L, 2); + } + + @Test + public void getSuperBlockShouldReturnConstructedInstance() { + assertEquals(sb.getModificationTime(), + reader.getSuperBlock().getModificationTime()); + } + + @Test + public void closeShouldCloseUnderlyingReaderIfRequested() throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new FileTableReader(raf, sb, true); + reader.close(); + + try { + raf.seek(0L); + fail("exception not thrown"); + } catch (IOException e) { + System.out.println("Got EOF"); + } + } + } + + @Test + public void closeShouldNotCloseUnderlyingReaderIfNotRequested() + throws Exception { + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + reader = new FileTableReader(raf, sb, false); + reader.close(); + raf.seek(0L); + } + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTable.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTable.java new file mode 100644 index 00000000000..6ae080745de --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTable.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlockFlag; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class TestFragmentTable { + + @Test + public void readShouldHandleReadingEmptyTable() throws Exception { + verify(0, true); + } + + @Test + public void readShouldHandleReadingSingleEntry() throws Exception { + verify(1, true); + } + + @Test + public void readShouldHandleFragmentTableBeingDisabled() throws Exception { + verify(1, false); + } + + @Test + public void readShouldHandleReadingSinglePage() throws Exception { + verify(512, true); + } + + @Test + public void readShouldHandleReadingMultiplePages() throws Exception { + verify(1024, true); + } + + @Test + public void toStringShouldNotFail() throws Exception { + FragmentTable table = verify(10, true); + System.out.println(table.toString()); + } + + @Test + public void toStringShouldNotFailOnNotAvailable() throws Exception { + FragmentTable table = verify(10, false); + System.out.println(table.toString()); + } + + @Test(expected = SquashFsException.class) + public void getEntryShouldFailOnTooLargeValue() throws Exception { + FragmentTable table = verify(100, true); + table.getEntry(100); + } + + @Test(expected = SquashFsException.class) + public void getEntryShouldFailOnTooSmallValue() throws Exception { + FragmentTable table = verify(100, true); + table.getEntry(-1); + } + + FragmentTable verify(int count, boolean available) throws Exception { + byte[] tableData; + + List<MetadataBlockRef> refs; + byte[] metadata; + + SuperBlock sb = new SuperBlock(); + sb.setFragmentEntryCount(count); + sb.setFragmentTableStart(SuperBlock.SIZE); + if (!available) { + sb.setFlags((short) (sb.getFlags() | SuperBlockFlag.NO_FRAGMENTS.mask())); + } + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + refs = createEntries(count, dos); + } + metadata = bos.toByteArray(); + } + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + sb.writeData(dos); + for (MetadataBlockRef ref : refs) { + byte[] buf = new byte[8]; + ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN) + .putLong(ref.getLocation()); + dos.write(buf); + } + } + tableData =
bos.toByteArray(); + } + + int tag = 0; + TableReader tr = new MemoryTableReader(sb, tableData); + MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, metadata); + + FragmentTable ft = FragmentTable.read(tag, tr, mbr); + if (available) { + assertTrue("not available", ft.isAvailable()); + assertEquals(count, ft.getFragmentCount()); + for (int i = 0; i < count; i++) { + FragmentTableEntry entry = ft.getEntry(i); + assertNotNull(String.format("entry %d is null", i), entry); + assertEquals(String.format("wrong start for entry %d", i), + (long) (100_000 + i), entry.getStart()); + assertEquals(String.format("wrong size for entry %d", i), 10_000 + i, + entry.getSize()); + } + } else { + assertFalse("available", ft.isAvailable()); + assertEquals(0, ft.getFragmentCount()); + } + return ft; + } + + List createEntries(int count, DataOutput out) + throws IOException { + List refs = new ArrayList<>(); + + MetadataWriter writer = new MetadataWriter(); + for (int i = 0; i < count; i++) { + if (i % 512 == 0) { + writer.flush(); + refs.add(writer.getCurrentReference()); + } + writer.writeLong(100_000 + i); + writer.writeInt(10_000 + i); + writer.writeInt(0); + } + writer.save(out); + + return refs; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTableEntry.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTableEntry.java new file mode 100644 index 00000000000..22f828d70b9 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestFragmentTableEntry.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestFragmentTableEntry { + + @Test + public void isCompressedShouldReturnTrueIfEntryIsCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, true); + assertTrue(entry.isCompressed()); + } + + @Test + public void isCompressedShouldReturnFalseIfEntryIsNotCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, false); + assertFalse(entry.isCompressed()); + } + + @Test + public void getStartShouldReturnStartValue() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, true); + assertEquals(1L, entry.getStart()); + } + + @Test + public void getSizeShouldReturnMaskedSizeIfNotCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, false); + assertEquals(0x1000002, entry.getSize()); + } + + @Test + public void getSizeShouldReturnSizeIfCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, true); + assertEquals(2, entry.getSize()); + } + + @Test + public void getDiskSizeShouldReturnSizeIfNotCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, false); + assertEquals(2, entry.getDiskSize()); + } + + @Test + public void getDiskSizeShouldReturnSizeIfCompressed() { + FragmentTableEntry entry = new FragmentTableEntry(1L, 2, true); + assertEquals(2, entry.getDiskSize()); + } + + @Test + public void toStringShouldNotFailWhenCompressed() { + System.out.println(new FragmentTableEntry(1L, 2, true).toString()); + } + + @Test + public void toStringShouldNotFailWhenNotCompressed() { + System.out.println(new FragmentTableEntry(1L, 2, false).toString()); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTable.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTable.java new file mode 100644 index 00000000000..a4c811bb4bb --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTable.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class TestIdTable { + + List entries; + + @Test + public void readShouldHandleReadingEmptyTable() throws Exception { + verify(0); + } + + @Test + public void readShouldHandleReadingSingleEntry() throws Exception { + verify(1); + } + + @Test + public void readShouldHandleReadingSinglePage() throws Exception { + verify(2048); + } + + @Test + public void readShouldHandleReadingFullTable() throws Exception { + verify(65535); + } + + @Test(expected = SquashFsException.class) + public void idFromIndexShouldFailOnOutOfRangeIndex() throws Exception { + IdTable table = verify(100); + table.idFromIndex((short) 100); + } + + @Test + public void toStringShouldNotFail() throws Exception { + IdTable table = verify(10); + System.out.println(table.toString()); + } + + @Test(expected = SquashFsException.class) + public void indexFromIdShouldFailOnUnknownValue() throws Exception { + IdTable table = verify(100); + table.indexFromId(100_100); + } + + IdTable verify(int count) throws Exception { + byte[] tableData; + + List<MetadataBlockRef> refs; + byte[] metadata; + + SuperBlock sb = new SuperBlock(); + sb.setIdCount((short) (count & 0xffff)); + sb.setIdTableStart(SuperBlock.SIZE); + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + refs = createEntries(count, dos); + } + metadata = bos.toByteArray(); + } + + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + sb.writeData(dos); + for (MetadataBlockRef ref : refs) { + byte[] buf = new byte[8]; + ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN) + .putLong(ref.getLocation()); + dos.write(buf); + } + } + tableData = bos.toByteArray(); + } + + int tag = 0; + TableReader tr = new MemoryTableReader(sb, tableData); + MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, metadata); + + IdTable idt = IdTable.read(tag, tr, mbr); + assertEquals(count, idt.getIdCount()); + for (int i = 0; i < count; i++) { + assertEquals((short) (i & 0xffff), idt.indexFromId(100000 + i)); + assertEquals(100000 + i, idt.idFromIndex((short) i)); + } + return idt; + } + + List<MetadataBlockRef> createEntries(int count, DataOutput out) + throws IOException { + List<MetadataBlockRef> refs = new ArrayList<>(); + + MetadataWriter writer = new MetadataWriter(); + for (int i = 0; i < count; i++) { + if (i % 2048 == 0) { + refs.add(writer.getCurrentReference()); + } + writer.writeInt(100_000 + i); + } + writer.save(out); + + return refs; + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTableGenerator.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTableGenerator.java new file mode 100644 index 00000000000..c73a08986b4 ---
/dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestIdTableGenerator.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockRef; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.test.MetadataTestUtils; +import org.junit.Before; +import org.junit.Test; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.IntBuffer; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class TestIdTableGenerator { + + IdTableGenerator gen; + + @Before + public void setUp() { + gen = new IdTableGenerator(); + } + + @Test + public void addingDuplicateEntriesResultsInSingleMapping() { + int id = gen.addUidGid(1000); + assertEquals("wrong id", id, gen.addUidGid(1000)); + assertEquals("wrong count", 1, gen.getIdCount()); + } + + @Test + public void saveEmptyEntryWritesNoData() throws Exception { + MetadataWriter writer = new MetadataWriter(); + + List<MetadataBlockRef> refs = gen.save(writer); + assertEquals("wrong refs size", 0, refs.size()); + + byte[] ser = MetadataTestUtils.saveMetadataBlock(writer); + assertEquals("wrong data size", 0, ser.length); + } + + @Test + public void saveSingleEntryWritesOneRef() throws Exception { + gen.addUidGid(1000); + + MetadataWriter writer = new MetadataWriter(); + + List<MetadataBlockRef> refs = gen.save(writer); + assertEquals("wrong refs size", 1, refs.size()); + + assertEquals("wrong location", 0, refs.get(0).getLocation()); + assertEquals("wrong offset", (short) 0, refs.get(0).getOffset()); + + byte[] ser = MetadataTestUtils.saveMetadataBlock(writer); + byte[] decoded = MetadataTestUtils.decodeMetadataBlock(ser); + IntBuffer ib = + ByteBuffer.wrap(decoded).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); + assertEquals("wrong id", 1000, ib.get()); + } + + @Test + public void saveAllPossibleEntriesShouldWork() throws Exception { + for (int i = 0; i < 65536; i++) { + gen.addUidGid(100_000 + i); + } + + MetadataWriter writer = new MetadataWriter(); + + List<MetadataBlockRef> refs = gen.save(writer); + System.out.println(refs); + assertEquals("wrong refs size", 32, refs.size()); + + byte[] ser = MetadataTestUtils.saveMetadataBlock(writer); + byte[] decoded = MetadataTestUtils.decodeMetadataBlocks(ser); + IntBuffer ib = + ByteBuffer.wrap(decoded).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer(); + + for (int i = 0; i < 65536; i++) { + assertEquals(String.format("wrong id for entry %d", i), 100_000 + i, + ib.get()); + } + } + + @Test + public void toStringShouldNotFail() { + gen.addUidGid(1000); + System.out.println(gen.toString()); + } + +} diff --git
a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMappedFileTableReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMappedFileTableReader.java new file mode 100644 index 00000000000..71930ca04a0 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMappedFileTableReader.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.io.MappedFile; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.EOFException; +import java.io.File; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestMappedFileTableReader { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + File tempFile; + + SuperBlock sb; + byte[] data; + MappedFile mmap; + MappedFileTableReader reader; + + @Before + public void setUp() throws Exception { + tempFile = temp.newFile(); + sb = new SuperBlock(); + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "rw")) { + sb.writeData(raf); + data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + raf.write(data); + } + + try (RandomAccessFile raf = new RandomAccessFile(tempFile, "r")) { + mmap = MappedFile.mmap(raf.getChannel(), 1024, 2048); + } + reader = new MappedFileTableReader(mmap); + } + + @Test + public void readShouldExposeByteBuffer() throws Exception { + ByteBuffer bb = reader.read(SuperBlock.SIZE, data.length); + for (int i = 0; i < data.length; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) (i & 0xff), bb.get()); + } + } + + @Test + public void readShouldExposeByteBufferAtOffset() throws Exception { + ByteBuffer bb = reader.read(SuperBlock.SIZE + 1L, data.length - 1); + for (int i = 0; i < data.length - 1; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) ((i + 1) & 0xff), bb.get()); + } + } + + @Test(expected = EOFException.class) + public void readShouldThrowExceptionOnEof() throws Exception { + reader.read(SuperBlock.SIZE + 1023L, 2); + } + + @Test + public void getSuperBlockShouldReturnConstructedInstance() { + assertEquals(sb.getModificationTime(), + reader.getSuperBlock().getModificationTime()); + } + + @Test + public void getSuperBlockShouldReturnProvidedInstance() { + reader = new MappedFileTableReader(mmap, sb); + assertSame(sb, reader.getSuperBlock()); + } + +} diff --git 
a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMemoryTableReader.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMemoryTableReader.java new file mode 100644 index 00000000000..4306ead8617 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/table/TestMemoryTableReader.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.table; + +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.junit.Before; +import org.junit.Test; + +import java.io.EOFException; +import java.nio.ByteBuffer; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; + +public class TestMemoryTableReader { + + SuperBlock sb; + byte[] data; + MemoryTableReader reader; + + @Before + public void setUp() throws Exception { + sb = new SuperBlock(); + data = new byte[1024]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (i & 0xff); + } + reader = new MemoryTableReader(sb, data); + } + + @Test + public void readShouldExposeByteBuffer() throws Exception { + ByteBuffer bb = reader.read(0L, data.length); + for (int i = 0; i < data.length; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) (i & 0xff), bb.get()); + } + } + + @Test + public void readShouldExposeByteBufferAtOffset() throws Exception { + ByteBuffer bb = reader.read(1L, data.length - 1); + for (int i = 0; i < data.length - 1; i++) { + assertEquals(String.format("Wrong value for element %d", i), + (byte) ((i + 1) & 0xff), bb.get()); + } + } + + @Test(expected = EOFException.class) + public void readShouldThrowExceptionOnEof() throws Exception { + reader.read(1023L, 2); + } + + @Test + public void getSuperBlockShouldReturnConstructedInstance() { + assertSame(sb, reader.getSuperBlock()); + } + + @Test + public void coverClose() { + reader.close(); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DataTestUtils.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DataTestUtils.java new file mode 100644 index 00000000000..7f4900b6914 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DataTestUtils.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.test; + +import org.apache.hadoop.runc.squashfs.data.FragmentWriter; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.CompressionId; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.util.BinUtils; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +public class DataTestUtils { + + public static byte[] decompress(byte[] data) throws IOException { + byte[] xfer = new byte[1024]; + try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) { + try ( + InflaterInputStream iis = new InflaterInputStream(bis, new Inflater(), + 1024)) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(1024)) { + int c = 0; + while ((c = iis.read(xfer, 0, 1024)) >= 0) { + if (c > 0) { + bos.write(xfer, 0, c); + } + } + return bos.toByteArray(); + } + } + } + } + + public static byte[] compress(byte[] data) throws IOException { + byte[] xfer = new byte[1024]; + try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) { + Deflater def = new Deflater(Deflater.BEST_COMPRESSION); + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(1024)) { + try (DeflaterOutputStream dos = new DeflaterOutputStream(bos, def, + 1024)) { + int c = 0; + while ((c = bis.read(xfer, 0, 1024)) >= 0) { + if (c > 0) { + dos.write(xfer, 0, c); + } + } + } + return bos.toByteArray(); + } finally { + def.end(); + } + } + } + + public static byte[] saveFragmentMetadata(FragmentWriter fw) + throws IOException { + MetadataWriter writer = new MetadataWriter(); + fw.save(writer); + + byte[] data; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + writer.save(dos); + } + data = bos.toByteArray(); + + StringBuilder buf = new StringBuilder(); + BinUtils.dumpBin(buf, 15, "serialized-data", data, 0, data.length, 16, 2); + System.out.println(buf.toString()); + } + + return data; + } + + public static byte[] decodeMetadataBlock(byte[] data) throws IOException { + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.ZLIB); + sb.setBlockSize(131072); + sb.setBlockLog((short) 17); + sb.setVersionMajor((short) 4); + sb.setVersionMinor((short) 0); + + int tag = 0; + try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, + data)) { + MetadataReader reader = mbr.rawReader(tag, 0L, (short) 0); + reader.isEof(); + byte[] output = new byte[reader.available()]; + reader.readFully(output); + + StringBuilder buf = new StringBuilder(); + BinUtils + .dumpBin(buf, 17, "deserialized-data", output, 0, output.length, 
16, + 2); + System.out.println(buf.toString()); + + return output; + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DirectoryTestUtils.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DirectoryTestUtils.java new file mode 100644 index 00000000000..576b98b22d3 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/DirectoryTestUtils.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.test; + +import org.apache.hadoop.runc.squashfs.directory.DirectoryBuilder; +import org.apache.hadoop.runc.squashfs.directory.DirectoryElement; +import org.apache.hadoop.runc.squashfs.directory.DirectoryEntry; +import org.apache.hadoop.runc.squashfs.directory.DirectoryHeader; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.CompressionId; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.util.BinUtils; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class DirectoryTestUtils { + + public static byte[] serializeDirectoryBuilder(DirectoryBuilder db) + throws IOException { + MetadataWriter writer = new MetadataWriter(); + db.write(writer); + + byte[] data; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + writer.save(dos); + } + data = bos.toByteArray(); + + StringBuilder buf = new StringBuilder(); + BinUtils.dumpBin(buf, 15, "serialized-data", data, 0, + Math.min(256, data.length), 16, 2); + System.out.println(buf.toString()); + } + + return data; + } + + public static byte[] serializeDirectoryElement(DirectoryElement entry) + throws IOException { + MetadataWriter writer = new MetadataWriter(); + entry.writeData(writer); + + byte[] data; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + writer.save(dos); + } + data = bos.toByteArray(); + + StringBuilder buf = new StringBuilder(); + BinUtils.dumpBin(buf, 15, "serialized-data", data, 0, + Math.min(256, data.length), 16, 2); + System.out.println(buf.toString()); + } + + return data; + } + + public static DirectoryHeader deserializeDirectoryHeader(byte[] data) + throws IOException { + SuperBlock sb = new SuperBlock(); + 
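+ // Minimal superblock used only for decoding test metadata: zlib compression, 128 KiB blocks, squashfs 4.0.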
sb.setCompressionId(CompressionId.ZLIB); + sb.setBlockSize(131072); + sb.setBlockLog((short) 17); + sb.setVersionMajor((short) 4); + sb.setVersionMinor((short) 0); + + int tag = 0; + try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, + data)) { + MetadataReader reader = mbr.rawReader(tag, 0L, (short) 0); + DirectoryHeader hdr = new DirectoryHeader(); + hdr.readData(reader); + return hdr; + } + } + + public static DirectoryEntry deserializeDirectoryEntry(DirectoryHeader header, + byte[] data) throws IOException { + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.ZLIB); + sb.setBlockSize(131072); + sb.setBlockLog((short) 17); + sb.setVersionMajor((short) 4); + sb.setVersionMinor((short) 0); + + int tag = 0; + try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, + data)) { + MetadataReader reader = mbr.rawReader(tag, 0L, (short) 0); + DirectoryEntry entry = new DirectoryEntry(); + entry.readData(header, reader); + return entry; + } + } + + public static List deserializeDirectory(byte[] data) + throws IOException { + List results = new ArrayList<>(); + + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.ZLIB); + sb.setBlockSize(131072); + sb.setBlockLog((short) 17); + sb.setVersionMajor((short) 4); + sb.setVersionMinor((short) 0); + + int tag = 0; + try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, + data)) { + MetadataReader reader = mbr.rawReader(tag, 0L, (short) 0); + reader.isEof(); + while (reader.available() > 0) { + DirectoryHeader header = DirectoryHeader.read(reader); + results.add(header); + for (int i = 0; i <= header.getCount(); i++) { + DirectoryEntry entry = DirectoryEntry.read(header, reader); + results.add(entry); + } + } + } + return results; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/INodeTestUtils.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/INodeTestUtils.java new file mode 100644 index 00000000000..87c9cc5a4fc --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/INodeTestUtils.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.runc.squashfs.test; + +import org.apache.hadoop.runc.squashfs.inode.INode; +import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataReader; +import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter; +import org.apache.hadoop.runc.squashfs.superblock.CompressionId; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; +import org.apache.hadoop.runc.squashfs.util.BinUtils; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class INodeTestUtils { + + public static byte[] serializeINode(INode inode) throws IOException { + MetadataWriter writer = new MetadataWriter(); + inode.writeData(writer); + + byte[] data; + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + try (DataOutputStream dos = new DataOutputStream(bos)) { + writer.save(dos); + } + data = bos.toByteArray(); + + StringBuilder buf = new StringBuilder(); + BinUtils.dumpBin(buf, 15, "serialized-data", data, 0, + Math.min(256, data.length), 16, 2); + System.out.println(buf.toString()); + } + + return data; + } + + public static INode deserializeINode(byte[] data) throws IOException { + SuperBlock sb = new SuperBlock(); + sb.setCompressionId(CompressionId.ZLIB); + sb.setBlockSize(131072); + sb.setBlockLog((short) 17); + sb.setVersionMajor((short) 4); + sb.setVersionMinor((short) 0); + + int tag = 0; + try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, + data)) { + MetadataReader reader = mbr.rawReader(tag, 0L, (short) 0); + + return INode.read(sb, reader); + } + } +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/InMemoryFragmentTable.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/InMemoryFragmentTable.java new file mode 100644 index 00000000000..f2d7ee92301 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/InMemoryFragmentTable.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.test; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.table.FragmentTable; +import org.apache.hadoop.runc.squashfs.table.FragmentTableEntry; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +public class InMemoryFragmentTable extends FragmentTable { + + private final List<FragmentTableEntry> entries; + + public InMemoryFragmentTable(List<FragmentTableEntry> entries) { + this.entries = entries; + } + + public InMemoryFragmentTable(FragmentTableEntry...
entries) { + this(Arrays.asList(entries)); + } + + @Override + public int getFragmentCount() { + return entries.size(); + } + + @Override + public boolean isAvailable() { + return !entries.isEmpty(); + } + + @Override + public FragmentTableEntry getEntry(int id) + throws IOException, SquashFsException { + return entries.get(id); + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataBlockReaderMock.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataBlockReaderMock.java new file mode 100644 index 00000000000..db7b971c421 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataBlockReaderMock.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.runc.squashfs.test; + +import org.apache.hadoop.runc.squashfs.SquashFsException; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlock; +import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader; +import org.apache.hadoop.runc.squashfs.superblock.SuperBlock; + +import java.io.EOFException; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class MetadataBlockReaderMock implements MetadataBlockReader { + + private final int tag; + private final SuperBlock sb; + private final Map<Long, MetadataBlock> blockMap; + private volatile boolean closed = false; + + public MetadataBlockReaderMock(int tag, SuperBlock sb, + long expectedFileOffset, MetadataBlock block) { + this(tag, sb, + Collections.singletonMap(Long.valueOf(expectedFileOffset), block)); + } + + public MetadataBlockReaderMock(int tag, SuperBlock sb, + Map<Long, MetadataBlock> blockMap) { + this.tag = tag; + this.sb = sb; + this.blockMap = blockMap; + } + + public boolean isClosed() { + return closed; + } + + @Override + public void close() throws IOException { + closed = true; + } + + @Override + public MetadataBlock read(int tag, long fileOffset) + throws IOException, SquashFsException { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + MetadataBlock block = blockMap.get(Long.valueOf(fileOffset)); + if (block == null) { + throw new EOFException(String.format("unexpected block %d", fileOffset)); + } + return block; + } + + @Override + public SuperBlock getSuperBlock(int tag) { + if (this.tag != tag) { + throw new IllegalArgumentException(String.format("Invalid tag: %d", tag)); + } + return sb; + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataTestUtils.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataTestUtils.java new file mode 100644 index 00000000000..a7755e6ede2 --- /dev/null +++
+++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/MetadataTestUtils.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.runc.squashfs.test;
+
+import org.apache.hadoop.runc.squashfs.metadata.MemoryMetadataBlockReader;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataBlock;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataBlockReader;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataReader;
+import org.apache.hadoop.runc.squashfs.metadata.MetadataWriter;
+import org.apache.hadoop.runc.squashfs.superblock.CompressionId;
+import org.apache.hadoop.runc.squashfs.superblock.SuperBlock;
+import org.apache.hadoop.runc.squashfs.util.BinUtils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class MetadataTestUtils {
+
+  public static byte[] saveMetadataBlock(byte[] data) throws IOException {
+    MetadataWriter writer = new MetadataWriter();
+    writer.write(data);
+    return saveMetadataBlock(writer);
+  }
+
+  public static byte[] saveMetadataBlock(MetadataWriter writer)
+      throws IOException {
+    byte[] data;
+    try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+      try (DataOutputStream dos = new DataOutputStream(bos)) {
+        writer.save(dos);
+      }
+      data = bos.toByteArray();
+
+      StringBuilder buf = new StringBuilder();
+      BinUtils.dumpBin(buf, 15, "serialized-data", data, 0,
+          Math.min(256, data.length), 16, 2);
+      System.out.println(buf.toString());
+    }
+
+    return data;
+  }
+
+  public static MetadataBlock block(byte[] content) {
+    MetadataBlock block = new MetadataBlock() {
+      {
+        this.data = content;
+        this.header = (short) ((content.length & 0x7fff) | 0x8000);
+        this.fileLength = (short) (2 + (content.length & 0x7fff));
+      }
+    };
+
+    return block;
+  }
+
+  public static byte[] decodeMetadataBlock(byte[] data) throws IOException {
+    return decodeMetadataBlock(data, 0);
+  }
+
+  public static byte[] decodeMetadataBlocks(byte[] data) throws IOException {
+    return decodeMetadataBlocks(data, 0);
+  }
+
+  public static byte[] decodeMetadataBlocks(byte[] data, int offset)
+      throws IOException {
+    SuperBlock sb = new SuperBlock();
+    sb.setCompressionId(CompressionId.ZLIB);
+    sb.setBlockSize(131072);
+    sb.setBlockLog((short) 17);
+    sb.setVersionMajor((short) 4);
+    sb.setVersionMinor((short) 0);
+
+    int tag = 0;
+    try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+
+      try (
+          MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, data,
+              offset, data.length - offset)) {
+        MetadataReader reader = mbr.rawReader(tag, 0, (short) 0);
+        while (!reader.isEof()) {
+          byte[] output = new byte[reader.available()];
+          reader.readFully(output);
+          bos.write(output);
+        }
+      }
+
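+      // Every metadata block has been drained into bos; hex-dump the
+      // concatenated payload to stdout to aid debugging of failed tests.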
+      byte[] output = bos.toByteArray();
+      StringBuilder buf = new StringBuilder();
+      BinUtils.dumpBin(buf, 17, "deserialized-data", output, 0,
+          Math.min(256, output.length), 16, 2);
+      System.out.println(buf.toString());
+
+      return output;
+    }
+  }
+
+  public static byte[] decodeMetadataBlock(byte[] data, int offset)
+      throws IOException {
+    SuperBlock sb = new SuperBlock();
+    sb.setCompressionId(CompressionId.ZLIB);
+    sb.setBlockSize(131072);
+    sb.setBlockLog((short) 17);
+    sb.setVersionMajor((short) 4);
+    sb.setVersionMinor((short) 0);
+
+    int tag = 0;
+    try (MetadataBlockReader mbr = new MemoryMetadataBlockReader(tag, sb, data,
+        offset, data.length - offset)) {
+      MetadataReader reader = mbr.rawReader(tag, 0, (short) 0);
+      reader.isEof();
+      byte[] output = new byte[reader.available()];
+      reader.readFully(output);
+
+      StringBuilder buf = new StringBuilder();
+      BinUtils.dumpBin(buf, 17, "deserialized-data", output, 0,
+          Math.min(256, output.length), 16, 2);
+      System.out.println(buf.toString());
+
+      return output;
+    }
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/SuperBlockTestUtils.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/SuperBlockTestUtils.java
new file mode 100644
index 00000000000..53167e6e621
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/squashfs/test/SuperBlockTestUtils.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.runc.squashfs.test;
+
+import org.apache.hadoop.runc.squashfs.superblock.SuperBlock;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+public class SuperBlockTestUtils {
+
+  public static byte[] serializeSuperBlock(SuperBlock sb) throws IOException {
+    try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
+      try (DataOutputStream dos = new DataOutputStream(bos)) {
+        sb.writeData(dos);
+      }
+      return bos.toByteArray();
+    }
+  }
+
+  public static SuperBlock deserializeSuperBlock(byte[] data)
+      throws IOException {
+    SuperBlock sb = new SuperBlock();
+    try (ByteArrayInputStream bis = new ByteArrayInputStream(data)) {
+      try (DataInputStream dis = new DataInputStream(bis)) {
+        sb.readData(dis);
+      }
+    }
+    return sb;
+  }
+
+}
diff --git a/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/tools/TestImportDockerImage.java b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/tools/TestImportDockerImage.java
new file mode 100644
index 00000000000..0a939cd9768
--- /dev/null
+++ b/hadoop-tools/hadoop-runc/src/test/java/org/apache/hadoop/runc/tools/TestImportDockerImage.java
@@ -0,0 +1,370 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.runc.tools;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.runc.squashfs.SquashFsReader;
+import org.apache.hadoop.runc.squashfs.inode.DirectoryINode;
+import org.apache.hadoop.runc.squashfs.inode.FileINode;
+import org.apache.hadoop.runc.squashfs.inode.INode;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.security.MessageDigest;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.zip.GZIPOutputStream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class TestImportDockerImage {
+
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestImportDockerImage.class);
+
+  private static final String MANIFEST_MEDIA_TYPE =
+      "application/vnd.docker.distribution.manifest.v2+json";
+
+  private static final String IMAGE_MEDIA_TYPE =
+      "application/vnd.docker.container.image.v1+json";
+
+  private static final String LAYER_MEDIA_TYPE =
+      "application/vnd.docker.image.rootfs.diff.tar.gzip";
+
+  Server jetty;
+  ServletContextHandler context;
+  String registryLocation;
+
+  MessageDigest sha256;
+
+  File workDir;
+  File dfsDir;
+
+  ObjectMapper mapper;
+
+  Map<String, byte[]> repoBlobs = new HashMap<>();
+  Map<String, String> repoHashes = new HashMap<>();
+  Map<String, String> repoTypes = new HashMap<>();
+
+  byte[] config;
+  byte[] layer;
+  byte[] manifest;
+
+  @Rule
+  public TemporaryFolder tmp = new TemporaryFolder();
+
+  @Before
+  public void setUp() throws Exception {
+    mapper = new ObjectMapper();
+
+    workDir = tmp.newFolder("work");
+    dfsDir = tmp.newFolder("dfs");
+
+    sha256 = MessageDigest.getInstance("SHA-256");
+
+    jetty = createJettyServer();
+    context = createServletContextHandler(jetty);
+    context.addServlet(new ServletHolder(new RepositoryServlet()), "/*");
+    jetty.start();
+    registryLocation = getRegistryHostAndPort(jetty);
+
+    LOG.debug("Registry location: {}", registryLocation);
+
+    // create some data
+    config = createConfig();
+    layer = createLayer();
+    manifest = createManifest(config, layer);
+
+    registerObject("/v2/test/image/manifests/latest",
+        manifest, MANIFEST_MEDIA_TYPE);
+    registerObject("/v2/test/image/manifests/sha256:" + sha256(manifest),
+        manifest, MANIFEST_MEDIA_TYPE);
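+    // Together these URIs emulate the minimal Docker Registry V2 surface the
+    // import tool needs: the manifest by tag and by digest above, plus the
+    // image config and layer blobs by digest below.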
registerObject("/v2/test/image/blobs/sha256:" + sha256(config), + config, IMAGE_MEDIA_TYPE); + registerObject("/v2/test/image/blobs/sha256:" + sha256(layer), + layer, LAYER_MEDIA_TYPE); + } + + @After + public void tearDown() throws Exception { + LOG.info("teardown"); + jetty.stop(); + } + + void registerObject(String uri, byte[] data, String mediaType) { + repoBlobs.put(uri, data); + repoHashes.put(uri, sha256(data)); + repoTypes.put(uri, mediaType); + } + + byte[] createConfig() throws IOException { + ObjectNode root = mapper.createObjectNode(); + root.set("config", mapper.createObjectNode()); + return mapper.writer().writeValueAsBytes(root); + } + + byte[] createLayer() throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + try (GZIPOutputStream gos = new GZIPOutputStream(bos); + TarArchiveOutputStream tos = new TarArchiveOutputStream(gos)) { + + // add a directory + TarArchiveEntry dir = new TarArchiveEntry("dir/"); + dir.setMode((short) 0755); + dir.setSize(0L); + tos.putArchiveEntry(dir); + tos.closeArchiveEntry(); + + // add a file + TarArchiveEntry file = new TarArchiveEntry("dir/file"); + file.setMode((short) 0644); + file.setSize(4); + tos.putArchiveEntry(file); + tos.write("test".getBytes(StandardCharsets.UTF_8)); + tos.closeArchiveEntry(); + } + bos.flush(); + return bos.toByteArray(); + } + + byte[] createManifest( + byte[] configData, byte[] layerData) throws IOException { + ObjectNode root = mapper.createObjectNode(); + root.put("schemaVersion", 2); + root.put("mediaType", MANIFEST_MEDIA_TYPE); + + ObjectNode config = mapper.createObjectNode(); + config.put("mediaType", IMAGE_MEDIA_TYPE); + config.put("size", configData.length); + config.put("digest", "sha256:" + sha256(configData)); + root.set("config", config); + + ArrayNode layers = mapper.createArrayNode(); + ObjectNode layer = mapper.createObjectNode(); + layer.put("mediaType", LAYER_MEDIA_TYPE); + layer.put("size", layerData.length); + layer.put("digest", "sha256:" + sha256(layerData)); + + layers.add(layer); + root.set("layers", layers); + + return mapper.writer().writeValueAsBytes(root); + } + + String sha256(byte[] data) { + try { + MessageDigest digester = MessageDigest.getInstance("SHA-256"); + return Hex.encodeHexString(digester.digest(data)); + } catch (Exception e) { + throw new RuntimeException("Error in sha256", e); + } + } + + protected static Server createJettyServer() { + try { + Server jetty = new Server(0); + ((ServerConnector) jetty.getConnectors()[0]).setHost("127.0.0.1"); + return jetty; + } catch (Exception ex) { + throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(), + ex); + } + } + + protected static ServletContextHandler createServletContextHandler( + Server jetty) { + + ServletContextHandler context = new ServletContextHandler(); + context.setContextPath("/"); + jetty.setHandler(context); + + return context; + } + + protected static String getRegistryHostAndPort(Server jetty) { + ServerConnector con = (ServerConnector) jetty.getConnectors()[0]; + return String.format("%s:%d", con.getHost(), con.getLocalPort()); + } + + class RepositoryServlet extends HttpServlet { + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) + throws IOException { + + LOG.debug("Request URI: {}", req.getRequestURI()); + + String requestUri = req.getRequestURI(); + if ("/v2/".equals(requestUri)) { + resp.setStatus(200); + return; + } + + byte[] blob = repoBlobs.get(requestUri); + if (blob == null) { + LOG.error("Unexpected URI received: {}", 
+        LOG.error("Unexpected URI received: {}", requestUri);
+        resp.sendError(404);
+        return;
+      }
+
+      String mediaType = repoTypes.get(requestUri);
+      String hash = repoHashes.get(requestUri);
+      sendObject(resp, mediaType, hash, blob);
+    }
+
+    void sendObject(
+        HttpServletResponse resp,
+        String contentType,
+        String hash,
+        byte[] data) throws IOException {
+      resp.setStatus(200);
+      resp.setContentType(contentType);
+      resp.setContentLength(data.length);
+      String digest = "sha256:" + hash;
+      LOG.debug("Content length: {}", data.length);
+      LOG.debug("Digest: {}", digest);
+      resp.setHeader("docker-content-digest", digest);
+      try (OutputStream os = resp.getOutputStream()) {
+        os.write(data);
+      }
+    }
+  }
+
+  File repoFile(String relPath) {
+    return new File(dfsDir, "runc-repository/" + relPath);
+  }
+
+  void assertRepoContent(String path, byte[] expected)
+      throws IOException {
+
+    File file = repoFile(path);
+    assertTrue("Repo file " + path + " does not exist", file.exists());
+
+    byte[] actual = Files.readAllBytes(file.toPath());
+
+    assertEquals("Wrong hash for " + path, sha256(expected), sha256(actual));
+  }
+
+  @Test
+  public void testImageShouldConvert() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(
+        CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        dfsDir.toURI().toURL().toExternalForm());
+    conf.set(
+        YarnConfiguration.NM_RUNC_IMAGE_TOPLEVEL_DIR,
+        new File(dfsDir, "runc-repository").toURI().toURL().toExternalForm());
+
+    String src = registryLocation + "/test/image";
+    String dest = "test/image";
+
+    ImportDockerImage tool = new ImportDockerImage();
+    int result = ToolRunner.run(conf, tool, new String[] { src, dest });
+    assertEquals("Tool failed", 0, result);
+
+    // validate metadata file
+    File meta = repoFile("meta/test/image@latest.properties");
+    assertTrue("Metadata file " + meta + " does not exist", meta.exists());
+
+    Properties props = new Properties();
+    try (FileInputStream is = new FileInputStream(meta)) {
+      props.load(is);
+    }
+
+    assertEquals("Wrong value for runc.import.type",
+        "docker", props.getProperty("runc.import.type"));
+    assertEquals("Wrong value for runc.import.source",
+        src, props.getProperty("runc.import.source"));
+    assertEquals("Wrong value for runc.manifest",
+        "sha256:" + sha256(manifest), props.getProperty("runc.manifest"));
+    assertNotNull("Missing value for runc.import.time",
+        props.getProperty("runc.import.time"));
+
+    // validate manifest matches
+    String manifestPath = String.format("manifest/%s/%s",
+        sha256(manifest).substring(0, 2), sha256(manifest));
+    assertRepoContent(manifestPath, manifest);
+
+    // validate config matches
+    String configPath = String.format("config/%s/%s",
+        sha256(config).substring(0, 2), sha256(config));
+    assertRepoContent(configPath, config);
+
+    // validate original tar.gz matches
+    String layerPath = String.format("layer/%s/%s.tar.gz",
+        sha256(layer).substring(0, 2), sha256(layer));
+    assertRepoContent(layerPath, layer);
+
+    String sqshPath = String.format("layer/%s/%s.sqsh",
+        sha256(layer).substring(0, 2), sha256(layer));
+    File sqsh = repoFile(sqshPath);
+
+    assertTrue("SquashFS file " + sqsh + " does not exist", sqsh.exists());
+
+    try (SquashFsReader reader = SquashFsReader.fromFile(sqsh)) {
+      INode dir = reader.findInodeByPath("/dir");
+      assertTrue("Dir is not a directory: " + dir.getClass().getName(),
+          dir instanceof DirectoryINode);
+
+      INode file = reader.findInodeByPath("/dir/file");
+      assertTrue("File is not a file: " + file.getClass().getName(),
+          file instanceof FileINode);
+
+      FileINode fInode = (FileINode) file;
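+      // Read the file contents back through SquashFsReader to confirm the
+      // converted squashfs layer preserves the data from the original tar.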
assertEquals("Wrong file length", 4, fInode.getFileSize()); + + byte[] buf = new byte[4]; + reader.read(file, 0L, buf, 0, 4); + assertEquals("Wrong file data", "test", + new String(buf, StandardCharsets.UTF_8)); + } + } + +} diff --git a/hadoop-tools/hadoop-runc/src/test/resources/log4j.properties b/hadoop-tools/hadoop-runc/src/test/resources/log4j.properties new file mode 100644 index 00000000000..d470a31b5d1 --- /dev/null +++ b/hadoop-tools/hadoop-runc/src/test/resources/log4j.properties @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# log4j configuration used during build and unit tests + +log4j.rootLogger=info,stdout +log4j.logger.org.apache.hadoop.runc=debug +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n \ No newline at end of file diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index f026bc261e0..859daccbc78 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -51,6 +51,7 @@ hadoop-azure-datalake hadoop-aliyun hadoop-fs2img + hadoop-runc -- 2.29.2