From 12016cef968aaf5a06a3c79f25271908957ddd3b Mon Sep 17 00:00:00 2001
From: daniel-y <130526409+daniel-y@users.noreply.github.com>
Date: Sun, 7 Apr 2024 13:17:52 +0800
Subject: [PATCH] chore(s3stream): remove the source code of s3stream and
 depend on it via a submodule (#963)

Signed-off-by: daniel-y
---
 .gitmodules | 3 +
 automq | 1 +
 s3stream | 1 +
 s3stream/.gitignore | 1 -
 s3stream/build.gradle | 80 -
 s3stream/gradle/wrapper/gradle-wrapper.jar | Bin 63721 -> 0 bytes
 .../gradle/wrapper/gradle-wrapper.properties | 18 -
 s3stream/gradlew | 249 ----
 s3stream/gradlew.bat | 92 --
 s3stream/pom.xml | 302 ----
 s3stream/settings.gradle | 5 -
 .../com/automq/stream/ByteBufSeqAlloc.java | 83 --
 .../automq/stream/DefaultAppendResult.java | 32 -
 .../com/automq/stream/DefaultRecordBatch.java | 54 -
 .../stream/RecordBatchWithContextWrapper.java | 73 -
 .../com/automq/stream/WrappedByteBuf.java | 1062 --------------
 .../com/automq/stream/api/AppendResult.java | 26 -
 .../java/com/automq/stream/api/Client.java | 44 -
 .../stream/api/CreateStreamOptions.java | 54 -
 .../com/automq/stream/api/FetchResult.java | 35 -
 .../java/com/automq/stream/api/KVClient.java | 53 -
 .../java/com/automq/stream/api/KeyValue.java | 149 --
 .../automq/stream/api/OpenStreamOptions.java | 92 --
 .../com/automq/stream/api/ReadOptions.java | 58 -
 .../com/automq/stream/api/RecordBatch.java | 49 -
 .../stream/api/RecordBatchWithContext.java | 29 -
 .../java/com/automq/stream/api/Stream.java | 97 --
 .../com/automq/stream/api/StreamClient.java | 47 -
 .../stream/api/exceptions/ErrorCode.java | 26 -
 .../exceptions/FastReadFailFastException.java | 21 -
 .../api/exceptions/StreamClientException.java | 37 -
 .../com/automq/stream/s3/ByteBufAlloc.java | 167 ---
 .../java/com/automq/stream/s3/Config.java | 418 ------
 .../java/com/automq/stream/s3/Constants.java | 17 -
 .../com/automq/stream/s3/DataBlockIndex.java | 110 --
 .../automq/stream/s3/DeltaWALUploadTask.java | 238 ---
 .../com/automq/stream/s3/ObjectReader.java | 437 ------
 .../com/automq/stream/s3/ObjectWriter.java | 321 ----
 .../com/automq/stream/s3/S3ObjectLogger.java | 22 -
 .../java/com/automq/stream/s3/S3Storage.java | 945 ------------
 .../java/com/automq/stream/s3/S3Stream.java | 471 ------
 .../com/automq/stream/s3/S3StreamClient.java | 261 ----
 .../java/com/automq/stream/s3/Storage.java | 51 -
 .../com/automq/stream/s3/StreamDataBlock.java | 116 --
 .../stream/s3/StreamObjectCompactor.java | 331 -----
 .../stream/s3/StreamRecordBatchCodec.java | 79 -
 .../com/automq/stream/s3/WalWriteRequest.java | 66 -
 .../automq/stream/s3/cache/BlockCache.java | 484 ------
 .../stream/s3/cache/CacheAccessType.java | 18 -
 .../s3/cache/DataBlockReadAccumulator.java | 128 --
 .../stream/s3/cache/DataBlockRecords.java | 74 -
 .../stream/s3/cache/DefaultS3BlockCache.java | 411 ------
 .../stream/s3/cache/InflightReadThrottle.java | 186 ---
 .../com/automq/stream/s3/cache/LRUCache.java | 66 -
 .../com/automq/stream/s3/cache/LogCache.java | 507 -------
 .../stream/s3/cache/ObjectReaderLRUCache.java | 47 -
 .../stream/s3/cache/ReadAheadAgent.java | 228 ---
 .../stream/s3/cache/ReadAheadManager.java | 131 --
 .../automq/stream/s3/cache/ReadDataBlock.java | 58 -
 .../automq/stream/s3/cache/S3BlockCache.java | 30 -
 .../automq/stream/s3/cache/StreamCache.java | 37 -
 .../automq/stream/s3/cache/StreamReader.java | 555 -------
 .../stream/s3/compact/CompactResult.java | 19 -
 .../stream/s3/compact/CompactionAnalyzer.java | 395 -----
 .../s3/compact/CompactionConstants.java | 17 -
 .../stream/s3/compact/CompactionManager.java | 735 ---------
 .../stream/s3/compact/CompactionPlan.java | 42 -
 .../stream/s3/compact/CompactionStats.java | 99 --
 .../stream/s3/compact/CompactionUploader.java | 172 ---
 .../s3/compact/objects/CompactedObject.java | 39 -
 .../objects/CompactedObjectBuilder.java | 138 --
 .../s3/compact/objects/CompactionType.java | 17 -
 .../s3/compact/operator/DataBlockReader.java | 233 ---
 .../s3/compact/operator/DataBlockWriter.java | 198 ---
 .../s3/compact/utils/CompactionUtils.java | 214 ---
 .../compact/utils/GroupByLimitPredicate.java | 54 -
 .../compact/utils/GroupByOffsetPredicate.java | 39 -
 .../stream/s3/context/AppendContext.java | 32 -
 .../stream/s3/context/FetchContext.java | 42 -
 .../exceptions/IndexBlockParseException.java | 24 -
 .../automq/stream/s3/failover/Failover.java | 115 --
 .../stream/s3/failover/FailoverFactory.java | 23 -
 .../stream/s3/failover/FailoverRequest.java | 51 -
 .../stream/s3/failover/FailoverResponse.java | 33 -
 .../automq/stream/s3/failover/WALRecover.java | 20 -
 .../s3/memory/MemoryMetadataManager.java | 292 ----
 .../stream/s3/metadata/ObjectUtils.java | 63 -
 .../stream/s3/metadata/S3ObjectMetadata.java | 170 ---
 .../stream/s3/metadata/S3ObjectType.java | 37 -
 .../stream/s3/metadata/S3StreamConstant.java | 42 -
 .../stream/s3/metadata/StreamMetadata.java | 83 --
 .../stream/s3/metadata/StreamOffsetRange.java | 84 --
 .../stream/s3/metadata/StreamState.java | 25 -
 .../stream/s3/metrics/AttributesUtils.java | 60 -
 .../stream/s3/metrics/MetricsConfig.java | 45 -
 .../stream/s3/metrics/MetricsLevel.java | 21 -
 .../stream/s3/metrics/MultiAttributes.java | 55 -
 .../stream/s3/metrics/NoopLongCounter.java | 33 -
 .../stream/s3/metrics/NoopLongHistogram.java | 33 -
 .../s3/metrics/NoopObservableLongGauge.java | 17 -
 .../s3/metrics/S3StreamMetricsConstant.java | 113 --
 .../s3/metrics/S3StreamMetricsManager.java | 467 ------
 .../automq/stream/s3/metrics/TimerUtil.java | 37 -
 .../s3/metrics/operations/S3MetricsType.java | 50 -
 .../s3/metrics/operations/S3ObjectStage.java | 35 -
 .../s3/metrics/operations/S3Operation.java | 92 --
 .../stream/s3/metrics/operations/S3Stage.java | 64 -
 .../s3/metrics/stats/CompactionStats.java | 37 -
 .../stream/s3/metrics/stats/NetworkStats.java | 52 -
 .../s3/metrics/stats/S3ObjectStats.java | 47 -
 .../s3/metrics/stats/S3OperationStats.java | 144 --
 .../metrics/stats/StorageOperationStats.java | 109 --
 .../metrics/stats/StreamOperationStats.java | 57 -
 .../s3/metrics/wrapper/ConfigListener.java | 18 -
 .../metrics/wrapper/ConfigurableMetrics.java | 40 -
 .../s3/metrics/wrapper/CounterMetric.java | 39 -
 .../metrics/wrapper/HistogramInstrument.java | 104 --
 .../wrapper/YammerHistogramMetric.java | 66 -
 .../stream/s3/model/StreamRecordBatch.java | 122 --
 .../network/AsyncNetworkBandwidthLimiter.java | 218 ---
 .../stream/s3/network/ThrottleStrategy.java | 28 -
 .../objects/CommitStreamSetObjectRequest.java | 137 --
 .../CommitStreamSetObjectResponse.java | 18 -
 .../objects/CompactStreamObjectRequest.java | 103 --
 .../stream/s3/objects/ObjectManager.java | 89 --
 .../stream/s3/objects/ObjectStreamRange.java | 76 -
 .../stream/s3/objects/StreamObject.java | 87 --
 .../stream/s3/operator/DefaultS3Operator.java | 931 ------------
 .../stream/s3/operator/MemoryS3Operator.java | 126 --
 .../stream/s3/operator/MultiPartWriter.java | 266 ----
 .../stream/s3/operator/ProxyWriter.java | 176 ---
 .../automq/stream/s3/operator/S3Operator.java | 106 --
 .../com/automq/stream/s3/operator/Writer.java | 91 --
 .../stream/s3/streams/StreamManager.java | 80 -
 .../stream/s3/trace/AttributeBindings.java | 99 --
 .../automq/stream/s3/trace/MethodCache.java | 28 -
 .../s3/trace/SpanAttributesExtractor.java | 39 -
 .../automq/stream/s3/trace/TraceUtils.java | 141 --
 .../s3/trace/aop/S3StreamTraceAspect.java | 35 -
 .../stream/s3/trace/context/TraceContext.java | 88 --
 .../java/com/automq/stream/s3/wal/Block.java | 77 -
 .../com/automq/stream/s3/wal/BlockBatch.java | 79 -
 .../com/automq/stream/s3/wal/BlockImpl.java | 122 --
 .../automq/stream/s3/wal/BlockWALService.java | 888 -----------
 .../stream/s3/wal/MemoryWriteAheadLog.java | 73 -
 .../automq/stream/s3/wal/ShutdownType.java | 45 -
 .../stream/s3/wal/SlidingWindowService.java | 569 -------
 .../stream/s3/wal/UnmarshalException.java | 18 -
 .../s3/wal/WALCapacityMismatchException.java | 22 -
 .../com/automq/stream/s3/wal/WALHeader.java | 203 ---
 .../com/automq/stream/s3/wal/WALMetadata.java | 30 -
 .../s3/wal/WALNotInitializedException.java | 22 -
 .../automq/stream/s3/wal/WriteAheadLog.java | 99 --
 .../stream/s3/wal/benchmark/BenchTool.java | 74 -
 .../stream/s3/wal/benchmark/RecoverTool.java | 133 --
 .../s3/wal/benchmark/RecoveryBench.java | 133 --
 .../stream/s3/wal/benchmark/WriteBench.java | 370 -----
 .../s3/wal/util/WALBlockDeviceChannel.java | 331 -----
 .../stream/s3/wal/util/WALCachedChannel.java | 139 --
 .../automq/stream/s3/wal/util/WALChannel.java | 276 ----
 .../stream/s3/wal/util/WALFileChannel.java | 156 --
 .../automq/stream/s3/wal/util/WALUtil.java | 133 --
 .../moe/cnkirito/kdio/DirectChannel.java | 61 -
 .../moe/cnkirito/kdio/DirectChannelImpl.java | 122 --
 .../moe/cnkirito/kdio/DirectIOLib.java | 386 -----
 .../moe/cnkirito/kdio/DirectIOUtils.java | 68 -
 .../cnkirito/kdio/DirectRandomAccessFile.java | 76 -
 .../moe/cnkirito/kdio/OpenFlags.java | 60 -
 .../com/automq/stream/utils/Arguments.java | 28 -
 .../automq/stream/utils/AsyncRateLimiter.java | 107 --
 .../stream/utils/ByteBufInputStream.java | 45 -
 .../stream/utils/CloseableIterator.java | 47 -
 .../automq/stream/utils/CommandResult.java | 52 -
 .../com/automq/stream/utils/CommandUtils.java | 35 -
 .../com/automq/stream/utils/FutureTicker.java | 70 -
 .../com/automq/stream/utils/FutureUtil.java | 104 --
 .../com/automq/stream/utils/GlobalSwitch.java | 27 -
 .../com/automq/stream/utils/LogContext.java | 793 ----------
 .../java/com/automq/stream/utils/S3Utils.java | 462 ------
 .../com/automq/stream/utils/ThreadUtils.java | 60 -
 .../java/com/automq/stream/utils/Threads.java | 102 --
 .../stream/utils/ThrowableRunnable.java | 16 -
 .../java/com/automq/stream/utils/Utils.java | 26 -
 .../AbstractOrderedCollection.java | 40 -
 .../utils/biniarysearch/ComparableItem.java | 18 -
 .../biniarysearch/IndexBlockOrderedBytes.java | 137 --
 .../biniarysearch/StreamRecordBatchList.java | 38 -
 .../threads/S3StreamThreadPoolMonitor.java | 128 --
 .../threads/ThreadPoolQueueSizeMonitor.java | 38 -
 .../threads/ThreadPoolStatusMonitor.java | 23 -
 .../utils/threads/ThreadPoolWrapper.java | 117 --
 .../automq/stream/ByteBufSeqAllocTest.java | 72 -
 .../s3/AsyncNetworkBandwidthLimiterTest.java | 76 -
 .../automq/stream/s3/DefaultRecordBatch.java | 52 -
 .../s3/DefaultRecordBatchWithContext.java | 57 -
 .../stream/s3/DefaultS3BlockCacheTest.java | 140 --
 .../stream/s3/DeltaWALUploadTaskTest.java | 202 ---
 .../automq/stream/s3/ObjectReaderTest.java | 138 --
 .../automq/stream/s3/ObjectWriterTest.java | 108 --
 .../com/automq/stream/s3/S3StorageTest.java | 311 ----
 .../com/automq/stream/s3/S3StreamTest.java | 76 -
 .../stream/s3/StreamObjectCompactorTest.java | 306 ----
 .../java/com/automq/stream/s3/TestUtils.java | 42 -
 .../stream/s3/cache/BlockCacheTest.java | 168 ---
 .../cache/DataBlockReadAccumulatorTest.java | 112 --
 .../s3/cache/InflightReadThrottleTest.java | 38 -
 .../automq/stream/s3/cache/LogCacheTest.java | 74 -
 .../s3/cache/ObjectReaderLRUCacheTest.java | 67 -
 .../stream/s3/cache/ReadAheadManagerTest.java | 70 -
 .../stream/s3/cache/StreamCacheTest.java | 66 -
 .../stream/s3/cache/StreamReaderTest.java | 430 ------
 .../s3/compact/CompactionAnalyzerTest.java | 451 ------
 .../s3/compact/CompactionManagerTest.java | 666 ---------
 .../stream/s3/compact/CompactionTestBase.java | 234 ---
 .../s3/compact/CompactionUploaderTest.java | 169 ---
 .../stream/s3/compact/CompactionUtilTest.java | 141 --
 .../stream/s3/failover/FailoverTest.java | 78 -
 .../stream/s3/metrics/AttributesUtilTest.java | 52 -
 .../stream/s3/metrics/MetricsLevelTest.java | 33 -
 .../metrics/wrapper/MetricsWrapperTest.java | 70 -
 .../stream/s3/objects/ObjectManagerTest.java | 224 ---
 .../s3/operator/DefaultS3OperatorTest.java | 159 --
 .../s3/operator/MultiPartWriterTest.java | 211 ---
 .../stream/s3/operator/ProxyWriterTest.java | 93 --
 .../stream/s3/streams/StreamManagerTest.java | 141 --
 .../stream/s3/utils/AsyncRateLimiterTest.java | 42 -
 .../stream/s3/wal/BlockWALServiceTest.java | 1307 -----------------
 .../automq/stream/s3/wal/WALHeaderTest.java | 39 -
 .../wal/util/WALBlockDeviceChannelTest.java | 231 ---
 .../stream/s3/wal/util/WALChannelTest.java | 79 -
 .../automq/stream/utils/FutureTickerTest.java | 58 -
 s3stream/src/test/resources/log4j.properties | 15 -
 .../rocketmq/store/MessageStoreBuilder.java | 2 +-
 .../automq/rocketmq/store/S3StreamStore.java | 4 +-
 234 files changed, 8 insertions(+), 32723 deletions(-)
 create mode 100644 .gitmodules
 create mode 160000 automq
 create mode 120000 s3stream
 delete mode 100644 s3stream/.gitignore
 delete mode 100644 s3stream/build.gradle
 delete mode 100644 s3stream/gradle/wrapper/gradle-wrapper.jar
 delete mode 100644 s3stream/gradle/wrapper/gradle-wrapper.properties
 delete mode 100755 s3stream/gradlew
 delete mode 100644 s3stream/gradlew.bat
 delete mode 100644 s3stream/pom.xml
 delete mode 100644 s3stream/settings.gradle
 delete mode 100644 s3stream/src/main/java/com/automq/stream/ByteBufSeqAlloc.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/DefaultAppendResult.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/DefaultRecordBatch.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/RecordBatchWithContextWrapper.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/WrappedByteBuf.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/AppendResult.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/Client.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/CreateStreamOptions.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/FetchResult.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/KVClient.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/KeyValue.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/OpenStreamOptions.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/ReadOptions.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/RecordBatch.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/RecordBatchWithContext.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/Stream.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/StreamClient.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/exceptions/ErrorCode.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/exceptions/FastReadFailFastException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/api/exceptions/StreamClientException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/ByteBufAlloc.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/Config.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/Constants.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/DataBlockIndex.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/DeltaWALUploadTask.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/ObjectReader.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/ObjectWriter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/S3ObjectLogger.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/S3Storage.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/S3Stream.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/S3StreamClient.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/Storage.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/StreamDataBlock.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/StreamObjectCompactor.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/StreamRecordBatchCodec.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/CacheAccessType.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockReadAccumulator.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockRecords.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/LRUCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/ObjectReaderLRUCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadAgent.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/ReadDataBlock.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/StreamCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactResult.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionAnalyzer.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionConstants.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionPlan.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/CompactionUploader.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObject.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObjectBuilder.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactionType.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockReader.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockWriter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/utils/CompactionUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByLimitPredicate.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByOffsetPredicate.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/exceptions/IndexBlockParseException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/failover/Failover.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/failover/FailoverFactory.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/failover/FailoverRequest.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/failover/FailoverResponse.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/failover/WALRecover.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/memory/MemoryMetadataManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/ObjectUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectMetadata.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectType.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/S3StreamConstant.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/StreamMetadata.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/StreamOffsetRange.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metadata/StreamState.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/AttributesUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsConfig.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsLevel.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/MultiAttributes.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongCounter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongHistogram.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/NoopObservableLongGauge.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsConstant.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/TimerUtil.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3MetricsType.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3ObjectStage.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Operation.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Stage.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/CompactionStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/NetworkStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3ObjectStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3OperationStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StorageOperationStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StreamOperationStats.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigListener.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigurableMetrics.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/CounterMetric.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/HistogramInstrument.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/YammerHistogramMetric.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/model/StreamRecordBatch.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/network/AsyncNetworkBandwidthLimiter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/network/ThrottleStrategy.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectRequest.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectResponse.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/CompactStreamObjectRequest.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/ObjectManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/ObjectStreamRange.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/objects/StreamObject.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/DefaultS3Operator.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/MemoryS3Operator.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/MultiPartWriter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/ProxyWriter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/S3Operator.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/operator/Writer.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/streams/StreamManager.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/TraceUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/Block.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/BlockBatch.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/BlockImpl.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/ShutdownType.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/SlidingWindowService.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/UnmarshalException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/WALCapacityMismatchException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/WALHeader.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/WALMetadata.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/WALNotInitializedException.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/BenchTool.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoverTool.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoveryBench.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/WriteBench.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannel.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/util/WALCachedChannel.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/util/WALChannel.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/util/WALFileChannel.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/s3/wal/util/WALUtil.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannel.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannelImpl.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOLib.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOUtils.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectRandomAccessFile.java
 delete mode 100755 s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/OpenFlags.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/Arguments.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/AsyncRateLimiter.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/ByteBufInputStream.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/CloseableIterator.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/CommandResult.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/CommandUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/FutureTicker.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/FutureUtil.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/GlobalSwitch.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/LogContext.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/S3Utils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/ThreadUtils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/Threads.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/ThrowableRunnable.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/Utils.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/biniarysearch/AbstractOrderedCollection.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/biniarysearch/ComparableItem.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/biniarysearch/StreamRecordBatchList.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/threads/S3StreamThreadPoolMonitor.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolQueueSizeMonitor.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolStatusMonitor.java
 delete mode 100644 s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolWrapper.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/ByteBufSeqAllocTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/AsyncNetworkBandwidthLimiterTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatch.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatchWithContext.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/DefaultS3BlockCacheTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/DeltaWALUploadTaskTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/ObjectReaderTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/ObjectWriterTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/StreamObjectCompactorTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/TestUtils.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/BlockCacheTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/DataBlockReadAccumulatorTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/InflightReadThrottleTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/LogCacheTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/ObjectReaderLRUCacheTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/ReadAheadManagerTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/StreamCacheTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/compact/CompactionAnalyzerTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/compact/CompactionManagerTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/compact/CompactionTestBase.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUploaderTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUtilTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/failover/FailoverTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/metrics/AttributesUtilTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/metrics/MetricsLevelTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/metrics/wrapper/MetricsWrapperTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/objects/ObjectManagerTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/operator/DefaultS3OperatorTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/operator/MultiPartWriterTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/operator/ProxyWriterTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/streams/StreamManagerTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/utils/AsyncRateLimiterTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/wal/BlockWALServiceTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/wal/WALHeaderTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannelTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/s3/wal/util/WALChannelTest.java
 delete mode 100644 s3stream/src/test/java/com/automq/stream/utils/FutureTickerTest.java
 delete mode 100644 s3stream/src/test/resources/log4j.properties

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..e29d8f785
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "automq"]
+	path = automq
+	url = git@github.com:AutoMQ/automq.git
diff --git a/automq b/automq
new file mode 160000
index 000000000..4b897ea98
--- /dev/null
+++ b/automq
@@ -0,0 +1 @@
+Subproject commit 4b897ea98e5d3cee9f4112fa8fb1e9199f4505b7
diff --git a/s3stream b/s3stream
new file mode 120000
index 000000000..1188bb7e0
--- /dev/null
+++ b/s3stream
@@ -0,0 +1 @@
+automq/s3stream
\ No newline at end of file
diff --git a/s3stream/.gitignore b/s3stream/.gitignore
deleted file mode 100644
index 4dd5910df..000000000
--- a/s3stream/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-!gradle/wrapper/*
diff --git a/s3stream/build.gradle b/s3stream/build.gradle
deleted file mode 100644
index 47e059998..000000000
--- a/s3stream/build.gradle
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * This file was generated by the Gradle 'init' task.
- */
-import com.github.spotbugs.snom.Confidence
-import com.github.spotbugs.snom.Effort
-
-plugins {
-    id 'java-library'
-    id 'maven-publish'
-    id("com.github.spotbugs") version "6.0.7"
-}
-
-spotbugsMain {
-    reports {
-        html {
-            required = true
-            outputLocation = file("$buildDir/reports/spotbugs.html")
-            setStylesheet("fancy-hist.xsl")
-        }
-    }
-}
-
-spotbugs {
-    effort = Effort.valueOf('DEFAULT')
-    reportLevel = Confidence.valueOf('HIGH')
-}
-
-repositories {
-    mavenLocal()
-    maven {
-        url = uri('https://repo.maven.apache.org/maven2/')
-    }
-}
-
-dependencies {
-    api 'software.amazon.awssdk:s3:2.20.127'
-    api 'io.netty:netty-tcnative-boringssl-static:2.0.53.Final'
-    api 'io.netty:netty-buffer:4.1.100.Final'
-    api 'com.bucket4j:bucket4j-core:8.5.0'
-    api 'org.apache.commons:commons-lang3:3.13.0'
-    api 'org.slf4j:slf4j-api:2.0.9'
-    api 'net.sourceforge.argparse4j:argparse4j:0.9.0'
-    api 'net.java.dev.jna:jna:5.2.0'
-    api 'com.google.guava:guava:32.0.1-jre'
-    api 'com.fasterxml.jackson.core:jackson-databind:2.16.0'
-    api 'io.opentelemetry:opentelemetry-api:1.32.0'
-    api 'io.opentelemetry.instrumentation:opentelemetry-instrumentation-annotations:1.32.0'
-    api 'org.aspectj:aspectjrt:1.9.20.1'
-    api 'org.aspectj:aspectjweaver:1.9.20.1'
-    api 'com.github.jnr:jnr-posix:3.1.19'
-    api 'com.yammer.metrics:metrics-core:2.2.0'
-    testImplementation 'org.slf4j:slf4j-simple:2.0.9'
-    testImplementation 'org.junit.jupiter:junit-jupiter:5.10.0'
-    testImplementation 'org.mockito:mockito-core:5.5.0'
-    testImplementation 'org.mockito:mockito-junit-jupiter:5.5.0'
-}
-
-group = 'com.automq.elasticstream'
-description = 's3stream'
-java.sourceCompatibility = '11'
-
-java {
-    withSourcesJar()
-}
-
-publishing {
-    publications {
-        maven(MavenPublication) {
-            from(components.java)
-        }
-    }
-}
-
-tasks.withType(JavaCompile) {
-    options.encoding = 'UTF-8'
-}
-
-tasks.withType(Javadoc) {
-    options.encoding = 'UTF-8'
-}
diff --git a/s3stream/gradle/wrapper/gradle-wrapper.jar b/s3stream/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 7f93135c49b765f8051ef9d0a6055ff8e46073d8..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 63721
[... 63721 bytes of base85-encoded binary payload omitted; the remainder of the patch is truncated here ...]
z6MsQ=tc3g0xkK(O%DzR9nbNB(r@L;1zQrs8mzx&4dz}?3KNYozOW5;=w18U6$G4U2 z#2^qRLT*Mo4bV1Oeo1PKQ2WQS2Y-hv&S|C7`xh6=Pj7MNLC5K-zokZ67S)C;(F0Dd zloDK2_o1$Fmza>EMj3X9je7e%Q`$39Dk~GoOj89-6q9|_WJlSl!!+*{R=tGp z8u|MuSwm^t7K^nUe+^0G3dkGZr3@(X+TL5eah)K^Tn zXEtHmR9UIaEYgD5Nhh(s*fcG_lh-mfy5iUF3xxpRZ0q3nZ=1qAtUa?(LnT9I&~uxX z`pV?+=|-Gl(kz?w!zIieXT}o}7@`QO>;u$Z!QB${a08_bW0_o@&9cjJUXzVyNGCm8 zm=W+$H!;_Kzp6WQqxUI;JlPY&`V}9C$8HZ^m?NvI*JT@~BM=()T()Ii#+*$y@lTZBkmMMda>7s#O(1YZR+zTG@&}!EXFG{ zEWPSDI5bFi;NT>Yj*FjH((=oe%t%xYmE~AGaOc4#9K_XsVpl<4SP@E!TgC0qpe1oi zNpxU2b0(lEMcoibQ-G^cxO?ySVW26HoBNa;n0}CWL*{k)oBu1>F18X061$SP{Gu67 z-v-Fa=Fl^u3lnGY^o5v)Bux}bNZ~ z5pL+7F_Esoun8^5>z8NFoIdb$sNS&xT8_|`GTe8zSXQzs4r^g0kZjg(b0bJvz`g<70u9Z3fQILX1Lj@;@+##bP|FAOl)U^9U>0rx zGi)M1(Hce)LAvQO-pW!MN$;#ZMX?VE(22lTlJrk#pB0FJNqVwC+*%${Gt#r_tH9I_ z;+#)#8cWAl?d@R+O+}@1A^hAR1s3UcW{G+>;X4utD2d9X(jF555}!TVN-hByV6t+A zdFR^aE@GNNgSxxixS2p=on4(+*+f<8xrwAObC)D5)4!z7)}mTpb7&ofF3u&9&wPS< zB62WHLGMhmrmOAgmJ+|c>qEWTD#jd~lHNgT0?t-p{T=~#EMcB| z=AoDKOL+qXCfk~F)-Rv**V}}gWFl>liXOl7Uec_8v)(S#av99PX1sQIVZ9eNLkhq$ zt|qu0b?GW_uo}TbU8!jYn8iJeIP)r@;!Ze_7mj{AUV$GEz6bDSDO=D!&C9!M@*S2! zfGyA|EPlXGMjkH6x7OMF?gKL7{GvGfED=Jte^p=91FpCu)#{whAMw`vSLa`K#atdN zThnL+7!ZNmP{rc=Z>%$meH;Qi1=m1E3Lq2D_O1-X5C;!I0L>zur@tPAC9*7Jeh)`;eec}1`nkRP(%iv-`N zZ@ip-g|7l6Hz%j%gcAM}6-nrC8oA$BkOTz^?dakvX?`^=ZkYh%vUE z9+&)K1UTK=ahYiaNn&G5nHUY5niLGus@p5E2@RwZufRvF{@$hW{;{3QhjvEHMvduO z#Wf-@oYU4ht?#uP{N3utVzV49mEc9>*TV_W2TVC`6+oI)zAjy$KJrr=*q##&kobiQ z1vNbya&OVjK`2pdRrM?LuK6BgrLN7H_3m z!qpNKg~87XgCwb#I=Q&0rI*l$wM!qTkXrx1ko5q-f;=R2fImRMwt5Qs{P*p^z@9ex z`2#v(qE&F%MXlHpdO#QEZyZftn4f05ab^f2vjxuFaat2}jke{j?5GrF=WYBR?gS(^ z9SBiNi}anzBDBRc+QqizTTQuJrzm^bNA~A{j%ugXP7McZqJ}65l10({wk++$=e8O{ zxWjG!Qp#5OmI#XRQQM?n6?1ztl6^D40hDJr?4$Wc&O_{*OfMfxe)V0=e{|N?J#fgE>j9jAajze$iN!*yeF%jJU#G1c@@rm zolGW!j?W6Q8pP=lkctNFdfgUMg92wlM4E$aks1??M$~WQfzzzXtS)wKrr2sJeCN4X zY(X^H_c^PzfcO8Bq(Q*p4c_v@F$Y8cHLrH$`pJ2}=#*8%JYdqsqnGqEdBQMpl!Ot04tUGSXTQdsX&GDtjbWD=prcCT9(+ z&UM%lW%Q3yrl1yiYs;LxzIy>2G}EPY6|sBhL&X&RAQrSAV4Tlh2nITR?{6xO9ujGu zr*)^E`>o!c=gT*_@6S&>0POxcXYNQd&HMw6<|#{eSute2C3{&h?Ah|cw56-AP^f8l zT^kvZY$YiH8j)sk7_=;gx)vx-PW`hbSBXJGCTkpt;ap(}G2GY=2bbjABU5)ty%G#x zAi07{Bjhv}>OD#5zh#$0w;-vvC@^}F! 
z#X$@)zIs1L^E;2xDAwEjaXhTBw2<{&JkF*`;c3<1U@A4MaLPe{M5DGGkL}#{cHL%* zYMG+-Fm0#qzPL#V)TvQVI|?_M>=zVJr9>(6ib*#z8q@mYKXDP`k&A4A};xMK0h=yrMp~JW{L?mE~ph&1Y1a#4%SO)@{ zK2juwynUOC)U*hVlJU17%llUxAJFuKZh3K0gU`aP)pc~bE~mM!i1mi!~LTf>1Wp< zuG+ahp^gH8g8-M$u{HUWh0m^9Rg@cQ{&DAO{PTMudV6c?ka7+AO& z746QylZ&Oj`1aqfu?l&zGtJnpEQOt;OAFq19MXTcI~`ZcoZmyMrIKDFRIDi`FH)w; z8+*8tdevMDv*VtQi|e}CnB_JWs>fhLOH-+Os2Lh!&)Oh2utl{*AwR)QVLS49iTp{6 z;|172Jl!Ml17unF+pd+Ff@jIE-{Oxv)5|pOm@CkHW?{l}b@1>Pe!l}VccX#xp@xgJ zyE<&ep$=*vT=}7vtvif0B?9xw_3Gej7mN*dOHdQPtW5kA5_zGD zpA4tV2*0E^OUimSsV#?Tg#oiQ>%4D@1F5@AHwT8Kgen$bSMHD3sXCkq8^(uo7CWk`mT zuslYq`6Yz;L%wJh$3l1%SZv#QnG3=NZ=BK4yzk#HAPbqXa92;3K5?0kn4TQ`%E%X} z&>Lbt!!QclYKd6+J7Nl@xv!uD%)*bY-;p`y^ZCC<%LEHUi$l5biu!sT3TGGSTPA21 zT8@B&a0lJHVn1I$I3I1I{W9fJAYc+8 zVj8>HvD}&O`TqU2AAb={?eT;0hyL(R{|h23=4fDSZKC32;wWxsVj`P z3J3{M$PwdH!ro*Cn!D&=jnFR>BNGR<<|I8CI@+@658Dy(lhqbhXfPTVecY@L8%`3Q z1Fux2w?2C3th60jI~%OC9BtpNF$QPqcG+Pz96qZJ71_`0o0w_q7|h&O>`6U+^BA&5 zXd5Zp1Xkw~>M%RixTm&OqpNl8Q+ue=92Op_>T~_9UON?ZM2c0aGm=^A4ejrXj3dV9 zhh_bCt-b9`uOX#cFLj!vhZ#lS8Tc47OH>*)y#{O9?AT~KR9LntM|#l#Dlm^8{nZdk zjMl#>ZM%#^nK2TPzLcKxqx24P7R1FPlBy7LSBrRvx>fE$9AJ;7{PQm~^LBX^k#6Zq zw*Z(zJC|`!6_)EFR}8|n8&&Rbj8y028~P~sFXBFRt+tmqH-S3<%N;C&WGH!f3{7cm zy_fCAb9@HqaXa1Y5vFbxWf%#zg6SI$C+Uz5=CTO}e|2fjWkZ;Dx|84Ow~bkI=LW+U zuq;KSv9VMboRvs9)}2PAO|b(JCEC_A0wq{uEj|3x@}*=bOd zwr{TgeCGG>HT<@Zeq8y}vTpwDg#UBvD)BEs@1KP$^3$sh&_joQPn{hjBXmLPJ{tC) z*HS`*2+VtJO{|e$mM^|qv1R*8i(m1`%)}g=SU#T#0KlTM2RSvYUc1fP+va|4;5}Bfz98UvDCpq7}+SMV&;nX zQw~N6qOX{P55{#LQkrZk(e5YGzr|(B;Q;ju;2a`q+S9bsEH@i1{_Y0;hWYn1-79jl z5c&bytD*k)GqrVcHn6t-7kinadiD>B{Tl`ZY@`g|b~pvHh5!gKP4({rp?D0aFd_cN zhHRo4dd5^S6ViN(>(28qZT6E>??aRhc($kP`>@<+lIKS5HdhjVU;>f7<4))E*5|g{ z&d1}D|vpuV^eRj5j|xx9nwaCxXFG?Qbjn~_WSy=N}P0W>MP zG-F%70lX5Xr$a)2i6?i|iMyM|;Jtf*hO?=Jxj12oz&>P=1#h~lf%#fc73M2_(SUM- zf&qnjS80|_Y0lDgl&I?*eMumUklLe_=Td!9G@eR*tcPOgIShJipp3{A10u(4eT~DY zHezEj8V+7m!knn7)W!-5QI3=IvC^as5+TW1@Ern@yX| z7Nn~xVx&fGSr+L%4iohtS3w^{-H1A_5=r&x8}R!YZvp<2T^YFvj8G_vm}5q;^UOJf ztl=X3iL;;^^a#`t{Ae-%5Oq{?M#s6Npj+L(n-*LMI-yMR{)qki!~{5z{&`-iL}lgW zxo+tnvICK=lImjV$Z|O_cYj_PlEYCzu-XBz&XC-JVxUh9;6*z4fuBG+H{voCC;`~GYV|hj%j_&I zDZCj>Q_0RCwFauYoVMiUSB+*Mx`tg)bWmM^SwMA+?lBg12QUF_x2b)b?qb88K-YUd z0dO}3k#QirBV<5%jL$#wlf!60dizu;tsp(7XLdI=eQs?P`tOZYMjVq&jE)qK*6B^$ zBe>VvH5TO>s>izhwJJ$<`a8fakTL!yM^Zfr2hV9`f}}VVUXK39p@G|xYRz{fTI+Yq z20d=)iwjuG9RB$%$^&8#(c0_j0t_C~^|n+c`Apu|x7~;#cS-s=X1|C*YxX3ailhg_|0`g!E&GZJEr?bh#Tpb8siR=JxWKc{#w7g zWznLwi;zLFmM1g8V5-P#RsM@iX>TK$xsWuujcsVR^7TQ@!+vCD<>Bk9tdCo7Mzgq5 zv8d>dK9x8C@Qoh01u@3h0X_`SZluTb@5o;{4{{eF!-4405x8X7hewZWpz z2qEi4UTiXTvsa(0X7kQH{3VMF>W|6;6iTrrYD2fMggFA&-CBEfSqPlQDxqsa>{e2M z(R5PJ7uOooFc|9GU0ELA%m4&4Ja#cQpNw8i8ACAoK6?-px+oBl_yKmenZut#Xumjz zk8p^OV2KY&?5MUwGrBOo?ki`Sxo#?-Q4gw*Sh0k`@ zFTaYK2;}%Zk-68`#5DXU$2#=%YL#S&MTN8bF+!J2VT6x^XBci6O)Q#JfW{YMz) zOBM>t2rSj)n#0a3cjvu}r|k3od6W(SN}V-cL?bi*Iz-8uOcCcsX0L>ZXjLqk zZu2uHq5B|Kt>e+=pPKu=1P@1r9WLgYFq_TNV1p9pu0erHGd!+bBp!qGi+~4A(RsYN@CyXNrC&hxGmW)u5m35OmWwX`I+0yByglO`}HC4nGE^_HUs^&A(uaM zKPj^=qI{&ayOq#z=p&pnx@@k&I1JI>cttJcu@Ihljt?6p^6{|ds`0MoQwp+I{3l6` zB<9S((RpLG^>=Kic`1LnhpW2=Gu!x`m~=y;A`Qk!-w`IN;S8S930#vBVMv2vCKi}u z6<-VPrU0AnE&vzwV(CFC0gnZYcpa-l5T0ZS$P6(?9AM;`Aj~XDvt;Jua=jIgF=Fm? 
zdp=M$>`phx%+Gu};;-&7T|B1AcC#L4@mW5SV_^1BRbo6;2PWe$r+npRV`yc;T1mo& z+~_?7rA+(Um&o@Tddl zL_hxvWk~a)yY}%j`Y+200D%9$bWHy&;(yj{jpi?Rtz{J66ANw)UyPOm;t6FzY3$hx zcn)Ir79nhFvNa7^a{SHN7XH*|Vlsx`CddPnA&Qvh8aNhEA;mPVv;Ah=k<*u!Zq^7 z<=xs*iQTQOMMcg|(NA_auh@x`3#_LFt=)}%SQppP{E>mu_LgquAWvh<>L7tf9+~rO znwUDS52u)OtY<~!d$;m9+87aO+&`#2ICl@Y>&F{jI=H(K+@3M1$rr=*H^dye#~TyD z!){#Pyfn+|ugUu}G;a~!&&0aqQ59U@UT3|_JuBlYUpT$2+11;}JBJ`{+lQN9T@QFY z5+`t;6(TS0F?OlBTE!@7D`8#URDNqx2t6`GZ{ZgXeS@v%-eJzZOHz18aS|svxII$a zZeFjrJ*$IwX$f-Rzr_G>xbu@euGl)B7pC&S+CmDJBg$BoV~jxSO#>y z33`bupN#LDoW0feZe0%q8un0rYN|eRAnwDHQ6e_)xBTbtoZtTA=Fvk){q}9Os~6mQ zKB80VI_&6iSq`LnK7*kfHZoeX6?WE}8yjuDn=2#JG$+;-TOA1%^=DnXx%w{b=w}tS zQbU3XxtOI8E(!%`64r2`zog;5<0b4i)xBmGP^jiDZ2%HNSxIf3@wKs~uk4%3Mxz;~ zts_S~E4>W+YwI<-*-$U8*^HKDEa8oLbmqGg?3vewnaNg%Mm)W=)lcC_J+1ov^u*N3 zXJ?!BrH-+wGYziJq2Y#vyry6Z>NPgkEk+Ke`^DvNRdb>Q2Nlr#v%O@<5hbflI6EKE z9dWc0-ORk^T}jP!nkJ1imyjdVX@GrjOs%cpgA8-c&FH&$(4od#x6Y&=LiJZPINVyW z0snY$8JW@>tc2}DlrD3StQmA0Twck~@>8dSix9CyQOALcREdxoM$Sw*l!}bXKq9&r zysMWR@%OY24@e`?+#xV2bk{T^C_xSo8v2ZI=lBI*l{RciPwuE>L5@uhz@{!l)rtVlWC>)6(G)1~n=Q|S!{E9~6*fdpa*n z!()-8EpTdj=zr_Lswi;#{TxbtH$8*G=UM`I+icz7sr_SdnHXrv=?iEOF1UL+*6O;% zPw>t^kbW9X@oEXx<97%lBm-9?O_7L!DeD)Me#rwE54t~UBu9VZ zl_I1tBB~>jm@bw0Aljz8! zXBB6ATG6iByKIxs!qr%pz%wgqbg(l{65DP4#v(vqhhL{0b#0C8mq`bnqZ1OwFV z7mlZZJFMACm>h9v^2J9+^_zc1=JjL#qM5ZHaThH&n zXPTsR8(+)cj&>Un{6v*z?@VTLr{TmZ@-fY%*o2G}*G}#!bmqpoo*Ay@U!JI^Q@7gj;Kg-HIrLj4}#ec4~D2~X6vo;ghep-@&yOivYP zC19L0D`jjKy1Yi-SGPAn94(768Tcf$urAf{)1)9W58P`6MA{YG%O?|07!g9(b`8PXG1B1Sh0?HQmeJtP0M$O$hI z{5G`&9XzYhh|y@qsF1GnHN|~^ru~HVf#)lOTSrv=S@DyR$UKQk zjdEPFDz{uHM&UM;=mG!xKvp;xAGHOBo~>_=WFTmh$chpC7c`~7?36h)7$fF~Ii}8q zF|YXxH-Z?d+Q+27Rs3X9S&K3N+)OBxMHn1u(vlrUC6ckBY@@jl+mgr#KQUKo#VeFm zFwNYgv0<%~Wn}KeLeD9e1$S>jhOq&(e*I@L<=I5b(?G(zpqI*WBqf|Zge0&aoDUsC zngMRA_Kt0>La+Erl=Uv_J^p(z=!?XHpenzn$%EA`JIq#yYF?JLDMYiPfM(&Csr#f{ zdd+LJL1by?xz|D8+(fgzRs~(N1k9DSyK@LJygwaYX8dZl0W!I&c^K?7)z{2is;OkE zd$VK-(uH#AUaZrp=1z;O*n=b?QJkxu`Xsw&7yrX0?(CX=I-C#T;yi8a<{E~?vr3W> zQrpPqOW2M+AnZ&p{hqmHZU-;Q(7?- zP8L|Q0RM~sB0w1w53f&Kd*y}ofx@c z5Y6B8qGel+uT1JMot$nT1!Tim6{>oZzJXdyA+4euOLME?5Fd_85Uk%#E*ln%y{u8Q z$|?|R@Hpb~yTVK-Yr_S#%NUy7EBfYGAg>b({J|5b+j-PBpPy$Ns`PaJin4JdRfOaS zE|<HjH%NuJgsd2wOlv>~y=np%=2)$M9LS|>P)zJ+Fei5vYo_N~B0XCn+GM76 z)Xz3tg*FRVFgIl9zpESgdpWAavvVViGlU8|UFY{{gVJskg*I!ZjWyk~OW-Td4(mZ6 zB&SQreAAMqwp}rjy`HsG({l2&q5Y52<@AULVAu~rWI$UbFuZs>Sc*x+XI<+ez%$U)|a^unjpiW0l0 zj1!K0(b6$8LOjzRqQ~K&dfbMIE=TF}XFAi)$+h}5SD3lo z%%Qd>p9se=VtQG{kQ;N`sI)G^u|DN#7{aoEd zkksYP%_X$Rq08);-s6o>CGJ<}v`qs%eYf+J%DQ^2k68C%nvikRsN?$ap--f+vCS`K z#&~)f7!N^;sdUXu54gl3L=LN>FB^tuK=y2e#|hWiWUls__n@L|>xH{%8lIJTd5`w? 
zSwZbnS;W~DawT4OwSJVdAylbY+u5S+ZH{4hAi2&}Iv~W(UvHg(1GTZRPz`@{SOqzy z(8g&Dz=$PfRV=6FgxN~zo+G8OoPI&d-thcGVR*_^(R8COTM@bq?fDwY{}WhsQS1AK zF6R1t8!RdFmfocpJ6?9Yv~;WYi~XPgs(|>{5})j!AR!voO7y9&cMPo#80A(`za@t>cx<0;qxM@S*m(jYP)dMXr*?q0E`oL;12}VAep179uEr8c<=D zr5?A*C{eJ`z9Ee;E$8)MECqatHkbHH z&Y+ho0B$31MIB-xm&;xyaFCtg<{m~M-QDbY)fQ>Q*Xibb~8ytxZQ?QMf9!%cV zU0_X1@b4d+Pg#R!`OJ~DOrQz3@cpiGy~XSKjZQQ|^4J1puvwKeScrH8o{bscBsowomu z^f12kTvje`yEI3eEXDHJ6L+O{Jv$HVj%IKb|J{IvD*l6IG8WUgDJ*UGz z3!C%>?=dlfSJ>4U88)V+`U-!9r^@AxJBx8R;)J4Fn@`~k>8>v0M9xp90OJElWP&R5 zM#v*vtT}*Gm1^)Bv!s72T3PB0yVIjJW)H7a)ilkAvoaH?)jjb`MP>2z{%Y?}83 zUIwBKn`-MSg)=?R)1Q0z3b>dHE^)D8LFs}6ASG1|daDly_^lOSy&zIIhm*HXm1?VS=_iacG);_I9c zUQH1>i#*?oPIwBMJkzi_*>HoUe}_4o>2(SHWzqQ=;TyhAHS;Enr7!#8;sdlty&(>d zl%5cjri8`2X^Ds`jnw7>A`X|bl=U8n+3LKLy(1dAu8`g@9=5iw$R0qk)w8Vh_Dt^U zIglK}sn^)W7aB(Q>HvrX=rxB z+*L)3DiqpQ_%~|m=44LcD4-bxO3OO*LPjsh%p(k?&jvLp0py57oMH|*IMa(<|{m1(0S|x)?R-mqJ=I;_YUZA>J z62v*eSK;5w!h8J+6Z2~oyGdZ68waWfy09?4fU&m7%u~zi?YPHPgK6LDwphgaYu%0j zurtw)AYOpYKgHBrkX189mlJ`q)w-f|6>IER{5Lk97%P~a-JyCRFjejW@L>n4vt6#hq;!|m;hNE||LK3nw1{bJOy+eBJjK=QqNjI;Q6;Rp5 z&035pZDUZ#%Oa;&_7x0T<7!RW`#YBOj}F380Bq?MjjEhrvlCATPdkCTTl+2efTX$k zH&0zR1n^`C3ef~^sXzJK-)52(T}uTG%OF8yDhT76L~|^+hZ2hiSM*QA9*D5odI1>& z9kV9jC~twA5MwyOx(lsGD_ggYmztXPD`2=_V|ks_FOx!_J8!zM zTzh^cc+=VNZ&(OdN=y4Juw)@8-85lwf_#VMN!Ed(eQiRiLB2^2e`4dp286h@v@`O%_b)Y~A; zv}r6U?zs&@uD_+(_4bwoy7*uozNvp?bXFoB8?l8yG0qsm1JYzIvB_OH4_2G*IIOwT zVl%HX1562vLVcxM_RG*~w_`FbIc!(T=3>r528#%mwwMK}uEhJ()3MEby zQQjzqjWkwfI~;Fuj(Lj=Ug0y`>~C7`w&wzjK(rPw+Hpd~EvQ-ufQOiB4OMpyUKJhw zqEt~jle9d7S~LI~$6Z->J~QJ{Vdn3!c}g9}*KG^Kzr^(7VI5Gk(mHLL{itj_hG?&K4Ws0+T4gLfi3eu$N=`s36geNC?c zm!~}vG6lx9Uf^5M;bWntF<-{p^bruy~f?sk9 zcETAPQZLoJ8JzMMg<-=ju4keY@SY%Wo?u9Gx=j&dfa6LIAB|IrbORLV1-H==Z1zCM zeZcOYpm5>U2fU7V*h;%n`8 zN95QhfD994={1*<2vKLCNF)feKOGk`R#K~G=;rfq}|)s20&MCa65 zUM?xF5!&e0lF%|U!#rD@I{~OsS_?=;s_MQ_b_s=PuWdC)q|UQ&ea)DMRh5>fpQjXe z%9#*x=7{iRCtBKT#H>#v%>77|{4_slZ)XCY{s3j_r{tdpvb#|r|sbS^dU1x70$eJMU!h{Y7Kd{dl}9&vxQl6Jt1a` zHQZrWyY0?!vqf@u-fxU_@+}u(%Wm>0I#KP48tiAPYY!TdW(o|KtVI|EUB9V`CBBNaBLVih7+yMVF|GSoIQD0Jfb{ z!OXq;(>Z?O`1gap(L~bUcp>Lc@Jl-})^=6P%<~~9ywY=$iu8pJ0m*hOPzr~q`23eX zgbs;VOxxENe0UMVeN*>uCn9Gk!4siN-e>x)pIKAbQz!G)TcqIJ0`JBBaX>1-4_XO_-HCS^vr2vjv#7KltDZdyQ{tlWh4$Gm zB>|O1cBDC)yG(sbnc*@w6e%e}r*|IhpXckx&;sQCwGdKH+3oSG-2)Bf#x`@<4ETAr z0My%7RFh6ZLiZ_;X6Mu1YmXx7C$lSZ^}1h;j`EZd6@%JNUe=btBE z%s=Xmo1Ps?8G`}9+6>iaB8bgjUdXT?=trMu|4yLX^m0Dg{m7rpKNJey|EwHI+nN1e zL^>qN%5Fg)dGs4DO~uwIdXImN)QJ*Jhpj7$fq_^`{3fwpztL@WBB}OwQ#Epo-mqMO zsM$UgpFiG&d#)lzEQ{3Q;)&zTw;SzGOah-Dpm{!q7<8*)Ti_;xvV2TYXa}=faXZy? 
z3y?~GY@kl)>G&EvEijk9y1S`*=zBJSB1iet>0;x1Ai)*`^{pj0JMs)KAM=@UyOGtO z3y0BouW$N&TnwU6!%zS%nIrnANvZF&vB1~P5_d`x-giHuG zPJ;>XkVoghm#kZXRf>qxxEix;2;D1CC~NrbO6NBX!`&_$iXwP~P*c($EVV|669kDO zKoTLZNF4Cskh!Jz5ga9uZ`3o%7Pv`d^;a=cXI|>y;zC3rYPFLQkF*nv(r>SQvD*## z(Vo%^9g`%XwS0t#94zPq;mYGLKu4LU3;txF26?V~A0xZbU4Lmy`)>SoQX^m7fd^*E z+%{R4eN!rIk~K)M&UEzxp9dbY;_I^c} zOc{wlIrN_P(PPqi51k_$>Lt|X6A^|CGYgKAmoI#Li?;Wq%q~q*L7ehZkUrMxW67Jl zhsb~+U?33QS>eqyN{(odAkbopo=Q$Az?L+NZW>j;#~@wCDX?=L5SI|OxI~7!Pli;e zELMFcZtJY3!|=Gr2L4>z8yQ-{To>(f80*#;6`4IAiqUw`=Pg$%C?#1 z_g@hIGerILSU>=P>z{gM|DS91A4cT@PEIB^hSop!uhMo#2G;+tQSpDO_6nOnPWSLU zS;a9m^DFMXR4?*X=}d7l;nXuHk&0|m`NQn%d?8|Ab3A9l9Jh5s120ibWBdB z$5YwsK3;wvp!Kn@)Qae{ef`0#NwlRpQ}k^r>yos_Ne1;xyKLO?4)t_G4eK~wkUS2A&@_;)K0-03XGBzU+5f+uMDxC z(s8!8!RvdC#@`~fx$r)TKdLD6fWEVdEYtV#{ncT-ZMX~eI#UeQ-+H(Z43vVn%Yj9X zLdu9>o%wnWdvzA-#d6Z~vzj-}V3FQ5;axDIZ;i(95IIU=GQ4WuU{tl-{gk!5{l4_d zvvb&uE{%!iFwpymz{wh?bKr1*qzeZb5f6e6m_ozRF&zux2mlK=v_(_s^R6b5lu?_W4W3#<$zeG~Pd)^!4tzhs}-Sx$FJP>)ZGF(hVTH|C3(U zs0PO&*h_ zNA-&qZpTP$$LtIgfiCn07}XDbK#HIXdmv8zdz4TY;ifNIH-0jy(gMSByG2EF~Th#eb_TueZC` zE?3I>UTMpKQ})=C;6p!?G)M6w^u*A57bD?2X`m3X^6;&4%i_m(uGJ3Z5h`nwxM<)H z$I5m?wN>O~8`BGnZ=y^p6;0+%_0K}Dcg|K;+fEi|qoBqvHj(M&aHGqNF48~XqhtU? z^ogwBzRlOfpAJ+Rw7IED8lRbTdBdyEK$gPUpUG}j-M42xDj_&qEAQEtbs>D#dRd7Y z<&TpSZ(quQDHiCFn&0xsrz~4`4tz!CdL8m~HxZM_agu@IrBpyeL1Ft}V$HX_ZqDPm z-f89)pjuEzGdq-PRu`b1m+qBGY{zr_>{6Ss>F|xHZlJj9dt5HD$u`1*WZe)qEIuDSR)%z+|n zatVlhQ?$w#XRS7xUrFE;Y8vMGhQS5*T{ZnY=q1P?w5g$OKJ#M&e??tAmPWHMj3xhS ziGxapy?kn@$~2%ZY;M8Bc@%$pkl%Rvj!?o%agBvpQ-Q61n9kznC4ttrRNQ4%GFR5u zyv%Yo9~yxQJWJSfj z?#HY$y=O~F|2pZs22pu|_&Ajd+D(Mt!nPUG{|1nlvP`=R#kKH zO*s$r_%ss5h1YO7k0bHJ2CXN)Yd6CHn~W!R=SqkWe=&nAZu(Q1G!xgcUilM@YVei@2@a`8he z9@pM`)VB*=e7-MWgLlXlc)t;fF&-AwM{E-EX}pViFn0I0CNw2bNEnN2dj!^4(^zS3 zobUm1uQnpqk_4q{pl*n06=TfK_C>UgurKFjRXsK_LEn};=79`TB12tv6KzwSu*-C8 z;=~ohDLZylHQ|Mpx-?yql>|e=vI1Z!epyUpAcDCp4T|*RV&X`Q$0ogNwy6mFALo^@ z9=&(9txO8V@E!@6^(W0{*~CT>+-MA~vnJULBxCTUW>X5>r7*eXYUT0B6+w@lzw%n> z_VjJ<2qf|(d6jYq2(x$(ZDf!yVkfnbvNmb5c|hhZ^2TV_LBz`9w!e_V*W_(MiA7|= z&EeIIkw*+$Xd!)j8<@_<}A5;~A_>3JT*kX^@}cDoLd>Qj<`Se^wdUa(j0dp+Tl8EptwBm{9OGsdFEq zM`!pjf(Lm(`$e3FLOjqA5LnN5o!}z{ zNf}rJuZh@yUtq&ErjHeGzX4(!luV!jB&;FAP|!R_QHYw#^Z1LwTePAKJ6X&IDNO#; z)#I@Xnnzyij~C@UH~X51JCgQeF0&hTXnuoElz#m{heZRexWc0k4<>0+ClX7%0 zEBqCCld1tD9Zwkr4{?Nor19#E5-YKfB8d?qgR82-Ow2^AuNevly2*tHA|sK!ybYkX zm-sLQH72P&{vEAW6+z~O5d0qd=xW~rua~5a?ymYFSD@8&gV)E5@RNNBAj^C99+Z5Z zR@Pq55mbCQbz+Mn$d_CMW<-+?TU960agEk1J<>d>0K=pF19yN))a~4>m^G&tc*xR+yMD*S=yip-q=H zIlredHpsJV8H(32@Zxc@bX6a21dUV95Th--8pE6C&3F>pk=yv$yd6@Haw;$v4+Fcb zRwn{Qo@0`7aPa2LQOP}j9v>sjOo5Kqvn|`FLizX zB+@-u4Lw|jsvz{p^>n8Vo8H2peIqJJnMN}A)q6%$Tmig7eu^}K2 zrh$X?T|ZMsoh{6pdw1G$_T<`Ds-G=jc;qcGdK4{?dN2-XxjDNbb(7pk|3JUVCU4y; z)?LXR>f+AAu)JEiti_Zy#z5{RgsC}R(@jl%9YZ>zu~hKQ*AxbvhC378-I@{~#%Y`Z zy=a=9YpewPIC+gkEUUwtUL7|RU7=!^Aa}Mk^6uxOgRGA#JXjWLsjFUnix|Mau{hDT z7mn*z1m5g`vP(#tjT0Zy4eAY(br&!RiiXE=ZI!{sE1#^#%x^Z7t1U)b<;%Y}Q9=5v z;wpDCEZ@OE36TWT=|gxigT@VaW9BvHS05;_P(#s z8zI4XFQys}q)<`tkX$WnSarn{3e!s}4(J!=Yf>+Y>cP3f;vr63f2{|S^`_pWc)^5_!R z*(x-fuBxL51@xe!lnDBKi}Br$c$BMZ3%f2Sa6kLabiBS{pq*yj;q|k(86x`PiC{p6 z_bxCW{>Q2BA8~Ggz&0jkrcU+-$ANBsOop*ms>34K9lNYil@}jC;?cYP(m^P}nR6FV zk(M%48Z&%2Rx$A&FhOEirEhY0(dn;-k(qkTU)sFQ`+-ih+s@A8g?r8Pw+}2;35WYf zi}VO`jS`p(tc)$X$a>-#WXoW!phhatC*$}|rk>|wUU71eUJG^$c6_jwX?iSHM@6__ zvV|6%U*$sSXJu9SX?2%M^kK|}a2QJ8AhF{fuXrHZxXsI~O zGKX45!K7p*MCPEQ=gp?eu&#AW*pR{lhQR##P_*{c_DjMGL|3T3-bSJ(o$|M{ytU}> zAV>wq*uE*qFo9KvnA^@juy{x<-u*#2NvkV={Ly}ysKYB-k`K3@K#^S1Bb$8Y#0L0# z`6IkSG&|Z$ODy|VLS+y5pFJx&8tvPmMd8c9FhCyiU8~k6FwkakUd^(_ml8`rnl>JS 
diff --git a/s3stream/gradle/wrapper/gradle-wrapper.properties b/s3stream/gradle/wrapper/gradle-wrapper.properties
deleted file mode 100644
index 22ec789c6..000000000
--- a/s3stream/gradle/wrapper/gradle-wrapper.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Copyright 2024, AutoMQ CO.,LTD.
-# -# Use of this software is governed by the Business Source License -# included in the file BSL.md -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0 -# - -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.1.1-bin.zip -networkTimeout=10000 -validateDistributionUrl=true -zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists diff --git a/s3stream/gradlew b/s3stream/gradlew deleted file mode 100755 index 0adc8e1a5..000000000 --- a/s3stream/gradlew +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/sh - -# -# Copyright © 2015-2021 the original authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -############################################################################## -# -# Gradle start up script for POSIX generated by Gradle. -# -# Important for running: -# -# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is -# noncompliant, but you have some other compliant shell such as ksh or -# bash, then to run this script, type that shell name before the whole -# command line, like: -# -# ksh Gradle -# -# Busybox and similar reduced shells will NOT work, because this script -# requires all of these POSIX shell features: -# * functions; -# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», -# «${var#prefix}», «${var%suffix}», and «$( cmd )»; -# * compound commands having a testable exit status, especially «case»; -# * various built-in commands including «command», «set», and «ulimit». -# -# Important for patching: -# -# (2) This script targets any POSIX shell, so it avoids extensions provided -# by Bash, Ksh, etc; in particular arrays are avoided. -# -# The "traditional" practice of packing multiple parameters into a -# space-separated string is a well documented source of bugs and security -# problems, so this is (mostly) avoided, by progressively accumulating -# options in "$@", and eventually passing that to Java. -# -# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, -# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; -# see the in-line comments for details. -# -# There are tweaks for specific operating systems such as AIX, CygWin, -# Darwin, MinGW, and NonStop. -# -# (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt -# within the Gradle project. -# -# You can find Gradle at https://github.com/gradle/gradle/. -# -############################################################################## - -# Attempt to set APP_HOME - -# Resolve links: $0 may be a link -app_path=$0 - -# Need this for daisy-chained symlinks. 
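-# (Illustration, not part of the original script: given a chain such as
-# bin/gradlew -> tools/gradlew -> gradle/gradlew, each pass of the loop below
-# resolves one link, leaving app_path pointing at the real script before
-# APP_HOME is computed from it.)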
-while - APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path - [ -h "$app_path" ] -do - ls=$( ls -ld "$app_path" ) - link=${ls#*' -> '} - case $link in #( - /*) app_path=$link ;; #( - *) app_path=$APP_HOME$link ;; - esac -done - -# This is normally unused -# shellcheck disable=SC2034 -APP_BASE_NAME=${0##*/} -# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD=maximum - -warn () { - echo "$*" -} >&2 - -die () { - echo - echo "$*" - echo - exit 1 -} >&2 - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "$( uname )" in #( - CYGWIN* ) cygwin=true ;; #( - Darwin* ) darwin=true ;; #( - MSYS* | MINGW* ) msys=true ;; #( - NONSTOP* ) nonstop=true ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD=$JAVA_HOME/jre/sh/java - else - JAVACMD=$JAVA_HOME/bin/java - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD=java - if ! command -v java >/dev/null 2>&1 - then - die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -fi - -# Increase the maximum file descriptors if we can. -if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then - case $MAX_FD in #( - max*) - # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 - MAX_FD=$( ulimit -H -n ) || - warn "Could not query maximum file descriptor limit" - esac - case $MAX_FD in #( - '' | soft) :;; #( - *) - # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 - ulimit -n "$MAX_FD" || - warn "Could not set maximum file descriptor limit to $MAX_FD" - esac -fi - -# Collect all arguments for the java command, stacking in reverse order: -# * args from the command line -# * the main class name -# * -classpath -# * -D...appname settings -# * --module-path (only if needed) -# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. - -# For Cygwin or MSYS, switch paths to Windows format before running java -if "$cygwin" || "$msys" ; then - APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) - CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) - - JAVACMD=$( cygpath --unix "$JAVACMD" ) - - # Now convert the arguments - kludge to limit ourselves to /bin/sh - for arg do - if - case $arg in #( - -*) false ;; # don't mess with options #( - /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath - [ -e "$t" ] ;; #( - *) false ;; - esac - then - arg=$( cygpath --path --ignore --mixed "$arg" ) - fi - # Roll the args list around exactly as many times as the number of - # args, so each arg winds up back in the position where it started, but - # possibly modified. 
- # - # NB: a `for` loop captures its iteration list before it begins, so - # changing the positional parameters here affects neither the number of - # iterations, nor the values presented in `arg`. - shift # remove old arg - set -- "$@" "$arg" # push replacement arg - done -fi - - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' - -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. - -set -- \ - "-Dorg.gradle.appname=$APP_BASE_NAME" \ - -classpath "$CLASSPATH" \ - org.gradle.wrapper.GradleWrapperMain \ - "$@" - -# Stop when "xargs" is not available. -if ! command -v xargs >/dev/null 2>&1 -then - die "xargs is not available" -fi - -# Use "xargs" to parse quoted args. -# -# With -n1 it outputs one arg per line, with the quotes and backslashes removed. -# -# In Bash we could simply go: -# -# readarray ARGS < <( xargs -n1 <<<"$var" ) && -# set -- "${ARGS[@]}" "$@" -# -# but POSIX shell has neither arrays nor command substitution, so instead we -# post-process each arg (as a line of input to sed) to backslash-escape any -# character that might be a shell metacharacter, then use eval to reverse -# that process (while maintaining the separation between arguments), and wrap -# the whole thing up as a single "set" statement. -# -# This will of course break if any of these variables contains a newline or -# an unmatched quote. -# - -eval "set -- $( - printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | - xargs -n1 | - sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | - tr '\n' ' ' - )" '"$@"' - -exec "$JAVACMD" "$@" diff --git a/s3stream/gradlew.bat b/s3stream/gradlew.bat deleted file mode 100644 index 6689b85be..000000000 --- a/s3stream/gradlew.bat +++ /dev/null @@ -1,92 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%"=="" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%"=="" set DIRNAME=. -@rem This is normally unused -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Resolve any "." and ".." in APP_HOME to make it shorter. -for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
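-@rem For example (illustration, not part of the original script):
-@rem   set JAVA_OPTS=-Xmx512m
-@rem   set GRADLE_OPTS=-Dorg.gradle.daemon=false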
-set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if %ERRORLEVEL% equ 0 goto execute - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto execute - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* - -:end -@rem End local scope for the variables with windows NT shell -if %ERRORLEVEL% equ 0 goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -set EXIT_CODE=%ERRORLEVEL% -if %EXIT_CODE% equ 0 set EXIT_CODE=1 -if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% -exit /b %EXIT_CODE% - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/s3stream/pom.xml b/s3stream/pom.xml deleted file mode 100644 index 27ed9c9e2..000000000 --- a/s3stream/pom.xml +++ /dev/null @@ -1,302 +0,0 @@ - - - - - 4.0.0 - com.automq.elasticstream - s3stream - 1.0.0-SNAPSHOT - - 5.5.0 - 5.10.0 - 3.24.2 - 2.0.9 - 2.20.127 - - 3.2.0 - 11 - 11 - UTF-8 - 1.32.0 - 1.9.20.1 - - - - software.amazon.awssdk - s3 - ${s3.version} - - - - - io.netty - netty-tcnative-boringssl-static - 2.0.53.Final - - - io.netty - netty-buffer - 4.1.100.Final - - - com.bucket4j - bucket4j-core - 8.5.0 - - - org.apache.commons - commons-lang3 - 3.13.0 - - - org.slf4j - slf4j-api - ${slf4j.version} - - - org.slf4j - slf4j-simple - ${slf4j.version} - test - - - org.junit.jupiter - junit-jupiter - ${junit-jupiter.version} - test - - - org.mockito - mockito-core - ${mockito-core.version} - test - - - org.mockito - mockito-junit-jupiter - ${mockito-core.version} - test - - - net.sourceforge.argparse4j - argparse4j - 0.9.0 - - - net.java.dev.jna - jna - 5.2.0 - - - com.google.guava - guava - 32.0.1-jre - - - com.fasterxml.jackson.core - jackson-databind - 2.16.0 - - - io.opentelemetry - opentelemetry-api - ${opentelemetry.version} - - - io.opentelemetry.instrumentation - opentelemetry-instrumentation-annotations - ${opentelemetry.version} - - - org.aspectj - aspectjrt - ${aspectj.version} - - - org.aspectj - aspectjweaver - ${aspectj.version} - - - com.github.jnr - jnr-posix - 3.1.19 - - - com.yammer.metrics - metrics-core - 2.2.0 - - - - - - - maven-checkstyle-plugin - ${maven-checkstyle-plugin.version} - - - validate - validate - - ../style/rmq_checkstyle.xml - UTF-8 - true - true - true - **/generated*/**/* - **/thirdparty/**/* - - - check - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - 1.4.1 - - - enforce - - enforce - - - - - - - - - - - - org.jacoco - jacoco-maven-plugin - 0.8.10 - - - "**/config/**" - "**/model/**" - "**/generated/**" - "**/metrics/**" - **/*Exception* - - - - - - prepare-agent - - - - report - test - - report - - - - - - org.apache.maven.plugins - 
maven-surefire-plugin
- 3.1.2
-
- 1
- 1
- true
-
- --add-opens=java.base/java.nio=ALL-UNNAMED
-
- true
-
-
-
-
- maven-assembly-plugin
- 3.6.0
-
-
- jar-with-dependencies
-
-
-
-
- make-assembly
- package
-
- single
-
-
-
-
-
- org.apache.maven.plugins
- maven-source-plugin
- 3.3.0
-
-
- attach-sources
- verify
-
- jar-no-fork
-
-
-
-
-
- dev.aspectj
- aspectj-maven-plugin
- 1.13.1
-
-
- org.aspectj
- aspectjtools
- ${aspectj.version}
-
-
-
- ${maven.compiler.target}
- ${maven.compiler.source}
- ${maven.compiler.target}
- true
- true
- ignore
- UTF-8
-
-
-
-
- compile
-
-
-
-
-
-
-
-
- oss.sonatype.org
- https://s01.oss.sonatype.org/content/repositories/snapshots
-
-
- oss.sonatype.org
- https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/
-
-
diff --git a/s3stream/settings.gradle b/s3stream/settings.gradle
deleted file mode 100644
index 51b17df85..000000000
--- a/s3stream/settings.gradle
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * This file was generated by the Gradle 'init' task.
- */
-
-rootProject.name = 's3stream'
diff --git a/s3stream/src/main/java/com/automq/stream/ByteBufSeqAlloc.java b/s3stream/src/main/java/com/automq/stream/ByteBufSeqAlloc.java
deleted file mode 100644
index c8e32d7d0..000000000
--- a/s3stream/src/main/java/com/automq/stream/ByteBufSeqAlloc.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream;
-
-import com.automq.stream.s3.ByteBufAlloc;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.CompositeByteBuf;
-import java.util.concurrent.atomic.AtomicReference;
-
-public class ByteBufSeqAlloc {
- public static final int HUGE_BUF_SIZE = 8 * 1024 * 1024;
- // why not use ThreadLocal? partition opening can involve too many threads
- final AtomicReference<HugeBuf>[] hugeBufArray;
- private final int allocType;
-
- public ByteBufSeqAlloc(int allocType, int concurrency) {
- this.allocType = allocType;
- hugeBufArray = new AtomicReference[concurrency];
- for (int i = 0; i < hugeBufArray.length; i++) {
- hugeBufArray[i] = new AtomicReference<>(new HugeBuf(ByteBufAlloc.byteBuffer(HUGE_BUF_SIZE, allocType)));
- }
- }
-
- public ByteBuf byteBuffer(int capacity) {
- if (capacity >= HUGE_BUF_SIZE) {
- // if the requested capacity is larger than HUGE_BUF_SIZE, just allocate a new ByteBuf
- return ByteBufAlloc.byteBuffer(capacity, allocType);
- }
- int bufIndex = Math.abs(Thread.currentThread().hashCode() % hugeBufArray.length);
-
- AtomicReference<HugeBuf> bufRef = hugeBufArray[bufIndex];
- //noinspection SynchronizationOnLocalVariableOrMethodParameter
- synchronized (bufRef) {
- HugeBuf hugeBuf = bufRef.get();
-
- if (hugeBuf.nextIndex + capacity <= hugeBuf.buf.capacity()) {
- // if the requested capacity can be satisfied by the current hugeBuf, return a slice of it
- int nextIndex = hugeBuf.nextIndex;
- hugeBuf.nextIndex += capacity;
- ByteBuf slice = hugeBuf.buf.retainedSlice(nextIndex, capacity);
- return slice.writerIndex(slice.readerIndex());
- }
-
- // if the requested capacity cannot be satisfied by the current hugeBuf
- // 1. slice the remaining of the current hugeBuf and release the hugeBuf
- // 2. create a new hugeBuf and slice the remaining of the required capacity
- // 3.
return the composite ByteBuf of the two slices - CompositeByteBuf cbf = ByteBufAlloc.compositeByteBuffer(); - int readLength = hugeBuf.buf.capacity() - hugeBuf.nextIndex; - cbf.addComponent(false, hugeBuf.buf.retainedSlice(hugeBuf.nextIndex, readLength)); - capacity -= readLength; - hugeBuf.buf.release(); - - HugeBuf newHugeBuf = new HugeBuf(ByteBufAlloc.byteBuffer(HUGE_BUF_SIZE, allocType)); - bufRef.set(newHugeBuf); - - cbf.addComponent(false, newHugeBuf.buf.retainedSlice(0, capacity)); - newHugeBuf.nextIndex = capacity; - - return cbf; - } - } - - static class HugeBuf { - final ByteBuf buf; - int nextIndex; - - HugeBuf(ByteBuf buf) { - this.buf = buf; - this.nextIndex = 0; - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/DefaultAppendResult.java b/s3stream/src/main/java/com/automq/stream/DefaultAppendResult.java deleted file mode 100644 index 0cf07a9c9..000000000 --- a/s3stream/src/main/java/com/automq/stream/DefaultAppendResult.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream; - -import com.automq.stream.api.AppendResult; - -public class DefaultAppendResult implements AppendResult { - private final long baseOffset; - - public DefaultAppendResult(long baseOffset) { - this.baseOffset = baseOffset; - } - - @Override - public long baseOffset() { - return baseOffset; - } - - public String toString() { - return "AppendResult(baseOffset=" + baseOffset + ")"; - } -} - diff --git a/s3stream/src/main/java/com/automq/stream/DefaultRecordBatch.java b/s3stream/src/main/java/com/automq/stream/DefaultRecordBatch.java deleted file mode 100644 index d9f2179a7..000000000 --- a/s3stream/src/main/java/com/automq/stream/DefaultRecordBatch.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream;
-
-import com.automq.stream.api.RecordBatch;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Map;
-
-public class DefaultRecordBatch implements RecordBatch {
- private final int count;
- private final long baseTimestamp;
- private final Map<String, String> properties;
- private final ByteBuffer rawPayload;
-
- public DefaultRecordBatch(int count, long baseTimestamp, Map<String, String> properties, ByteBuffer rawPayload) {
- this.count = count;
- this.baseTimestamp = baseTimestamp;
- this.properties = properties;
- this.rawPayload = rawPayload;
- }
-
- @Override
- public int count() {
- return count;
- }
-
- @Override
- public long baseTimestamp() {
- return baseTimestamp;
- }
-
- @Override
- public Map<String, String> properties() {
- if (properties == null) {
- return Collections.emptyMap();
- }
- return properties;
- }
-
- @Override
- public ByteBuffer rawPayload() {
- return rawPayload.duplicate();
- }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/RecordBatchWithContextWrapper.java b/s3stream/src/main/java/com/automq/stream/RecordBatchWithContextWrapper.java
deleted file mode 100644
index 7b37db2d9..000000000
--- a/s3stream/src/main/java/com/automq/stream/RecordBatchWithContextWrapper.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream;
-
-import com.automq.stream.api.RecordBatch;
-import com.automq.stream.api.RecordBatchWithContext;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Map;
-
-public class RecordBatchWithContextWrapper implements RecordBatchWithContext {
- private final RecordBatch recordBatch;
- private final long baseOffset;
-
- public RecordBatchWithContextWrapper(RecordBatch recordBatch, long baseOffset) {
- this.recordBatch = recordBatch;
- this.baseOffset = baseOffset;
- }
-
- public static RecordBatchWithContextWrapper decode(ByteBuffer buffer) {
- // wire format: baseOffset (8 bytes) | record count (4 bytes) | raw payload
- long baseOffset = buffer.getLong();
- int count = buffer.getInt();
- return new RecordBatchWithContextWrapper(new DefaultRecordBatch(count, 0, Collections.emptyMap(), buffer), baseOffset);
- }
-
- @Override
- public long baseOffset() {
- return baseOffset;
- }
-
- @Override
- public long lastOffset() {
- return baseOffset + recordBatch.count();
- }
-
- @Override
- public int count() {
- return recordBatch.count();
- }
-
- @Override
- public long baseTimestamp() {
- return recordBatch.baseTimestamp();
- }
-
- @Override
- public Map<String, String> properties() {
- return recordBatch.properties();
- }
-
- @Override
- public ByteBuffer rawPayload() {
- return recordBatch.rawPayload().duplicate();
- }
-
- public byte[] encode() {
- // mirrors decode(): baseOffset (8 bytes) | record count (4 bytes) | raw payload
- ByteBuffer buffer = ByteBuffer.allocate(8 + 4 + recordBatch.rawPayload().remaining())
- .putLong(baseOffset)
- .putInt(recordBatch.count())
- .put(recordBatch.rawPayload().duplicate())
- .flip();
- return buffer.array();
- }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/WrappedByteBuf.java
b/s3stream/src/main/java/com/automq/stream/WrappedByteBuf.java
deleted file mode 100644
index cb32e041a..000000000
--- a/s3stream/src/main/java/com/automq/stream/WrappedByteBuf.java
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- * Copyright 2013 The Netty Project
- *
- * The Netty Project licenses this file to you under the Apache License,
- * version 2.0 (the "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at:
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package com.automq.stream;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.util.ByteProcessor;
-import io.netty.util.internal.ObjectUtil;
-import io.netty.util.internal.StringUtil;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.FileChannel;
-import java.nio.channels.GatheringByteChannel;
-import java.nio.channels.ScatteringByteChannel;
-import java.nio.charset.Charset;
-
-/**
- * Modified based on io.netty.buffer.WrappedByteBuf.
- * Wraps another {@link ByteBuf}.
- *
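- * Usage sketch (editor's illustration, not part of the original file; it assumes
- * {@code releaseHook} is run once the wrapped buffer is finally released, and
- * {@code inflightBytes} is a hypothetical AtomicLong counter):
- * <pre>{@code
- * ByteBuf buf = ByteBufAlloc.byteBuffer(4096, allocType);
- * ByteBuf wrapped = new WrappedByteBuf(buf, () -> inflightBytes.addAndGet(-4096));
- * }</pre>
- *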
- * It's important that the {@link #readerIndex()} and {@link #writerIndex()} will not do any adjustments on the - * indices on the fly because of internal optimizations made by {@link ByteBufUtil#writeAscii(ByteBuf, CharSequence)} - * and {@link ByteBufUtil#writeUtf8(ByteBuf, CharSequence)}. - */ -public class WrappedByteBuf extends ByteBuf { - private final ByteBuf root; - protected final ByteBuf buf; - private final Runnable releaseHook; - - public WrappedByteBuf(ByteBuf buf, Runnable releaseHook) { - this(buf, buf, releaseHook); - } - - public WrappedByteBuf(ByteBuf root, ByteBuf buf, Runnable releaseHook) { - this.root = root; - this.buf = ObjectUtil.checkNotNull(buf, "buf"); - this.releaseHook = ObjectUtil.checkNotNull(releaseHook, "releaseHook"); - } - - @Override - public final boolean hasMemoryAddress() { - return buf.hasMemoryAddress(); - } - - @Override - public boolean isContiguous() { - return buf.isContiguous(); - } - - @Override - public final long memoryAddress() { - return buf.memoryAddress(); - } - - @Override - public final int capacity() { - return buf.capacity(); - } - - @Override - public ByteBuf capacity(int newCapacity) { - buf.capacity(newCapacity); - return this; - } - - @Override - public final int maxCapacity() { - return buf.maxCapacity(); - } - - @Override - public final ByteBufAllocator alloc() { - return buf.alloc(); - } - - @Override - public final ByteOrder order() { - return buf.order(); - } - - @Override - public ByteBuf order(ByteOrder endianness) { - return new WrappedByteBuf(root, buf.order(endianness), releaseHook); - } - - @Override - public final ByteBuf unwrap() { - return buf; - } - - @Override - public ByteBuf asReadOnly() { - return buf.asReadOnly(); - } - - @Override - public boolean isReadOnly() { - return buf.isReadOnly(); - } - - @Override - public final boolean isDirect() { - return buf.isDirect(); - } - - @Override - public final int readerIndex() { - return buf.readerIndex(); - } - - @Override - public final ByteBuf readerIndex(int readerIndex) { - buf.readerIndex(readerIndex); - return this; - } - - @Override - public final int writerIndex() { - return buf.writerIndex(); - } - - @Override - public final ByteBuf writerIndex(int writerIndex) { - buf.writerIndex(writerIndex); - return this; - } - - @Override - public ByteBuf setIndex(int readerIndex, int writerIndex) { - buf.setIndex(readerIndex, writerIndex); - return this; - } - - @Override - public final int readableBytes() { - return buf.readableBytes(); - } - - @Override - public final int writableBytes() { - return buf.writableBytes(); - } - - @Override - public final int maxWritableBytes() { - return buf.maxWritableBytes(); - } - - @Override - public int maxFastWritableBytes() { - return buf.maxFastWritableBytes(); - } - - @Override - public final boolean isReadable() { - return buf.isReadable(); - } - - @Override - public final boolean isWritable() { - return buf.isWritable(); - } - - @Override - public final ByteBuf clear() { - buf.clear(); - return this; - } - - @Override - public final ByteBuf markReaderIndex() { - buf.markReaderIndex(); - return this; - } - - @Override - public final ByteBuf resetReaderIndex() { - buf.resetReaderIndex(); - return this; - } - - @Override - public final ByteBuf markWriterIndex() { - buf.markWriterIndex(); - return this; - } - - @Override - public final ByteBuf resetWriterIndex() { - buf.resetWriterIndex(); - return this; - } - - @Override - public ByteBuf discardReadBytes() { - buf.discardReadBytes(); - return this; - } - - @Override - public 
ByteBuf discardSomeReadBytes() { - buf.discardSomeReadBytes(); - return this; - } - - @Override - public ByteBuf ensureWritable(int minWritableBytes) { - buf.ensureWritable(minWritableBytes); - return this; - } - - @Override - public int ensureWritable(int minWritableBytes, boolean force) { - return buf.ensureWritable(minWritableBytes, force); - } - - @Override - public boolean getBoolean(int index) { - return buf.getBoolean(index); - } - - @Override - public byte getByte(int index) { - return buf.getByte(index); - } - - @Override - public short getUnsignedByte(int index) { - return buf.getUnsignedByte(index); - } - - @Override - public short getShort(int index) { - return buf.getShort(index); - } - - @Override - public short getShortLE(int index) { - return buf.getShortLE(index); - } - - @Override - public int getUnsignedShort(int index) { - return buf.getUnsignedShort(index); - } - - @Override - public int getUnsignedShortLE(int index) { - return buf.getUnsignedShortLE(index); - } - - @Override - public int getMedium(int index) { - return buf.getMedium(index); - } - - @Override - public int getMediumLE(int index) { - return buf.getMediumLE(index); - } - - @Override - public int getUnsignedMedium(int index) { - return buf.getUnsignedMedium(index); - } - - @Override - public int getUnsignedMediumLE(int index) { - return buf.getUnsignedMediumLE(index); - } - - @Override - public int getInt(int index) { - return buf.getInt(index); - } - - @Override - public int getIntLE(int index) { - return buf.getIntLE(index); - } - - @Override - public long getUnsignedInt(int index) { - return buf.getUnsignedInt(index); - } - - @Override - public long getUnsignedIntLE(int index) { - return buf.getUnsignedIntLE(index); - } - - @Override - public long getLong(int index) { - return buf.getLong(index); - } - - @Override - public long getLongLE(int index) { - return buf.getLongLE(index); - } - - @Override - public char getChar(int index) { - return buf.getChar(index); - } - - @Override - public float getFloat(int index) { - return buf.getFloat(index); - } - - @Override - public double getDouble(int index) { - return buf.getDouble(index); - } - - @Override - public ByteBuf getBytes(int index, ByteBuf dst) { - buf.getBytes(index, dst); - return this; - } - - @Override - public ByteBuf getBytes(int index, ByteBuf dst, int length) { - buf.getBytes(index, dst, length); - return this; - } - - @Override - public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { - buf.getBytes(index, dst, dstIndex, length); - return this; - } - - @Override - public ByteBuf getBytes(int index, byte[] dst) { - buf.getBytes(index, dst); - return this; - } - - @Override - public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { - buf.getBytes(index, dst, dstIndex, length); - return this; - } - - @Override - public ByteBuf getBytes(int index, ByteBuffer dst) { - buf.getBytes(index, dst); - return this; - } - - @Override - public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { - buf.getBytes(index, out, length); - return this; - } - - @Override - public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { - return buf.getBytes(index, out, length); - } - - @Override - public int getBytes(int index, FileChannel out, long position, int length) throws IOException { - return buf.getBytes(index, out, position, length); - } - - @Override - public CharSequence getCharSequence(int index, int length, Charset charset) { - return buf.getCharSequence(index, 
length, charset); - } - - @Override - public ByteBuf setBoolean(int index, boolean value) { - buf.setBoolean(index, value); - return this; - } - - @Override - public ByteBuf setByte(int index, int value) { - buf.setByte(index, value); - return this; - } - - @Override - public ByteBuf setShort(int index, int value) { - buf.setShort(index, value); - return this; - } - - @Override - public ByteBuf setShortLE(int index, int value) { - buf.setShortLE(index, value); - return this; - } - - @Override - public ByteBuf setMedium(int index, int value) { - buf.setMedium(index, value); - return this; - } - - @Override - public ByteBuf setMediumLE(int index, int value) { - buf.setMediumLE(index, value); - return this; - } - - @Override - public ByteBuf setInt(int index, int value) { - buf.setInt(index, value); - return this; - } - - @Override - public ByteBuf setIntLE(int index, int value) { - buf.setIntLE(index, value); - return this; - } - - @Override - public ByteBuf setLong(int index, long value) { - buf.setLong(index, value); - return this; - } - - @Override - public ByteBuf setLongLE(int index, long value) { - buf.setLongLE(index, value); - return this; - } - - @Override - public ByteBuf setChar(int index, int value) { - buf.setChar(index, value); - return this; - } - - @Override - public ByteBuf setFloat(int index, float value) { - buf.setFloat(index, value); - return this; - } - - @Override - public ByteBuf setDouble(int index, double value) { - buf.setDouble(index, value); - return this; - } - - @Override - public ByteBuf setBytes(int index, ByteBuf src) { - buf.setBytes(index, src); - return this; - } - - @Override - public ByteBuf setBytes(int index, ByteBuf src, int length) { - buf.setBytes(index, src, length); - return this; - } - - @Override - public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) { - buf.setBytes(index, src, srcIndex, length); - return this; - } - - @Override - public ByteBuf setBytes(int index, byte[] src) { - buf.setBytes(index, src); - return this; - } - - @Override - public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) { - buf.setBytes(index, src, srcIndex, length); - return this; - } - - @Override - public ByteBuf setBytes(int index, ByteBuffer src) { - buf.setBytes(index, src); - return this; - } - - @Override - public int setBytes(int index, InputStream in, int length) throws IOException { - return buf.setBytes(index, in, length); - } - - @Override - public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { - return buf.setBytes(index, in, length); - } - - @Override - public int setBytes(int index, FileChannel in, long position, int length) throws IOException { - return buf.setBytes(index, in, position, length); - } - - @Override - public ByteBuf setZero(int index, int length) { - buf.setZero(index, length); - return this; - } - - @Override - public int setCharSequence(int index, CharSequence sequence, Charset charset) { - return buf.setCharSequence(index, sequence, charset); - } - - @Override - public boolean readBoolean() { - return buf.readBoolean(); - } - - @Override - public byte readByte() { - return buf.readByte(); - } - - @Override - public short readUnsignedByte() { - return buf.readUnsignedByte(); - } - - @Override - public short readShort() { - return buf.readShort(); - } - - @Override - public short readShortLE() { - return buf.readShortLE(); - } - - @Override - public int readUnsignedShort() { - return buf.readUnsignedShort(); - } - - @Override - public int readUnsignedShortLE() { - 
return buf.readUnsignedShortLE(); - } - - @Override - public int readMedium() { - return buf.readMedium(); - } - - @Override - public int readMediumLE() { - return buf.readMediumLE(); - } - - @Override - public int readUnsignedMedium() { - return buf.readUnsignedMedium(); - } - - @Override - public int readUnsignedMediumLE() { - return buf.readUnsignedMediumLE(); - } - - @Override - public int readInt() { - return buf.readInt(); - } - - @Override - public int readIntLE() { - return buf.readIntLE(); - } - - @Override - public long readUnsignedInt() { - return buf.readUnsignedInt(); - } - - @Override - public long readUnsignedIntLE() { - return buf.readUnsignedIntLE(); - } - - @Override - public long readLong() { - return buf.readLong(); - } - - @Override - public long readLongLE() { - return buf.readLongLE(); - } - - @Override - public char readChar() { - return buf.readChar(); - } - - @Override - public float readFloat() { - return buf.readFloat(); - } - - @Override - public double readDouble() { - return buf.readDouble(); - } - - @Override - public ByteBuf readBytes(int length) { - return buf.readBytes(length); - } - - @Override - public ByteBuf readSlice(int length) { - return new WrappedByteBuf(root, buf.readSlice(length), releaseHook); - } - - @Override - public ByteBuf readRetainedSlice(int length) { - return new WrappedByteBuf(root, buf.readRetainedSlice(length), releaseHook); - } - - @Override - public ByteBuf readBytes(ByteBuf dst) { - buf.readBytes(dst); - return this; - } - - @Override - public ByteBuf readBytes(ByteBuf dst, int length) { - buf.readBytes(dst, length); - return this; - } - - @Override - public ByteBuf readBytes(ByteBuf dst, int dstIndex, int length) { - buf.readBytes(dst, dstIndex, length); - return this; - } - - @Override - public ByteBuf readBytes(byte[] dst) { - buf.readBytes(dst); - return this; - } - - @Override - public ByteBuf readBytes(byte[] dst, int dstIndex, int length) { - buf.readBytes(dst, dstIndex, length); - return this; - } - - @Override - public ByteBuf readBytes(ByteBuffer dst) { - buf.readBytes(dst); - return this; - } - - @Override - public ByteBuf readBytes(OutputStream out, int length) throws IOException { - buf.readBytes(out, length); - return this; - } - - @Override - public int readBytes(GatheringByteChannel out, int length) throws IOException { - return buf.readBytes(out, length); - } - - @Override - public int readBytes(FileChannel out, long position, int length) throws IOException { - return buf.readBytes(out, position, length); - } - - @Override - public CharSequence readCharSequence(int length, Charset charset) { - return buf.readCharSequence(length, charset); - } - - @Override - public ByteBuf skipBytes(int length) { - buf.skipBytes(length); - return this; - } - - @Override - public ByteBuf writeBoolean(boolean value) { - buf.writeBoolean(value); - return this; - } - - @Override - public ByteBuf writeByte(int value) { - buf.writeByte(value); - return this; - } - - @Override - public ByteBuf writeShort(int value) { - buf.writeShort(value); - return this; - } - - @Override - public ByteBuf writeShortLE(int value) { - buf.writeShortLE(value); - return this; - } - - @Override - public ByteBuf writeMedium(int value) { - buf.writeMedium(value); - return this; - } - - @Override - public ByteBuf writeMediumLE(int value) { - buf.writeMediumLE(value); - return this; - } - - @Override - public ByteBuf writeInt(int value) { - buf.writeInt(value); - return this; - } - - @Override - public ByteBuf writeIntLE(int value) { - buf.writeIntLE(value); 
- return this; - } - - @Override - public ByteBuf writeLong(long value) { - buf.writeLong(value); - return this; - } - - @Override - public ByteBuf writeLongLE(long value) { - buf.writeLongLE(value); - return this; - } - - @Override - public ByteBuf writeChar(int value) { - buf.writeChar(value); - return this; - } - - @Override - public ByteBuf writeFloat(float value) { - buf.writeFloat(value); - return this; - } - - @Override - public ByteBuf writeDouble(double value) { - buf.writeDouble(value); - return this; - } - - @Override - public ByteBuf writeBytes(ByteBuf src) { - buf.writeBytes(src); - return this; - } - - @Override - public ByteBuf writeBytes(ByteBuf src, int length) { - buf.writeBytes(src, length); - return this; - } - - @Override - public ByteBuf writeBytes(ByteBuf src, int srcIndex, int length) { - buf.writeBytes(src, srcIndex, length); - return this; - } - - @Override - public ByteBuf writeBytes(byte[] src) { - buf.writeBytes(src); - return this; - } - - @Override - public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { - buf.writeBytes(src, srcIndex, length); - return this; - } - - @Override - public ByteBuf writeBytes(ByteBuffer src) { - buf.writeBytes(src); - return this; - } - - @Override - public int writeBytes(InputStream in, int length) throws IOException { - return buf.writeBytes(in, length); - } - - @Override - public int writeBytes(ScatteringByteChannel in, int length) throws IOException { - return buf.writeBytes(in, length); - } - - @Override - public int writeBytes(FileChannel in, long position, int length) throws IOException { - return buf.writeBytes(in, position, length); - } - - @Override - public ByteBuf writeZero(int length) { - buf.writeZero(length); - return this; - } - - @Override - public int writeCharSequence(CharSequence sequence, Charset charset) { - return buf.writeCharSequence(sequence, charset); - } - - @Override - public int indexOf(int fromIndex, int toIndex, byte value) { - return buf.indexOf(fromIndex, toIndex, value); - } - - @Override - public int bytesBefore(byte value) { - return buf.bytesBefore(value); - } - - @Override - public int bytesBefore(int length, byte value) { - return buf.bytesBefore(length, value); - } - - @Override - public int bytesBefore(int index, int length, byte value) { - return buf.bytesBefore(index, length, value); - } - - @Override - public int forEachByte(ByteProcessor processor) { - return buf.forEachByte(processor); - } - - @Override - public int forEachByte(int index, int length, ByteProcessor processor) { - return buf.forEachByte(index, length, processor); - } - - @Override - public int forEachByteDesc(ByteProcessor processor) { - return buf.forEachByteDesc(processor); - } - - @Override - public int forEachByteDesc(int index, int length, ByteProcessor processor) { - return buf.forEachByteDesc(index, length, processor); - } - - @Override - public ByteBuf copy() { - return buf.copy(); - } - - @Override - public ByteBuf copy(int index, int length) { - return buf.copy(index, length); - } - - @Override - public ByteBuf slice() { - return new WrappedByteBuf(root, buf.slice(), releaseHook); - } - - @Override - public ByteBuf retainedSlice() { - return new WrappedByteBuf(root, buf.retainedSlice(), releaseHook); - } - - @Override - public ByteBuf slice(int index, int length) { - return new WrappedByteBuf(root, buf.slice(index, length), releaseHook); - } - - @Override - public ByteBuf retainedSlice(int index, int length) { - return new WrappedByteBuf(root, buf.retainedSlice(index, length), releaseHook); - } - - 
@Override - public ByteBuf duplicate() { - return new WrappedByteBuf(root, buf.duplicate(), releaseHook); - } - - @Override - public ByteBuf retainedDuplicate() { - return new WrappedByteBuf(root, buf.retainedDuplicate(), releaseHook); - } - - @Override - public int nioBufferCount() { - return buf.nioBufferCount(); - } - - @Override - public ByteBuffer nioBuffer() { - return buf.nioBuffer(); - } - - @Override - public ByteBuffer nioBuffer(int index, int length) { - return buf.nioBuffer(index, length); - } - - @Override - public ByteBuffer[] nioBuffers() { - return buf.nioBuffers(); - } - - @Override - public ByteBuffer[] nioBuffers(int index, int length) { - return buf.nioBuffers(index, length); - } - - @Override - public ByteBuffer internalNioBuffer(int index, int length) { - return buf.internalNioBuffer(index, length); - } - - @Override - public boolean hasArray() { - return buf.hasArray(); - } - - @Override - public byte[] array() { - return buf.array(); - } - - @Override - public int arrayOffset() { - return buf.arrayOffset(); - } - - @Override - public String toString(Charset charset) { - return buf.toString(charset); - } - - @Override - public String toString(int index, int length, Charset charset) { - return buf.toString(index, length, charset); - } - - @Override - public int hashCode() { - return buf.hashCode(); - } - - @Override - @SuppressWarnings("EqualsWhichDoesntCheckParameterClass") - public boolean equals(Object obj) { - return buf.equals(obj); - } - - @Override - public int compareTo(ByteBuf buffer) { - return buf.compareTo(buffer); - } - - @Override - public String toString() { - return StringUtil.simpleClassName(this) + '(' + buf.toString() + ')'; - } - - @Override - public ByteBuf retain(int increment) { - buf.retain(increment); - return this; - } - - @Override - public ByteBuf retain() { - buf.retain(); - return this; - } - - @Override - public ByteBuf touch() { - buf.touch(); - return this; - } - - @Override - public ByteBuf touch(Object hint) { - buf.touch(hint); - return this; - } - - @Override - public final boolean isReadable(int size) { - return buf.isReadable(size); - } - - @Override - public final boolean isWritable(int size) { - return buf.isWritable(size); - } - - @Override - public final int refCnt() { - return buf.refCnt(); - } - - @Override - public boolean release() { - boolean rst = buf.release(); - if (rst && root != null && root.refCnt() == 0) { - releaseHook.run(); - } - return rst; - } - - @Override - public boolean release(int decrement) { - boolean rst = buf.release(decrement); - if (rst && root != null && root.refCnt() == 0) { - releaseHook.run(); - } - return rst; - } -}
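The release() overrides above are the one place WrappedByteBuf does more than delegate: the caller-supplied hook fires only when the root buffer's reference count finally reaches zero. A minimal, self-contained sketch of that pattern (class and variable names here are illustrative, not part of the removed file):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReleaseHookSketch {
    public static void main(String[] args) {
        ByteBuf root = Unpooled.buffer(16);
        Runnable releaseHook = () -> System.out.println("root fully released");

        ByteBuf slice = root.retainedSlice(); // root refCnt: 1 -> 2
        slice.release();                      // root refCnt: 2 -> 1, hook must not fire yet

        // Mirrors WrappedByteBuf#release(): fire the hook only once the root
        // buffer is truly gone.
        boolean rst = root.release();
        if (rst && root.refCnt() == 0) {
            releaseHook.run();
        }
    }
}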
diff --git a/s3stream/src/main/java/com/automq/stream/api/AppendResult.java b/s3stream/src/main/java/com/automq/stream/api/AppendResult.java deleted file mode 100644 index 9034643b3..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/AppendResult.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -/** - * The result of appending a RecordBatch to a stream. - */ -public interface AppendResult { - - /** - * Get record batch base offset. - * - * @return record batch base offset. - */ - long baseOffset(); - -} \ No newline at end of file diff --git a/s3stream/src/main/java/com/automq/stream/api/Client.java b/s3stream/src/main/java/com/automq/stream/api/Client.java deleted file mode 100644 index 1a4f6df67..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/Client.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.s3.failover.FailoverRequest; -import com.automq.stream.s3.failover.FailoverResponse; -import java.util.concurrent.CompletableFuture; - -/** - * Elastic Stream client. - */ -public interface Client { - void start(); - - void shutdown(); - - /** - * Get stream client. - * - * @return {@link StreamClient} - */ - StreamClient streamClient(); - - /** - * Get KV client. - * - * @return {@link KVClient} - */ - KVClient kvClient(); - - /** - * Failover another node's volume. - */ - CompletableFuture<FailoverResponse> failover(FailoverRequest request); -}
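For orientation, a minimal sketch of how the Client interface above is typically driven; constructing a concrete Client is outside this patch, so it is passed in as a parameter:

import com.automq.stream.api.Client;
import com.automq.stream.api.CreateStreamOptions;
import com.automq.stream.api.Stream;
import java.util.concurrent.CompletableFuture;

public class ClientSketch {
    // The concrete Client implementation is not part of this diff, so it is
    // taken as an argument rather than constructed here.
    public static CompletableFuture<Stream> startAndCreateStream(Client client) {
        client.start();
        CreateStreamOptions options = CreateStreamOptions.builder()
            .replicaCount(1) // must be > 0, enforced by the builder
            .epoch(1L)
            .build();
        return client.streamClient().createAndOpenStream(options);
    }
}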
diff --git a/s3stream/src/main/java/com/automq/stream/api/CreateStreamOptions.java b/s3stream/src/main/java/com/automq/stream/api/CreateStreamOptions.java deleted file mode 100644 index 1c62c0033..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/CreateStreamOptions.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.utils.Arguments; - -public class CreateStreamOptions { - private int replicaCount; - private long epoch; - - private CreateStreamOptions() { - } - - public static Builder builder() { - return new Builder(); - } - - public int replicaCount() { - return replicaCount; - } - - public long epoch() { - return epoch; - } - - public static class Builder { - private final CreateStreamOptions options = new CreateStreamOptions(); - - public Builder replicaCount(int replicaCount) { - Arguments.check(replicaCount > 0, "replica count should be larger than 0"); - options.replicaCount = replicaCount; - return this; - } - - public Builder epoch(long epoch) { - options.epoch = epoch; - return this; - } - - public CreateStreamOptions build() { - return options; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/api/FetchResult.java b/s3stream/src/main/java/com/automq/stream/api/FetchResult.java deleted file mode 100644 index d63746412..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/FetchResult.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.s3.cache.CacheAccessType; -import java.util.List; - -public interface FetchResult { - - /** - * Get fetched RecordBatch list. - * - * @return {@link RecordBatchWithContext} list. - */ - List<RecordBatchWithContext> recordBatchList(); - - default CacheAccessType getCacheAccessType() { - return CacheAccessType.DELTA_WAL_CACHE_HIT; - } - - /** - * Free the fetch result's backing memory. - */ - default void free() { - } -} diff --git a/s3stream/src/main/java/com/automq/stream/api/KVClient.java b/s3stream/src/main/java/com/automq/stream/api/KVClient.java deleted file mode 100644 index 7a0bd7ca1..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/KVClient.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.api.KeyValue.Key; -import com.automq.stream.api.KeyValue.Value; -import java.util.concurrent.CompletableFuture; - -/** - * Lightweight KV client that supports simple KV operations. - */ -public interface KVClient { - /** - * Put the key-value pair if the key does not exist; return the current value after putting. - * - * @param keyValue {@link KeyValue} k-v pair - * @return async put result. {@link Value} current value after putting. - */ - CompletableFuture<Value> putKVIfAbsent(KeyValue keyValue); - - /** - * Put the key-value pair, overwriting it if the key exists; return the current value after putting. - * - * @param keyValue {@link KeyValue} k-v pair - * @return async put result. {@link Value} current value after putting. - */ - CompletableFuture<Value> putKV(KeyValue keyValue); - - /** - * Get value by key. - * - * @param key key. - * @return async get result. {@link Value} value, null if the key does not exist. - */ - CompletableFuture<Value> getKV(Key key); - - /** - * Delete the key-value pair by key. - * - * @param key key. - * @return async delete result. {@link Value} deleted value, null if the key does not exist. - */ - CompletableFuture<Value> delKV(Key key); -}
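A short sketch of the KVClient contract above (the key name is made up for illustration): put-if-absent, then read the value back; every call is asynchronous and composes via CompletableFuture:

import com.automq.stream.api.KVClient;
import com.automq.stream.api.KeyValue;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

public class KvSketch {
    public static CompletableFuture<KeyValue.Value> putThenGet(KVClient kv) {
        ByteBuffer payload = ByteBuffer.wrap("v1".getBytes(StandardCharsets.UTF_8));
        // putKVIfAbsent resolves to the value already stored under the key,
        // if any; otherwise to the value just written.
        return kv.putKVIfAbsent(KeyValue.of("node/route", payload))
            .thenCompose(current -> kv.getKV(KeyValue.Key.of("node/route")));
    }
}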
diff --git a/s3stream/src/main/java/com/automq/stream/api/KeyValue.java b/s3stream/src/main/java/com/automq/stream/api/KeyValue.java deleted file mode 100644 index 2d8dee94d..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/KeyValue.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import java.nio.ByteBuffer; -import java.util.Objects; - -public class KeyValue { - private final Key key; - private final Value value; - - private KeyValue(Key key, Value value) { - this.key = key; - this.value = value; - } - - public static KeyValue of(String key, ByteBuffer value) { - return new KeyValue(Key.of(key), Value.of(value)); - } - - public Key key() { - return key; - } - - public Value value() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - KeyValue keyValue = (KeyValue) o; - return Objects.equals(key, keyValue.key) && Objects.equals(value, keyValue.value); - } - - @Override - public int hashCode() { - return Objects.hash(key, value); - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + key + - ", value=" + value + - '}'; - } - - public static class Key { - private final String key; - - private Key(String key) { - this.key = key; - } - - public static Key of(String key) { - return new Key(key); - } - - public String get() { - return key; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Key key1 = (Key) o; - return Objects.equals(key, key1.key); - } - - @Override - public int hashCode() { - return Objects.hash(key); - } - - @Override - public String toString() { - return "Key{" + - "key='" + key + '\'' + - '}'; - } - } - - public static class Value { - private final ByteBuffer value; - - private Value(ByteBuffer value) { - this.value = value; - } - - public static Value of(ByteBuffer value) { - return new Value(value); - } - - public static Value of(byte[] value) { - if (value == null) { - return new Value(null); - } - return new Value(ByteBuffer.wrap(value)); - } - - public ByteBuffer get() { - return value; - } - - public boolean isNull() { - return value == null; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (!(o instanceof Value)) - return false; - Value value1 = (Value) o; - return Objects.equals(value, value1.value); - } - - @Override - public int hashCode() { - return Objects.hash(value); - } - - @Override - public String toString() { - return "Value{" + - "value=" + value + - '}'; - } - } -}
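One detail of the KeyValue class above worth noting: Value is null-tolerant, so Value.of((byte[]) null) yields a Value whose isNull() returns true rather than throwing. A tiny illustration:

import com.automq.stream.api.KeyValue;

public class ValueSketch {
    public static void main(String[] args) {
        KeyValue.Value absent = KeyValue.Value.of((byte[]) null);
        KeyValue.Value present = KeyValue.Value.of(new byte[] {1, 2, 3});
        System.out.println(absent.isNull());           // true
        System.out.println(present.isNull());          // false
        System.out.println(present.get().remaining()); // 3
    }
}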
diff --git a/s3stream/src/main/java/com/automq/stream/api/OpenStreamOptions.java b/s3stream/src/main/java/com/automq/stream/api/OpenStreamOptions.java deleted file mode 100644 index 6e9beeffb..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/OpenStreamOptions.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.utils.Arguments; - -public class OpenStreamOptions { - private WriteMode writeMode = WriteMode.SINGLE; - private ReadMode readMode = ReadMode.MULTIPLE; - private long epoch; - - private OpenStreamOptions() { - } - - public static Builder builder() { - return new Builder(); - } - - public WriteMode writeMode() { - return writeMode; - } - - public ReadMode readMode() { - return readMode; - } - - public long epoch() { - return epoch; - } - - public enum WriteMode { - SINGLE(0), MULTIPLE(1); - - final int code; - - WriteMode(int code) { - this.code = code; - } - - public int getCode() { - return code; - } - } - - public enum ReadMode { - SINGLE(0), MULTIPLE(1); - - final int code; - - ReadMode(int code) { - this.code = code; - } - - public int getCode() { - return code; - } - } - - public static class Builder { - private final OpenStreamOptions options = new OpenStreamOptions(); - - public Builder writeMode(WriteMode writeMode) { - Arguments.isNotNull(writeMode, "WriteMode should be set to SINGLE or MULTIPLE"); - options.writeMode = writeMode; - return this; - } - - public Builder readMode(ReadMode readMode) { - Arguments.isNotNull(readMode, "ReadMode should be set to SINGLE or MULTIPLE"); - options.readMode = readMode; - return this; - } - - public Builder epoch(long epoch) { - options.epoch = epoch; - return this; - } - - public OpenStreamOptions build() { - return options; - } - } -}
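The defaults above mean a stream opens with a single writer (WriteMode.SINGLE) and multiple readers (ReadMode.MULTIPLE). A sketch of overriding both; that a larger epoch fences out stale writers is an assumption suggested by ErrorCode.EXPIRED_STREAM_EPOCH later in this patch, not stated here:

import com.automq.stream.api.OpenStreamOptions;

public class OpenOptionsSketch {
    public static OpenStreamOptions exclusiveOptions(long epoch) {
        return OpenStreamOptions.builder()
            .writeMode(OpenStreamOptions.WriteMode.SINGLE)
            .readMode(OpenStreamOptions.ReadMode.SINGLE)
            .epoch(epoch) // assumed fencing token; see EXPIRED_STREAM_EPOCH below
            .build();
    }
}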
diff --git a/s3stream/src/main/java/com/automq/stream/api/ReadOptions.java b/s3stream/src/main/java/com/automq/stream/api/ReadOptions.java deleted file mode 100644 index 04dea7b90..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/ReadOptions.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.api.exceptions.FastReadFailFastException; - -public class ReadOptions { - public static final ReadOptions DEFAULT = new ReadOptions(); - - private boolean fastRead; - private boolean pooledBuf; - - public static Builder builder() { - return new Builder(); - } - - public boolean fastRead() { - return fastRead; - } - - public boolean pooledBuf() { - return pooledBuf; - } - - public static class Builder { - private final ReadOptions options = new ReadOptions(); - - /** - * Read from cache; if the data is not in the cache, fail fast with {@link FastReadFailFastException}. - */ - public Builder fastRead(boolean fastRead) { - options.fastRead = fastRead; - return this; - } - - /** - * Use a pooled buffer for reading. The caller is responsible for releasing the buffer. - */ - public Builder pooledBuf(boolean pooledBuf) { - options.pooledBuf = pooledBuf; - return this; - } - - public ReadOptions build() { - return options; - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/api/RecordBatch.java b/s3stream/src/main/java/com/automq/stream/api/RecordBatch.java deleted file mode 100644 index 0ee6c0733..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/RecordBatch.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import java.nio.ByteBuffer; -import java.util.Map; - -/** - * Record batch. - */ -public interface RecordBatch { - - /** - * Get payload record count. - * - * @return record count. - */ - int count(); - - /** - * Get min timestamp of records. - * - * @return min timestamp of records. - */ - long baseTimestamp(); - - /** - * Get record batch extension properties. - * - * @return batch extension properties. - */ - Map<String, String> properties(); - - /** - * Get raw payload. - * - * @return raw payload. - */ - ByteBuffer rawPayload(); -} diff --git a/s3stream/src/main/java/com/automq/stream/api/RecordBatchWithContext.java b/s3stream/src/main/java/com/automq/stream/api/RecordBatchWithContext.java deleted file mode 100644 index 0075e05a0..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/RecordBatchWithContext.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -public interface RecordBatchWithContext extends RecordBatch { - - /** - * Get record batch base offset. - * - * @return base offset. - */ - long baseOffset(); - - /** - * Get record batch exclusive last offset. - * - * @return exclusive last offset. - */ - long lastOffset(); -}
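ReadOptions, RecordBatch and RecordBatchWithContext above define the read-path data model. A sketch of draining a FetchResult; free() matters when the fetch was issued with pooledBuf(true), since buffer ownership then lies with the caller:

import com.automq.stream.api.FetchResult;
import com.automq.stream.api.RecordBatchWithContext;

public class FetchConsumeSketch {
    // Drain a fetch result, then release its (possibly pooled) memory.
    public static long lastOffsetOf(FetchResult result) {
        try {
            long last = -1L;
            for (RecordBatchWithContext batch : result.recordBatchList()) {
                last = batch.lastOffset(); // exclusive last offset of the batch
            }
            return last;
        } finally {
            result.free();
        }
    }
}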
diff --git a/s3stream/src/main/java/com/automq/stream/api/Stream.java b/s3stream/src/main/java/com/automq/stream/api/Stream.java deleted file mode 100644 index 5999332cb..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/Stream.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import com.automq.stream.api.exceptions.StreamClientException; - -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.context.FetchContext; -import java.util.concurrent.CompletableFuture; - -/** - * Record stream. - */ -public interface Stream { - - /** - * Get stream id. - */ - long streamId(); - - /** - * Get stream epoch. - */ - long streamEpoch(); - - /** - * Get stream start offset. - */ - long startOffset(); - - /** - * Get stream confirm record offset. - */ - long confirmOffset(); - - /** - * Get stream next append record offset. - */ - long nextOffset(); - - /** - * Append recordBatch to stream. - * - * @param recordBatch {@link RecordBatch}. - * @return - complete success with async {@link AppendResult}, when the append succeeds. - * - complete exception with {@link StreamClientException}, when the append fails. TODO: specify the exception. - */ - CompletableFuture<AppendResult> append(AppendContext context, RecordBatch recordBatch); - - default CompletableFuture<AppendResult> append(RecordBatch recordBatch) { - return append(AppendContext.DEFAULT, recordBatch); - } - - /** - * Fetch recordBatch list from stream. Note the startOffset may be in the middle of the first recordBatch. - * It is strongly recommended to handle the completion of the returned CompletableFuture in a separate thread. - * - * @param context fetch context, {@link FetchContext}. - * @param startOffset start offset; if the startOffset is in the middle of a recordBatch, that recordBatch will be returned. - * @param endOffset exclusive end offset; if the endOffset is in the middle of a recordBatch, that recordBatch will be returned. - * @param maxBytesHint max fetch data size hint; the actual returned data size may be larger than maxBytesHint. - * @return - complete success with {@link FetchResult}, when the fetch succeeds. - * - complete exception with {@link StreamClientException}, when startOffset is larger than the stream end offset. - */ - CompletableFuture<FetchResult> fetch(FetchContext context, long startOffset, long endOffset, int maxBytesHint); - - default CompletableFuture<FetchResult> fetch(long startOffset, long endOffset, int maxBytesHint) { - return fetch(FetchContext.DEFAULT, startOffset, endOffset, maxBytesHint); - } - - /** - * Trim stream. - * - * @param newStartOffset new start offset. - * @return - complete success with async {@link Void}, when the trim succeeds. - * - complete exception with {@link StreamClientException}, when the trim fails. - */ - CompletableFuture<Void> trim(long newStartOffset); - - /** - * Close the stream. - */ - CompletableFuture<Void> close(); - - /** - * Destroy stream. - */ - CompletableFuture<Void> destroy(); -}
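A sketch of the Stream lifecycle above: append a batch, fetch the range it covered, then trim. Error handling is elided, and the RecordBatch is assumed to be supplied by the caller:

import com.automq.stream.api.AppendResult;
import com.automq.stream.api.FetchResult;
import com.automq.stream.api.RecordBatch;
import com.automq.stream.api.Stream;
import java.util.concurrent.CompletableFuture;

public class StreamSketch {
    public static CompletableFuture<Void> appendFetchTrim(Stream stream, RecordBatch batch) {
        return stream.append(batch)
            .thenCompose((AppendResult ar) ->
                // one offset per record is assumed here
                stream.fetch(ar.baseOffset(), ar.baseOffset() + batch.count(), 1024 * 1024))
            .thenCompose((FetchResult fr) -> {
                fr.free();
                // drop everything below the confirm offset
                return stream.trim(stream.confirmOffset());
            });
    }
}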
diff --git a/s3stream/src/main/java/com/automq/stream/api/StreamClient.java b/s3stream/src/main/java/com/automq/stream/api/StreamClient.java deleted file mode 100644 index 7211127b4..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/StreamClient.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api; - -import java.util.Optional; -import java.util.concurrent.CompletableFuture; - -/** - * Stream client, supporting stream create and open operations. - */ -public interface StreamClient { - /** - * Create and open stream. - * - * @param options create stream options. - * @return {@link Stream}. - */ - CompletableFuture<Stream> createAndOpenStream(CreateStreamOptions options); - - /** - * Open stream. - * - * @param streamId stream id. - * @param options open stream options. - * @return {@link Stream}. - */ - CompletableFuture<Stream> openStream(long streamId, OpenStreamOptions options); - - /** - * Retrieve an opened stream. - * - * @param streamId stream id. - * @return {@link Optional}. - */ - Optional<Stream> getStream(long streamId); - - void shutdown(); -} diff --git a/s3stream/src/main/java/com/automq/stream/api/exceptions/ErrorCode.java b/s3stream/src/main/java/com/automq/stream/api/exceptions/ErrorCode.java deleted file mode 100644 index 66543a61d..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/exceptions/ErrorCode.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api.exceptions; - -public class ErrorCode { - - public static final short UNEXPECTED = 99; - - public static final short STREAM_ALREADY_CLOSED = 1; - public static final short STREAM_NOT_EXIST = 2; - public static final short EXPIRED_STREAM_EPOCH = 3; - public static final short STREAM_NOT_CLOSED = 4; - - public static final short OFFSET_OUT_OF_RANGE_BOUNDS = 10; - public static final short FAST_READ_FAIL_FAST = 11; - -} diff --git a/s3stream/src/main/java/com/automq/stream/api/exceptions/FastReadFailFastException.java b/s3stream/src/main/java/com/automq/stream/api/exceptions/FastReadFailFastException.java deleted file mode 100644 index 6d8d0147c..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/exceptions/FastReadFailFastException.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api.exceptions; - -/** - * Fail-fast exception thrown when fast read is enabled but the read has to go to S3. - */ -public class FastReadFailFastException extends StreamClientException { - public FastReadFailFastException() { - super(ErrorCode.FAST_READ_FAIL_FAST, "", false); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/api/exceptions/StreamClientException.java b/s3stream/src/main/java/com/automq/stream/api/exceptions/StreamClientException.java deleted file mode 100644 index 443fa7afd..000000000 --- a/s3stream/src/main/java/com/automq/stream/api/exceptions/StreamClientException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.api.exceptions; - -/** - * All stream client exceptions extend StreamClientException and are listed here.
- */ -public class StreamClientException extends RuntimeException { - private final int code; - - public StreamClientException(int code, String str) { - this(code, str, null); - } - - public StreamClientException(int code, String str, Throwable e) { - super("code: " + code + ", " + str, e); - this.code = code; - } - - public StreamClientException(int code, String str, boolean writableStackTrace) { - super("code: " + code + ", " + str, null, false, writableStackTrace); - this.code = code; - } - - public int getCode() { - return this.code; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/ByteBufAlloc.java b/s3stream/src/main/java/com/automq/stream/s3/ByteBufAlloc.java deleted file mode 100644 index dfe72bede..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/ByteBufAlloc.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.WrappedByteBuf; -import io.netty.buffer.AbstractByteBufAllocator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocatorMetric; -import io.netty.buffer.ByteBufAllocatorMetricProvider; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.PooledByteBufAllocator; -import io.netty.buffer.UnpooledByteBufAllocator; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.LongAdder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ByteBufAlloc { - public static final boolean MEMORY_USAGE_DETECT = Boolean.parseBoolean(System.getenv("AUTOMQ_MEMORY_USAGE_DETECT")); - public static final boolean ALLOCATOR_USAGE_UNPOOLED = Boolean.parseBoolean(System.getenv("AUTOMQ_ALLOCATOR_USAGE_UNPOOLED")); - public static final boolean BUFFER_USAGE_HEAPED = Boolean.parseBoolean(System.getenv("AUTOMQ_BUFFER_USAGE_HEAPED")); - - private static final Logger LOGGER = LoggerFactory.getLogger(ByteBufAlloc.class); - private static final AbstractByteBufAllocator ALLOC = ALLOCATOR_USAGE_UNPOOLED ? 
UnpooledByteBufAllocator.DEFAULT : PooledByteBufAllocator.DEFAULT; - private static final Map USAGE_STATS = new ConcurrentHashMap<>(); - private static long lastMetricLogTime = System.currentTimeMillis(); - private static final Map ALLOC_TYPE = new HashMap<>(); - - public static final int DEFAULT = 0; - public static final int ENCODE_RECORD = 1; - public static final int DECODE_RECORD = 2; - public static final int WRITE_INDEX_BLOCK = 3; - public static final int READ_INDEX_BLOCK = 4; - public static final int WRITE_DATA_BLOCK_HEADER = 5; - public static final int WRITE_FOOTER = 6; - public static final int STREAM_OBJECT_COMPACTION_READ = 7; - public static final int STREAM_OBJECT_COMPACTION_WRITE = 8; - public static final int STREAM_SET_OBJECT_COMPACTION_READ = 9; - public static final int STREAM_SET_OBJECT_COMPACTION_WRITE = 10; - public static ByteBufAllocMetric byteBufAllocMetric = null; - - static { - registerAllocType(DEFAULT, "default"); - registerAllocType(ENCODE_RECORD, "write_record"); - registerAllocType(DECODE_RECORD, "read_record"); - registerAllocType(WRITE_INDEX_BLOCK, "write_index_block"); - registerAllocType(READ_INDEX_BLOCK, "read_index_block"); - registerAllocType(WRITE_DATA_BLOCK_HEADER, "write_data_block_header"); - registerAllocType(WRITE_FOOTER, "write_footer"); - registerAllocType(STREAM_OBJECT_COMPACTION_READ, "stream_object_compaction_read"); - registerAllocType(STREAM_OBJECT_COMPACTION_WRITE, "stream_object_compaction_write"); - registerAllocType(STREAM_SET_OBJECT_COMPACTION_READ, "stream_set_object_compaction_read"); - registerAllocType(STREAM_SET_OBJECT_COMPACTION_WRITE, "stream_set_object_compaction_write"); - - } - - public static CompositeByteBuf compositeByteBuffer() { - return ALLOC.compositeDirectBuffer(Integer.MAX_VALUE); - } - - public static ByteBuf byteBuffer(int initCapacity) { - return byteBuffer(initCapacity, DEFAULT); - } - - public static ByteBuf byteBuffer(int initCapacity, int type) { - try { - if (MEMORY_USAGE_DETECT) { - LongAdder usage = USAGE_STATS.compute(type, (k, v) -> { - if (v == null) { - v = new LongAdder(); - } - v.add(initCapacity); - return v; - }); - long now = System.currentTimeMillis(); - if (now - lastMetricLogTime > 60000) { - // it's ok to be not thread safe - lastMetricLogTime = now; - ByteBufAlloc.byteBufAllocMetric = new ByteBufAllocMetric(); - LOGGER.info("Buffer usage: {}", ByteBufAlloc.byteBufAllocMetric); - } - return new WrappedByteBuf(BUFFER_USAGE_HEAPED ? ALLOC.heapBuffer(initCapacity) : ALLOC.directBuffer(initCapacity), () -> usage.add(-initCapacity)); - } else { - return BUFFER_USAGE_HEAPED ? 
ALLOC.heapBuffer(initCapacity) : ALLOC.directBuffer(initCapacity); - } - } catch (OutOfMemoryError e) { - if (MEMORY_USAGE_DETECT) { - ByteBufAlloc.byteBufAllocMetric = new ByteBufAllocMetric(); - LOGGER.error("alloc buffer OOM, {}", ByteBufAlloc.byteBufAllocMetric, e); - } else { - LOGGER.error("alloc buffer OOM", e); - } - System.err.println("alloc buffer OOM"); - Runtime.getRuntime().halt(1); - throw e; - } - } - - public static void registerAllocType(int type, String name) { - if (ALLOC_TYPE.containsKey(type)) { - throw new IllegalArgumentException("type already registered: " + type + "=" + ALLOC_TYPE.get(type)); - } - ALLOC_TYPE.put(type, name); - } - - public static class ByteBufAllocMetric { - private final long usedMemory; - private final long allocatedMemory; - private final Map detail = new HashMap<>(); - - public ByteBufAllocMetric() { - USAGE_STATS.forEach((k, v) -> { - detail.put(k + "/" + ALLOC_TYPE.get(k), v.longValue()); - }); - ByteBufAllocatorMetric metric = ((ByteBufAllocatorMetricProvider) ALLOC).metric(); - this.usedMemory = BUFFER_USAGE_HEAPED ? metric.usedHeapMemory() : metric.usedDirectMemory(); - this.allocatedMemory = this.detail.values().stream().mapToLong(Long::longValue).sum(); - } - - public long getUsedMemory() { - return usedMemory; - } - - public Map getDetailedMap() { - return detail; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("ByteBufAllocMetric{usedMemory="); - sb.append(usedMemory); - sb.append(", allocatedMemory="); - sb.append(allocatedMemory); - sb.append(", detail="); - for (Map.Entry entry : detail.entrySet()) { - sb.append(entry.getKey()).append("=").append(entry.getValue()).append(","); - } - sb.append(", pooled="); - sb.append(!ALLOCATOR_USAGE_UNPOOLED); - sb.append(", direct="); - sb.append(!BUFFER_USAGE_HEAPED); - sb.append("}"); - return sb.toString(); - } - } - - public interface OOMHandler { - /** - * Try handle OOM exception. - * - * @param memoryRequired the memory required - * @return freed memory. - */ - int handle(int memoryRequired); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/Config.java b/s3stream/src/main/java/com/automq/stream/s3/Config.java deleted file mode 100644 index 596d73290..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/Config.java +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -// TODO: rename & init -public class Config { - private int nodeId; - private String endpoint; - private String region; - private String bucket; - private boolean forcePathStyle = false; - private String walPath = "/tmp/s3stream_wal"; - private long walCacheSize = 200 * 1024 * 1024; - private long walCapacity = 1024L * 1024 * 1024; - private int walInitBufferSize = 1024 * 1024; - private int walMaxBufferSize = 16 * 1024 * 1024; - private int walThread = 8; - private long walWindowInitial = 1048576L; - private long walWindowIncrement = 4194304L; - private long walWindowMax = 536870912L; - private long walBlockSoftLimit = 256 * 1024; - private int walWriteRateLimit = 3000; - private long walUploadThreshold = 100 * 1024 * 1024; - private int streamSplitSize = 16777216; - private int objectBlockSize = 1048576; - private int objectPartSize = 16777216; - private long blockCacheSize = 100 * 1024 * 1024; - private int streamObjectCompactionIntervalMinutes = 60; - private long streamObjectCompactionMaxSizeBytes = 10737418240L; - private int controllerRequestRetryMaxCount = Integer.MAX_VALUE; - private long controllerRequestRetryBaseDelayMs = 500; - private long nodeEpoch = 0L; - private int streamSetObjectCompactionInterval = 20; - private long streamSetObjectCompactionCacheSize = 200 * 1024 * 1024; - private int streamSetObjectCompactionUploadConcurrency = 8; - private long streamSetObjectCompactionStreamSplitSize = 16 * 1024 * 1024; - private int streamSetObjectCompactionForceSplitPeriod = 120; - private int streamSetObjectCompactionMaxObjectNum = 500; - private int maxStreamNumPerStreamSetObject = 100000; - private int maxStreamObjectNumPerCommit = 10000; - private boolean mockEnable = false; - private boolean objectLogEnable = false; - // 100MB/s - private long networkBaselineBandwidth = 100 * 1024 * 1024; - private int refillPeriodMs = 1000; - private long objectRetentionTimeInSecond = 10 * 60; // 10min - private boolean failoverEnable = false; - - public int nodeId() { - return nodeId; - } - - public String endpoint() { - return endpoint; - } - - public String region() { - return region; - } - - public String bucket() { - return bucket; - } - - public boolean forcePathStyle() { - return forcePathStyle; - } - - public String walPath() { - return walPath; - } - - public long walCacheSize() { - return walCacheSize; - } - - public long walCapacity() { - return walCapacity; - } - - public int walInitBufferSize() { - return walInitBufferSize; - } - - public int walMaxBufferSize() { - return walMaxBufferSize; - } - - public int walThread() { - return walThread; - } - - public long walWindowInitial() { - return walWindowInitial; - } - - public long walWindowIncrement() { - return walWindowIncrement; - } - - public long walWindowMax() { - return walWindowMax; - } - - public long walBlockSoftLimit() { - return walBlockSoftLimit; - } - - public int walWriteRateLimit() { - return walWriteRateLimit; - } - - public long walUploadThreshold() { - return walUploadThreshold; - } - - public int streamSplitSize() { - return streamSplitSize; - } - - public int objectBlockSize() { - return objectBlockSize; - } - - public int objectPartSize() { - return objectPartSize; - } - - public long 
blockCacheSize() { - return blockCacheSize; - } - - public int streamObjectCompactionIntervalMinutes() { - return streamObjectCompactionIntervalMinutes; - } - - public long streamObjectCompactionMaxSizeBytes() { - return streamObjectCompactionMaxSizeBytes; - } - - public int controllerRequestRetryMaxCount() { - return controllerRequestRetryMaxCount; - } - - public long controllerRequestRetryBaseDelayMs() { - return controllerRequestRetryBaseDelayMs; - } - - public long nodeEpoch() { - return nodeEpoch; - } - - public int streamSetObjectCompactionInterval() { - return streamSetObjectCompactionInterval; - } - - public long streamSetObjectCompactionCacheSize() { - return streamSetObjectCompactionCacheSize; - } - - public int streamSetObjectCompactionUploadConcurrency() { - return streamSetObjectCompactionUploadConcurrency; - } - - public long streamSetObjectCompactionStreamSplitSize() { - return streamSetObjectCompactionStreamSplitSize; - } - - public int streamSetObjectCompactionForceSplitPeriod() { - return streamSetObjectCompactionForceSplitPeriod; - } - - public int streamSetObjectCompactionMaxObjectNum() { - return streamSetObjectCompactionMaxObjectNum; - } - - public int maxStreamNumPerStreamSetObject() { - return maxStreamNumPerStreamSetObject; - } - - public int maxStreamObjectNumPerCommit() { - return maxStreamObjectNumPerCommit; - } - - public boolean mockEnable() { - return mockEnable; - } - - public boolean objectLogEnable() { - return objectLogEnable; - } - - public long networkBaselineBandwidth() { - return networkBaselineBandwidth; - } - - public int refillPeriodMs() { - return refillPeriodMs; - } - - public Config nodeId(int brokerId) { - this.nodeId = brokerId; - return this; - } - - public Config endpoint(String s3Endpoint) { - this.endpoint = s3Endpoint; - return this; - } - - public Config region(String s3Region) { - this.region = s3Region; - return this; - } - - public Config bucket(String s3Bucket) { - this.bucket = s3Bucket; - return this; - } - - public Config forcePathStyle(boolean s3ForcePathStyle) { - this.forcePathStyle = s3ForcePathStyle; - return this; - } - - public Config walPath(String s3WALPath) { - this.walPath = s3WALPath; - return this; - } - - public Config walCacheSize(long s3WALCacheSize) { - this.walCacheSize = s3WALCacheSize; - return this; - } - - public Config walCapacity(long s3WALCapacity) { - this.walCapacity = s3WALCapacity; - return this; - } - - public Config walInitBufferSize(int walInitBufferSize) { - this.walInitBufferSize = walInitBufferSize; - return this; - } - - public Config walMaxBufferSize(int walMaxBufferSize) { - this.walMaxBufferSize = walMaxBufferSize; - return this; - } - - public Config walThread(int s3WALThread) { - this.walThread = s3WALThread; - return this; - } - - public Config walWindowInitial(long s3WALWindowInitial) { - this.walWindowInitial = s3WALWindowInitial; - return this; - } - - public Config walWindowIncrement(long s3WALWindowIncrement) { - this.walWindowIncrement = s3WALWindowIncrement; - return this; - } - - public Config walWindowMax(long s3WALWindowMax) { - this.walWindowMax = s3WALWindowMax; - return this; - } - - public Config walBlockSoftLimit(long s3WALBlockSoftLimit) { - this.walBlockSoftLimit = s3WALBlockSoftLimit; - return this; - } - - public Config walWriteRateLimit(int s3WALWriteRateLimit) { - this.walWriteRateLimit = s3WALWriteRateLimit; - return this; - } - - public Config walUploadThreshold(long s3WALObjectSize) { - this.walUploadThreshold = s3WALObjectSize; - return this; - } - - public Config 
streamSplitSize(int s3StreamSplitSize) { - this.streamSplitSize = s3StreamSplitSize; - return this; - } - - public Config objectBlockSize(int s3ObjectBlockSize) { - this.objectBlockSize = s3ObjectBlockSize; - return this; - } - - public Config objectPartSize(int s3ObjectPartSize) { - this.objectPartSize = s3ObjectPartSize; - return this; - } - - public Config blockCacheSize(long s3CacheSize) { - this.blockCacheSize = s3CacheSize; - return this; - } - - public Config streamObjectCompactionIntervalMinutes(int s3StreamObjectCompactionIntervalMinutes) { - this.streamObjectCompactionIntervalMinutes = s3StreamObjectCompactionIntervalMinutes; - return this; - } - - public Config streamObjectCompactionMaxSizeBytes(long s3StreamObjectCompactionMaxSizeBytes) { - this.streamObjectCompactionMaxSizeBytes = s3StreamObjectCompactionMaxSizeBytes; - return this; - } - - public Config controllerRequestRetryMaxCount(int s3ControllerRequestRetryMaxCount) { - this.controllerRequestRetryMaxCount = s3ControllerRequestRetryMaxCount; - return this; - } - - public Config controllerRequestRetryBaseDelayMs(long s3ControllerRequestRetryBaseDelayMs) { - this.controllerRequestRetryBaseDelayMs = s3ControllerRequestRetryBaseDelayMs; - return this; - } - - public Config nodeEpoch(long brokerEpoch) { - this.nodeEpoch = brokerEpoch; - return this; - } - - public Config streamSetObjectCompactionInterval(int streamSetObjectCompactionInterval) { - this.streamSetObjectCompactionInterval = streamSetObjectCompactionInterval; - return this; - } - - public Config streamSetObjectCompactionCacheSize(long streamSetObjectCompactionCacheSize) { - this.streamSetObjectCompactionCacheSize = streamSetObjectCompactionCacheSize; - return this; - } - - public Config streamSetObjectCompactionUploadConcurrency(int streamSetObjectCompactionUploadConcurrency) { - this.streamSetObjectCompactionUploadConcurrency = streamSetObjectCompactionUploadConcurrency; - return this; - } - - public Config streamSetObjectCompactionStreamSplitSize(long streamSetObjectCompactionStreamSplitSize) { - this.streamSetObjectCompactionStreamSplitSize = streamSetObjectCompactionStreamSplitSize; - return this; - } - - public Config streamSetObjectCompactionForceSplitPeriod(int streamSetObjectCompactionForceSplitPeriod) { - this.streamSetObjectCompactionForceSplitPeriod = streamSetObjectCompactionForceSplitPeriod; - return this; - } - - public Config streamSetObjectCompactionMaxObjectNum(int streamSetObjectCompactionMaxObjectNum) { - this.streamSetObjectCompactionMaxObjectNum = streamSetObjectCompactionMaxObjectNum; - return this; - } - - public Config maxStreamNumPerStreamSetObject(int maxStreamNumPerStreamSetObject) { - this.maxStreamNumPerStreamSetObject = maxStreamNumPerStreamSetObject; - return this; - } - - public Config maxStreamObjectNumPerCommit(int maxStreamObjectNumPerCommit) { - this.maxStreamObjectNumPerCommit = maxStreamObjectNumPerCommit; - return this; - } - - public Config mockEnable(boolean s3MockEnable) { - this.mockEnable = s3MockEnable; - return this; - } - - public Config objectLogEnable(boolean s3ObjectLogEnable) { - this.objectLogEnable = s3ObjectLogEnable; - return this; - } - - public Config networkBaselineBandwidth(long networkBaselineBandwidth) { - this.networkBaselineBandwidth = networkBaselineBandwidth; - return this; - } - - public Config refillPeriodMs(int refillPeriodMs) { - this.refillPeriodMs = refillPeriodMs; - return this; - } - - public Config objectRetentionTimeInSecond(long seconds) { - objectRetentionTimeInSecond = seconds; - return 
this; - } - - public long objectRetentionTimeInSecond() { - return objectRetentionTimeInSecond; - } - - public Config failoverEnable(boolean failoverEnable) { - this.failoverEnable = failoverEnable; - return this; - } - - public boolean failoverEnable() { - return failoverEnable; - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/Constants.java b/s3stream/src/main/java/com/automq/stream/s3/Constants.java deleted file mode 100644 index d36122d26..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/Constants.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.s3; - -public class Constants { - public static final int CAPACITY_NOT_SET = -1; - public static final int NOOP_NODE_ID = -1; - public static final long NOOP_EPOCH = -1L; -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/DataBlockIndex.java b/s3stream/src/main/java/com/automq/stream/s3/DataBlockIndex.java deleted file mode 100644 index eea88fd23..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/DataBlockIndex.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import io.netty.buffer.ByteBuf; -import java.util.Objects; - -public final class DataBlockIndex { - - public static final int BLOCK_INDEX_SIZE = 8/* streamId */ + 8 /* startOffset */ + 4 /* endOffset delta */ - + 4 /* record count */ + 8 /* block position */ + 4 /* block size */; - private final long streamId; - private final long startOffset; - private final int endOffsetDelta; - private final int recordCount; - private final long startPosition; - private final int size; - - public DataBlockIndex(long streamId, long startOffset, int endOffsetDelta, int recordCount, long startPosition, - int size) { - this.streamId = streamId; - this.startOffset = startOffset; - this.endOffsetDelta = endOffsetDelta; - this.recordCount = recordCount; - this.startPosition = startPosition; - this.size = size; - } - - public long endOffset() { - return startOffset + endOffsetDelta; - } - - public long endPosition() { - return startPosition + size; - } - - public void encode(ByteBuf buf) { - buf.writeLong(streamId); - buf.writeLong(startOffset); - buf.writeInt(endOffsetDelta); - buf.writeInt(recordCount); - buf.writeLong(startPosition); - buf.writeInt(size); - } - - public long streamId() { - return streamId; - } - - public long startOffset() { - return startOffset; - } - - public int endOffsetDelta() { - return endOffsetDelta; - } - - public int recordCount() { - return recordCount; - } - - public long startPosition() { - return startPosition; - } - - public int size() { - return size; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (DataBlockIndex) obj; - return this.streamId == that.streamId && - this.startOffset == that.startOffset && - 
this.endOffsetDelta == that.endOffsetDelta && - this.recordCount == that.recordCount && - this.startPosition == that.startPosition && - this.size == that.size; - } - - @Override - public int hashCode() { - return Objects.hash(streamId, startOffset, endOffsetDelta, recordCount, startPosition, size); - } - - @Override - public String toString() { - return "DataBlockIndex[" + - "streamId=" + streamId + ", " + - "startOffset=" + startOffset + ", " + - "endOffsetDelta=" + endOffsetDelta + ", " + - "recordCount=" + recordCount + ", " + - "startPosition=" + startPosition + ", " + - "size=" + size + ']'; - } - -}
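For reference, the entry layout encoded above is fixed-width: BLOCK_INDEX_SIZE = 8 + 8 + 4 + 4 + 8 + 4 = 36 bytes. A round-trip sketch; the decode side is reconstructed here from the encode order and is not part of this patch:

import com.automq.stream.s3.DataBlockIndex;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class DataBlockIndexSketch {
    public static void main(String[] args) {
        DataBlockIndex index = new DataBlockIndex(42L, 1000L, 10, 10, 0L, 4096);
        ByteBuf buf = Unpooled.buffer(DataBlockIndex.BLOCK_INDEX_SIZE);
        index.encode(buf);
        System.out.println(buf.readableBytes()); // 36, i.e. BLOCK_INDEX_SIZE
        // Read the fields back in the exact order encode() wrote them.
        DataBlockIndex decoded = new DataBlockIndex(
            buf.readLong(), buf.readLong(), buf.readInt(),
            buf.readInt(), buf.readLong(), buf.readInt());
        System.out.println(decoded.equals(index)); // true
    }
}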
this.objectManager = objectManager; - this.s3Operator = s3Operator; - this.forceSplit = forceSplit; - this.executor = executor; - this.rate = rate; - this.limiter = new AsyncRateLimiter(rate); - } - - public static Builder builder() { - return new Builder(); - } - - public CompletableFuture prepare() { - startTimestamp = System.currentTimeMillis(); - if (forceSplit) { - prepareCf.complete(NOOP_OBJECT_ID); - } else { - objectManager - .prepareObject(1, TimeUnit.MINUTES.toMillis(60)) - .thenAcceptAsync(prepareCf::complete, executor) - .exceptionally(ex -> { - prepareCf.completeExceptionally(ex); - return null; - }); - } - return prepareCf; - } - - public CompletableFuture upload() { - prepareCf.thenAcceptAsync(objectId -> FutureUtil.exec(() -> upload0(objectId), uploadCf, LOGGER, "upload"), executor); - return uploadCf; - } - - private void upload0(long objectId) { - List streamIds = new ArrayList<>(streamRecordsMap.keySet()); - Collections.sort(streamIds); - CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest(); - - ObjectWriter streamSetObject; - if (forceSplit) { - // when only has one stream, we only need to write the stream data. - streamSetObject = ObjectWriter.noop(objectId); - } else { - streamSetObject = ObjectWriter.writer(objectId, s3Operator, objectBlockSize, objectPartSize); - } - - List> streamObjectCfList = new LinkedList<>(); - - List> streamSetWriteCfList = new LinkedList<>(); - for (Long streamId : streamIds) { - List streamRecords = streamRecordsMap.get(streamId); - int streamSize = streamRecords.stream().mapToInt(StreamRecordBatch::size).sum(); - if (forceSplit || streamSize >= streamSplitSizeThreshold) { - streamObjectCfList.add(writeStreamObject(streamRecords, streamSize).thenAccept(so -> { - synchronized (request) { - request.addStreamObject(so); - } - })); - } else { - streamSetWriteCfList.add(limiter.acquire(streamSize).thenAccept(nil -> streamSetObject.write(streamId, streamRecords))); - long startOffset = streamRecords.get(0).getBaseOffset(); - long endOffset = streamRecords.get(streamRecords.size() - 1).getLastOffset(); - request.addStreamRange(new ObjectStreamRange(streamId, -1L, startOffset, endOffset, streamSize)); - } - } - request.setObjectId(objectId); - request.setOrderId(objectId); - CompletableFuture streamSetObjectCf = CompletableFuture.allOf(streamSetWriteCfList.toArray(new CompletableFuture[0])) - .thenCompose(nil -> streamSetObject.close().thenAccept(nil2 -> request.setObjectSize(streamSetObject.size()))); - List> allCf = new LinkedList<>(streamObjectCfList); - allCf.add(streamSetObjectCf); - CompletableFuture.allOf(allCf.toArray(new CompletableFuture[0])).thenAccept(nil -> { - commitStreamSetObjectRequest = request; - uploadCf.complete(request); - }).exceptionally(ex -> { - uploadCf.completeExceptionally(ex); - return null; - }); - } - - public CompletableFuture commit() { - return uploadCf.thenCompose(request -> objectManager.commitStreamSetObject(request).thenAccept(resp -> { - LOGGER.info("Upload delta WAL {}, cost {}ms, rate limiter {}bytes/s", commitStreamSetObjectRequest, - System.currentTimeMillis() - startTimestamp, rate); - if (s3ObjectLogEnable) { - s3ObjectLogger.trace("{}", commitStreamSetObjectRequest); - } - }).whenComplete((nil, ex) -> limiter.close())); - } - - private CompletableFuture writeStreamObject(List streamRecords, int streamSize) { - CompletableFuture cf = objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(60)); - cf = cf.thenCompose(objectId -> limiter.acquire(streamSize).thenApply(nil -> 
objectId)); - return cf.thenComposeAsync(objectId -> { - ObjectWriter streamObjectWriter = ObjectWriter.writer(objectId, s3Operator, objectBlockSize, objectPartSize); - long streamId = streamRecords.get(0).getStreamId(); - streamObjectWriter.write(streamId, streamRecords); - long startOffset = streamRecords.get(0).getBaseOffset(); - long endOffset = streamRecords.get(streamRecords.size() - 1).getLastOffset(); - StreamObject streamObject = new StreamObject(); - streamObject.setObjectId(objectId); - streamObject.setStreamId(streamId); - streamObject.setStartOffset(startOffset); - streamObject.setEndOffset(endOffset); - return streamObjectWriter.close().thenApply(nil -> { - streamObject.setObjectSize(streamObjectWriter.size()); - return streamObject; - }); - }, executor); - } - - public static class Builder { - private Config config; - private Map> streamRecordsMap; - private ObjectManager objectManager; - private S3Operator s3Operator; - private ExecutorService executor; - private Boolean forceSplit; - private double rate = Long.MAX_VALUE; - - public Builder config(Config config) { - this.config = config; - return this; - } - - public Builder streamRecordsMap(Map> streamRecordsMap) { - this.streamRecordsMap = streamRecordsMap; - return this; - } - - public Builder objectManager(ObjectManager objectManager) { - this.objectManager = objectManager; - return this; - } - - public Builder s3Operator(S3Operator s3Operator) { - this.s3Operator = s3Operator; - return this; - } - - public Builder executor(ExecutorService executor) { - this.executor = executor; - return this; - } - - public Builder forceSplit(boolean forceSplit) { - this.forceSplit = forceSplit; - return this; - } - - public Builder rate(double rate) { - this.rate = rate; - return this; - } - - public DeltaWALUploadTask build() { - if (forceSplit == null) { - boolean forceSplit = streamRecordsMap.size() == 1; - if (!forceSplit) { - Optional hasStreamSetData = streamRecordsMap.values() - .stream() - .map(records -> records.stream().mapToLong(StreamRecordBatch::size).sum() >= config.streamSplitSize()) - .filter(split -> !split) - .findAny(); - if (hasStreamSetData.isEmpty()) { - forceSplit = true; - } - } - this.forceSplit = forceSplit; - } - return new DeltaWALUploadTask(config, streamRecordsMap, objectManager, s3Operator, executor, forceSplit, rate); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/ObjectReader.java b/s3stream/src/main/java/com/automq/stream/s3/ObjectReader.java deleted file mode 100644 index d5441408a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/ObjectReader.java +++ /dev/null @@ -1,437 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.utils.CloseableIterator; -import com.automq.stream.utils.biniarysearch.IndexBlockOrderedBytes; -import io.netty.buffer.ByteBuf; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicInteger; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.ByteBufAlloc.READ_INDEX_BLOCK; -import static com.automq.stream.s3.ObjectWriter.Footer.FOOTER_SIZE; -import static com.automq.stream.s3.metadata.ObjectUtils.NOOP_OFFSET; - -public class ObjectReader implements AutoCloseable { - private static final Logger LOGGER = LoggerFactory.getLogger(ObjectReader.class); - private final S3ObjectMetadata metadata; - private final String objectKey; - private final S3Operator s3Operator; - private final CompletableFuture basicObjectInfoCf; - private final AtomicInteger refCount = new AtomicInteger(1); - - public ObjectReader(S3ObjectMetadata metadata, S3Operator s3Operator) { - this.metadata = metadata; - this.objectKey = metadata.key(); - this.s3Operator = s3Operator; - this.basicObjectInfoCf = new CompletableFuture<>(); - asyncGetBasicObjectInfo(); - } - - public String objectKey() { - return objectKey; - } - - public CompletableFuture basicObjectInfo() { - return basicObjectInfoCf; - } - - public CompletableFuture find(long streamId, long startOffset, long endOffset) { - return find(streamId, startOffset, endOffset, Integer.MAX_VALUE); - } - - public CompletableFuture find(long streamId, long startOffset, long endOffset, int maxBytes) { - return basicObjectInfoCf.thenApply(basicObjectInfo -> basicObjectInfo.indexBlock().find(streamId, startOffset, endOffset, maxBytes)); - } - - public CompletableFuture read(DataBlockIndex block) { - CompletableFuture rangeReadCf = s3Operator.rangeRead(objectKey, block.startPosition(), block.endPosition(), ThrottleStrategy.THROTTLE_1); - return rangeReadCf.thenApply(DataBlockGroup::new); - } - - void asyncGetBasicObjectInfo() { - int guessIndexBlockSize = 1024 + (int) (metadata.objectSize() / (1024 * 1024 /* 1MB */) * 36 /* index unit size*/); - asyncGetBasicObjectInfo0(Math.max(0, metadata.objectSize() - guessIndexBlockSize), true); - } - - private void asyncGetBasicObjectInfo0(long startPosition, boolean firstAttempt) { - CompletableFuture cf = s3Operator.rangeRead(objectKey, startPosition, metadata.objectSize()); - cf.thenAccept(buf -> { - try { - BasicObjectInfo basicObjectInfo = BasicObjectInfo.parse(buf, metadata); - basicObjectInfoCf.complete(basicObjectInfo); - } catch (IndexBlockParseException ex) { - asyncGetBasicObjectInfo0(ex.indexBlockPosition, false); - } - }).exceptionally(ex -> { - LOGGER.warn("s3 range read from {} [{}, {}) failed", objectKey, startPosition, metadata.objectSize(), ex); - // TODO: delay retry. 
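The `TODO: delay retry` above is still open: on a failed range read the code retries once, immediately, before giving up. A hedged sketch of what a delayed retry could look like on a shared scheduler (the `DelayedRetry` class and `retryLater` name are hypothetical, not part of this codebase):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical helper for the TODO above: re-run the failed range read after a
// fixed delay instead of immediately, leaving attempt accounting to the caller.
public final class DelayedRetry {
    private static final ScheduledExecutorService SCHEDULER =
        Executors.newSingleThreadScheduledExecutor();

    public static void retryLater(Runnable rangeRead, long delayMs) {
        SCHEDULER.schedule(rangeRead, delayMs, TimeUnit.MILLISECONDS);
    }
}
```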
- if (firstAttempt) { - asyncGetBasicObjectInfo0(startPosition, false); - } else { - basicObjectInfoCf.completeExceptionally(ex); - } - return null; - }); - } - - public ObjectReader retain() { - refCount.incrementAndGet(); - return this; - } - - public ObjectReader release() { - if (refCount.decrementAndGet() == 0) { - close0(); - } - return this; - } - - @Override - public void close() { - release(); - } - - public void close0() { - basicObjectInfoCf.thenAccept(BasicObjectInfo::close); - } - - /** - * - */ - public static final class BasicObjectInfo { - private final long dataBlockSize; - private final IndexBlock indexBlock; - - /** - * @param dataBlockSize The total size of the data blocks, which equals to index start position. - * @param indexBlock raw index data. - */ - public BasicObjectInfo(long dataBlockSize, IndexBlock indexBlock) { - this.dataBlockSize = dataBlockSize; - this.indexBlock = indexBlock; - } - - public static BasicObjectInfo parse(ByteBuf objectTailBuf, - S3ObjectMetadata s3ObjectMetadata) throws IndexBlockParseException { - objectTailBuf = objectTailBuf.slice(); - long indexBlockPosition = objectTailBuf.getLong(objectTailBuf.readableBytes() - FOOTER_SIZE); - int indexBlockSize = objectTailBuf.getInt(objectTailBuf.readableBytes() - 40); - if (indexBlockPosition + objectTailBuf.readableBytes() < s3ObjectMetadata.objectSize()) { - objectTailBuf.release(); - throw new IndexBlockParseException(indexBlockPosition); - } else { - int indexRelativePosition = objectTailBuf.readableBytes() - (int) (s3ObjectMetadata.objectSize() - indexBlockPosition); - - // trim the ByteBuf to avoid extra memory occupy. - ByteBuf indexBlockBuf = objectTailBuf.slice(objectTailBuf.readerIndex() + indexRelativePosition, indexBlockSize); - ByteBuf copy = ByteBufAlloc.byteBuffer(indexBlockBuf.readableBytes(), READ_INDEX_BLOCK); - indexBlockBuf.readBytes(copy, indexBlockBuf.readableBytes()); - objectTailBuf.release(); - indexBlockBuf = copy; - return new BasicObjectInfo(indexBlockPosition, new IndexBlock(s3ObjectMetadata, indexBlockBuf)); - } - } - - public int size() { - return indexBlock.size(); - } - - void close() { - indexBlock.close(); - } - - public long dataBlockSize() { - return dataBlockSize; - } - - public IndexBlock indexBlock() { - return indexBlock; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (BasicObjectInfo) obj; - return this.dataBlockSize == that.dataBlockSize && - Objects.equals(this.indexBlock, that.indexBlock); - } - - @Override - public int hashCode() { - return Objects.hash(dataBlockSize, indexBlock); - } - - @Override - public String toString() { - return "BasicObjectInfo[" + - "dataBlockSize=" + dataBlockSize + ", " + - "indexBlock=" + indexBlock + ']'; - } - - } - - public static class IndexBlock { - public static final int INDEX_BLOCK_UNIT_SIZE = 8/* streamId */ + 8 /* startOffset */ + 4 /* endOffset delta */ - + 4 /* record count */ + 8 /* block position */ + 4 /* block size */; - private final S3ObjectMetadata s3ObjectMetadata; - private final ByteBuf buf; - private final int size; - private final int count; - - public IndexBlock(S3ObjectMetadata s3ObjectMetadata, ByteBuf buf) { - this.s3ObjectMetadata = s3ObjectMetadata; - this.buf = buf; - this.size = buf.readableBytes(); - this.count = buf.readableBytes() / INDEX_BLOCK_UNIT_SIZE; - } - - public Iterator iterator() { - AtomicInteger getIndex = new AtomicInteger(0); - return new Iterator<>() 
{ - @Override - public boolean hasNext() { - return getIndex.get() < count; - } - - @Override - public DataBlockIndex next() { - return get(getIndex.getAndIncrement()); - } - }; - } - - public DataBlockIndex get(int index) { - if (index < 0 || index >= count) { - throw new IllegalArgumentException("index" + index + " is out of range [0, " + count + ")"); - } - int base = index * INDEX_BLOCK_UNIT_SIZE; - long streamId = buf.getLong(base); - long startOffset = buf.getLong(base + 8); - int endOffsetDelta = buf.getInt(base + 16); - int recordCount = buf.getInt(base + 20); - long blockPosition = buf.getLong(base + 24); - int blockSize = buf.getInt(base + 32); - return new DataBlockIndex(streamId, startOffset, endOffsetDelta, recordCount, blockPosition, blockSize); - } - - public FindIndexResult find(long streamId, long startOffset, long endOffset) { - return find(streamId, startOffset, endOffset, Integer.MAX_VALUE); - } - - public FindIndexResult find(long streamId, long startOffset, long endOffset, int maxBytes) { - long nextStartOffset = startOffset; - int nextMaxBytes = maxBytes; - boolean matched = false; - boolean isFulfilled = false; - List rst = new LinkedList<>(); - IndexBlockOrderedBytes indexBlockOrderedBytes = new IndexBlockOrderedBytes(this); - int startIndex = indexBlockOrderedBytes.search(new IndexBlockOrderedBytes.TargetStreamOffset(streamId, startOffset)); - if (startIndex == -1) { - // mismatched - return new FindIndexResult(false, nextStartOffset, nextMaxBytes, rst); - } - for (int i = startIndex; i < count(); i++) { - DataBlockIndex index = get(i); - if (index.streamId() == streamId) { - if (nextStartOffset < index.startOffset()) { - break; - } - if (index.endOffset() <= nextStartOffset) { - continue; - } - matched = nextStartOffset == index.startOffset(); - nextStartOffset = index.endOffset(); - rst.add(new StreamDataBlock(s3ObjectMetadata.objectId(), index)); - - // we consider first block as not matched because we do not know exactly how many bytes are within - // the range in first block, as a result we may read one more block than expected. 
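`IndexBlock.get` above decodes each entry at fixed byte offsets that mirror `DataBlockIndex.encode`. A round-trip sketch over a single 36-byte entry, assuming the `DataBlockIndex` class from this file and Netty's `Unpooled` allocator (the field values are arbitrary):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

// Encode one index entry, then read it back at the same offsets IndexBlock.get uses.
ByteBuf buf = Unpooled.buffer(DataBlockIndex.BLOCK_INDEX_SIZE);
new DataBlockIndex(42L, 1000L, 50, 10, 0L, 4096).encode(buf);

long streamId = buf.getLong(0);        // 42
long startOffset = buf.getLong(8);     // 1000
int endOffsetDelta = buf.getInt(16);   // 50  -> endOffset = 1050
int recordCount = buf.getInt(20);      // 10
long blockPosition = buf.getLong(24);  // 0
int blockSize = buf.getInt(32);        // 4096 -> endPosition = 4096
```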
- if (matched) { - int recordPayloadSize = index.size() - - index.recordCount() * StreamRecordBatchCodec.HEADER_SIZE // sum of encoded record header size - - ObjectWriter.DataBlock.BLOCK_HEADER_SIZE; // block header size - nextMaxBytes -= Math.min(nextMaxBytes, recordPayloadSize); - } - if ((endOffset != NOOP_OFFSET && nextStartOffset >= endOffset) || nextMaxBytes == 0) { - isFulfilled = true; - break; - } - } else if (matched) { - break; - } - } - return new FindIndexResult(isFulfilled, nextStartOffset, nextMaxBytes, rst); - } - - public int size() { - return size; - } - - public int count() { - return count; - } - - void close() { - buf.release(); - } - } - - public static final class FindIndexResult { - private final boolean isFulfilled; - private final long nextStartOffset; - private final int nextMaxBytes; - private final List streamDataBlocks; - - public FindIndexResult(boolean isFulfilled, long nextStartOffset, int nextMaxBytes, - List streamDataBlocks) { - this.isFulfilled = isFulfilled; - this.nextStartOffset = nextStartOffset; - this.nextMaxBytes = nextMaxBytes; - this.streamDataBlocks = streamDataBlocks; - } - - public boolean isFulfilled() { - return isFulfilled; - } - - public long nextStartOffset() { - return nextStartOffset; - } - - public int nextMaxBytes() { - return nextMaxBytes; - } - - public List streamDataBlocks() { - return streamDataBlocks; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (FindIndexResult) obj; - return this.isFulfilled == that.isFulfilled && - this.nextStartOffset == that.nextStartOffset && - this.nextMaxBytes == that.nextMaxBytes && - Objects.equals(this.streamDataBlocks, that.streamDataBlocks); - } - - @Override - public int hashCode() { - return Objects.hash(isFulfilled, nextStartOffset, nextMaxBytes, streamDataBlocks); - } - - @Override - public String toString() { - return "FindIndexResult[" + - "isFulfilled=" + isFulfilled + ", " + - "nextStartOffset=" + nextStartOffset + ", " + - "nextMaxBytes=" + nextMaxBytes + ", " + - "streamDataBlocks=" + streamDataBlocks + ']'; - } - - } - - public static class IndexBlockParseException extends Exception { - long indexBlockPosition; - - public IndexBlockParseException(long indexBlockPosition) { - this.indexBlockPosition = indexBlockPosition; - } - - } - - public static class DataBlockGroup implements AutoCloseable { - private final ByteBuf buf; - private final int recordCount; - - public DataBlockGroup(ByteBuf buf) { - this.buf = buf.duplicate(); - this.recordCount = check(buf); - } - - private static int check(ByteBuf buf) { - buf = buf.duplicate(); - int recordCount = 0; - while (buf.readableBytes() > 0) { - byte magicCode = buf.readByte(); - if (magicCode != ObjectWriter.DATA_BLOCK_MAGIC) { - LOGGER.error("magic code mismatch, expected {}, actual {}", ObjectWriter.DATA_BLOCK_MAGIC, magicCode); - throw new RuntimeException("[FATAL] magic code mismatch, data is corrupted"); - } - buf.readByte(); // flag - recordCount += buf.readInt(); - int dataLength = buf.readInt(); - buf.skipBytes(dataLength); - } - return recordCount; - } - - public CloseableIterator iterator() { - ByteBuf buf = this.buf.duplicate(); - AtomicInteger currentBlockRecordCount = new AtomicInteger(0); - AtomicInteger remainingRecordCount = new AtomicInteger(recordCount); - return new CloseableIterator<>() { - @Override - public boolean hasNext() { - // in.available() is not reliable. 
ZstdInputStreamNoFinalizer might return 1 when there is no more data. - return remainingRecordCount.get() != 0; - } - - @Override - public StreamRecordBatch next() { - if (remainingRecordCount.decrementAndGet() < 0) { - throw new NoSuchElementException(); - } - if (currentBlockRecordCount.get() == 0) { - buf.skipBytes(1 /* magic */ + 1 /* flag */); - currentBlockRecordCount.set(buf.readInt()); - buf.skipBytes(4); - } - currentBlockRecordCount.decrementAndGet(); - return StreamRecordBatchCodec.duplicateDecode(buf); - } - - @Override - public void close() { - } - }; - } - - public int recordCount() { - return recordCount; - } - - @Override - public void close() { - buf.release(); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/ObjectWriter.java b/s3stream/src/main/java/com/automq/stream/s3/ObjectWriter.java deleted file mode 100644 index 85863fb5e..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/ObjectWriter.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.operator.Writer; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import static com.automq.stream.s3.ByteBufAlloc.WRITE_DATA_BLOCK_HEADER; -import static com.automq.stream.s3.ByteBufAlloc.WRITE_FOOTER; -import static com.automq.stream.s3.ByteBufAlloc.WRITE_INDEX_BLOCK; - -/** - * Write stream records to a single object. - */ -public interface ObjectWriter { - - byte DATA_BLOCK_MAGIC = 0x5A; - // TODO: first n bit is the compressed flag - byte DATA_BLOCK_DEFAULT_FLAG = 0x02; - - static ObjectWriter writer(long objectId, S3Operator s3Operator, int blockSizeThreshold, int partSizeThreshold) { - return new DefaultObjectWriter(objectId, s3Operator, blockSizeThreshold, partSizeThreshold); - } - - static ObjectWriter noop(long objectId) { - return new NoopObjectWriter(objectId); - } - - void write(long streamId, List records); - - CompletableFuture close(); - - List getStreamRanges(); - - long objectId(); - - long size(); - - class DefaultObjectWriter implements ObjectWriter { - - private final int blockSizeThreshold; - private final int partSizeThreshold; - private final List waitingUploadBlocks; - private final List completedBlocks; - private final Writer writer; - private final long objectId; - private int waitingUploadBlocksSize; - private IndexBlock indexBlock; - private long size; - - /** - * Create a new object writer. - * - * @param objectId object id - * @param s3Operator S3 operator - * @param blockSizeThreshold the max size of a block - * @param partSizeThreshold the max size of a part. If it is smaller than {@link Writer#MIN_PART_SIZE}, it will be set to {@link Writer#MIN_PART_SIZE}. 
- */ - public DefaultObjectWriter(long objectId, S3Operator s3Operator, int blockSizeThreshold, - int partSizeThreshold) { - this.objectId = objectId; - String objectKey = ObjectUtils.genKey(0, objectId); - this.blockSizeThreshold = blockSizeThreshold; - this.partSizeThreshold = Math.max(Writer.MIN_PART_SIZE, partSizeThreshold); - waitingUploadBlocks = new LinkedList<>(); - completedBlocks = new LinkedList<>(); - writer = s3Operator.writer(objectKey); - } - - public void write(long streamId, List records) { - List> blocks = groupByBlock(records); - blocks.forEach(blockRecords -> { - DataBlock block = new DataBlock(streamId, blockRecords); - waitingUploadBlocks.add(block); - waitingUploadBlocksSize += block.size(); - }); - if (waitingUploadBlocksSize >= partSizeThreshold) { - tryUploadPart(); - } - } - - private List> groupByBlock(List records) { - List> blocks = new LinkedList<>(); - List blockRecords = new ArrayList<>(records.size()); - for (StreamRecordBatch record : records) { - size += record.size(); - blockRecords.add(record); - if (size >= blockSizeThreshold) { - blocks.add(blockRecords); - blockRecords = new ArrayList<>(records.size()); - size = 0; - } - } - if (!blockRecords.isEmpty()) { - blocks.add(blockRecords); - } - return blocks; - } - - private synchronized void tryUploadPart() { - for (; ; ) { - List uploadBlocks = new ArrayList<>(waitingUploadBlocks.size()); - boolean partFull = false; - int size = 0; - for (DataBlock block : waitingUploadBlocks) { - uploadBlocks.add(block); - size += block.size(); - if (size >= partSizeThreshold) { - partFull = true; - break; - } - } - if (partFull) { - CompositeByteBuf partBuf = ByteBufAlloc.compositeByteBuffer(); - for (DataBlock block : uploadBlocks) { - waitingUploadBlocksSize -= block.size(); - partBuf.addComponent(true, block.buffer()); - } - writer.write(partBuf); - completedBlocks.addAll(uploadBlocks); - waitingUploadBlocks.removeIf(uploadBlocks::contains); - } else { - break; - } - } - } - - public CompletableFuture close() { - CompositeByteBuf buf = ByteBufAlloc.compositeByteBuffer(); - for (DataBlock block : waitingUploadBlocks) { - buf.addComponent(true, block.buffer()); - completedBlocks.add(block); - } - waitingUploadBlocks.clear(); - indexBlock = new IndexBlock(); - buf.addComponent(true, indexBlock.buffer()); - Footer footer = new Footer(indexBlock.position(), indexBlock.size()); - buf.addComponent(true, footer.buffer()); - writer.write(buf.duplicate()); - size = indexBlock.position() + indexBlock.size() + footer.size(); - return writer.close(); - } - - public List getStreamRanges() { - List streamRanges = new LinkedList<>(); - ObjectStreamRange lastStreamRange = null; - for (DataBlock block : completedBlocks) { - ObjectStreamRange streamRange = block.getStreamRange(); - if (lastStreamRange == null || lastStreamRange.getStreamId() != streamRange.getStreamId()) { - if (lastStreamRange != null) { - streamRanges.add(lastStreamRange); - } - lastStreamRange = new ObjectStreamRange(); - lastStreamRange.setStreamId(streamRange.getStreamId()); - lastStreamRange.setEpoch(streamRange.getEpoch()); - lastStreamRange.setStartOffset(streamRange.getStartOffset()); - } - lastStreamRange.setEndOffset(streamRange.getEndOffset()); - } - if (lastStreamRange != null) { - streamRanges.add(lastStreamRange); - } - return streamRanges; - } - - public long objectId() { - return objectId; - } - - public long size() { - return size; - } - - class IndexBlock { - private final ByteBuf buf; - private final long position; - - public IndexBlock() { - long 
nextPosition = 0; - int indexBlockSize = DataBlockIndex.BLOCK_INDEX_SIZE * completedBlocks.size(); - buf = ByteBufAlloc.byteBuffer(indexBlockSize, WRITE_INDEX_BLOCK); - for (DataBlock block : completedBlocks) { - ObjectStreamRange streamRange = block.getStreamRange(); - new DataBlockIndex(streamRange.getStreamId(), streamRange.getStartOffset(), (int) (streamRange.getEndOffset() - streamRange.getStartOffset()), - block.recordCount(), nextPosition, block.size()).encode(buf); - nextPosition += block.size(); - } - position = nextPosition; - } - - public ByteBuf buffer() { - return buf.duplicate(); - } - - public long position() { - return position; - } - - public int size() { - return buf.readableBytes(); - } - } - } - - class DataBlock { - public static final int BLOCK_HEADER_SIZE = 1 /* magic */ + 1/* flag */ + 4 /* record count*/ + 4 /* data length */; - private final CompositeByteBuf encodedBuf; - private final ObjectStreamRange streamRange; - private final int recordCount; - private final int size; - - public DataBlock(long streamId, List records) { - this.recordCount = records.size(); - this.encodedBuf = ByteBufAlloc.compositeByteBuffer(); - ByteBuf header = ByteBufAlloc.byteBuffer(BLOCK_HEADER_SIZE, WRITE_DATA_BLOCK_HEADER); - header.writeByte(DATA_BLOCK_MAGIC); - header.writeByte(DATA_BLOCK_DEFAULT_FLAG); - header.writeInt(recordCount); - header.writeInt(0); // data length - encodedBuf.addComponent(true, header); - records.forEach(r -> encodedBuf.addComponent(true, r.encoded().retain())); - this.size = encodedBuf.readableBytes(); - encodedBuf.setInt(BLOCK_HEADER_SIZE - 4, size - BLOCK_HEADER_SIZE); - this.streamRange = new ObjectStreamRange(streamId, records.get(0).getEpoch(), records.get(0).getBaseOffset(), records.get(records.size() - 1).getLastOffset(), size); - } - - public int size() { - return size; - } - - public int recordCount() { - return recordCount; - } - - public ObjectStreamRange getStreamRange() { - return streamRange; - } - - public ByteBuf buffer() { - return encodedBuf.duplicate(); - } - } - - class Footer { - public static final int FOOTER_SIZE = 48; - private static final long MAGIC = 0x88e241b785f4cff7L; - private final ByteBuf buf; - - public Footer(long indexStartPosition, int indexBlockLength) { - buf = ByteBufAlloc.byteBuffer(FOOTER_SIZE, WRITE_FOOTER); - // start position of index block - buf.writeLong(indexStartPosition); - // size of index block - buf.writeInt(indexBlockLength); - // reserved for future - buf.writeZero(40 - 8 - 4); - buf.writeLong(MAGIC); - } - - public ByteBuf buffer() { - return buf.duplicate(); - } - - public int size() { - return FOOTER_SIZE; - } - - } - - class NoopObjectWriter implements ObjectWriter { - private final long objectId; - - public NoopObjectWriter(long objectId) { - this.objectId = objectId; - } - - @Override - public void write(long streamId, List records) { - } - - @Override - public CompletableFuture close() { - return CompletableFuture.completedFuture(null); - } - - @Override - public List getStreamRanges() { - return Collections.emptyList(); - } - - @Override - public long objectId() { - return objectId; - } - - @Override - public long size() { - return 0; - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3ObjectLogger.java b/s3stream/src/main/java/com/automq/stream/s3/S3ObjectLogger.java deleted file mode 100644 index d703511bb..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/S3ObjectLogger.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.utils.LogContext; -import org.slf4j.Logger; - -public class S3ObjectLogger { - - public static Logger logger(String prefix) { - return new LogContext(prefix).logger("s3.object.logger"); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java b/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java deleted file mode 100644 index 966623d4e..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/S3Storage.java +++ /dev/null @@ -1,945 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.exceptions.FastReadFailFastException; -import com.automq.stream.s3.cache.CacheAccessType; -import com.automq.stream.s3.cache.LogCache; -import com.automq.stream.s3.cache.ReadDataBlock; -import com.automq.stream.s3.cache.S3BlockCache; -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.context.FetchContext; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.s3.wal.WriteAheadLog; -import com.automq.stream.utils.FutureTicker; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import io.netty.buffer.ByteBuf; -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timeout; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.PriorityQueue; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantLock; -import 
java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.utils.FutureUtil.suppress; - -public class S3Storage implements Storage { - private static final Logger LOGGER = LoggerFactory.getLogger(S3Storage.class); - private static final FastReadFailFastException FAST_READ_FAIL_FAST_EXCEPTION = new FastReadFailFastException(); - private static final int NUM_STREAM_CALLBACK_LOCKS = 128; - private final long maxDeltaWALCacheSize; - private final Config config; - private final WriteAheadLog deltaWAL; - /** - * WAL log cache - */ - private final LogCache deltaWALCache; - /** - * WAL out of order callback sequencer. {@link #streamCallbackLocks} will ensure the memory safety. - */ - private final WALCallbackSequencer callbackSequencer = new WALCallbackSequencer(); - private final WALConfirmOffsetCalculator confirmOffsetCalculator = new WALConfirmOffsetCalculator(); - private final Queue walPrepareQueue = new LinkedList<>(); - private final Queue walCommitQueue = new LinkedList<>(); - private final List inflightWALUploadTasks = new CopyOnWriteArrayList<>(); - private final ScheduledExecutorService backgroundExecutor = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("s3-storage-background", true), LOGGER); - private final ExecutorService uploadWALExecutor = Threads.newFixedThreadPoolWithMonitor( - 4, "s3-storage-upload-wal", true, LOGGER); - /** - * A ticker used for batching force upload WAL. - * - * @see #forceUpload - */ - private final FutureTicker forceUploadTicker = new FutureTicker(500, TimeUnit.MILLISECONDS, backgroundExecutor); - private final Queue backoffRecords = new LinkedBlockingQueue<>(); - private final ScheduledFuture drainBackoffTask; - private final StreamManager streamManager; - private final ObjectManager objectManager; - private final S3Operator s3Operator; - private final S3BlockCache blockCache; - /** - * Stream callback locks. Used to ensure the stream callbacks will not be called concurrently. 
- * - * @see #handleAppendCallback - */ - private final Lock[] streamCallbackLocks = IntStream.range(0, NUM_STREAM_CALLBACK_LOCKS).mapToObj(i -> new ReentrantLock()).toArray(Lock[]::new); - private final HashedWheelTimer timeoutDetect = new HashedWheelTimer( - ThreadUtils.createThreadFactory("storage-timeout-detect", true), 1, TimeUnit.SECONDS, 100); - private long lastLogTimestamp = 0L; - private volatile double maxDataWriteRate = 0.0; - - public S3Storage(Config config, WriteAheadLog deltaWAL, StreamManager streamManager, ObjectManager objectManager, - S3BlockCache blockCache, S3Operator s3Operator) { - this.config = config; - this.maxDeltaWALCacheSize = config.walCacheSize(); - this.deltaWAL = deltaWAL; - this.blockCache = blockCache; - this.deltaWALCache = new LogCache(config.walCacheSize(), config.walUploadThreshold(), config.maxStreamNumPerStreamSetObject()); - this.streamManager = streamManager; - this.objectManager = objectManager; - this.s3Operator = s3Operator; - this.drainBackoffTask = this.backgroundExecutor.scheduleWithFixedDelay(this::tryDrainBackoffRecords, 100, 100, TimeUnit.MILLISECONDS); - S3StreamMetricsManager.registerInflightWALUploadTasksCountSupplier(this.inflightWALUploadTasks::size); - } - - static LogCache.LogCacheBlock recoverContinuousRecords(Iterator it, - List openingStreams) { - return recoverContinuousRecords(it, openingStreams, LOGGER); - } - - /** - * Recover continuous records in each stream from the WAL, and put them into the returned {@link LogCache.LogCacheBlock}. - * It will filter out - *
- * <ul>
- *     <li>the records that are not in the opening streams</li>
- *     <li>the records that have been committed</li>
- *     <li>the records that are not continuous, which means, all records after the first discontinuous record</li>
- * </ul>
- * It throws {@link IllegalStateException} if the start offset of the first recovered record mismatches
- * the end offset of any opening stream, which indicates data loss.
- * <p>
- * If there are out-of-order records (which should never happen, or there is a BUG), it will try to re-order them.
- * <p>
- * For example, if we recover the following records from the WAL in a stream:
- * <pre>    1, 2, 3, 5, 4, 6, 10, 11</pre>
- * and the {@link StreamMetadata#endOffset()} of this stream is 3, then the returned {@link LogCache.LogCacheBlock}
- * will contain the records
- * <pre>    3, 4, 5, 6</pre>
- * Here,
- * <ul>
- *     <li>records 1 and 2 are discarded because they have been committed (less than 3, the end offset of the stream);</li>
- *     <li>records 10 and 11 are discarded because they are not continuous (10 is not 7, the next offset of 6);</li>
- *     <li>records 5 and 4 are reordered because they are out of order, and we handle this bug here.</li>
- * </ul>
- */ - static LogCache.LogCacheBlock recoverContinuousRecords(Iterator it, - List openingStreams, Logger logger) { - Map openingStreamEndOffsets = openingStreams.stream().collect(Collectors.toMap(StreamMetadata::streamId, StreamMetadata::endOffset)); - LogCache.LogCacheBlock cacheBlock = new LogCache.LogCacheBlock(1024L * 1024 * 1024); - long logEndOffset = -1L; - Map streamNextOffsets = new HashMap<>(); - Map> streamDiscontinuousRecords = new HashMap<>(); - while (it.hasNext()) { - WriteAheadLog.RecoverResult recoverResult = it.next(); - logEndOffset = recoverResult.recordOffset(); - ByteBuf recordBuf = recoverResult.record().duplicate(); - StreamRecordBatch streamRecordBatch = StreamRecordBatchCodec.decode(recordBuf); - long streamId = streamRecordBatch.getStreamId(); - Long openingStreamEndOffset = openingStreamEndOffsets.get(streamId); - if (openingStreamEndOffset == null) { - // stream is already safe closed. so skip the stream records. - recordBuf.release(); - continue; - } - if (streamRecordBatch.getBaseOffset() < openingStreamEndOffset) { - // filter committed records. - recordBuf.release(); - continue; - } - - Long expectNextOffset = streamNextOffsets.get(streamId); - Queue discontinuousRecords = streamDiscontinuousRecords.get(streamId); - if (expectNextOffset == null || expectNextOffset == streamRecordBatch.getBaseOffset()) { - // continuous record, put it into cache. - cacheBlock.put(streamRecordBatch); - expectNextOffset = streamRecordBatch.getLastOffset(); - // check if there are some out of order records in the queue. - if (discontinuousRecords != null) { - while (!discontinuousRecords.isEmpty()) { - StreamRecordBatch peek = discontinuousRecords.peek(); - if (peek.getBaseOffset() == expectNextOffset) { - // should never happen, log it. - logger.error("[BUG] recover an out of order record, streamId={}, expectNextOffset={}, record={}", streamId, expectNextOffset, peek); - cacheBlock.put(peek); - discontinuousRecords.poll(); - expectNextOffset = peek.getLastOffset(); - } else { - break; - } - } - } - // update next offset. - streamNextOffsets.put(streamRecordBatch.getStreamId(), expectNextOffset); - } else { - // unexpected record, put it into discontinuous records queue. - if (discontinuousRecords == null) { - discontinuousRecords = new PriorityQueue<>(Comparator.comparingLong(StreamRecordBatch::getBaseOffset)); - streamDiscontinuousRecords.put(streamId, discontinuousRecords); - } - discontinuousRecords.add(streamRecordBatch); - } - } - // release all discontinuous records. 
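The recovery rule documented above can be checked with a standalone sketch that operates on plain offsets (one offset per record) instead of `StreamRecordBatch`; `recover` below is a simplified stand-in for `recoverContinuousRecords`, not the real method:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

public final class ContinuityDemo {
    // Committed offsets (< endOffset) are dropped, out-of-order offsets are parked in
    // a min-heap, and anything that never becomes continuous is discarded at the end.
    static List<Long> recover(long endOffset, long... offsets) {
        List<Long> recovered = new ArrayList<>();
        PriorityQueue<Long> parked = new PriorityQueue<>();
        long next = endOffset;
        for (long o : offsets) {
            if (o < endOffset) continue;              // already committed
            if (o == next) {
                recovered.add(o);
                next = o + 1;
                while (!parked.isEmpty() && parked.peek() == next) {
                    recovered.add(parked.poll());     // re-order the "should never happen" case
                    next++;
                }
            } else {
                parked.add(o);                        // discontinuous, park it for now
            }
        }
        return recovered;                             // parked leftovers are dropped
    }

    public static void main(String[] args) {
        // The Javadoc example: records 1,2,3,5,4,6,10,11 with stream endOffset 3.
        System.out.println(recover(3, 1, 2, 3, 5, 4, 6, 10, 11)); // [3, 4, 5, 6]
    }
}
```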
- streamDiscontinuousRecords.values().forEach(queue -> { - if (queue.isEmpty()) { - return; - } - logger.info("drop discontinuous records, records={}", queue); - queue.forEach(StreamRecordBatch::release); - }); - - if (logEndOffset >= 0L) { - cacheBlock.confirmOffset(logEndOffset); - } - cacheBlock.records().forEach((streamId, records) -> { - if (!records.isEmpty()) { - long startOffset = records.get(0).getBaseOffset(); - long expectedStartOffset = openingStreamEndOffsets.getOrDefault(streamId, startOffset); - if (startOffset != expectedStartOffset) { - throw new IllegalStateException(String.format("[BUG] WAL data may lost, streamId %d endOffset=%d from controller, " + - "but WAL recovered records startOffset=%s", streamId, expectedStartOffset, startOffset)); - } - } - - }); - - return cacheBlock; - } - - @Override - public void startup() { - try { - LOGGER.info("S3Storage starting"); - recover(); - LOGGER.info("S3Storage start completed"); - } catch (Throwable e) { - LOGGER.error("S3Storage start fail", e); - throw new RuntimeException(e); - } - } - - /** - * Upload WAL to S3 and close opening streams. - */ - public void recover() throws Throwable { - recover0(this.deltaWAL, this.streamManager, this.objectManager, LOGGER); - } - - public void recover(WriteAheadLog deltaWAL, StreamManager streamManager, ObjectManager objectManager, - Logger logger) throws Throwable { - recover0(deltaWAL, streamManager, objectManager, logger); - } - - void recover0(WriteAheadLog deltaWAL, StreamManager streamManager, ObjectManager objectManager, - Logger logger) throws Throwable { - deltaWAL.start(); - List streams = streamManager.getOpeningStreams().get(); - - LogCache.LogCacheBlock cacheBlock = recoverContinuousRecords(deltaWAL.recover(), streams, logger); - Map streamEndOffsets = new HashMap<>(); - cacheBlock.records().forEach((streamId, records) -> { - if (!records.isEmpty()) { - streamEndOffsets.put(streamId, records.get(records.size() - 1).getLastOffset()); - } - }); - - if (cacheBlock.size() != 0) { - logger.info("try recover from crash, recover records bytes size {}", cacheBlock.size()); - DeltaWALUploadTask task = DeltaWALUploadTask.builder().config(config).streamRecordsMap(cacheBlock.records()) - .objectManager(objectManager).s3Operator(s3Operator).executor(uploadWALExecutor).build(); - task.prepare().thenCompose(nil -> task.upload()).thenCompose(nil -> task.commit()).get(); - cacheBlock.records().forEach((streamId, records) -> records.forEach(StreamRecordBatch::release)); - } - deltaWAL.reset().get(); - for (StreamMetadata stream : streams) { - long newEndOffset = streamEndOffsets.getOrDefault(stream.streamId(), stream.endOffset()); - logger.info("recover try close stream {} with new end offset {}", stream, newEndOffset); - } - CompletableFuture.allOf( - streams - .stream() - .map(s -> streamManager.closeStream(s.streamId(), s.epoch())) - .toArray(CompletableFuture[]::new) - ).get(); - } - - @Override - public void shutdown() { - drainBackoffTask.cancel(false); - for (WalWriteRequest request : backoffRecords) { - request.cf.completeExceptionally(new IOException("S3Storage is shutdown")); - } - deltaWAL.shutdownGracefully(); - backgroundExecutor.shutdown(); - try { - if (backgroundExecutor.awaitTermination(10, TimeUnit.SECONDS)) { - LOGGER.warn("await backgroundExecutor timeout 10s"); - } - } catch (InterruptedException e) { - backgroundExecutor.shutdownNow(); - LOGGER.warn("await backgroundExecutor close fail", e); - } - } - - @Override - @WithSpan - public CompletableFuture append(AppendContext 
context, StreamRecordBatch streamRecord) { - TimerUtil timerUtil = new TimerUtil(); - CompletableFuture cf = new CompletableFuture<>(); - // encoded before append to free heap ByteBuf. - streamRecord.encoded(); - WalWriteRequest writeRequest = new WalWriteRequest(streamRecord, -1L, cf, context); - handleAppendRequest(writeRequest); - append0(context, writeRequest, false); - cf.whenComplete((nil, ex) -> { - streamRecord.release(); - StorageOperationStats.getInstance().appendStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - }); - return cf; - } - - /** - * Append record to WAL. - * - * @return backoff status. - */ - public boolean append0(AppendContext context, WalWriteRequest request, boolean fromBackoff) { - // TODO: storage status check, fast fail the request when storage closed. - if (!fromBackoff && !backoffRecords.isEmpty()) { - backoffRecords.offer(request); - return true; - } - if (!tryAcquirePermit()) { - if (!fromBackoff) { - backoffRecords.offer(request); - } - StorageOperationStats.getInstance().appendLogCacheFullStats.record(0L); - if (System.currentTimeMillis() - lastLogTimestamp > 1000L) { - LOGGER.warn("[BACKOFF] log cache size {} is larger than {}", deltaWALCache.size(), maxDeltaWALCacheSize); - lastLogTimestamp = System.currentTimeMillis(); - } - return true; - } - WriteAheadLog.AppendResult appendResult; - try { - try { - StreamRecordBatch streamRecord = request.record; - streamRecord.retain(); - Lock lock = confirmOffsetCalculator.addLock(); - lock.lock(); - try { - appendResult = deltaWAL.append(new TraceContext(context), streamRecord.encoded()); - } finally { - lock.unlock(); - } - } catch (WriteAheadLog.OverCapacityException e) { - // the WAL write data align with block, 'WAL is full but LogCacheBlock is not full' may happen. 
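As the comment above notes, the WAL can report over-capacity while the current cache block still has room, so the request is parked rather than failed: force an upload (which eventually trims the WAL) and leave the record to the backoff drain loop. A simplified sketch of that contract; `wal.append` here is a stand-in for `WriteAheadLog#append` (the real call also carries a `TraceContext`):

```java
import io.netty.buffer.ByteBuf;
import java.util.Queue;

// Returning true means "backed off, retry later", mirroring append0 above.
static boolean appendOrBackoff(WriteAheadLog wal, ByteBuf record,
                               Queue<ByteBuf> backoff, Runnable freeWalSpace) {
    try {
        wal.append(record);           // may throw when the WAL ring is full
        return false;                 // accepted
    } catch (WriteAheadLog.OverCapacityException e) {
        freeWalSpace.run();           // e.g. forceUpload(LogCache.MATCH_ALL_STREAMS)
        backoff.add(record);          // parked for the drain task to retry
        return true;                  // backed off
    }
}
```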
- confirmOffsetCalculator.update(); - forceUpload(LogCache.MATCH_ALL_STREAMS); - if (!fromBackoff) { - backoffRecords.offer(request); - } - if (System.currentTimeMillis() - lastLogTimestamp > 1000L) { - LOGGER.warn("[BACKOFF] log over capacity", e); - lastLogTimestamp = System.currentTimeMillis(); - } - return true; - } - request.offset = appendResult.recordOffset(); - confirmOffsetCalculator.add(request); - } catch (Throwable e) { - LOGGER.error("[UNEXPECTED] append WAL fail", e); - request.cf.completeExceptionally(e); - return false; - } - appendResult.future().whenComplete((nil, ex) -> { - if (ex != null) { - // no exception should be thrown from the WAL - LOGGER.error("[UNEXPECTED] append WAL fail, request {}", request, ex); - return; - } - handleAppendCallback(request); - }); - return false; - } - - @SuppressWarnings("BooleanMethodIsAlwaysInverted") - private boolean tryAcquirePermit() { - return deltaWALCache.size() < maxDeltaWALCacheSize; - } - - private void tryDrainBackoffRecords() { - try { - for (; ; ) { - WalWriteRequest request = backoffRecords.peek(); - if (request == null) { - break; - } - if (append0(request.context, request, true)) { - LOGGER.warn("try drain backoff record fail, still backoff"); - break; - } - backoffRecords.poll(); - } - } catch (Throwable e) { - LOGGER.error("[UNEXPECTED] tryDrainBackoffRecords fail", e); - } - } - - @Override - @WithSpan - public CompletableFuture read(FetchContext context, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - TimerUtil timerUtil = new TimerUtil(); - CompletableFuture cf = new CompletableFuture<>(); - FutureUtil.propagate(read0(context, streamId, startOffset, endOffset, maxBytes), cf); - cf.whenComplete((nil, ex) -> StorageOperationStats.getInstance().readStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS))); - return cf; - } - - @WithSpan - private CompletableFuture read0(FetchContext context, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - List logCacheRecords = deltaWALCache.get(context, streamId, startOffset, endOffset, maxBytes); - if (!logCacheRecords.isEmpty() && logCacheRecords.get(0).getBaseOffset() <= startOffset) { - return CompletableFuture.completedFuture(new ReadDataBlock(logCacheRecords, CacheAccessType.DELTA_WAL_CACHE_HIT)); - } - if (context.readOptions().fastRead()) { - // fast read fail fast when need read from block cache. 
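`fastRead` is a cache-only contract: if any requested byte would have to come from the block cache, the read fails fast with `FastReadFailFastException` so the caller can decide how to fall back. A hedged sketch of caller-side handling (the `withFallback` helper is hypothetical):

```java
import com.automq.stream.api.exceptions.FastReadFailFastException;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

// Try the cache-only path first; on a fast-read miss, retry as a normal read.
static <T> CompletableFuture<T> withFallback(Supplier<CompletableFuture<T>> fastRead,
                                             Supplier<CompletableFuture<T>> normalRead) {
    return fastRead.get().exceptionallyCompose(ex -> {
        Throwable cause = ex.getCause() != null ? ex.getCause() : ex;
        return cause instanceof FastReadFailFastException
            ? normalRead.get()                    // fall back to the block-cache read
            : CompletableFuture.failedFuture(ex); // propagate real failures
    });
}
```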
- logCacheRecords.forEach(StreamRecordBatch::release); - logCacheRecords.clear(); - return CompletableFuture.failedFuture(FAST_READ_FAIL_FAST_EXCEPTION); - } - if (!logCacheRecords.isEmpty()) { - endOffset = logCacheRecords.get(0).getBaseOffset(); - } - Timeout timeout = timeoutDetect.newTimeout(t -> LOGGER.warn("read from block cache timeout, stream={}, {}, maxBytes: {}", streamId, startOffset, maxBytes), 1, TimeUnit.MINUTES); - long finalEndOffset = endOffset; - return blockCache.read(context, streamId, startOffset, endOffset, maxBytes).thenApply(blockCacheRst -> { - List rst = new ArrayList<>(blockCacheRst.getRecords()); - int remainingBytesSize = maxBytes - rst.stream().mapToInt(StreamRecordBatch::size).sum(); - int readIndex = -1; - for (int i = 0; i < logCacheRecords.size() && remainingBytesSize > 0; i++) { - readIndex = i; - StreamRecordBatch record = logCacheRecords.get(i); - rst.add(record); - remainingBytesSize -= record.size(); - } - try { - continuousCheck(rst); - } catch (IllegalArgumentException e) { - blockCacheRst.getRecords().forEach(StreamRecordBatch::release); - throw e; - } - if (readIndex < logCacheRecords.size()) { - // release unnecessary record - logCacheRecords.subList(readIndex + 1, logCacheRecords.size()).forEach(StreamRecordBatch::release); - } - return new ReadDataBlock(rst, blockCacheRst.getCacheAccessType()); - }).whenComplete((rst, ex) -> { - timeout.cancel(); - if (ex != null) { - LOGGER.error("read from block cache failed, stream={}, {}-{}, maxBytes: {}", - streamId, startOffset, finalEndOffset, maxBytes, ex); - logCacheRecords.forEach(StreamRecordBatch::release); - } - }); - } - - private void continuousCheck(List records) { - long expectStartOffset = -1L; - for (StreamRecordBatch record : records) { - if (expectStartOffset == -1L || record.getBaseOffset() == expectStartOffset) { - expectStartOffset = record.getLastOffset(); - } else { - throw new IllegalArgumentException(String.format("Continuous check failed, expect offset: %d," + - " actual: %d, records: %s", expectStartOffset, record.getBaseOffset(), records)); - } - } - } - - /** - * Force upload stream WAL cache to S3. Use group upload to avoid generate too many S3 objects when broker shutdown. - * {@code streamId} can be {@link LogCache#MATCH_ALL_STREAMS} to force upload all streams. - */ - @Override - public CompletableFuture forceUpload(long streamId) { - TimerUtil timer = new TimerUtil(); - CompletableFuture cf = new CompletableFuture<>(); - // Wait for a while to group force upload tasks. - forceUploadTicker.tick().whenComplete((nil, ex) -> { - StorageOperationStats.getInstance().forceUploadWALAwaitStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - uploadDeltaWAL(streamId, true); - // Wait for all tasks contains streamId complete. 
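`forceUpload` deliberately waits one tick of `forceUploadTicker` so that a burst of force uploads (typically at broker shutdown) collapses into a single stream-set object. A simplified stand-in for that grouping behavior, not the real `FutureTicker` API:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Callers arriving within the same window share one future, so N force uploads
// inside the window trigger a single upload when the window closes.
public final class GroupingTicker {
    private final ScheduledExecutorService scheduler;
    private final long windowMs;
    private CompletableFuture<Void> current; // guarded by `this`

    public GroupingTicker(ScheduledExecutorService scheduler, long windowMs) {
        this.scheduler = scheduler;
        this.windowMs = windowMs;
    }

    public synchronized CompletableFuture<Void> tick() {
        if (current == null) {
            CompletableFuture<Void> cf = new CompletableFuture<>();
            current = cf;
            scheduler.schedule(() -> {
                synchronized (this) {
                    current = null;   // the next caller opens a new window
                }
                cf.complete(null);    // release every caller in this window
            }, windowMs, TimeUnit.MILLISECONDS);
        }
        return current;
    }
}
```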
- FutureUtil.propagate(CompletableFuture.allOf(this.inflightWALUploadTasks.stream() - .filter(it -> it.cache.containsStream(streamId)) - .map(it -> it.cf).toArray(CompletableFuture[]::new)), cf); - if (LogCache.MATCH_ALL_STREAMS != streamId) { - callbackSequencer.tryFree(streamId); - } - }); - cf.whenComplete((nil, ex) -> StorageOperationStats.getInstance().forceUploadWALCompleteStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS))); - return cf; - } - - private void handleAppendRequest(WalWriteRequest request) { - callbackSequencer.before(request); - } - - private void handleAppendCallback(WalWriteRequest request) { - suppress(() -> handleAppendCallback0(request), LOGGER); - } - - private void handleAppendCallback0(WalWriteRequest request) { - TimerUtil timer = new TimerUtil(); - List waitingAckRequests; - Lock lock = getStreamCallbackLock(request.record.getStreamId()); - lock.lock(); - try { - waitingAckRequests = callbackSequencer.after(request); - waitingAckRequests.forEach(r -> r.record.retain()); - for (WalWriteRequest waitingAckRequest : waitingAckRequests) { - boolean full = deltaWALCache.put(waitingAckRequest.record); - waitingAckRequest.confirmed = true; - if (full) { - // cache block is full, trigger WAL upload. - uploadDeltaWAL(); - } - } - } finally { - lock.unlock(); - } - for (WalWriteRequest waitingAckRequest : waitingAckRequests) { - waitingAckRequest.cf.complete(null); - } - StorageOperationStats.getInstance().appendCallbackStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - } - - private Lock getStreamCallbackLock(long streamId) { - return streamCallbackLocks[(int) ((streamId & Long.MAX_VALUE) % NUM_STREAM_CALLBACK_LOCKS)]; - } - - @SuppressWarnings("UnusedReturnValue") - CompletableFuture uploadDeltaWAL() { - return uploadDeltaWAL(LogCache.MATCH_ALL_STREAMS, false); - } - - CompletableFuture uploadDeltaWAL(long streamId, boolean force) { - synchronized (deltaWALCache) { - deltaWALCache.setConfirmOffset(confirmOffsetCalculator.get()); - Optional blockOpt = deltaWALCache.archiveCurrentBlockIfContains(streamId); - if (blockOpt.isPresent()) { - LogCache.LogCacheBlock logCacheBlock = blockOpt.get(); - DeltaWALUploadTaskContext context = new DeltaWALUploadTaskContext(logCacheBlock); - context.objectManager = this.objectManager; - context.force = force; - return uploadDeltaWAL(context); - } else { - return CompletableFuture.completedFuture(null); - } - } - } - - // only for test - CompletableFuture uploadDeltaWAL(LogCache.LogCacheBlock logCacheBlock) { - DeltaWALUploadTaskContext context = new DeltaWALUploadTaskContext(logCacheBlock); - context.objectManager = this.objectManager; - return uploadDeltaWAL(context); - } - - /** - * Upload cache block to S3. The earlier cache block will have smaller objectId and commit first. 
- */ - CompletableFuture uploadDeltaWAL(DeltaWALUploadTaskContext context) { - context.timer = new TimerUtil(); - CompletableFuture cf = new CompletableFuture<>(); - context.cf = cf; - inflightWALUploadTasks.add(context); - backgroundExecutor.execute(() -> FutureUtil.exec(() -> uploadDeltaWAL0(context), cf, LOGGER, "uploadDeltaWAL")); - cf.whenComplete((nil, ex) -> { - StorageOperationStats.getInstance().uploadWALCompleteStats.record(context.timer.elapsedAs(TimeUnit.NANOSECONDS)); - inflightWALUploadTasks.remove(context); - if (ex != null) { - LOGGER.error("upload delta WAL fail", ex); - } - }); - return cf; - } - - private void uploadDeltaWAL0(DeltaWALUploadTaskContext context) { - // calculate upload rate - long elapsed = System.currentTimeMillis() - context.cache.createdTimestamp(); - double rate; - if (context.force || elapsed <= 100L) { - rate = Long.MAX_VALUE; - } else { - rate = context.cache.size() * 1000.0 / Math.min(5000L, elapsed); - if (rate > maxDataWriteRate) { - maxDataWriteRate = rate; - } - rate = maxDataWriteRate; - } - context.task = DeltaWALUploadTask.builder() - .config(config) - .streamRecordsMap(context.cache.records()) - .objectManager(objectManager) - .s3Operator(s3Operator) - .executor(uploadWALExecutor) - .rate(rate) - .build(); - boolean walObjectPrepareQueueEmpty = walPrepareQueue.isEmpty(); - walPrepareQueue.add(context); - if (!walObjectPrepareQueueEmpty) { - // there is another WAL upload task is preparing, just return. - return; - } - prepareDeltaWALUpload(context); - } - - private void prepareDeltaWALUpload(DeltaWALUploadTaskContext context) { - context.task.prepare().thenAcceptAsync(nil -> { - StorageOperationStats.getInstance().uploadWALPrepareStats.record(context.timer.elapsedAs(TimeUnit.NANOSECONDS)); - // 1. poll out current task and trigger upload. - DeltaWALUploadTaskContext peek = walPrepareQueue.poll(); - Objects.requireNonNull(peek).task.upload().thenAccept(nil2 -> StorageOperationStats.getInstance() - .uploadWALUploadStats.record(context.timer.elapsedAs(TimeUnit.NANOSECONDS))); - // 2. add task to commit queue. - boolean walObjectCommitQueueEmpty = walCommitQueue.isEmpty(); - walCommitQueue.add(peek); - if (walObjectCommitQueueEmpty) { - commitDeltaWALUpload(peek); - } - // 3. trigger next task to prepare. - DeltaWALUploadTaskContext next = walPrepareQueue.peek(); - if (next != null) { - prepareDeltaWALUpload(next); - } - }, backgroundExecutor); - } - - private void commitDeltaWALUpload(DeltaWALUploadTaskContext context) { - context.task.commit().thenAcceptAsync(nil -> { - StorageOperationStats.getInstance().uploadWALCommitStats.record(context.timer.elapsedAs(TimeUnit.NANOSECONDS)); - // 1. poll out current task - walCommitQueue.poll(); - if (context.cache.confirmOffset() != 0) { - LOGGER.info("try trim WAL to {}", context.cache.confirmOffset()); - deltaWAL.trim(context.cache.confirmOffset()); - } - // transfer records ownership to block cache. - freeCache(context.cache); - context.cf.complete(null); - - // 2. trigger next task to commit. 
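The `walPrepareQueue`/`walCommitQueue` pair above lets uploads proceed concurrently while commits stay strictly FIFO, which is what guarantees that an earlier cache block (smaller object ID) commits first. A standalone sketch of the same two-stage ordering, with simplified types:

```java
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

final class OrderedCommitPipeline<T> {
    private final Queue<CompletableFuture<T>> commitQueue = new ArrayDeque<>(); // guarded by `this`
    private final Function<T, CompletableFuture<Void>> committer;

    OrderedCommitPipeline(Function<T, CompletableFuture<Void>> committer) {
        this.committer = committer;
    }

    // `uploaded` completes when the task's upload finishes; commits chain in FIFO order.
    synchronized void submit(CompletableFuture<T> uploaded) {
        boolean idle = commitQueue.isEmpty();
        commitQueue.add(uploaded);
        if (idle) {
            commitNext(uploaded);
        }
    }

    private void commitNext(CompletableFuture<T> head) {
        head.thenCompose(committer).whenComplete((nil, ex) -> {
            CompletableFuture<T> next;
            synchronized (this) {
                commitQueue.poll();        // drop the task we just committed
                next = commitQueue.peek(); // chain the next one, if any
            }
            if (next != null) {
                commitNext(next);
            }
        });
    }
}
```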
- DeltaWALUploadTaskContext next = walCommitQueue.peek(); - if (next != null) { - commitDeltaWALUpload(next); - } - }, backgroundExecutor).exceptionally(ex -> { - LOGGER.error("Unexpected exception when commit stream set object", ex); - context.cf.completeExceptionally(ex); - System.err.println("Unexpected exception when commit stream set object"); - //noinspection CallToPrintStackTrace - ex.printStackTrace(); - Runtime.getRuntime().halt(1); - return null; - }); - } - - private void freeCache(LogCache.LogCacheBlock cacheBlock) { - deltaWALCache.markFree(cacheBlock); - } - - /** - * WALConfirmOffsetCalculator is used to calculate the confirmed offset of WAL. - */ - static class WALConfirmOffsetCalculator { - public static final long NOOP_OFFSET = -1L; - private final ReadWriteLock rwLock = new ReentrantReadWriteLock(); - private final Queue queue = new ConcurrentLinkedQueue<>(); - private final AtomicLong confirmOffset = new AtomicLong(NOOP_OFFSET); - - public WALConfirmOffsetCalculator() { - // Update the confirmed offset periodically. - Threads.newSingleThreadScheduledExecutor(ThreadUtils.createThreadFactory("wal-calculator-update-confirm-offset", true), LOGGER) - .scheduleAtFixedRate(this::update, 100, 100, TimeUnit.MILLISECONDS); - } - - /** - * Lock of {@link #add}. - * Operations of assigning offsets, for example {@link WriteAheadLog#append}, need to be performed while holding the lock. - */ - public Lock addLock() { - return rwLock.readLock(); - } - - public void add(WalWriteRequest request) { - assert null != request; - queue.add(new WalWriteRequestWrapper(request)); - } - - /** - * Return the offset before and including which all records have been persisted. - * Note: It is updated by {@link #update} periodically, and is not real-time. - */ - public Long get() { - return confirmOffset.get(); - } - - /** - * Calculate and update the confirmed offset. - */ - public void update() { - long offset = calculate(); - if (offset != NOOP_OFFSET) { - confirmOffset.set(offset); - } - } - - /** - * Calculate the offset before and including which all records have been persisted. - * All records whose offset is not larger than the returned offset will be removed from the queue. - * It returns {@link #NOOP_OFFSET} if the first record is not persisted yet. - */ - synchronized private long calculate() { - Lock lock = rwLock.writeLock(); - lock.lock(); - try { - // Insert a flag. - queue.add(WalWriteRequestWrapper.flag()); - } finally { - lock.unlock(); - } - - long minUnconfirmedOffset = Long.MAX_VALUE; - boolean reachFlag = false; - for (WalWriteRequestWrapper wrapper : queue) { - // Iterate the queue to find the min unconfirmed offset. - if (wrapper.isFlag()) { - // Reach the flag. - reachFlag = true; - break; - } - WalWriteRequest request = wrapper.request; - assert request.offset != NOOP_OFFSET; - if (!request.confirmed) { - minUnconfirmedOffset = Math.min(minUnconfirmedOffset, request.offset); - } - } - assert reachFlag; - - long confirmedOffset = NOOP_OFFSET; - // Iterate the queue to find the max offset less than minUnconfirmedOffset. - // Remove all records whose offset is less than minUnconfirmedOffset. - for (Iterator iterator = queue.iterator(); iterator.hasNext(); ) { - WalWriteRequestWrapper wrapper = iterator.next(); - if (wrapper.isFlag()) { - /// Reach and remove the flag. 
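The flag-and-scan logic in `calculate()` reduces to a simple rule: the confirmed offset is the largest persisted offset strictly below the smallest still-unconfirmed one. A worked sketch on plain `(offset, confirmed)` pairs, using the same `-1` NOOP sentinel:

```java
import java.util.List;

public final class ConfirmOffsetDemo {
    record Req(long offset, boolean confirmed) {}

    static long confirmOffset(List<Req> queue) {
        long minUnconfirmed = Long.MAX_VALUE;
        for (Req r : queue) {
            if (!r.confirmed()) {
                minUnconfirmed = Math.min(minUnconfirmed, r.offset());
            }
        }
        long confirmed = -1L; // NOOP_OFFSET
        for (Req r : queue) {
            if (r.confirmed() && r.offset() < minUnconfirmed) {
                confirmed = Math.max(confirmed, r.offset());
            }
        }
        return confirmed;
    }

    public static void main(String[] args) {
        // 10 and 30 are persisted, 20 is still in flight -> only 10 can be confirmed.
        System.out.println(confirmOffset(List.of(
            new Req(10, true), new Req(20, false), new Req(30, true)))); // 10
    }
}
```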
- iterator.remove(); - break; - } - WalWriteRequest request = wrapper.request; - if (request.confirmed && request.offset < minUnconfirmedOffset) { - confirmedOffset = Math.max(confirmedOffset, request.offset); - iterator.remove(); - } - } - return confirmedOffset; - } - - /** - * Wrapper of {@link WalWriteRequest}. - * When the {@code request} is null, it is used as a flag. - */ - static final class WalWriteRequestWrapper { - private final WalWriteRequest request; - - /** - * - */ - WalWriteRequestWrapper(WalWriteRequest request) { - this.request = request; - } - - static WalWriteRequestWrapper flag() { - return new WalWriteRequestWrapper(null); - } - - public boolean isFlag() { - return request == null; - } - - public WalWriteRequest request() { - return request; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (WalWriteRequestWrapper) obj; - return Objects.equals(this.request, that.request); - } - - @Override - public int hashCode() { - return Objects.hash(request); - } - - @Override - public String toString() { - return "WalWriteRequestWrapper[" + - "request=" + request + ']'; - } - - } - } - - /** - * WALCallbackSequencer is used to sequence the unordered returned persistent data. - */ - static class WALCallbackSequencer { - private final Map> stream2requests = new ConcurrentHashMap<>(); - - /** - * Add request to stream sequence queue. - * When the {@code request.record.getStreamId()} is different, concurrent calls are allowed. - * When the {@code request.record.getStreamId()} is the same, concurrent calls are not allowed. And it is - * necessary to ensure that calls are made in the order of increasing offsets. - */ - public void before(WalWriteRequest request) { - try { - Queue streamRequests = stream2requests.computeIfAbsent(request.record.getStreamId(), - s -> new ConcurrentLinkedQueue<>()); - streamRequests.add(request); - } catch (Throwable ex) { - request.cf.completeExceptionally(ex); - } - } - - /** - * Try pop sequence persisted request from stream queue and move forward wal inclusive confirm offset. - * When the {@code request.record.getStreamId()} is different, concurrent calls are allowed. - * When the {@code request.record.getStreamId()} is the same, concurrent calls are not allowed. - * - * @return popped sequence persisted request. - */ - public List after(WalWriteRequest request) { - request.persisted = true; - - // Try to pop sequential persisted requests from the queue. - long streamId = request.record.getStreamId(); - Queue streamRequests = stream2requests.get(streamId); - WalWriteRequest peek = streamRequests.peek(); - if (peek == null || peek.offset != request.offset) { - return Collections.emptyList(); - } - - LinkedList rst = new LinkedList<>(); - WalWriteRequest poll = streamRequests.poll(); - assert poll == peek; - rst.add(poll); - - for (; ; ) { - peek = streamRequests.peek(); - if (peek == null || !peek.persisted) { - break; - } - poll = streamRequests.poll(); - assert poll == peek; - assert poll.record.getBaseOffset() == rst.getLast().record.getLastOffset(); - rst.add(poll); - } - - return rst; - } - - /** - * Try free stream related resources. 
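// A worked example of the sequencer above, with hypothetical offsets. Persistence callbacks
// may arrive out of order, but after() only releases the longest persisted prefix of each
// per-stream queue:
//
//   stream queue: [r1(offset=100), r2(offset=101), r3(offset=102)]
//   1) r2 persists first -> after(r2): head is r1 (offset 100 != 101) -> returns []
//   2) r1 persists       -> after(r1): pops r1, then r2 (already persisted) -> returns [r1, r2]
//   3) r3 persists       -> after(r3): returns [r3]
//
// The assert on getBaseOffset()/getLastOffset() checks that the released batches are
// contiguous, so the WAL confirm offset can move forward without gaps.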
- */ - public void tryFree(long streamId) { - Queue queue = stream2requests.get(streamId); - if (queue != null && queue.isEmpty()) { - stream2requests.remove(streamId, queue); - } - } - } - - public static class DeltaWALUploadTaskContext { - TimerUtil timer; - LogCache.LogCacheBlock cache; - DeltaWALUploadTask task; - CompletableFuture cf; - ObjectManager objectManager; - /** - * Indicate whether to force upload the delta wal. - * If true, the delta wal will be uploaded without rate limit. - */ - boolean force; - - public DeltaWALUploadTaskContext(LogCache.LogCacheBlock cache) { - this.cache = cache; - } - } - - class LogCacheEvictOOMHandler implements ByteBufAlloc.OOMHandler { - @Override - public int handle(int memoryRequired) { - try { - CompletableFuture cf = new CompletableFuture<>(); - FutureUtil.exec(() -> cf.complete(deltaWALCache.forceFree(memoryRequired)), cf, LOGGER, "handleOOM"); - return cf.get(); - } catch (Throwable e) { - return 0; - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java b/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java deleted file mode 100644 index a2890f60a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/S3Stream.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.DefaultAppendResult; -import com.automq.stream.RecordBatchWithContextWrapper; -import com.automq.stream.api.AppendResult; -import com.automq.stream.api.FetchResult; -import com.automq.stream.api.RecordBatch; -import com.automq.stream.api.RecordBatchWithContext; -import com.automq.stream.api.Stream; -import com.automq.stream.api.exceptions.ErrorCode; -import com.automq.stream.api.exceptions.FastReadFailFastException; -import com.automq.stream.api.exceptions.StreamClientException; -import com.automq.stream.s3.cache.CacheAccessType; -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.context.FetchContext; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StreamOperationStats; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.GlobalSwitch; -import io.netty.buffer.Unpooled; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.LongAdder; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.utils.FutureUtil.exec; -import static 
com.automq.stream.utils.FutureUtil.propagate; - -public class S3Stream implements Stream { - private static final Logger LOGGER = LoggerFactory.getLogger(S3Stream.class); - final AtomicLong confirmOffset; - private final String logIdent; - private final long streamId; - private final long epoch; - private final AtomicLong nextOffset; - private final Storage storage; - private final StreamManager streamManager; - private final Status status; - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock(); - private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock(); - private final ReentrantLock appendLock = new ReentrantLock(); - private final Set> pendingAppends = ConcurrentHashMap.newKeySet(); - private final Set> pendingFetches = ConcurrentHashMap.newKeySet(); - private final AsyncNetworkBandwidthLimiter networkInboundLimiter; - private final AsyncNetworkBandwidthLimiter networkOutboundLimiter; - private long startOffset; - private CompletableFuture lastPendingTrim = CompletableFuture.completedFuture(null); - - public S3Stream(long streamId, long epoch, long startOffset, long nextOffset, Storage storage, - StreamManager streamManager) { - this(streamId, epoch, startOffset, nextOffset, storage, streamManager, null, null); - } - - public S3Stream(long streamId, long epoch, long startOffset, long nextOffset, Storage storage, - StreamManager streamManager, AsyncNetworkBandwidthLimiter networkInboundLimiter, AsyncNetworkBandwidthLimiter networkOutboundLimiter) { - this.streamId = streamId; - this.epoch = epoch; - this.startOffset = startOffset; - this.logIdent = "[Stream id=" + streamId + " epoch=" + epoch + "]"; - this.nextOffset = new AtomicLong(nextOffset); - this.confirmOffset = new AtomicLong(nextOffset); - this.status = new Status(); - this.storage = storage; - this.streamManager = streamManager; - this.networkInboundLimiter = networkInboundLimiter; - this.networkOutboundLimiter = networkOutboundLimiter; - } - - public boolean isClosed() { - return status.isClosed(); - } - - @Override - public long streamId() { - return this.streamId; - } - - @Override - public long streamEpoch() { - return this.epoch; - } - - @Override - public long startOffset() { - return this.startOffset; - } - - @Override - public long confirmOffset() { - return this.confirmOffset.get(); - } - - @Override - public long nextOffset() { - return nextOffset.get(); - } - - @Override - @WithSpan - public CompletableFuture append(AppendContext context, RecordBatch recordBatch) { - TimerUtil timerUtil = new TimerUtil(); - readLock.lock(); - try { - CompletableFuture cf = exec(() -> { - if (networkInboundLimiter != null) { - networkInboundLimiter.forceConsume(recordBatch.rawPayload().remaining()); - } - appendLock.lock(); - try { - return append0(context, recordBatch); - } finally { - appendLock.unlock(); - } - }, LOGGER, "append"); - pendingAppends.add(cf); - cf.whenComplete((nil, ex) -> { - StreamOperationStats.getInstance().appendStreamStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - pendingAppends.remove(cf); - }); - return cf; - } finally { - readLock.unlock(); - } - } - - @WithSpan - private CompletableFuture append0(AppendContext context, RecordBatch recordBatch) { - if (!status.isWritable()) { - return FutureUtil.failedFuture(new StreamClientException(ErrorCode.STREAM_ALREADY_CLOSED, logIdent + " stream is not writable")); - } - long offset = nextOffset.getAndAdd(recordBatch.count()); - 
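// A minimal sketch of the locking model above, with illustrative names. Data-path calls
// (append/fetch) share the read lock and so run concurrently with each other, while lifecycle
// calls (trim/close/destroy) take the write lock and wait for the data path to drain; the
// extra appendLock serializes offset assignment among concurrent appenders.
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class LifecycleLockedStream {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final ReentrantLock appendLock = new ReentrantLock();

    void append(Runnable assignOffsetAndEnqueue) {
        lock.readLock().lock(); // many appends/fetches may hold the read lock at once
        try {
            appendLock.lock(); // but offsets must be assigned one append at a time
            try {
                assignOffsetAndEnqueue.run();
            } finally {
                appendLock.unlock();
            }
        } finally {
            lock.readLock().unlock();
        }
    }

    void close(Runnable markClosedAndDrain) {
        lock.writeLock().lock(); // exclusive: no append/fetch is in flight
        try {
            markClosedAndDrain.run();
        } finally {
            lock.writeLock().unlock();
        }
    }
}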
StreamRecordBatch streamRecordBatch = new StreamRecordBatch(streamId, epoch, offset, recordBatch.count(), Unpooled.wrappedBuffer(recordBatch.rawPayload())); - CompletableFuture cf = storage.append(context, streamRecordBatch).thenApply(nil -> { - updateConfirmOffset(offset + recordBatch.count()); - return new DefaultAppendResult(offset); - }); - return cf.whenComplete((rst, ex) -> { - if (ex == null) { - return; - } - // Wal should keep retry append until stream is fenced or wal is closed. - status.markFenced(); - if (ex instanceof StreamClientException && ((StreamClientException) ex).getCode() == ErrorCode.EXPIRED_STREAM_EPOCH) { - LOGGER.info("{} stream append, stream is fenced", logIdent); - } else { - LOGGER.warn("{} stream append fail", logIdent, ex); - } - }); - } - - @Override - @WithSpan - public CompletableFuture fetch(FetchContext context, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - TimerUtil timerUtil = new TimerUtil(); - readLock.lock(); - try { - CompletableFuture cf = exec(() -> fetch0(context, startOffset, endOffset, maxBytes), LOGGER, "fetch"); - pendingFetches.add(cf); - cf.whenComplete((rs, ex) -> { - StreamOperationStats.getInstance().fetchStreamStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (ex != null) { - Throwable cause = FutureUtil.cause(ex); - if (!(cause instanceof FastReadFailFastException)) { - LOGGER.error("{} stream fetch [{}, {}) {} fail", logIdent, startOffset, endOffset, maxBytes, ex); - } - } else if (networkOutboundLimiter != null) { - long totalSize = 0L; - for (RecordBatch recordBatch : rs.recordBatchList()) { - totalSize += recordBatch.rawPayload().remaining(); - } - networkOutboundLimiter.forceConsume(totalSize); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] fetch data, stream={}, {}-{}, total bytes: {}, cost={}ms", streamId, - startOffset, endOffset, totalSize, timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - } - } - pendingFetches.remove(cf); - }); - return cf; - } finally { - readLock.unlock(); - } - } - - @WithSpan - private CompletableFuture fetch0(FetchContext context, long startOffset, long endOffset, - int maxBytes) { - if (!status.isReadable()) { - return FutureUtil.failedFuture(new StreamClientException(ErrorCode.STREAM_ALREADY_CLOSED, logIdent + " stream is already closed")); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("{} stream try fetch, startOffset: {}, endOffset: {}, maxBytes: {}", logIdent, startOffset, endOffset, maxBytes); - } - long confirmOffset = this.confirmOffset.get(); - if (startOffset < startOffset() || endOffset > confirmOffset) { - return FutureUtil.failedFuture( - new StreamClientException( - ErrorCode.OFFSET_OUT_OF_RANGE_BOUNDS, - String.format("fetch range[%s, %s) is out of stream bound [%s, %s)", startOffset, endOffset, startOffset(), confirmOffset) - )); - } - if (startOffset > endOffset) { - return FutureUtil.failedFuture(new IllegalArgumentException(String.format("fetch startOffset %s is greater than endOffset %s", startOffset, endOffset))); - } - if (startOffset == endOffset) { - return CompletableFuture.completedFuture(new DefaultFetchResult(Collections.emptyList(), CacheAccessType.DELTA_WAL_CACHE_HIT, false)); - } - return storage.read(context, streamId, startOffset, endOffset, maxBytes).thenApply(dataBlock -> { - List records = dataBlock.getRecords(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("{} stream fetch, startOffset: {}, endOffset: {}, maxBytes: {}, records: {}", logIdent, startOffset, endOffset, 
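// A worked example of the bound checks in fetch0 above, with hypothetical values. Given
// startOffset() = 50 and confirmOffset = 100:
//
//   fetch [60, 90)  -> allowed
//   fetch [40, 90)  -> OFFSET_OUT_OF_RANGE_BOUNDS: 40 is below the stream start 50 (trimmed away)
//   fetch [60, 120) -> OFFSET_OUT_OF_RANGE_BOUNDS: 120 exceeds the confirm offset 100; records
//                      past the confirm offset are not yet durably persisted, so they are unreadable
//   fetch [90, 60)  -> IllegalArgumentException (start > end)
//   fetch [60, 60)  -> empty result, served without touching storage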
maxBytes, records.size()); - } - return new DefaultFetchResult(records, dataBlock.getCacheAccessType(), context.readOptions().pooledBuf()); - }); - } - - @Override - public CompletableFuture trim(long newStartOffset) { - writeLock.lock(); - try { - TimerUtil timerUtil = new TimerUtil(); - return exec(() -> { - CompletableFuture cf = new CompletableFuture<>(); - lastPendingTrim.whenComplete((nil, ex) -> propagate(trim0(newStartOffset), cf)); - this.lastPendingTrim = cf; - cf.whenComplete((nil, ex) -> StreamOperationStats.getInstance().trimStreamStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS))); - return cf; - }, LOGGER, "trim"); - } finally { - writeLock.unlock(); - } - } - - private CompletableFuture trim0(long newStartOffset) { - if (newStartOffset < this.startOffset) { - LOGGER.warn("{} trim newStartOffset[{}] less than current start offset[{}]", logIdent, newStartOffset, startOffset); - return CompletableFuture.completedFuture(null); - } - this.startOffset = newStartOffset; - CompletableFuture trimCf = new CompletableFuture<>(); - // await all pending fetches complete to avoid trim offset intersect with fetches. - CompletableFuture awaitPendingFetchesCf = CompletableFuture.allOf(pendingFetches.toArray(new CompletableFuture[0])); - awaitPendingFetchesCf.whenComplete((nil, ex) -> propagate(streamManager.trimStream(streamId, epoch, newStartOffset), trimCf)); - trimCf.whenComplete((nil, ex) -> { - if (ex != null) { - LOGGER.error("{} trim fail", logIdent, ex); - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("{} trim to {}", logIdent, newStartOffset); - } - } - }); - return trimCf; - } - - @Override - public CompletableFuture close() { - TimerUtil timerUtil = new TimerUtil(); - writeLock.lock(); - try { - status.markClosed(); - - // await all pending append/fetch/trim request - List> pendingRequests = new ArrayList<>(pendingAppends); - if (GlobalSwitch.STRICT) { - pendingRequests.addAll(pendingFetches); - } - pendingRequests.add(lastPendingTrim); - CompletableFuture awaitPendingRequestsCf = CompletableFuture.allOf(pendingRequests.toArray(new CompletableFuture[0])); - CompletableFuture closeCf = new CompletableFuture<>(); - - awaitPendingRequestsCf.whenComplete((nil, ex) -> propagate(exec(this::close0, LOGGER, "close"), closeCf)); - - closeCf.whenComplete((nil, ex) -> { - if (ex != null) { - LOGGER.error("{} close fail", logIdent, ex); - StreamOperationStats.getInstance().closeStreamStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - } else { - LOGGER.info("{} closed", logIdent); - StreamOperationStats.getInstance().closeStreamStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - } - }); - - return closeCf; - } finally { - writeLock.unlock(); - } - } - - private CompletableFuture close0() { - return storage.forceUpload(streamId) - .thenCompose(nil -> streamManager.closeStream(streamId, epoch)); - } - - @Override - public CompletableFuture destroy() { - writeLock.lock(); - try { - CompletableFuture destroyCf = close().thenCompose(nil -> exec(this::destroy0, LOGGER, "destroy")); - destroyCf.whenComplete((nil, ex) -> { - if (ex != null) { - LOGGER.error("{} destroy fail", logIdent, ex); - } else { - LOGGER.info("{} destroyed", logIdent); - } - }); - return destroyCf; - } finally { - writeLock.unlock(); - } - } - - private CompletableFuture destroy0() { - status.markDestroy(); - startOffset = this.confirmOffset.get(); - return streamManager.deleteStream(streamId, epoch); - } - - private void updateConfirmOffset(long newOffset) { - for (; ; ) { - long 
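// A minimal sketch of the trim chaining above, with illustrative names: each new operation is
// attached to the previous one's completion (ignoring its outcome, as whenComplete does), so
// trims execute strictly in submission order even though trim() returns a future immediately.
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

final class SerialOps {
    private CompletableFuture<Void> last = CompletableFuture.completedFuture(null);

    synchronized CompletableFuture<Void> submit(Supplier<CompletableFuture<Void>> op) {
        CompletableFuture<Void> cf = new CompletableFuture<>();
        last.whenComplete((nil, ex) -> op.get().whenComplete((r, e) -> {
            if (e != null) {
                cf.completeExceptionally(e); // propagate failure, like FutureUtil.propagate
            } else {
                cf.complete(r);
            }
        }));
        last = cf;
        return cf;
    }
}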
oldConfirmOffset = confirmOffset.get(); - if (oldConfirmOffset >= newOffset) { - break; - } - if (confirmOffset.compareAndSet(oldConfirmOffset, newOffset)) { - LOGGER.trace("{} stream update confirm offset from {} to {}", logIdent, oldConfirmOffset, newOffset); - break; - } - } - } - - static class DefaultFetchResult implements FetchResult { - private static final LongAdder INFLIGHT = new LongAdder(); - private final List<StreamRecordBatch> pooledRecords; - private final List<RecordBatchWithContext> records; - private final CacheAccessType cacheAccessType; - private final boolean pooledBuf; - private volatile boolean freed = false; - - public DefaultFetchResult(List<StreamRecordBatch> streamRecords, CacheAccessType cacheAccessType, - boolean pooledBuf) { - this.pooledRecords = streamRecords; - this.pooledBuf = pooledBuf; - this.records = new ArrayList<>(streamRecords.size()); - for (StreamRecordBatch streamRecordBatch : streamRecords) { - RecordBatch recordBatch = convert(streamRecordBatch, pooledBuf); - records.add(new RecordBatchWithContextWrapper(recordBatch, streamRecordBatch.getBaseOffset())); - } - this.cacheAccessType = cacheAccessType; - if (!pooledBuf) { - streamRecords.forEach(StreamRecordBatch::release); - } else { - INFLIGHT.increment(); - } - } - - private static RecordBatch convert(StreamRecordBatch streamRecordBatch, boolean pooledBuf) { - ByteBuffer buf; - if (pooledBuf) { - buf = streamRecordBatch.getPayload().nioBuffer(); - } else { - buf = ByteBuffer.allocate(streamRecordBatch.size()); - streamRecordBatch.getPayload().duplicate().readBytes(buf); - buf.flip(); - } - return new RecordBatch() { - @Override - public int count() { - return streamRecordBatch.getCount(); - } - - @Override - public long baseTimestamp() { - return streamRecordBatch.getEpoch(); - } - - @Override - public Map<String, String> properties() { - return Collections.emptyMap(); - } - - @Override - public ByteBuffer rawPayload() { - return buf; - } - }; - } - - @Override - public List<RecordBatchWithContext> recordBatchList() { - return records; - } - - @Override - public CacheAccessType getCacheAccessType() { - return cacheAccessType; - } - - @Override - public void free() { - if (!freed && pooledBuf) { - pooledRecords.forEach(StreamRecordBatch::release); - INFLIGHT.decrement(); - } - freed = true; - } - } - - static class Status { - private static final int CLOSED_MARK = 1; - private static final int FENCED_MARK = 1 << 1; - private static final int DESTROY_MARK = 1 << 2; - private final AtomicInteger status = new AtomicInteger(); - - public void markFenced() { - status.getAndUpdate(operand -> operand | FENCED_MARK); - } - - public void markClosed() { - status.getAndUpdate(operand -> operand | CLOSED_MARK); - } - - public void markDestroy() { - status.getAndUpdate(operand -> operand | DESTROY_MARK); - } - - public boolean isClosed() { - return (status.get() & CLOSED_MARK) != 0; - } - - public boolean isWritable() { - return status.get() == 0; - } - - public boolean isReadable() { - return status.get() == 0; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/S3StreamClient.java b/s3stream/src/main/java/com/automq/stream/s3/S3StreamClient.java deleted file mode 100644 index 2dee62a96..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/S3StreamClient.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD.
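// A self-contained sketch of the monotonic CAS pattern used by updateConfirmOffset above:
// the offset only ever moves forward, and concurrent updaters retry until their value is
// either installed or proven stale. Names are illustrative.
import java.util.concurrent.atomic.AtomicLong;

final class MonotonicOffset {
    private final AtomicLong offset = new AtomicLong(-1L);

    void advanceTo(long newOffset) {
        for (;;) {
            long old = offset.get();
            if (old >= newOffset) {
                return; // another thread already moved the offset at least this far
            }
            if (offset.compareAndSet(old, newOffset)) {
                return; // installed; otherwise the CAS lost a race, so reload and retry
            }
        }
    }
}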
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.AppendResult; -import com.automq.stream.api.CreateStreamOptions; -import com.automq.stream.api.FetchResult; -import com.automq.stream.api.OpenStreamOptions; -import com.automq.stream.api.RecordBatch; -import com.automq.stream.api.Stream; -import com.automq.stream.api.StreamClient; -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.context.FetchContext; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StreamOperationStats; -import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class S3StreamClient implements StreamClient { - private static final Logger LOGGER = LoggerFactory.getLogger(S3StreamClient.class); - private static final long STREAM_OBJECT_COMPACTION_INTERVAL_MS = TimeUnit.MINUTES.toMillis(1); - private final ScheduledExecutorService streamObjectCompactionScheduler = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("stream-object-compaction-scheduler", true), LOGGER, true); - private final Map openedStreams; - private final StreamManager streamManager; - private final Storage storage; - private final ObjectManager objectManager; - private final S3Operator s3Operator; - private final Config config; - private final AsyncNetworkBandwidthLimiter networkInboundBucket; - private final AsyncNetworkBandwidthLimiter networkOutboundBucket; - private ScheduledFuture scheduledCompactionTaskFuture; - - @SuppressWarnings("unused") - public S3StreamClient(StreamManager streamManager, Storage storage, ObjectManager objectManager, - S3Operator s3Operator, Config config) { - this(streamManager, storage, objectManager, s3Operator, config, null, null); - } - - public S3StreamClient(StreamManager streamManager, Storage storage, ObjectManager objectManager, - S3Operator s3Operator, Config config, - AsyncNetworkBandwidthLimiter networkInboundBucket, AsyncNetworkBandwidthLimiter networkOutboundBucket) { - this.streamManager = streamManager; - this.storage = storage; - this.openedStreams = new ConcurrentHashMap<>(); - this.objectManager = objectManager; - this.s3Operator = s3Operator; - this.config = config; - this.networkInboundBucket = networkInboundBucket; - this.networkOutboundBucket = networkOutboundBucket; - startStreamObjectsCompactions(); - } - - @Override - public CompletableFuture createAndOpenStream(CreateStreamOptions options) { - TimerUtil timerUtil = new TimerUtil(); - return 
FutureUtil.exec(() -> streamManager.createStream().thenCompose(streamId -> { - StreamOperationStats.getInstance().createStreamStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - return openStream0(streamId, options.epoch()); - }), LOGGER, "createAndOpenStream"); - } - - @Override - public CompletableFuture openStream(long streamId, OpenStreamOptions openStreamOptions) { - return FutureUtil.exec(() -> openStream0(streamId, openStreamOptions.epoch()), LOGGER, "openStream"); - } - - @Override - public Optional getStream(long streamId) { - return Optional.ofNullable(openedStreams.get(streamId)); - } - - /** - * Start stream objects compactions. - */ - private void startStreamObjectsCompactions() { - scheduledCompactionTaskFuture = streamObjectCompactionScheduler.scheduleWithFixedDelay(() -> { - List operationStreams = new ArrayList<>(openedStreams.values()); - operationStreams.forEach(StreamWrapper::compactStreamObject); - }, config.streamObjectCompactionIntervalMinutes(), config.streamObjectCompactionIntervalMinutes(), TimeUnit.MINUTES); - } - - private CompletableFuture openStream0(long streamId, long epoch) { - TimerUtil timerUtil = new TimerUtil(); - return streamManager.openStream(streamId, epoch). - thenApply(metadata -> { - StreamWrapper stream = new StreamWrapper(new S3Stream( - metadata.streamId(), metadata.epoch(), - metadata.startOffset(), metadata.endOffset(), - storage, streamManager, networkInboundBucket, networkOutboundBucket)); - openedStreams.put(streamId, stream); - StreamOperationStats.getInstance().openStreamStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - return stream; - }); - } - - @Override - public void shutdown() { - // cancel the submitted task if not started; do not interrupt the task if it is running. - if (scheduledCompactionTaskFuture != null) { - scheduledCompactionTaskFuture.cancel(false); - } - streamObjectCompactionScheduler.shutdown(); - try { - if (!streamObjectCompactionScheduler.awaitTermination(10, TimeUnit.SECONDS)) { - LOGGER.warn("await streamObjectCompactionExecutor timeout 10s"); - streamObjectCompactionScheduler.shutdownNow(); - } - } catch (InterruptedException e) { - streamObjectCompactionScheduler.shutdownNow(); - LOGGER.warn("await streamObjectCompactionExecutor close fail", e); - } - - TimerUtil timerUtil = new TimerUtil(); - Map> streamCloseFutures = new ConcurrentHashMap<>(); - openedStreams.forEach((streamId, stream) -> streamCloseFutures.put(streamId, stream.close())); - for (; ; ) { - Threads.sleep(1000); - List closingStreams = streamCloseFutures.entrySet().stream().filter(e -> !e.getValue().isDone()).map(Map.Entry::getKey).collect(Collectors.toList()); - LOGGER.info("waiting streams close, closed {} / all {}, closing[{}]", streamCloseFutures.size() - closingStreams.size(), streamCloseFutures.size(), closingStreams); - if (closingStreams.isEmpty()) { - break; - } - } - LOGGER.info("wait streams[{}] closed cost {}ms", streamCloseFutures.keySet(), timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - } - - class StreamWrapper implements Stream { - private final S3Stream stream; - private final Semaphore trimCompactionSemaphore = new Semaphore(1); - private volatile long lastCompactionTimestamp = 0; - - public StreamWrapper(S3Stream stream) { - this.stream = stream; - } - - @Override - public long streamId() { - return stream.streamId(); - } - - @Override - public long streamEpoch() { - return stream.streamEpoch(); - } - - @Override - public long startOffset() { - return stream.startOffset(); - } - - @Override - public long 
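// A minimal, self-contained sketch of the two-phase scheduler shutdown above: cancel pending
// scheduled work without interrupting a running task, then await termination with a bounded
// timeout before forcing shutdownNow(). Names here are illustrative.
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

final class SchedulerShutdown {
    static void shutdownGracefully(ScheduledExecutorService scheduler, ScheduledFuture<?> pending) {
        if (pending != null) {
            pending.cancel(false); // skip if not yet started; never interrupt a running run
        }
        scheduler.shutdown();
        try {
            if (!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
                scheduler.shutdownNow(); // give up after 10s, as the code above does
            }
        } catch (InterruptedException e) {
            scheduler.shutdownNow();
            Thread.currentThread().interrupt(); // restore the interrupt flag
        }
    }
}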
confirmOffset() { - return stream.confirmOffset(); - } - - @Override - public long nextOffset() { - return stream.nextOffset(); - } - - @Override - public CompletableFuture<AppendResult> append(AppendContext context, RecordBatch recordBatch) { - return stream.append(context, recordBatch); - } - - @Override - public CompletableFuture<FetchResult> fetch(FetchContext context, long startOffset, long endOffset, - int maxBytesHint) { - return stream.fetch(context, startOffset, endOffset, maxBytesHint); - } - - @Override - public CompletableFuture<Void> trim(long newStartOffset) { - return stream.trim(newStartOffset).whenComplete((nil, ex) -> { - if (!trimCompactionSemaphore.tryAcquire()) { - // ensure that only one trim-triggered compaction task runs at a time - return; - } - streamObjectCompactionScheduler.execute(() -> { - try { - // trigger compaction after trim to clean up the expired stream objects. - this.cleanupStreamObject(); - } finally { - trimCompactionSemaphore.release(); - } - }); - }); - - } - - @Override - public CompletableFuture<Void> close() { - return stream.close().whenComplete((v, e) -> openedStreams.remove(streamId(), this)); - } - - @Override - public CompletableFuture<Void> destroy() { - return stream.destroy().whenComplete((v, e) -> openedStreams.remove(streamId(), this)); - } - - public boolean isClosed() { - return stream.isClosed(); - } - - public void cleanupStreamObject() { - compactStreamObject0(true); - } - - public void compactStreamObject() { - compactStreamObject0(false); - } - - public void compactStreamObject0(boolean onlyCleanup) { - if (isClosed()) { - // the compaction task may be taking a long time, - // so we need to check if the stream is closed before starting the compaction. - return; - } - if (System.currentTimeMillis() - lastCompactionTimestamp < STREAM_OBJECT_COMPACTION_INTERVAL_MS) { - // skip compaction if the last compaction is within the interval. - return; - } - StreamObjectCompactor task = StreamObjectCompactor.builder().objectManager(objectManager).stream(this) - .s3Operator(s3Operator).maxStreamObjectSize(config.streamObjectCompactionMaxSizeBytes()).build(); - if (onlyCleanup) { - task.cleanup(); - } else { - task.compact(); - } - lastCompactionTimestamp = System.currentTimeMillis(); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/Storage.java b/s3stream/src/main/java/com/automq/stream/s3/Storage.java deleted file mode 100644 index b2b3219e9..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/Storage.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.cache.ReadDataBlock; -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.context.FetchContext; -import com.automq.stream.s3.model.StreamRecordBatch; -import java.util.concurrent.CompletableFuture; - -/** - * Write ahead log for server. - */ -public interface Storage { - - void startup(); - - void shutdown(); - - /** - * Append stream record.
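// A self-contained sketch of the tryAcquire pattern used by trim() above: a Semaphore(1)
// lets at most one trim-triggered cleanup run at a time, and concurrent trims simply skip
// scheduling another one instead of queueing up. Names are illustrative.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Semaphore;

final class CoalescedTrigger {
    private final Semaphore permit = new Semaphore(1);

    void trigger(ExecutorService executor, Runnable cleanup) {
        if (!permit.tryAcquire()) {
            return; // a cleanup is already scheduled or running; coalesce this trigger
        }
        executor.execute(() -> {
            try {
                cleanup.run();
            } finally {
                permit.release(); // allow the next trim to schedule a cleanup again
            }
        });
    }
}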
- * - * @param streamRecord {@link StreamRecordBatch} - */ - CompletableFuture append(AppendContext context, StreamRecordBatch streamRecord); - - default CompletableFuture append(StreamRecordBatch streamRecord) { - return append(AppendContext.DEFAULT, streamRecord); - } - - CompletableFuture read(FetchContext context, long streamId, long startOffset, long endOffset, - int maxBytes); - - default CompletableFuture read(long streamId, long startOffset, long endOffset, int maxBytes) { - return read(FetchContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); - } - - /** - * Force stream record in WAL upload to s3 - */ - CompletableFuture forceUpload(long streamId); -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/StreamDataBlock.java b/s3stream/src/main/java/com/automq/stream/s3/StreamDataBlock.java deleted file mode 100644 index 040d6567f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/StreamDataBlock.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import io.netty.buffer.ByteBuf; -import java.util.Comparator; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicInteger; - -public class StreamDataBlock { - public static final Comparator STREAM_OFFSET_COMPARATOR = Comparator.comparingLong(StreamDataBlock::getStartOffset); - public static final Comparator BLOCK_POSITION_COMPARATOR = Comparator.comparingLong(StreamDataBlock::getBlockStartPosition); - private final long objectId; - private final DataBlockIndex dataBlockIndex; - private final CompletableFuture dataCf = new CompletableFuture<>(); - private final AtomicInteger refCount = new AtomicInteger(1); - - public StreamDataBlock(long objectId, DataBlockIndex dataBlockIndex) { - this.dataBlockIndex = dataBlockIndex; - this.objectId = objectId; - } - - public StreamDataBlock(long streamId, long startOffset, long endOffset, - long objectId, long blockPosition, int blockSize, int recordCount) { - this.objectId = objectId; - this.dataBlockIndex = new DataBlockIndex(streamId, startOffset, (int) (endOffset - startOffset), recordCount, blockPosition, blockSize); - } - - public long getStreamId() { - return dataBlockIndex.streamId(); - } - - public long getStartOffset() { - return dataBlockIndex.startOffset(); - } - - public long getEndOffset() { - return dataBlockIndex.endOffset(); - } - - public long getStreamRangeSize() { - return dataBlockIndex.endOffsetDelta(); - } - - public long getObjectId() { - return objectId; - } - - public long getBlockStartPosition() { - return dataBlockIndex.startPosition(); - } - - public long getBlockEndPosition() { - return dataBlockIndex.endPosition(); - } - - public int getBlockSize() { - return dataBlockIndex.size(); - } - - public DataBlockIndex dataBlockIndex() { - return dataBlockIndex; - } - - public CompletableFuture getDataCf() { - return this.dataCf; - } - - public void releaseRef() { - refCount.decrementAndGet(); - } - - public void release() { - if (refCount.decrementAndGet() == 0) { - dataCf.thenAccept(buf -> { - if (buf != null) { - buf.release(); - } - }); - } - } - - @Override - public String toString() { - return "StreamDataBlock{" + - "objectId=" + objectId + 
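// A minimal sketch of the refcounted release pattern used by StreamDataBlock above, assuming
// Netty's ByteBuf: the last release() call frees the buffer, and because the data may still be
// loading, the free is attached to the future rather than applied to a buffer directly.
import io.netty.buffer.ByteBuf;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

final class RefCountedAsyncBuf {
    private final CompletableFuture<ByteBuf> dataCf = new CompletableFuture<>();
    private final AtomicInteger refCount = new AtomicInteger(1);

    void release() {
        if (refCount.decrementAndGet() == 0) {
            dataCf.thenAccept(buf -> { // free only once the pending load completes
                if (buf != null) {
                    buf.release();
                }
            });
        }
    }
}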
- ", dataBlockIndex=" + dataBlockIndex + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - StreamDataBlock that = (StreamDataBlock) o; - return objectId == that.objectId && dataBlockIndex.equals(that.dataBlockIndex); - } - - @Override - public int hashCode() { - return Objects.hash(objectId, dataBlockIndex); - } - -} \ No newline at end of file diff --git a/s3stream/src/main/java/com/automq/stream/s3/StreamObjectCompactor.java b/s3stream/src/main/java/com/automq/stream/s3/StreamObjectCompactor.java deleted file mode 100644 index d0218a5c9..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/StreamObjectCompactor.java +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.Stream; -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.s3.objects.CompactStreamObjectRequest; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.operator.Writer; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.ByteBufAlloc.STREAM_OBJECT_COMPACTION_READ; -import static com.automq.stream.s3.ByteBufAlloc.STREAM_OBJECT_COMPACTION_WRITE; -import static com.automq.stream.s3.metadata.ObjectUtils.NOOP_OBJECT_ID; -import static com.automq.stream.s3.metadata.ObjectUtils.NOOP_OFFSET; - -/** - * Stream objects compaction task. - * It intends to: - * 1. Clean up expired stream objects. - * 2. Compact some stream objects with the same stream ID into bigger stream objects. - */ -public class StreamObjectCompactor { - /** - * max object count in one group, the group count will limit the compact request size to kraft and multipart object - * part count (less than {@code Writer.MAX_PART_COUNT}). 
- */ - private static final int MAX_OBJECT_GROUP_COUNT = Math.min(5000, Writer.MAX_PART_COUNT / 2); - private static final Logger LOGGER = LoggerFactory.getLogger(StreamObjectCompactor.class); - public static final int DEFAULT_DATA_BLOCK_GROUP_SIZE_THRESHOLD = 1024 * 1024; // 1MiB - private final Logger s3ObjectLogger; - private final long maxStreamObjectSize; - private final Stream stream; - private final ObjectManager objectManager; - private final S3Operator s3Operator; - private final int dataBlockGroupSizeThreshold; - private CompactStreamObjectRequest request; - - private StreamObjectCompactor(ObjectManager objectManager, S3Operator s3Operator, Stream stream, - long maxStreamObjectSize, int dataBlockGroupSizeThreshold) { - this.objectManager = objectManager; - this.s3Operator = s3Operator; - this.stream = stream; - this.maxStreamObjectSize = Math.min(maxStreamObjectSize, Writer.MAX_OBJECT_SIZE); - String logIdent = "[StreamObjectsCompactionTask streamId=" + stream.streamId() + "] "; - this.s3ObjectLogger = S3ObjectLogger.logger(logIdent); - this.dataBlockGroupSizeThreshold = dataBlockGroupSizeThreshold; - } - - public void compact() { - try { - compact0(false); - } catch (Throwable e) { - handleCompactException(false, e); - } - } - - /** - * Cleanup expired stream objects - */ - public void cleanup() { - try { - compact0(true); - } catch (Throwable e) { - handleCompactException(true, e); - } - } - - private void handleCompactException(boolean onlyCleanup, Throwable e) { - if (stream instanceof S3StreamClient.StreamWrapper && ((S3StreamClient.StreamWrapper) stream).isClosed()) { - LOGGER.warn("[STREAM_OBJECT_COMPACT_FAIL],[STREAM_CLOSED],{},onlyCleanup={},req={}", stream.streamId(), onlyCleanup, request, e); - } else { - LOGGER.error("[STREAM_OBJECT_COMPACT_FAIL],[UNEXPECTED],{},onlyCleanup={},req={}", stream.streamId(), onlyCleanup, request, e); - } - } - - void compact0(boolean onlyCleanup) throws ExecutionException, InterruptedException { - long streamId = stream.streamId(); - long startOffset = stream.startOffset(); - - List objects = objectManager.getStreamObjects(stream.streamId(), 0L, stream.confirmOffset(), Integer.MAX_VALUE).get(); - List expiredObjects = new ArrayList<>(objects.size()); - List livingObjects = new ArrayList<>(objects.size()); - for (S3ObjectMetadata object : objects) { - if (object.endOffset() <= startOffset) { - expiredObjects.add(object); - } else { - livingObjects.add(object); - } - } - - // clean up the expired objects - if (!expiredObjects.isEmpty()) { - List compactedObjectIds = expiredObjects.stream().map(S3ObjectMetadata::objectId).collect(Collectors.toList()); - request = new CompactStreamObjectRequest(NOOP_OBJECT_ID, 0, - streamId, stream.streamEpoch(), NOOP_OFFSET, NOOP_OFFSET, compactedObjectIds); - objectManager.compactStreamObject(request).get(); - if (s3ObjectLogger.isTraceEnabled()) { - s3ObjectLogger.trace("{}", request); - } - } - - if (onlyCleanup) { - return; - } - - // compact the living objects - List> objectGroups = group0(livingObjects, maxStreamObjectSize); - for (List objectGroup : objectGroups) { - // the object group is single object and there is no data block need to be removed. 
- if (objectGroup.size() == 1 && objectGroup.get(0).startOffset() >= startOffset) { - continue; - } - long objectId = objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(60)).get(); - Optional requestOpt = new StreamObjectGroupCompactor(streamId, stream.streamEpoch(), - startOffset, objectGroup, objectId, dataBlockGroupSizeThreshold, s3Operator).compact(); - if (requestOpt.isPresent()) { - request = requestOpt.get(); - objectManager.compactStreamObject(request).get(); - if (s3ObjectLogger.isTraceEnabled()) { - s3ObjectLogger.trace("{}", request); - } - } - } - } - - static class StreamObjectGroupCompactor { - private final List objectGroup; - private final long streamId; - private final long streamEpoch; - private final long startOffset; - // compact object group to the new object - private final long objectId; - private final S3Operator s3Operator; - private final int dataBlockGroupSizeThreshold; - - public StreamObjectGroupCompactor(long streamId, long streamEpoch, long startOffset, - List objectGroup, - long objectId, int dataBlockGroupSizeThreshold, S3Operator s3Operator) { - this.streamId = streamId; - this.streamEpoch = streamEpoch; - this.startOffset = startOffset; - this.objectGroup = objectGroup; - this.objectId = objectId; - this.dataBlockGroupSizeThreshold = dataBlockGroupSizeThreshold; - this.s3Operator = s3Operator; - } - - public Optional compact() throws ExecutionException, InterruptedException { - long nextBlockPosition = 0; - long objectSize = 0; - long compactedStartOffset = objectGroup.get(0).startOffset(); - long compactedEndOffset = objectGroup.get(objectGroup.size() - 1).endOffset(); - List compactedObjectIds = new LinkedList<>(); - CompositeByteBuf indexes = ByteBufAlloc.compositeByteBuffer(); - Writer writer = s3Operator.writer(new Writer.Context(STREAM_OBJECT_COMPACTION_READ), ObjectUtils.genKey(0, objectId), ThrottleStrategy.THROTTLE_2); - long groupStartOffset = -1L; - long groupStartPosition = -1L; - int groupSize = 0; - int groupRecordCount = 0; - DataBlockIndex lastIndex = null; - for (S3ObjectMetadata object : objectGroup) { - try (ObjectReader reader = new ObjectReader(object, s3Operator)) { - ObjectReader.BasicObjectInfo basicObjectInfo = reader.basicObjectInfo().get(); - ByteBuf subIndexes = ByteBufAlloc.byteBuffer(basicObjectInfo.indexBlock().count() * DataBlockIndex.BLOCK_INDEX_SIZE, STREAM_OBJECT_COMPACTION_WRITE); - Iterator it = basicObjectInfo.indexBlock().iterator(); - long validDataBlockStartPosition = 0; - while (it.hasNext()) { - DataBlockIndex dataBlock = it.next(); - if (dataBlock.endOffset() <= startOffset) { - validDataBlockStartPosition = dataBlock.endPosition(); - compactedStartOffset = dataBlock.endOffset(); - continue; - } - if (groupSize == 0 // the first data block - || (long) groupSize + dataBlock.size() > dataBlockGroupSizeThreshold - || (long) groupRecordCount + dataBlock.recordCount() > Integer.MAX_VALUE - || dataBlock.endOffset() - groupStartOffset > Integer.MAX_VALUE) { - if (groupSize != 0) { - new DataBlockIndex(streamId, groupStartOffset, (int) (lastIndex.endOffset() - groupStartOffset), - groupRecordCount, groupStartPosition, groupSize).encode(subIndexes); - } - groupStartOffset = dataBlock.startOffset(); - groupStartPosition = nextBlockPosition; - groupSize = 0; - groupRecordCount = 0; - } - groupSize += dataBlock.size(); - groupRecordCount += dataBlock.recordCount(); - nextBlockPosition += dataBlock.size(); - lastIndex = dataBlock; - } - writer.copyWrite(ObjectUtils.genKey(0, object.objectId()), 
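// A worked example of the data-block regrouping above, with hypothetical block sizes and the
// default dataBlockGroupSizeThreshold of 1 MiB. Adjacent index entries are merged until adding
// the next block would cross the threshold, so the compacted object carries far fewer entries:
//
//   input blocks: b1=300KiB, b2=400KiB, b3=500KiB, b4=900KiB (offset-adjacent)
//   b1: groupSize == 0        -> start group 1
//   b2: 300K + 400K <= 1MiB   -> join group 1 (700KiB)
//   b3: 700K + 500K >  1MiB   -> flush group 1 {b1,b2}; start group 2
//   b4: 500K + 900K >  1MiB   -> flush group 2 {b3}; start group 3
//   end: group 3 {b4} is flushed by the lastIndex != null branch after the loop
//
// Blocks with endOffset <= startOffset are skipped, and only the surviving byte range of each
// source object is copied via writer.copyWrite(...), so expired data is dropped without being
// read back through the client.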
validDataBlockStartPosition, basicObjectInfo.dataBlockSize()); - objectSize += basicObjectInfo.dataBlockSize() - validDataBlockStartPosition; - indexes.addComponent(true, subIndexes); - compactedObjectIds.add(object.objectId()); - } - } - if (lastIndex != null) { - ByteBuf subIndexes = ByteBufAlloc.byteBuffer(DataBlockIndex.BLOCK_INDEX_SIZE, STREAM_OBJECT_COMPACTION_WRITE); - new DataBlockIndex(streamId, groupStartOffset, (int) (lastIndex.endOffset() - groupStartOffset), - groupRecordCount, groupStartPosition, groupSize).encode(subIndexes); - indexes.addComponent(true, subIndexes); - } - - CompositeByteBuf indexBlockAndFooter = ByteBufAlloc.compositeByteBuffer(); - indexBlockAndFooter.addComponent(true, indexes); - indexBlockAndFooter.addComponent(true, new ObjectWriter.Footer(nextBlockPosition, indexBlockAndFooter.readableBytes()).buffer()); - - objectSize += indexBlockAndFooter.readableBytes(); - writer.write(indexBlockAndFooter.duplicate()); - writer.close().get(); - return Optional.of(new CompactStreamObjectRequest(objectId, objectSize, streamId, streamEpoch, - compactedStartOffset, compactedEndOffset, compactedObjectIds)); - } - - } - - static List<List<S3ObjectMetadata>> group0(List<S3ObjectMetadata> objects, long maxStreamObjectSize) { - List<List<S3ObjectMetadata>> objectGroups = new LinkedList<>(); - long groupSize = 0; - long groupNextOffset = -1L; - List<S3ObjectMetadata> group = new LinkedList<>(); - int partCount = 0; - for (S3ObjectMetadata object : objects) { - int objectPartCount = (int) ((object.objectSize() + Writer.MAX_PART_SIZE - 1) / Writer.MAX_PART_SIZE); - if (objectPartCount >= Writer.MAX_PART_COUNT) { - continue; - } - if (groupNextOffset == -1L) { - groupNextOffset = object.startOffset(); - } - // seal the current group and start a new one when the object's range is not continuous with the group, - if (groupNextOffset != object.startOffset() - // or the group size would exceed maxStreamObjectSize, - || (groupSize + object.objectSize() > maxStreamObjectSize && !group.isEmpty()) - // or the group already holds MAX_OBJECT_GROUP_COUNT objects, - || group.size() >= MAX_OBJECT_GROUP_COUNT - // or the multipart part count would exceed Writer.MAX_PART_COUNT - || partCount + objectPartCount > Writer.MAX_PART_COUNT - ) { - objectGroups.add(group); - group = new LinkedList<>(); - groupSize = 0; - partCount = 0; - } - group.add(object); - groupSize += object.objectSize(); - groupNextOffset = object.endOffset(); - partCount += objectPartCount; - } - if (!group.isEmpty()) { - objectGroups.add(group); - } - return objectGroups; - } - - // no operation for now. - public void close() { - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - private ObjectManager objectManager; - private S3Operator s3Operator; - private Stream stream; - private long maxStreamObjectSize; - private int dataBlockGroupSizeThreshold = DEFAULT_DATA_BLOCK_GROUP_SIZE_THRESHOLD; - - public Builder objectManager(ObjectManager objectManager) { - this.objectManager = objectManager; - return this; - } - - public Builder s3Operator(S3Operator s3Operator) { - this.s3Operator = s3Operator; - return this; - } - - public Builder stream(Stream stream) { - this.stream = stream; - return this; - } - - /** - * Set compacted stream object max size. - * - * @param maxStreamObjectSize compacted stream object max size in bytes. - * If it is bigger than {@link Writer#MAX_OBJECT_SIZE}, - * it will be set to {@link Writer#MAX_OBJECT_SIZE}. - * @return builder.
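// A worked example of group0 above, with hypothetical offsets and sizes and
// maxStreamObjectSize = 100. Objects are grouped only while their offset ranges stay
// continuous and the group stays within the size/count/part limits:
//
//   objects: o1[0,10) size 40   o2[10,20) size 50   o3[25,30) size 20   o4[30,40) size 95
//   o1: starts group A                          (A = {o1}, size 40, nextOffset 10)
//   o2: continuous, 40 + 50 <= 100 -> joins A   (A = {o1,o2}, size 90, nextOffset 20)
//   o3: startOffset 25 != 20 -> gap, A sealed   (B = {o3}, size 20, nextOffset 30)
//   o4: continuous, but 20 + 95 > 100 -> B sealed; C = {o4}
//
//   result: [[o1,o2], [o3], [o4]] -- each group is compacted into one new stream object.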
- */ - public Builder maxStreamObjectSize(long maxStreamObjectSize) { - this.maxStreamObjectSize = maxStreamObjectSize; - return this; - } - - public Builder dataBlockGroupSizeThreshold(int dataBlockGroupSizeThreshold) { - this.dataBlockGroupSizeThreshold = dataBlockGroupSizeThreshold; - return this; - } - - public StreamObjectCompactor build() { - return new StreamObjectCompactor(objectManager, s3Operator, stream, maxStreamObjectSize, dataBlockGroupSizeThreshold); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/StreamRecordBatchCodec.java b/s3stream/src/main/java/com/automq/stream/s3/StreamRecordBatchCodec.java deleted file mode 100644 index a7a4033ad..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/StreamRecordBatchCodec.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.ByteBufSeqAlloc; -import com.automq.stream.s3.model.StreamRecordBatch; -import io.netty.buffer.ByteBuf; - -import static com.automq.stream.s3.ByteBufAlloc.ENCODE_RECORD; - -public class StreamRecordBatchCodec { - public static final byte MAGIC_V0 = 0x22; - public static final int HEADER_SIZE = - 1 // magic - + 8 // streamId - + 8 // epoch - + 8 // baseOffset - + 4 // lastOffsetDelta - + 4; // payload length - private static final ByteBufSeqAlloc ENCODE_ALLOC = new ByteBufSeqAlloc(ENCODE_RECORD, 8); - - public static ByteBuf encode(StreamRecordBatch streamRecord) { - int totalLength = HEADER_SIZE + streamRecord.size(); // payload - // use sequential allocator to avoid memory fragmentation - ByteBuf buf = ENCODE_ALLOC.byteBuffer(totalLength); - buf.writeByte(MAGIC_V0); - buf.writeLong(streamRecord.getStreamId()); - buf.writeLong(streamRecord.getEpoch()); - buf.writeLong(streamRecord.getBaseOffset()); - buf.writeInt(streamRecord.getCount()); - buf.writeInt(streamRecord.size()); - buf.writeBytes(streamRecord.getPayload().duplicate()); - return buf; - } - - /** - * Decode a stream record batch from a byte buffer and move the reader index. - * The returned stream record batch does NOT share the payload buffer with the input buffer. - */ - public static StreamRecordBatch duplicateDecode(ByteBuf buf) { - byte magic = buf.readByte(); // magic - if (magic != MAGIC_V0) { - throw new RuntimeException("Invalid magic byte " + magic); - } - long streamId = buf.readLong(); - long epoch = buf.readLong(); - long baseOffset = buf.readLong(); - int lastOffsetDelta = buf.readInt(); - int payloadLength = buf.readInt(); - ByteBuf payload = ByteBufAlloc.byteBuffer(payloadLength, ByteBufAlloc.DECODE_RECORD); - buf.readBytes(payload); - return new StreamRecordBatch(streamId, epoch, baseOffset, lastOffsetDelta, payload); - } - - /** - * Decode a stream record batch from a byte buffer and move the reader index. - * The returned stream record batch shares the payload buffer with the input buffer. 
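// A worked byte layout for the encoding above. A record with streamId=1, epoch=2,
// baseOffset=100, count=3 and a 5-byte payload is encoded into HEADER_SIZE (33) + 5 = 38 bytes:
//
//   offset 0      : 0x22 (MAGIC_V0)
//   offset 1..8   : streamId        = 1   (long)
//   offset 9..16  : epoch           = 2   (long)
//   offset 17..24 : baseOffset      = 100 (long)
//   offset 25..28 : lastOffsetDelta = 3   (int, the record count)
//   offset 29..32 : payload length  = 5   (int)
//   offset 33..37 : payload bytes
//
// duplicateDecode() copies the payload into a freshly allocated buffer, while decode() slices
// the input, so the caller must keep the input buffer alive (and retained) for the slice's
// lifetime.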
- */ - public static StreamRecordBatch decode(ByteBuf buf) { - buf.readByte(); // magic - long streamId = buf.readLong(); - long epoch = buf.readLong(); - long baseOffset = buf.readLong(); - int lastOffsetDelta = buf.readInt(); - int payloadLength = buf.readInt(); - ByteBuf payload = buf.slice(buf.readerIndex(), payloadLength); - buf.skipBytes(payloadLength); - return new StreamRecordBatch(streamId, epoch, baseOffset, lastOffsetDelta, payload); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java b/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java deleted file mode 100644 index c843e095f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/WalWriteRequest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.cache.LogCache; -import com.automq.stream.s3.context.AppendContext; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.wal.WriteAheadLog; -import java.util.concurrent.CompletableFuture; - -public class WalWriteRequest implements Comparable { - final StreamRecordBatch record; - final AppendContext context; - final CompletableFuture cf; - long offset; - /** - * Whether the record has been persisted to the {@link WriteAheadLog} - * When a continuous series of records IN A STREAM have been persisted to the WAL, they can be uploaded to S3. - * - * @see S3Storage.WALCallbackSequencer - */ - boolean persisted; - - /** - * Whether the record has been put to the {@link LogCache} - * When a continuous series of records have been persisted to the WAL and uploaded to S3, they can be trimmed. - * - * @see S3Storage.WALConfirmOffsetCalculator - */ - boolean confirmed; - - public WalWriteRequest(StreamRecordBatch record, long offset, CompletableFuture cf) { - this(record, offset, cf, AppendContext.DEFAULT); - } - - public WalWriteRequest(StreamRecordBatch record, long offset, CompletableFuture cf, AppendContext context) { - this.record = record; - this.offset = offset; - this.cf = cf; - this.context = context; - } - - @Override - public int compareTo(WalWriteRequest o) { - return record.compareTo(o.record); - } - - @Override - public String toString() { - return "WalWriteRequest{" + - "record=" + record + - ", offset=" + offset + - ", persisted=" + persisted + - ", confirmed=" + confirmed + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java deleted file mode 100644 index 497d3b6da..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/BlockCache.java +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.cache.DefaultS3BlockCache.ReadAheadRecord; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.biniarysearch.StreamRecordBatchList; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Objects; -import java.util.SortedMap; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.model.StreamRecordBatch.OBJECT_OVERHEAD; - -public class BlockCache implements ByteBufAlloc.OOMHandler { - public static final Integer ASYNC_READ_AHEAD_NOOP_OFFSET = -1; - static final int BLOCK_SIZE = 1024 * 1024; - private static final Logger LOGGER = LoggerFactory.getLogger(BlockCache.class); - final Map stream2cache = new HashMap<>(); - private final long maxSize; - private final LRUCache inactive = new LRUCache<>(); - private final LRUCache active = new LRUCache<>(); - private final AtomicLong size = new AtomicLong(); - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock(); - private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock(); - private final List cacheEvictListeners = new ArrayList<>(); - - public BlockCache(long maxSize) { - this.maxSize = maxSize; - S3StreamMetricsManager.registerBlockCacheSizeSupplier(size::get); - } - - public void registerListener(CacheEvictListener listener) { - cacheEvictListeners.add(listener); - } - - public void put(long streamId, List records) { - put(streamId, ASYNC_READ_AHEAD_NOOP_OFFSET, ASYNC_READ_AHEAD_NOOP_OFFSET, records); - } - - public void put(long streamId, long raAsyncOffset, long raEndOffset, List records) { - writeLock.lock(); - try { - put0(streamId, raAsyncOffset, raEndOffset, records); - } finally { - writeLock.unlock(); - } - } - - void put0(long streamId, long raAsyncOffset, long raEndOffset, List records) { - if (maxSize == 0 || records.isEmpty()) { - records.forEach(StreamRecordBatch::release); - return; - } - records = new ArrayList<>(records); - StreamCache streamCache = stream2cache.computeIfAbsent(streamId, id -> new StreamCache()); - long startOffset = records.get(0).getBaseOffset(); - long endOffset = records.get(records.size() - 1).getLastOffset(); - - if (raAsyncOffset != ASYNC_READ_AHEAD_NOOP_OFFSET && (raAsyncOffset < startOffset || raAsyncOffset >= endOffset)) { - LOGGER.warn("raAsyncOffset out of range, stream={}, raAsyncOffset: {}, startOffset: {}, endOffset: {}", streamId, raAsyncOffset, startOffset, endOffset); - } - - int size = records.stream().mapToInt(StreamRecordBatch::size).sum(); - size += records.size() * OBJECT_OVERHEAD; - - if 
(LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] put block cache, stream={}, {}-{}, raAsyncOffset: {}, raEndOffset: {}, total bytes: {} ", streamId, startOffset, endOffset, raAsyncOffset, raEndOffset, size); - } - - // remove overlapped part. - SortedMap tailMap = streamCache.tailBlocks(startOffset); - for (Map.Entry entry : tailMap.entrySet()) { - CacheBlock cacheBlock = entry.getValue(); - if (cacheBlock.firstOffset >= endOffset) { - break; - } - if (isWithinRange(raAsyncOffset, cacheBlock.firstOffset, cacheBlock.lastOffset) && cacheBlock.readAheadRecord == null) { - cacheBlock.readAheadRecord = new ReadAheadRecord(raEndOffset); - } - // overlap is a rare case, so removeIf is fine for the performance. - records.removeIf(record -> { - boolean remove = record.getLastOffset() > cacheBlock.firstOffset && record.getBaseOffset() < cacheBlock.lastOffset; - if (remove) { - record.release(); - } - return remove; - }); - } - - // ensure the cache size. - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] block cache size: {}/{}, ensure size: {} ", this.size.get(), maxSize, size); - } - ensureCapacity(size); - - // split to 1MB cache blocks which one block contains sequential records. - long expectStartOffset = -1L; - LinkedList batchList = new LinkedList<>(); - int partSize = 0; - for (StreamRecordBatch record : records) { - if ((expectStartOffset == -1L || record.getBaseOffset() == expectStartOffset) && partSize < BLOCK_SIZE) { - batchList.add(record); - partSize += record.size(); - } else { - ReadAheadRecord raRecord = isWithinRange(raAsyncOffset, batchList.getFirst().getBaseOffset(), batchList.getLast().getLastOffset()) ? - new ReadAheadRecord(raEndOffset) : null; - put(streamId, streamCache, new CacheBlock(batchList, raRecord)); - batchList = new LinkedList<>(); - batchList.add(record); - partSize = record.size(); - } - expectStartOffset = record.getLastOffset(); - } - if (!batchList.isEmpty()) { - ReadAheadRecord raRecord = isWithinRange(raAsyncOffset, batchList.getFirst().getBaseOffset(), batchList.getLast().getLastOffset()) ? 
- new ReadAheadRecord(raEndOffset) : null; - put(streamId, streamCache, new CacheBlock(batchList, raRecord)); - } - } - - public void setReadAheadRecord(long streamId, long raAsyncOffset, long raEndOffset) { - writeLock.lock(); - try { - StreamCache streamCache = stream2cache.get(streamId); - if (streamCache == null) { - return; - } - NavigableMap streamCacheBlocks = streamCache.tailBlocks(raAsyncOffset); - for (Map.Entry entry : streamCacheBlocks.entrySet()) { - CacheBlock cacheBlock = entry.getValue(); - if (isWithinRange(raAsyncOffset, cacheBlock.firstOffset, cacheBlock.lastOffset)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] set read ahead record, stream={}, raAsyncOffset: {}, raEndOffset: {}", streamId, raAsyncOffset, raEndOffset); - } - cacheBlock.readAheadRecord = new ReadAheadRecord(raEndOffset); - break; - } - } - } finally { - writeLock.unlock(); - } - } - - private boolean isWithinRange(long raAsyncOffset, long startOffset, long endOffset) { - return raAsyncOffset >= startOffset && raAsyncOffset < endOffset; - } - - public boolean checkRange(long streamId, long startOffset, int maxBytes) { - if (maxBytes <= 0) { - return true; - } - readLock.lock(); - try { - return checkRange0(streamId, startOffset, maxBytes); - } finally { - readLock.unlock(); - } - } - - boolean checkRange0(long streamId, long startOffset, int maxBytes) { - StreamCache streamCache = stream2cache.get(streamId); - if (streamCache == null) { - return false; - } - - NavigableMap streamCacheBlocks = streamCache.tailBlocks(startOffset); - long nextStartOffset = startOffset; - int nextMaxBytes = maxBytes; - LinkedList records = new LinkedList<>(); - for (Map.Entry entry : streamCacheBlocks.entrySet()) { - CacheBlock cacheBlock = entry.getValue(); - if (cacheBlock.lastOffset <= nextStartOffset || nextStartOffset < cacheBlock.firstOffset) { - break; - } - nextMaxBytes = readFromCacheBlock(records, cacheBlock, nextStartOffset, Long.MAX_VALUE, nextMaxBytes); - nextStartOffset = records.getLast().getLastOffset(); - if (nextMaxBytes <= 0) { - return true; - } - } - return nextMaxBytes <= 0; - } - - public GetCacheResult get(long streamId, long startOffset, long endOffset, int maxBytes) { - return get(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); - } - - /** - * Get records from cache. - * Note: the records is retained, the caller should release it. 
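- * <p>
- * A usage sketch of that contract (hypothetical caller; consume(...) is assumed):
- * <pre>{@code
- * GetCacheResult rst = blockCache.get(streamId, startOffset, endOffset, maxBytes);
- * try {
- *     rst.getRecords().forEach(record -> consume(record));
- * } finally {
- *     rst.getRecords().forEach(StreamRecordBatch::release);
- * }
- * }</pre>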
- */ - @WithSpan - public GetCacheResult get(TraceContext context, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - context.currentContext(); - if (startOffset >= endOffset || maxBytes <= 0) { - return GetCacheResult.empty(); - } - - readLock.lock(); - try { - return get0(streamId, startOffset, endOffset, maxBytes); - } finally { - readLock.unlock(); - } - } - - public GetCacheResult get0(long streamId, long startOffset, long endOffset, int maxBytes) { - StreamCache streamCache = stream2cache.get(streamId); - if (streamCache == null) { - return GetCacheResult.empty(); - } - NavigableMap streamCacheBlocks = streamCache.tailBlocks(startOffset); - long nextStartOffset = startOffset; - int nextMaxBytes = maxBytes; - List readAheadRecords = new ArrayList<>(); - LinkedList records = new LinkedList<>(); - for (Map.Entry entry : streamCacheBlocks.entrySet()) { - CacheBlock cacheBlock = entry.getValue(); - if (cacheBlock.lastOffset <= nextStartOffset || nextStartOffset < cacheBlock.firstOffset) { - break; - } - if (cacheBlock.readAheadRecord != null) { - readAheadRecords.add(cacheBlock.readAheadRecord); - cacheBlock.readAheadRecord = null; - } - nextMaxBytes = readFromCacheBlock(records, cacheBlock, nextStartOffset, endOffset, nextMaxBytes); - nextStartOffset = records.getLast().getLastOffset(); - boolean blockCompletedRead = nextStartOffset >= cacheBlock.lastOffset; - CacheBlockKey cacheBlockKey = new CacheBlockKey(streamId, cacheBlock.firstOffset); - if (blockCompletedRead) { - active.remove(cacheBlockKey); - inactive.put(cacheBlockKey, cacheBlock.size); - } else { - if (!active.touch(cacheBlockKey)) { - inactive.touch(cacheBlockKey); - } - } - - if (nextStartOffset >= endOffset || nextMaxBytes <= 0) { - break; - } - - } - - records.forEach(StreamRecordBatch::retain); - return GetCacheResult.of(records, readAheadRecords); - } - - private int readFromCacheBlock(LinkedList records, CacheBlock cacheBlock, - long nextStartOffset, long endOffset, int nextMaxBytes) { - boolean matched = false; - StreamRecordBatchList streamRecordBatchList = new StreamRecordBatchList(cacheBlock.records); - int startIndex = streamRecordBatchList.search(nextStartOffset); - if (startIndex == -1) { - // mismatched - return nextMaxBytes; - } - for (int i = startIndex; i < cacheBlock.records.size(); i++) { - StreamRecordBatch record = cacheBlock.records.get(i); - if (record.getBaseOffset() <= nextStartOffset && record.getLastOffset() > nextStartOffset) { - records.add(record); - nextStartOffset = record.getLastOffset(); - nextMaxBytes -= record.size(); - matched = true; - if (nextStartOffset >= endOffset || nextMaxBytes <= 0) { - break; - } - } else if (matched) { - break; - } - } - return nextMaxBytes; - } - - private void ensureCapacity(int size) { - ensureCapacity0(size, false); - } - - private int ensureCapacity0(int size, boolean forceEvict) { - if (!forceEvict && (maxSize - this.size.get() >= size)) { - return 0; - } - int evictBytes = 0; - for (LRUCache lru : List.of(inactive, active)) { - for (; ; ) { - Map.Entry entry = lru.pop(); - if (entry == null) { - break; - } - StreamCache streamCache = stream2cache.get(entry.getKey().streamId); - if (streamCache == null) { - LOGGER.error("[BUG] Stream cache not found for streamId: {}", entry.getKey().streamId); - continue; - } - CacheBlock cacheBlock = streamCache.remove(entry.getKey().startOffset); - if (cacheBlock == null) { - LOGGER.error("[BUG] Cannot find stream cache block: {} {}", 
entry.getKey().streamId, entry.getKey().startOffset); - } else { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("[S3BlockCache] evict block, stream={}, {}-{}, total bytes: {} ", entry.getKey().streamId, cacheBlock.firstOffset, cacheBlock.lastOffset, cacheBlock.size); - } - cacheBlock.free(); - evictBytes += cacheBlock.size; - cacheEvictListeners.forEach(listener -> listener.onCacheEvict(entry.getKey().streamId, cacheBlock.firstOffset, cacheBlock.lastOffset, cacheBlock.size)); - if (forceEvict) { - if (evictBytes >= size) { - return evictBytes; - } - } else if (maxSize - this.size.addAndGet(-cacheBlock.size) >= size) { - return evictBytes; - } - } - } - } - return evictBytes; - } - - private void logCacheStatus() { - try { - readLock.lock(); - List sortedStreamIds = new ArrayList<>(stream2cache.keySet()); - sortedStreamIds.sort(Long::compareTo); - for (Long streamId : sortedStreamIds) { - StreamCache streamCache = stream2cache.get(streamId); - if (streamCache == null) { - continue; - } - for (Map.Entry entry : streamCache.blocks().entrySet()) { - CacheBlockKey key = new CacheBlockKey(streamId, entry.getValue().firstOffset); - LOGGER.debug("[S3BlockCache] stream cache block, stream={}, {}-{}, inactive={}, active={}, total bytes: {} ", - streamId, entry.getValue().firstOffset, entry.getValue().lastOffset, inactive.containsKey(key), active.containsKey(key), entry.getValue().size); - } - } - } finally { - readLock.unlock(); - } - } - - private void put(long streamId, StreamCache streamCache, CacheBlock cacheBlock) { - streamCache.put(cacheBlock); - active.put(new CacheBlockKey(streamId, cacheBlock.firstOffset), cacheBlock.size); - size.getAndAdd(cacheBlock.size); - } - - @Override - public int handle(int memoryRequired) { - writeLock.lock(); - try { - return ensureCapacity0(memoryRequired, true); - } catch (Throwable e) { - LOGGER.error("[UNEXPECTED] handle OOM failed", e); - return 0; - } finally { - writeLock.unlock(); - } - } - - public interface CacheEvictListener { - void onCacheEvict(long streamId, long startOffset, long endOffset, int size); - } - - static final class CacheBlockKey { - private final long streamId; - private final long startOffset; - - CacheBlockKey(long streamId, long startOffset) { - this.streamId = streamId; - this.startOffset = startOffset; - } - - public long streamId() { - return streamId; - } - - public long startOffset() { - return startOffset; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (CacheBlockKey) obj; - return this.streamId == that.streamId && - this.startOffset == that.startOffset; - } - - @Override - public int hashCode() { - return Objects.hash(streamId, startOffset); - } - - @Override - public String toString() { - return "CacheBlockKey[" + - "streamId=" + streamId + ", " + - "startOffset=" + startOffset + ']'; - } - - } - - public static class CacheBlock { - List records; - long firstOffset; - long lastOffset; - int size; - ReadAheadRecord readAheadRecord; - - public CacheBlock(List records, ReadAheadRecord readAheadRecord) { - this.records = records; - this.firstOffset = records.get(0).getBaseOffset(); - this.lastOffset = records.get(records.size() - 1).getLastOffset(); - this.size = records.stream().mapToInt(StreamRecordBatch::size).sum(); - this.size += records.size() * OBJECT_OVERHEAD; - this.readAheadRecord = readAheadRecord; - } - - public void free() { - records.forEach(StreamRecordBatch::release); - records = null; 
- } - - public long size() { - return size; - } - } - - public static class GetCacheResult { - private final List records; - private final List readAheadRecords; - - private GetCacheResult(List records, List readAheadRecords) { - this.records = records; - this.readAheadRecords = readAheadRecords; - } - - public static GetCacheResult empty() { - return new GetCacheResult(Collections.emptyList(), Collections.emptyList()); - } - - public static GetCacheResult of(List records, List readAheadRecords) { - return new GetCacheResult(records, readAheadRecords); - } - - public List getRecords() { - return records; - } - - public List getReadAheadRecords() { - return readAheadRecords; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/CacheAccessType.java b/s3stream/src/main/java/com/automq/stream/s3/cache/CacheAccessType.java deleted file mode 100644 index 429b37220..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/CacheAccessType.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -public enum CacheAccessType { - DELTA_WAL_CACHE_HIT, - BLOCK_CACHE_HIT, - BLOCK_CACHE_MISS, -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockReadAccumulator.java b/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockReadAccumulator.java deleted file mode 100644 index 7cdfc954a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockReadAccumulator.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.StreamDataBlock; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.BiConsumer; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Accumulate inflight data block read requests to one real read request. 
- */ -public class DataBlockReadAccumulator { - private static final Logger LOGGER = LoggerFactory.getLogger(DataBlockReadAccumulator.class); - private final Map, DataBlockRecords> inflightDataBlockReads = new ConcurrentHashMap<>(); - - public List reserveDataBlock(List> dataBlockPairList) { - List reserveResults = new ArrayList<>(); - synchronized (inflightDataBlockReads) { - for (Pair pair : dataBlockPairList) { - ObjectReader reader = pair.getLeft(); - DataBlockIndex blockIndex = pair.getRight().dataBlockIndex(); - Pair key = Pair.of(reader.objectKey(), blockIndex.startPosition()); - DataBlockRecords records = inflightDataBlockReads.get(key); - CompletableFuture cf = new CompletableFuture<>(); - BiConsumer listener = (rst, ex) -> { - if (ex != null) { - cf.completeExceptionally(ex); - rst.release(); - } else { - // consumer of DataBlockRecords should release it on completion - cf.complete(rst); - } - }; - int reservedSize = 0; - if (records == null) { - records = new DataBlockRecords(); - records.registerListener(listener); - inflightDataBlockReads.put(key, records); - reservedSize = blockIndex.size(); - } else { - records.registerListener(listener); - } - reserveResults.add(new ReserveResult(reservedSize, cf)); - } - } - return reserveResults; - } - - public void readDataBlock(ObjectReader reader, DataBlockIndex blockIndex) { - Pair key = Pair.of(reader.objectKey(), blockIndex.startPosition()); - synchronized (inflightDataBlockReads) { - DataBlockRecords records = inflightDataBlockReads.get(key); - if (records != null) { - reader.read(blockIndex).whenComplete((dataBlock, ex) -> { - try (dataBlock) { - synchronized (inflightDataBlockReads) { - inflightDataBlockReads.remove(key, records); - } - records.complete(dataBlock, ex); - } finally { - records.release(); - } - }); - } - } - } - - public static final class ReserveResult { - private final int reserveSize; - private final CompletableFuture cf; - - public ReserveResult(int reserveSize, CompletableFuture cf) { - this.reserveSize = reserveSize; - this.cf = cf; - } - - public int reserveSize() { - return reserveSize; - } - - public CompletableFuture cf() { - return cf; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (ReserveResult) obj; - return this.reserveSize == that.reserveSize && - Objects.equals(this.cf, that.cf); - } - - @Override - public int hashCode() { - return Objects.hash(reserveSize, cf); - } - - @Override - public String toString() { - return "ReserveResult[" + - "reserveSize=" + reserveSize + ", " + - "cf=" + cf + ']'; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockRecords.java b/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockRecords.java deleted file mode 100644 index f1b0598e7..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/DataBlockRecords.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
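The accumulator removed above guarantees that concurrent reads of one data block share a single physical fetch. A hedged sketch of that contract (reader, block, and the surrounding wiring are assumed; only the first reservation contributes cache quota):

    List<DataBlockReadAccumulator.ReserveResult> r1 =
        accumulator.reserveDataBlock(List.of(Pair.of(reader, block)));
    List<DataBlockReadAccumulator.ReserveResult> r2 =
        accumulator.reserveDataBlock(List.of(Pair.of(reader, block)));
    // r1.get(0).reserveSize() == block.dataBlockIndex().size(); r2.get(0).reserveSize() == 0
    accumulator.readDataBlock(reader, block.dataBlockIndex()); // one S3 read completes both futures
    r1.get(0).cf().thenAccept(records -> {
        // per the listener contract above, the consumer releases the records when done
        records.release();
    });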
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.utils.CloseableIterator; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.BiConsumer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DataBlockRecords { - private static final Logger LOGGER = LoggerFactory.getLogger(DataBlockRecords.class); - final AtomicInteger refCount = new AtomicInteger(1); - private final List> listeners = new LinkedList<>(); - private List records = Collections.emptyList(); - - public void registerListener(BiConsumer listener) { - retain(); - listeners.add(listener); - } - - public void complete(ObjectReader.DataBlockGroup dataBlockGroup, Throwable ex) { - if (ex == null) { - records = new ArrayList<>(dataBlockGroup.recordCount()); - try (CloseableIterator it = dataBlockGroup.iterator()) { - while (it.hasNext()) { - records.add(it.next()); - } - } catch (Throwable e) { - LOGGER.error("parse data block fail", e); - records.forEach(StreamRecordBatch::release); - ex = e; - } - } - Throwable finalEx = ex; - listeners.forEach(listener -> { - try { - listener.accept(this, finalEx); - } catch (Throwable e) { - release(); - LOGGER.error("DataBlockRecords fail to notify listener {}", listener, e); - } - }); - } - - public List records() { - return Collections.unmodifiableList(records); - } - - void retain() { - refCount.incrementAndGet(); - } - - void release() { - if (refCount.decrementAndGet() == 0) { - records.forEach(StreamRecordBatch::release); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java deleted file mode 100644 index 6ea9d9f05..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/DefaultS3BlockCache.java +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
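The DataBlockRecords class removed above pairs every registerListener with a retain. A sketch of the intended lifecycle under that reading (the timeline comments are interpretation, not removed code):

    DataBlockRecords records = new DataBlockRecords();    // refCount = 1, held by the accumulator
    records.registerListener((rst, ex) -> { /* ... */ }); // retain() -> refCount = 2
    records.complete(dataBlockGroup, null);               // parses the block, notifies listeners
    records.release();                                    // accumulator drops its reference -> 1
    // the listener's consumer calls release() when finished; at 0 the StreamRecordBatches are freed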
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.Config; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.Threads; -import io.opentelemetry.api.trace.Span; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.metadata.ObjectUtils.NOOP_OFFSET; - -public class DefaultS3BlockCache implements S3BlockCache { - private static final Logger LOGGER = LoggerFactory.getLogger(DefaultS3BlockCache.class); - private final Map inflightReadAheadTasks = new ConcurrentHashMap<>(); - private final Map inflightReadStatusMap = new ConcurrentHashMap<>(); - private final BlockCache cache; - private final ExecutorService mainExecutor; - private final ReadAheadManager readAheadManager; - private final StreamReader streamReader; - private final InflightReadThrottle inflightReadThrottle; - - public DefaultS3BlockCache(Config config, ObjectManager objectManager, S3Operator s3Operator) { - int blockSize = config.objectBlockSize(); - - this.cache = new BlockCache(config.blockCacheSize()); - this.readAheadManager = new ReadAheadManager(blockSize, this.cache); - this.mainExecutor = Threads.newFixedThreadPoolWithMonitor( - 2, - "s3-block-cache-main", - false, - LOGGER); - this.inflightReadThrottle = new InflightReadThrottle(); - this.streamReader = new StreamReader(s3Operator, objectManager, cache, inflightReadAheadTasks, inflightReadThrottle); - } - - public void shutdown() { - this.mainExecutor.shutdown(); - this.streamReader.shutdown(); - this.inflightReadThrottle.shutdown(); - - } - - @Override - @WithSpan - public CompletableFuture read(TraceContext traceContext, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data, stream={}, {}-{}, total bytes: {}", streamId, startOffset, endOffset, maxBytes); - } - final TraceContext finalTraceContext = new TraceContext(traceContext); - this.readAheadManager.updateReadProgress(streamId, startOffset); - TimerUtil timerUtil = new TimerUtil(); - CompletableFuture readCf = new CompletableFuture<>(); - ReadAheadAgent agent = this.readAheadManager.getOrCreateReadAheadAgent(streamId, startOffset); - UUID uuid = UUID.randomUUID(); - ReadTaskKey key = new ReadTaskKey(streamId, startOffset, endOffset, maxBytes, uuid); - ReadTaskContext context = new ReadTaskContext(agent, 
ReadBlockCacheStatus.INIT); - this.inflightReadStatusMap.put(key, context); - // submit read task to mainExecutor to avoid read slower the caller thread. - mainExecutor.execute(() -> { - try { - FutureUtil.propagate(read0(finalTraceContext, streamId, startOffset, endOffset, maxBytes, uuid, context).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("read {} [{}, {}), maxBytes: {} from block cache fail", streamId, startOffset, endOffset, maxBytes, ex); - this.inflightReadThrottle.release(uuid); - this.inflightReadStatusMap.remove(key); - return; - } - int totalReturnedSize = ret.getRecords().stream().mapToInt(StreamRecordBatch::size).sum(); - this.readAheadManager.updateReadResult(streamId, startOffset, - ret.getRecords().get(ret.getRecords().size() - 1).getLastOffset(), totalReturnedSize); - - long timeElapsed = timerUtil.elapsedAs(TimeUnit.NANOSECONDS); - boolean isCacheHit = ret.getCacheAccessType() == CacheAccessType.BLOCK_CACHE_HIT; - StorageOperationStats.getInstance().readBlockCacheStats(isCacheHit).record(timeElapsed); - Span.fromContext(finalTraceContext.currentContext()).setAttribute("cache_hit", isCacheHit); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data complete, cache hit: {}, stream={}, {}-{}, total bytes: {}", - ret.getCacheAccessType() == CacheAccessType.BLOCK_CACHE_HIT, streamId, startOffset, endOffset, totalReturnedSize); - } - this.inflightReadThrottle.release(uuid); - this.inflightReadStatusMap.remove(key); - }), readCf); - } catch (Exception e) { - LOGGER.error("read {} [{}, {}), maxBytes: {} from block cache fail, {}", streamId, startOffset, endOffset, maxBytes, e); - this.inflightReadThrottle.release(uuid); - this.inflightReadStatusMap.remove(key); - readCf.completeExceptionally(e); - } - }); - return readCf; - } - - @WithSpan - public CompletableFuture read0(TraceContext traceContext, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes, - UUID uuid, ReadTaskContext context) { - ReadAheadAgent agent = context.agent; - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read0, stream={}, {}-{}, total bytes: {}, uuid: {} ", streamId, startOffset, endOffset, maxBytes, uuid); - } - - if (startOffset >= endOffset || maxBytes <= 0) { - return CompletableFuture.completedFuture(new ReadDataBlock(Collections.emptyList(), CacheAccessType.BLOCK_CACHE_MISS)); - } - - long nextStartOffset = startOffset; - int nextMaxBytes = maxBytes; - - ReadAheadTaskContext inflightReadAheadTaskContext = inflightReadAheadTasks.get(new ReadAheadTaskKey(streamId, nextStartOffset)); - if (inflightReadAheadTaskContext != null) { - CompletableFuture readCf = new CompletableFuture<>(); - context.setStatus(ReadBlockCacheStatus.WAIT_INFLIGHT_RA); - inflightReadAheadTaskContext.cf.whenComplete((nil, ex) -> FutureUtil.exec(() -> FutureUtil.propagate( - read0(traceContext, streamId, startOffset, endOffset, maxBytes, uuid, context), readCf), readCf, LOGGER, "read0")); - return readCf; - } - - // 1. 
get from cache - context.setStatus(ReadBlockCacheStatus.GET_FROM_CACHE); - BlockCache.GetCacheResult cacheRst = cache.get(traceContext, streamId, nextStartOffset, endOffset, nextMaxBytes); - List cacheRecords = cacheRst.getRecords(); - if (!cacheRecords.isEmpty()) { - asyncReadAhead(streamId, agent, cacheRst.getReadAheadRecords()); - nextStartOffset = cacheRecords.get(cacheRecords.size() - 1).getLastOffset(); - nextMaxBytes -= Math.min(nextMaxBytes, cacheRecords.stream().mapToInt(StreamRecordBatch::size).sum()); - if (nextStartOffset >= endOffset || nextMaxBytes == 0) { - // cache hit - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data hit cache, stream={}, {}-{}, total bytes: {} ", streamId, startOffset, endOffset, maxBytes); - } - return CompletableFuture.completedFuture(new ReadDataBlock(cacheRecords, CacheAccessType.BLOCK_CACHE_HIT)); - } else { - // cache partially hit - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data partially hit cache, stream={}, {}-{}, total bytes: {} ", streamId, nextStartOffset, endOffset, nextMaxBytes); - } - return read0(traceContext, streamId, nextStartOffset, endOffset, nextMaxBytes, uuid, context).thenApply(rst -> { - List records = new ArrayList<>(cacheRecords); - records.addAll(rst.getRecords()); - return new ReadDataBlock(records, CacheAccessType.BLOCK_CACHE_MISS); - }); - } - } - - // 2. get from s3 - context.setStatus(ReadBlockCacheStatus.GET_FROM_S3); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] read data cache miss, stream={}, {}-{}, total bytes: {} ", streamId, startOffset, endOffset, maxBytes); - } - return streamReader.syncReadAhead(traceContext, streamId, startOffset, endOffset, maxBytes, agent, uuid) - .thenCompose(rst -> { - if (!rst.isEmpty()) { - int remainBytes = maxBytes - rst.stream().mapToInt(StreamRecordBatch::size).sum(); - long lastOffset = rst.get(rst.size() - 1).getLastOffset(); - if (remainBytes > 0 && lastOffset < endOffset) { - // retry read - return read0(traceContext, streamId, lastOffset, endOffset, remainBytes, uuid, context).thenApply(rst2 -> { - List records = new ArrayList<>(rst); - records.addAll(rst2.getRecords()); - return new ReadDataBlock(records, CacheAccessType.BLOCK_CACHE_MISS); - }); - } - } - return CompletableFuture.completedFuture(new ReadDataBlock(rst, CacheAccessType.BLOCK_CACHE_MISS)); - }); - } - - private void asyncReadAhead(long streamId, ReadAheadAgent agent, List readAheadRecords) { - //TODO: read ahead only when there are enough inactive bytes to evict - if (readAheadRecords.isEmpty()) { - return; - } - ReadAheadRecord lastRecord = readAheadRecords.get(readAheadRecords.size() - 1); - long nextRaOffset = lastRecord.nextRAOffset(); - int nextRaSize = agent.getNextReadAheadSize(); - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] async read ahead, stream={}, {}-{}, total bytes: {} ", - streamId, nextRaOffset, NOOP_OFFSET, nextRaSize); - } - - // check if next ra hits cache - if (cache.checkRange(streamId, nextRaOffset, nextRaSize)) { - return; - } - - streamReader.asyncReadAhead(streamId, nextRaOffset, NOOP_OFFSET, nextRaSize, agent); - } - - public enum ReadBlockCacheStatus { - /* Status for read request */ - INIT, - WAIT_INFLIGHT_RA, - GET_FROM_CACHE, - GET_FROM_S3, - - /* Status for read ahead request */ - WAIT_DATA_INDEX, - WAIT_FETCH_DATA, - WAIT_THROTTLE, - } - - public static final class ReadAheadTaskKey { - private final long streamId; - private final long startOffset; - - public ReadAheadTaskKey(long streamId, long 
startOffset) { - this.streamId = streamId; - this.startOffset = startOffset; - } - - public long streamId() { - return streamId; - } - - public long startOffset() { - return startOffset; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (ReadAheadTaskKey) obj; - return this.streamId == that.streamId && - this.startOffset == that.startOffset; - } - - @Override - public int hashCode() { - return Objects.hash(streamId, startOffset); - } - - @Override - public String toString() { - return "ReadAheadTaskKey[" + - "streamId=" + streamId + ", " + - "startOffset=" + startOffset + ']'; - } - - } - - public static class ReadAheadTaskContext { - final CompletableFuture cf; - ReadBlockCacheStatus status; - - public ReadAheadTaskContext(CompletableFuture cf, ReadBlockCacheStatus status) { - this.cf = cf; - this.status = status; - } - - void setStatus(ReadBlockCacheStatus status) { - this.status = status; - } - } - - public static final class ReadTaskKey { - private final long streamId; - private final long startOffset; - private final long endOffset; - private final int maxBytes; - private final UUID uuid; - - public ReadTaskKey(long streamId, long startOffset, long endOffset, int maxBytes, UUID uuid) { - this.streamId = streamId; - this.startOffset = startOffset; - this.endOffset = endOffset; - this.maxBytes = maxBytes; - this.uuid = uuid; - } - - @Override - public String toString() { - return "ReadTaskKey{" + - "streamId=" + streamId + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - ", maxBytes=" + maxBytes + - ", uuid=" + uuid + - '}'; - } - - public long streamId() { - return streamId; - } - - public long startOffset() { - return startOffset; - } - - public long endOffset() { - return endOffset; - } - - public int maxBytes() { - return maxBytes; - } - - public UUID uuid() { - return uuid; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (ReadTaskKey) obj; - return this.streamId == that.streamId && - this.startOffset == that.startOffset && - this.endOffset == that.endOffset && - this.maxBytes == that.maxBytes && - Objects.equals(this.uuid, that.uuid); - } - - @Override - public int hashCode() { - return Objects.hash(streamId, startOffset, endOffset, maxBytes, uuid); - } - - } - - public static class ReadTaskContext { - final ReadAheadAgent agent; - ReadBlockCacheStatus status; - - public ReadTaskContext(ReadAheadAgent agent, ReadBlockCacheStatus status) { - this.agent = agent; - this.status = status; - } - - void setStatus(ReadBlockCacheStatus status) { - this.status = status; - } - } - - public static final class ReadAheadRecord { - private final long nextRAOffset; - - public ReadAheadRecord(long nextRAOffset) { - this.nextRAOffset = nextRAOffset; - } - - public long nextRAOffset() { - return nextRAOffset; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (ReadAheadRecord) obj; - return this.nextRAOffset == that.nextRAOffset; - } - - @Override - public int hashCode() { - return Objects.hash(nextRAOffset); - } - - @Override - public String toString() { - return "ReadAheadRecord[" + - "nextRAOffset=" + nextRAOffset + ']'; - } - - } - -} diff --git 
a/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java b/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java deleted file mode 100644 index 197fcd8c1..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/InflightReadThrottle.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import com.automq.stream.utils.Utils; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class InflightReadThrottle implements Runnable { - private static final Logger LOGGER = LoggerFactory.getLogger(InflightReadThrottle.class); - private static final Integer MAX_INFLIGHT_READ_SIZE = 256 * 1024 * 1024; //256MB - private final int maxInflightReadBytes; - private final Lock lock = new ReentrantLock(); - private final Condition condition = lock.newCondition(); - private final Map inflightQuotaMap = new HashMap<>(); - private final Queue inflightReadQueue = new LinkedList<>(); - private final ExecutorService executorService = Threads.newFixedThreadPool(1, - ThreadUtils.createThreadFactory("inflight-read-throttle-%d", false), LOGGER); - - private int remainingInflightReadBytes; - - public InflightReadThrottle() { - this((int) (MAX_INFLIGHT_READ_SIZE * (1 - Utils.getMaxMergeReadSparsityRate()))); - } - - public InflightReadThrottle(int maxInflightReadBytes) { - this.maxInflightReadBytes = maxInflightReadBytes; - this.remainingInflightReadBytes = maxInflightReadBytes; - executorService.execute(this); - S3StreamMetricsManager.registerInflightReadSizeLimiterSupplier(this::getRemainingInflightReadBytes); - } - - public void shutdown() { - executorService.shutdown(); - } - - public int getInflightQueueSize() { - lock.lock(); - try { - return inflightReadQueue.size(); - } finally { - lock.unlock(); - } - } - - public int getRemainingInflightReadBytes() { - lock.lock(); - try { - return remainingInflightReadBytes; - } finally { - lock.unlock(); - } - } - - public CompletableFuture acquire(UUID uuid, int readSize) { - return acquire(TraceContext.DEFAULT, uuid, readSize); - } - - @WithSpan - public CompletableFuture acquire(TraceContext context, UUID uuid, int readSize) { - context.currentContext(); - lock.lock(); - try { - if (readSize > maxInflightReadBytes) { - return CompletableFuture.failedFuture(new IllegalArgumentException(String.format( - "read size %d exceeds max inflight read size %d", readSize, maxInflightReadBytes))); - } - if (readSize <= 0) { - return CompletableFuture.completedFuture(null); - } - inflightQuotaMap.put(uuid, readSize); - if 
(readSize <= remainingInflightReadBytes) { - remainingInflightReadBytes -= readSize; - return CompletableFuture.completedFuture(null); - } - CompletableFuture cf = new CompletableFuture<>(); - inflightReadQueue.offer(new InflightReadItem(readSize, cf)); - condition.signalAll(); - return cf; - } finally { - lock.unlock(); - } - } - - public void release(UUID uuid) { - lock.lock(); - try { - Integer inflightReadSize = inflightQuotaMap.remove(uuid); - if (inflightReadSize != null) { - remainingInflightReadBytes += inflightReadSize; - condition.signalAll(); - } - } finally { - lock.unlock(); - } - } - - @Override - public void run() { - while (true) { - lock.lock(); - try { - while (inflightReadQueue.isEmpty() || inflightReadQueue.peek().readSize > remainingInflightReadBytes) { - condition.await(); - } - InflightReadItem inflightReadItem = inflightReadQueue.poll(); - if (inflightReadItem == null) { - continue; - } - remainingInflightReadBytes -= inflightReadItem.readSize; - inflightReadItem.cf.complete(null); - } catch (Exception e) { - break; - } finally { - lock.unlock(); - } - } - } - - static final class InflightReadItem { - private final int readSize; - private final CompletableFuture cf; - - InflightReadItem(int readSize, CompletableFuture cf) { - this.readSize = readSize; - this.cf = cf; - } - - public int readSize() { - return readSize; - } - - public CompletableFuture cf() { - return cf; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (InflightReadItem) obj; - return this.readSize == that.readSize && - Objects.equals(this.cf, that.cf); - } - - @Override - public int hashCode() { - return Objects.hash(readSize, cf); - } - - @Override - public String toString() { - return "InflightReadItem[" + - "readSize=" + readSize + ", " + - "cf=" + cf + ']'; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/LRUCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/LRUCache.java deleted file mode 100644 index bf52bf33b..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/LRUCache.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
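A hedged usage sketch for the throttle removed above: quota is acquired before an S3 read and must be released with the same UUID on every path (readFromS3() and the 64 MiB budget are illustrative assumptions):

    UUID uuid = UUID.randomUUID();
    InflightReadThrottle throttle = new InflightReadThrottle(64 * 1024 * 1024);
    throttle.acquire(uuid, readSize)
        .thenCompose(nil -> readFromS3())
        .whenComplete((rst, ex) -> throttle.release(uuid)); // return the quota on success or failure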
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Set; - -public class LRUCache { - protected final LinkedHashMap cache; - protected final Set> cacheEntrySet; - - public LRUCache() { - cache = new LinkedHashMap<>(16, .75f, true); - cacheEntrySet = cache.entrySet(); - } - - public synchronized boolean touch(K key) { - return cache.get(key) != null; - } - - public synchronized void put(K key, V value) { - if (cache.put(key, value) != null) { - touch(key); - } - } - - public synchronized V get(K key) { - return cache.get(key); - } - - public synchronized Map.Entry pop() { - Iterator> it = cacheEntrySet.iterator(); - if (!it.hasNext()) { - return null; - } - Map.Entry entry = it.next(); - if (entry == null) { - return null; - } - it.remove(); - return entry; - } - - public synchronized boolean remove(K key) { - return cache.remove(key) != null; - } - - public synchronized int size() { - return cache.size(); - } - - public synchronized boolean containsKey(K key) { - return cache.containsKey(key); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java deleted file mode 100644 index 85e2623e2..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/LogCache.java +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.biniarysearch.StreamRecordBatchList; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.cache.LogCache.StreamRange.NOOP_OFFSET; -import static com.automq.stream.s3.model.StreamRecordBatch.OBJECT_OVERHEAD; -import static com.automq.stream.utils.FutureUtil.suppress; - -public class LogCache { - public static final long MATCH_ALL_STREAMS = -1L; - private static final Logger LOGGER = LoggerFactory.getLogger(LogCache.class); - private static final int 
DEFAULT_MAX_BLOCK_STREAM_COUNT = 10000; - private static final Consumer DEFAULT_BLOCK_FREE_LISTENER = block -> { - }; - final List blocks = new ArrayList<>(); - private final long capacity; - private final long cacheBlockMaxSize; - private final int maxCacheBlockStreamCount; - private final AtomicLong size = new AtomicLong(); - private final Consumer blockFreeListener; - // read write lock which guards the LogCache.blocks - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock(); - private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock(); - private LogCacheBlock activeBlock; - private long confirmOffset; - - public LogCache(long capacity, long cacheBlockMaxSize, int maxCacheBlockStreamCount, - Consumer blockFreeListener) { - this.capacity = capacity; - this.cacheBlockMaxSize = cacheBlockMaxSize; - this.maxCacheBlockStreamCount = maxCacheBlockStreamCount; - this.activeBlock = new LogCacheBlock(cacheBlockMaxSize, maxCacheBlockStreamCount); - this.blocks.add(activeBlock); - this.blockFreeListener = blockFreeListener; - S3StreamMetricsManager.registerDeltaWalCacheSizeSupplier(size::get); - } - - public LogCache(long capacity, long cacheBlockMaxSize) { - this(capacity, cacheBlockMaxSize, DEFAULT_MAX_BLOCK_STREAM_COUNT, DEFAULT_BLOCK_FREE_LISTENER); - } - - public LogCache(long capacity, long cacheBlockMaxSize, int maxCacheBlockStreamCount) { - this(capacity, cacheBlockMaxSize, maxCacheBlockStreamCount, DEFAULT_BLOCK_FREE_LISTENER); - } - - /** - * Put a record batch into the cache. - * record batched in the same stream should be put in order. - */ - public boolean put(StreamRecordBatch recordBatch) { - TimerUtil timerUtil = new TimerUtil(); - tryRealFree(); - size.addAndGet(recordBatch.size() + OBJECT_OVERHEAD); - readLock.lock(); - boolean full; - try { - full = activeBlock.put(recordBatch); - } finally { - readLock.unlock(); - } - StorageOperationStats.getInstance().appendLogCacheStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - return full; - } - - public List get(long streamId, long startOffset, long endOffset, int maxBytes) { - return get(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes); - } - - /** - * Get streamId [startOffset, endOffset) range records with maxBytes limit. - *
- * - If the requested range can be fully satisfied, then return the corresponding cached records.
- * - Otherwise, return the latest continuous records and leave the remaining range to the block cache.
- * <p>
- * e.g. Cached blocks: [0, 10], [100, 200]
- * <p>
- * - query [0, 10] returns [0, 10] (fully satisfied)
- * <p>
- * - query [0, 11] returns an empty list (left intersect, leave all data to the block cache for simplification)
- * <p>
- * - query [5, 20] returns an empty list (left intersect, leave all data to the block cache for simplification)
- * <p>
- * - query [90, 110) returns [100, 110] (right intersect, leave [90, 100) to the block cache)
- * <p>
- * - query [40, 50] returns an empty list (mismatch)
- * <p>
- * Note: the records is retained, the caller should release it. - */ - @WithSpan - public List get(TraceContext context, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes) { - context.currentContext(); - TimerUtil timerUtil = new TimerUtil(); - List records; - readLock.lock(); - try { - records = get0(streamId, startOffset, endOffset, maxBytes); - records.forEach(StreamRecordBatch::retain); - } finally { - readLock.unlock(); - } - - long timeElapsed = timerUtil.elapsedAs(TimeUnit.NANOSECONDS); - boolean isCacheHit = !records.isEmpty() && records.get(0).getBaseOffset() <= startOffset; - StorageOperationStats.getInstance().readLogCacheStats(isCacheHit).record(timeElapsed); - return records; - } - - public List get0(long streamId, long startOffset, long endOffset, int maxBytes) { - List rst = new LinkedList<>(); - long nextStartOffset = startOffset; - int nextMaxBytes = maxBytes; - boolean fulfill = false; - List blocks = this.blocks; - for (LogCacheBlock archiveBlock : blocks) { - List records = archiveBlock.get(streamId, nextStartOffset, endOffset, nextMaxBytes); - if (records.isEmpty()) { - continue; - } - nextStartOffset = records.get(records.size() - 1).getLastOffset(); - int recordsSize = 0; - for (StreamRecordBatch record : records) { - recordsSize += record.size(); - } - nextMaxBytes -= Math.min(nextMaxBytes, recordsSize); - rst.addAll(records); - if (nextStartOffset >= endOffset || nextMaxBytes == 0) { - fulfill = true; - break; - } - } - if (fulfill) { - return rst; - } else { - long lastBlockStreamStartOffset = NOOP_OFFSET; - for (int i = blocks.size() - 1; i >= 0; i--) { - LogCacheBlock block = blocks.get(i); - StreamRange streamRange = block.getStreamRange(streamId); - if (streamRange.endOffset == NOOP_OFFSET) { - continue; - } - if (lastBlockStreamStartOffset == NOOP_OFFSET || lastBlockStreamStartOffset == streamRange.endOffset) { - lastBlockStreamStartOffset = streamRange.startOffset; - } else { - break; - } - } - if (lastBlockStreamStartOffset == NOOP_OFFSET /* Mismatch */ - || lastBlockStreamStartOffset >= endOffset /* non-right intersect */ - || lastBlockStreamStartOffset <= startOffset /* left intersect */) { - return Collections.emptyList(); - } - return get0(streamId, lastBlockStreamStartOffset, endOffset, maxBytes); - } - } - - public LogCacheBlock archiveCurrentBlock() { - writeLock.lock(); - try { - LogCacheBlock block = activeBlock; - block.confirmOffset = confirmOffset; - activeBlock = new LogCacheBlock(cacheBlockMaxSize, maxCacheBlockStreamCount); - blocks.add(activeBlock); - return block; - } finally { - writeLock.unlock(); - } - } - - public Optional archiveCurrentBlockIfContains(long streamId) { - writeLock.lock(); - try { - return archiveCurrentBlockIfContains0(streamId); - } finally { - writeLock.unlock(); - } - } - - Optional archiveCurrentBlockIfContains0(long streamId) { - if (streamId == MATCH_ALL_STREAMS) { - if (activeBlock.size() > 0) { - return Optional.of(archiveCurrentBlock()); - } else { - return Optional.empty(); - } - } else { - if (activeBlock.map.containsKey(streamId)) { - return Optional.of(archiveCurrentBlock()); - } else { - return Optional.empty(); - } - } - - } - - public void markFree(LogCacheBlock block) { - block.free = true; - tryRealFree(); - } - - private void tryRealFree() { - if (size.get() <= capacity * 0.9) { - return; - } - List removed = new ArrayList<>(); - writeLock.lock(); - try { - blocks.removeIf(b -> { - if (size.get() <= capacity * 0.9) { - 
return false; - } - if (b.free) { - size.addAndGet(-b.size()); - removed.add(b); - } - return b.free; - }); - } finally { - writeLock.unlock(); - } - removed.forEach(b -> { - blockFreeListener.accept(b); - b.free(); - }); - } - - public int forceFree(int required) { - AtomicInteger freedBytes = new AtomicInteger(); - List removed = new ArrayList<>(); - writeLock.lock(); - try { - blocks.removeIf(block -> { - if (!block.free || freedBytes.get() >= required) { - return false; - } - long blockSize = block.size(); - size.addAndGet(-blockSize); - freedBytes.addAndGet((int) blockSize); - removed.add(block); - return true; - }); - } finally { - writeLock.unlock(); - } - removed.forEach(b -> { - blockFreeListener.accept(b); - b.free(); - }); - return freedBytes.get(); - } - - public void setConfirmOffset(long confirmOffset) { - this.confirmOffset = confirmOffset; - } - - public long size() { - return size.get(); - } - - public static class LogCacheBlock { - private static final AtomicLong BLOCK_ID_ALLOC = new AtomicLong(); - final Map map = new ConcurrentHashMap<>(); - private final long blockId; - private final long maxSize; - private final int maxStreamCount; - private final long createdTimestamp = System.currentTimeMillis(); - private final AtomicLong size = new AtomicLong(); - volatile boolean free; - private long confirmOffset; - - public LogCacheBlock(long maxSize, int maxStreamCount) { - this.blockId = BLOCK_ID_ALLOC.getAndIncrement(); - this.maxSize = maxSize; - this.maxStreamCount = maxStreamCount; - } - - public LogCacheBlock(long maxSize) { - this(maxSize, DEFAULT_MAX_BLOCK_STREAM_COUNT); - } - - public long blockId() { - return blockId; - } - - public boolean put(StreamRecordBatch recordBatch) { - map.compute(recordBatch.getStreamId(), (id, cache) -> { - if (cache == null) { - cache = new StreamCache(); - } - cache.add(recordBatch); - return cache; - }); - int recordSize = recordBatch.size(); - return size.addAndGet(recordSize + OBJECT_OVERHEAD) >= maxSize || map.size() >= maxStreamCount; - } - - public List get(long streamId, long startOffset, long endOffset, int maxBytes) { - StreamCache cache = map.get(streamId); - if (cache == null) { - return Collections.emptyList(); - } - return cache.get(startOffset, endOffset, maxBytes); - } - - StreamRange getStreamRange(long streamId) { - StreamCache streamCache = map.get(streamId); - if (streamCache == null) { - return new StreamRange(NOOP_OFFSET, NOOP_OFFSET); - } else { - return streamCache.range(); - } - } - - public Map> records() { - return map.entrySet().stream() - .map(e -> Map.entry(e.getKey(), e.getValue().records)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - } - - public long confirmOffset() { - return confirmOffset; - } - - public void confirmOffset(long confirmOffset) { - this.confirmOffset = confirmOffset; - } - - public long size() { - return size.get(); - } - - public void free() { - suppress(() -> { - map.forEach((streamId, records) -> records.free()); - map.clear(); - }, LOGGER); - } - - public long createdTimestamp() { - return createdTimestamp; - } - - public boolean containsStream(long streamId) { - if (MATCH_ALL_STREAMS == streamId) { - return true; - } - return map.containsKey(streamId); - } - } - - static class StreamRange { - public static final long NOOP_OFFSET = -1L; - long startOffset; - long endOffset; - - public StreamRange(long startOffset, long endOffset) { - this.startOffset = startOffset; - this.endOffset = endOffset; - } - } - - static class StreamCache { - List records = new 
ArrayList<>(); - long startOffset = NOOP_OFFSET; - long endOffset = NOOP_OFFSET; - Map offsetIndexMap = new HashMap<>(); - - synchronized void add(StreamRecordBatch recordBatch) { - if (recordBatch.getBaseOffset() != endOffset && endOffset != NOOP_OFFSET) { - RuntimeException ex = new IllegalArgumentException(String.format("streamId=%s record batch base offset mismatch, expect %s, actual %s", - recordBatch.getStreamId(), endOffset, recordBatch.getBaseOffset())); - LOGGER.error("[FATAL]", ex); - } - records.add(recordBatch); - if (startOffset == NOOP_OFFSET) { - startOffset = recordBatch.getBaseOffset(); - } - endOffset = recordBatch.getLastOffset(); - } - - synchronized List get(long startOffset, long endOffset, int maxBytes) { - if (this.startOffset > startOffset || this.endOffset <= startOffset) { - return Collections.emptyList(); - } - int startIndex = searchStartIndex(startOffset); - if (startIndex == -1) { - // mismatched - return Collections.emptyList(); - } - int endIndex = -1; - int remainingBytesSize = maxBytes; - long rstEndOffset = NOOP_OFFSET; - for (int i = startIndex; i < records.size(); i++) { - StreamRecordBatch record = records.get(i); - endIndex = i + 1; - remainingBytesSize -= Math.min(remainingBytesSize, record.size()); - rstEndOffset = record.getLastOffset(); - if (record.getLastOffset() >= endOffset || remainingBytesSize == 0) { - break; - } - } - if (rstEndOffset != NOOP_OFFSET) { - map(rstEndOffset, endIndex); - } - return new ArrayList<>(records.subList(startIndex, endIndex)); - } - - int searchStartIndex(long startOffset) { - IndexAndCount indexAndCount = offsetIndexMap.get(startOffset); - if (indexAndCount != null) { - unmap(startOffset, indexAndCount); - return indexAndCount.index; - } else { - // slow path - StreamRecordBatchList search = new StreamRecordBatchList(records); - return search.search(startOffset); - } - } - - final void map(long offset, int index) { - offsetIndexMap.compute(offset, (k, v) -> { - if (v == null) { - return new IndexAndCount(index); - } else { - v.inc(); - return v; - } - }); - } - - final void unmap(long startOffset, IndexAndCount indexAndCount) { - if (indexAndCount.dec() == 0) { - offsetIndexMap.remove(startOffset); - } - } - - synchronized StreamRange range() { - return new StreamRange(startOffset, endOffset); - } - - synchronized void free() { - records.forEach(StreamRecordBatch::release); - records.clear(); - } - } - - static class IndexAndCount { - int index; - int count; - - public IndexAndCount(int index) { - this.index = index; - this.count = 1; - } - - public void inc() { - count++; - } - - public int dec() { - return --count; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/ObjectReaderLRUCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/ObjectReaderLRUCache.java deleted file mode 100644 index 8103ac802..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/ObjectReaderLRUCache.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
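The StreamCache removed above keeps an offsetIndexMap so a strictly sequential reader resolves its next start index in O(1) instead of binary searching. A sketch of that behavior (batch(...) is an assumed test helper building a StreamRecordBatch):

    LogCache.LogCacheBlock block = new LogCache.LogCacheBlock(1024L * 1024);
    block.put(batch(streamId, 0, 10));   // records [0, 10)
    block.put(batch(streamId, 10, 20));  // records [10, 20)
    block.get(streamId, 0, 10, Integer.MAX_VALUE);  // slow path: binary search, then maps offset 10 -> next index
    block.get(streamId, 10, 20, Integer.MAX_VALUE); // fast path: offsetIndexMap hit at offset 10, no search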
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.ObjectReader; -import java.util.Optional; - -public class ObjectReaderLRUCache extends LRUCache { - - private final int maxObjectSize; - - public ObjectReaderLRUCache(int maxObjectSize) { - super(); - if (maxObjectSize <= 0) { - throw new IllegalArgumentException("maxObjectSize must be positive"); - } - this.maxObjectSize = maxObjectSize; - } - - @Override - public synchronized void put(Long key, ObjectReader value) { - while (objectSize() > maxObjectSize) { - Optional.ofNullable(pop()).ifPresent(entry -> entry.getValue().close()); - } - super.put(key, value); - } - - private int objectSize() { - return cacheEntrySet.stream().filter(entry -> entry.getValue().basicObjectInfo().isDone()) - .mapToInt(entry -> { - try { - return entry.getValue().basicObjectInfo().get().size(); - } catch (Exception e) { - return 0; - } - }).sum(); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadAgent.java b/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadAgent.java deleted file mode 100644 index efdeab097..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadAgent.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.utils.LogContext; -import com.google.common.base.Objects; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; - -public class ReadAheadAgent { - private static final Integer MAX_READ_AHEAD_SIZE = 40 * 1024 * 1024; // 40MB - private static final Integer S3_OPERATION_DELAY_MS = 400; // 400ms - private final Logger logger; - private final Lock lock = new ReentrantLock(); - private final TimerUtil timer; - private final long streamId; - private final int dataBlockSize; - private final List> evictedOffsetRanges = new ArrayList<>(); - private double bytePerSecond; - private long readCount; - private long lastReadOffset; - private int lastReadSize; - private long readAheadEndOffset; - private int lastReadAheadSize; - - public ReadAheadAgent(int dataBlockSize, long streamId, long startOffset) { - this.logger = new LogContext(String.format("[S3BlockCache] stream=%d ", streamId)).logger(ReadAheadAgent.class); - this.timer = new TimerUtil(); - this.dataBlockSize = dataBlockSize; - this.streamId = streamId; - this.lastReadOffset = startOffset; - this.readCount = 0; - logger.info("create read ahead agent for stream={}, startOffset={}", streamId, startOffset); - } - - public void updateReadProgress(long startOffset) { - try { - lock.lock(); - if (startOffset != lastReadOffset) { - logger.error("update read 
progress for stream={} failed, offset not match: expected offset {}, but get {}", streamId, lastReadOffset, startOffset); - return; - } - long timeElapsedNanos = timer.elapsedAs(TimeUnit.NANOSECONDS); - double bytesPerSec = (double) this.lastReadSize / timeElapsedNanos * TimeUnit.SECONDS.toNanos(1); - readCount++; - double factor = (double) readCount / (1 + readCount); - bytePerSecond = (1 - factor) * bytePerSecond + factor * bytesPerSec; - if (logger.isDebugEnabled()) { - logger.debug("update read progress offset {}, lastReadSpeed: {} bytes/s, corrected speed: {} bytes/s", startOffset, bytesPerSec, bytePerSecond); - } - } finally { - lock.unlock(); - } - } - - public void updateReadResult(long startOffset, long endOffset, int size) { - try { - lock.lock(); - if (startOffset != lastReadOffset) { - logger.error("update read result for stream={} failed, offset not match: expected offset {}, but get {}", streamId, lastReadOffset, startOffset); - return; - } - this.lastReadSize = size; - this.lastReadOffset = endOffset; - timer.reset(); - if (logger.isDebugEnabled()) { - logger.debug("update read result offset {}-{}, size: {}, readAheadOffset: {}", startOffset, endOffset, size, readAheadEndOffset); - } - } finally { - lock.unlock(); - } - } - - public void updateReadAheadResult(long readAheadEndOffset, int readAheadSize) { - try { - lock.lock(); - this.readAheadEndOffset = readAheadEndOffset; - this.lastReadAheadSize = readAheadSize; - StorageOperationStats.getInstance().readAheadSizeStats.record(readAheadSize); - if (logger.isDebugEnabled()) { - logger.debug("update read ahead offset {}, size: {}, lastReadOffset: {}", readAheadEndOffset, readAheadSize, lastReadOffset); - } - } finally { - lock.unlock(); - } - } - - public int getNextReadAheadSize() { - try { - lock.lock(); - // remove range that is not within read ahead - this.evictedOffsetRanges.removeIf(range -> range.getLeft() >= readAheadEndOffset || range.getRight() <= lastReadOffset); - int nextSize = calculateNextSize(); - this.lastReadAheadSize = nextSize; - if (logger.isDebugEnabled()) { - logger.debug("get next read ahead size {}, {}", nextSize, this); - } - return nextSize; - } finally { - lock.unlock(); - } - - } - - private int calculateNextSize() { - long totalEvictedSize = this.evictedOffsetRanges.stream().mapToLong(range -> { - long left = Math.max(range.getLeft(), lastReadOffset); - long right = Math.min(range.getRight(), readAheadEndOffset); - return right - left; - }).sum(); - double evictedFraction = (double) totalEvictedSize / (readAheadEndOffset - lastReadOffset); - int nextSize = (int) (bytePerSecond * ((double) S3_OPERATION_DELAY_MS / TimeUnit.SECONDS.toMillis(1)) * (1 - evictedFraction)); - nextSize = Math.max(dataBlockSize, Math.min(nextSize, MAX_READ_AHEAD_SIZE)); - return nextSize; - } - - public long getStreamId() { - return streamId; - } - - public long getReadAheadOffset() { - try { - lock.lock(); - return readAheadEndOffset; - } finally { - lock.unlock(); - } - } - - public long getLastReadAheadSize() { - try { - lock.lock(); - return lastReadAheadSize; - } finally { - lock.unlock(); - } - } - - public long getLastReadOffset() { - try { - lock.lock(); - return lastReadOffset; - } finally { - lock.unlock(); - } - } - - public int getLastReadSize() { - try { - lock.lock(); - return lastReadSize; - } finally { - lock.unlock(); - } - } - - public double getBytePerSecond() { - try { - lock.lock(); - return bytePerSecond; - } finally { - lock.unlock(); - } - } - - public void evict(long startOffset, long endOffset) { 
- try { - lock.lock(); - if (startOffset >= endOffset - || lastReadOffset >= readAheadEndOffset - || endOffset <= lastReadOffset - || startOffset >= readAheadEndOffset) { - return; - } - - if (logger.isDebugEnabled()) { - logger.debug("evict range [{}, {}], lastReadOffset: {}, readAheadOffset: {}", startOffset, endOffset, lastReadOffset, readAheadEndOffset); - } - - this.evictedOffsetRanges.add(Pair.of(startOffset, endOffset)); - } finally { - lock.unlock(); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - ReadAheadAgent agent = (ReadAheadAgent) o; - return streamId == agent.streamId && lastReadOffset == agent.lastReadOffset; - } - - @Override - public int hashCode() { - return Objects.hashCode(streamId, lastReadOffset); - } - - @Override - public String toString() { - return "ReadAheadAgent{" + - "stream=" + streamId + - ", bytesPerSecond=" + bytePerSecond + - ", lastReadOffset=" + lastReadOffset + - ", lastReadSize=" + lastReadSize + - ", readAheadEndOffset=" + readAheadEndOffset + - ", evictedOffsetRanges=" + evictedOffsetRanges + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadManager.java b/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadManager.java deleted file mode 100644 index ac53e69fa..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadAheadManager.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.utils.LogContext; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Objects; -import java.util.Set; -import java.util.TreeMap; -import java.util.concurrent.ConcurrentHashMap; -import org.slf4j.Logger; - -public class ReadAheadManager implements BlockCache.CacheEvictListener { - private static final Logger LOGGER = new LogContext("[S3BlockCache] ").logger(ReadAheadManager.class); - private static final Integer MAX_READ_AHEAD_AGENT_NUM = 1024; - // > - private final Map> readAheadAgentMap; - private final LRUCache readAheadAgentLRUCache = new LRUCache<>(); - private final int dataBlockSize; - private final BlockCache blockCache; - - public ReadAheadManager(int dataBlockSize, BlockCache blockCache) { - this.dataBlockSize = dataBlockSize; - this.readAheadAgentMap = new ConcurrentHashMap<>(); - this.blockCache = blockCache; - this.blockCache.registerListener(this); - } - - public void updateReadResult(long streamId, long startOffset, long endOffset, int size) { - NavigableMap agentMap = readAheadAgentMap.get(streamId); - if (agentMap != null) { - synchronized (agentMap) { - ReadAheadAgent agent = agentMap.get(startOffset); - if (agent == null) { - return; - } - readAheadAgentLRUCache.remove(agent); - agent.updateReadResult(startOffset, endOffset, size); - agentMap.remove(startOffset); - agentMap.put(endOffset, agent); - readAheadAgentLRUCache.put(agent, null); - } - } - } - - public void updateReadProgress(long streamId, long startOffset) { - NavigableMap agentMap = readAheadAgentMap.get(streamId); - if (agentMap != null) { - synchronized (agentMap) { - ReadAheadAgent agent = agentMap.get(startOffset); - if (agent == 
null) { - return; - } - agent.updateReadProgress(startOffset); - readAheadAgentLRUCache.touch(agent); - } - } - } - - public ReadAheadAgent getReadAheadAgent(long streamId, long startOffset) { - NavigableMap agentMap = readAheadAgentMap.get(streamId); - if (agentMap != null) { - synchronized (agentMap) { - ReadAheadAgent agent = agentMap.get(startOffset); - if (agent != null) { - readAheadAgentLRUCache.touch(agent); - } - return agent; - } - } - return null; - } - - public ReadAheadAgent getOrCreateReadAheadAgent(long streamId, long startOffset) { - NavigableMap agentMap = readAheadAgentMap.computeIfAbsent(streamId, k -> new TreeMap<>()); - synchronized (agentMap) { - while (readAheadAgentLRUCache.size() > MAX_READ_AHEAD_AGENT_NUM) { - Map.Entry entry = readAheadAgentLRUCache.pop(); - if (entry == null) { - LOGGER.error("read ahead agent num exceed limit, but no agent can be evicted"); - return null; - } - ReadAheadAgent agent = entry.getKey(); - agentMap.remove(agent.getLastReadOffset()); - LOGGER.info("evict read ahead agent for stream={}, startOffset={}", agent.getStreamId(), agent.getLastReadOffset()); - } - return agentMap.computeIfAbsent(startOffset, k -> { - ReadAheadAgent agent = new ReadAheadAgent(dataBlockSize, streamId, k); - readAheadAgentLRUCache.put(agent, null); - LOGGER.info("put read ahead agent for stream={}, startOffset={}, total agent num={}", agent.getStreamId(), agent.getLastReadOffset(), readAheadAgentLRUCache.size()); - return agent; - }); - } - } - - Set getReadAheadAgents() { - return readAheadAgentLRUCache.cache.keySet(); - } - - @Override - public void onCacheEvict(long streamId, long startOffset, long endOffset, int size) { - NavigableMap agentMap = readAheadAgentMap.get(streamId); - if (agentMap != null) { - synchronized (agentMap) { - Long floor = agentMap.floorKey(startOffset); - if (floor == null) { - floor = agentMap.firstKey(); - } - Long ceil = agentMap.ceilingKey(endOffset); - if (ceil == null) { - ceil = agentMap.lastKey(); - } - NavigableMap subMap = agentMap.subMap(floor, true, ceil, Objects.equals(ceil, agentMap.lastKey())); - for (Map.Entry entry : subMap.entrySet()) { - ReadAheadAgent agent = entry.getValue(); - agent.evict(startOffset, endOffset); - } - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadDataBlock.java b/s3stream/src/main/java/com/automq/stream/s3/cache/ReadDataBlock.java deleted file mode 100644 index 6d6cb1374..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/ReadDataBlock.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.cache;
-
-import com.automq.stream.s3.model.StreamRecordBatch;
-import java.util.List;
-import java.util.OptionalLong;
-
-public class ReadDataBlock {
-    private final CacheAccessType cacheAccessType;
-    private List<StreamRecordBatch> records;
-
-    public ReadDataBlock(List<StreamRecordBatch> records, CacheAccessType cacheAccessType) {
-        this.records = records;
-        this.cacheAccessType = cacheAccessType;
-    }
-
-    public CacheAccessType getCacheAccessType() {
-        return cacheAccessType;
-    }
-
-    public List<StreamRecordBatch> getRecords() {
-        return records;
-    }
-
-    public void setRecords(List<StreamRecordBatch> records) {
-        this.records = records;
-    }
-
-    public OptionalLong startOffset() {
-        if (records.isEmpty()) {
-            return OptionalLong.empty();
-        } else {
-            return OptionalLong.of(records.get(0).getBaseOffset());
-        }
-    }
-
-    public OptionalLong endOffset() {
-        if (records.isEmpty()) {
-            return OptionalLong.empty();
-        } else {
-            return OptionalLong.of(records.get(records.size() - 1).getLastOffset());
-        }
-    }
-
-    public int sizeInBytes() {
-        return records.stream().mapToInt(StreamRecordBatch::size).sum();
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java
deleted file mode 100644
index 4fa377e98..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/cache/S3BlockCache.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.cache;
-
-import com.automq.stream.s3.trace.context.TraceContext;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * Like linux page cache, S3BlockCache is responsible for:
- * 1. read from S3 when the data block is not in cache.
- * 2. caching the data blocks of S3 objects.
- */
-public interface S3BlockCache {
-
-    CompletableFuture<ReadDataBlock> read(TraceContext context, long streamId, long startOffset, long endOffset,
-        int maxBytes);
-
-    default CompletableFuture<ReadDataBlock> read(long streamId, long startOffset, long endOffset, int maxBytes) {
-        return read(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes);
-    }
-}
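A hedged usage sketch of the interface above: `process` is a placeholder, and the release call assumes the cache hands the caller one retained reference per record, as the StreamReader code later in this patch does for records it returns.

static CompletableFuture<Void> readAndProcess(S3BlockCache blockCache, long streamId) {
    return blockCache.read(streamId, 100L, 200L, 1 << 20)
        .thenAccept(block -> {
            try {
                block.getRecords().forEach(r -> process(r)); // records arrive in offset order
            } finally {
                // assumed ownership contract: drop our reference when done
                block.getRecords().forEach(StreamRecordBatch::release);
            }
        });
}

diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamCache.java b/s3stream/src/main/java/com/automq/stream/s3/cache/StreamCache.java
deleted file mode 100644
index 8215d725e..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamCache.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.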
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import java.util.Map; -import java.util.NavigableMap; -import java.util.TreeMap; - -public class StreamCache { - private final NavigableMap blocks = new TreeMap<>(); - - public void put(BlockCache.CacheBlock cacheBlock) { - blocks.put(cacheBlock.firstOffset, cacheBlock); - } - - public BlockCache.CacheBlock remove(long startOffset) { - return blocks.remove(startOffset); - } - - NavigableMap blocks() { - return blocks; - } - - public NavigableMap tailBlocks(long startOffset) { - Map.Entry floorEntry = blocks.floorEntry(startOffset); - return blocks.tailMap(floorEntry != null ? floorEntry.getKey() : startOffset, true); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java b/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java deleted file mode 100644 index 6f3610a08..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/cache/StreamReader.java +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.trace.TraceUtils; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.Threads; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import io.opentelemetry.instrumentation.annotations.WithSpan; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class StreamReader { - public static final Integer MAX_OBJECT_READER_SIZE = 100 * 1024 * 1024; // 100MB; - private static final Logger LOGGER = LoggerFactory.getLogger(StreamReader.class); - private static final Integer READ_OBJECT_INDEX_STEP = 2; - private final S3Operator s3Operator; - private final ObjectManager objectManager; - private final ObjectReaderLRUCache objectReaders; - private final DataBlockReadAccumulator dataBlockReadAccumulator; - private final BlockCache blockCache; - private final Map inflightReadAheadTaskMap; - private final 
InflightReadThrottle inflightReadThrottle; - private final ExecutorService streamReaderExecutor = Threads.newFixedThreadPoolWithMonitor( - 2, - "s3-stream-reader", - false, - LOGGER); - private final ExecutorService backgroundExecutor = Threads.newFixedThreadPoolWithMonitor( - 2, - "s3-stream-reader-background", - true, - LOGGER); - private final ExecutorService errorHandlerExecutor = Threads.newFixedThreadPoolWithMonitor( - 1, - "s3-stream-reader-error-handler", - true, - LOGGER); - - public StreamReader(S3Operator operator, ObjectManager objectManager, BlockCache blockCache, - Map inflightReadAheadTaskMap, - InflightReadThrottle inflightReadThrottle) { - this.s3Operator = operator; - this.objectManager = objectManager; - this.objectReaders = new ObjectReaderLRUCache(MAX_OBJECT_READER_SIZE); - this.dataBlockReadAccumulator = new DataBlockReadAccumulator(); - this.blockCache = blockCache; - this.inflightReadAheadTaskMap = Objects.requireNonNull(inflightReadAheadTaskMap); - this.inflightReadThrottle = Objects.requireNonNull(inflightReadThrottle); - } - - // for test - public StreamReader(S3Operator operator, ObjectManager objectManager, BlockCache blockCache, - ObjectReaderLRUCache objectReaders, - DataBlockReadAccumulator dataBlockReadAccumulator, - Map inflightReadAheadTaskMap, - InflightReadThrottle inflightReadThrottle) { - this.s3Operator = operator; - this.objectManager = objectManager; - this.objectReaders = objectReaders; - this.dataBlockReadAccumulator = dataBlockReadAccumulator; - this.blockCache = blockCache; - this.inflightReadAheadTaskMap = Objects.requireNonNull(inflightReadAheadTaskMap); - this.inflightReadThrottle = Objects.requireNonNull(inflightReadThrottle); - } - - public void shutdown() { - streamReaderExecutor.shutdown(); - backgroundExecutor.shutdown(); - errorHandlerExecutor.shutdown(); - } - - @WithSpan - public CompletableFuture> syncReadAhead(TraceContext traceContext, - @SpanAttribute long streamId, - @SpanAttribute long startOffset, - @SpanAttribute long endOffset, - @SpanAttribute int maxBytes, ReadAheadAgent agent, UUID uuid) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] sync read ahead, stream={}, {}-{}, maxBytes={}", streamId, startOffset, endOffset, maxBytes); - } - ReadContext context = new ReadContext(startOffset, maxBytes); - TimerUtil timer = new TimerUtil(); - DefaultS3BlockCache.ReadAheadTaskKey readAheadTaskKey = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset); - // put a placeholder task at start offset to prevent next cache miss request spawn duplicated read ahead task - DefaultS3BlockCache.ReadAheadTaskContext readAheadTaskContext = new DefaultS3BlockCache.ReadAheadTaskContext(new CompletableFuture<>(), - DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_DATA_INDEX); - if (inflightReadAheadTaskMap.putIfAbsent(readAheadTaskKey, readAheadTaskContext) == null) { - context.taskKeySet.add(readAheadTaskKey); - } - return getDataBlockIndices(traceContext, streamId, endOffset, context) - .thenComposeAsync(v -> - handleSyncReadAhead(traceContext, streamId, startOffset, endOffset, maxBytes, agent, uuid, timer, context), streamReaderExecutor) - .whenComplete((nil, ex) -> { - for (DefaultS3BlockCache.ReadAheadTaskKey key : context.taskKeySet) { - completeInflightTask0(key, ex); - } - context.taskKeySet.clear(); - StorageOperationStats.getInstance().blockCacheReadAheadStats(true).record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - }); - } - - @WithSpan - CompletableFuture> handleSyncReadAhead(TraceContext traceContext, long 
streamId, - long startOffset, long endOffset, - int maxBytes, ReadAheadAgent agent, UUID uuid, - TimerUtil timer, ReadContext context) { - if (context.streamDataBlocksPair.isEmpty()) { - return CompletableFuture.completedFuture(Collections.emptyList()); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] stream={}, {}-{}, read data block indices cost: {} ms", streamId, startOffset, endOffset, - timer.elapsedAs(TimeUnit.MILLISECONDS)); - } - - List>> cfList = new ArrayList<>(); - Map> recordsMap = new ConcurrentHashMap<>(); - List sortedDataBlockKeyList = new ArrayList<>(); - - // collect all data blocks to read from S3 - List> streamDataBlocksToRead = collectStreamDataBlocksToRead(context); - - // reserve all data blocks to read - List reserveResults = dataBlockReadAccumulator.reserveDataBlock(streamDataBlocksToRead); - int totalReserveSize = reserveResults.stream().mapToInt(DataBlockReadAccumulator.ReserveResult::reserveSize).sum(); - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] sync ra acquire size: {}, uuid={}, stream={}, {}-{}, {}", - totalReserveSize, uuid, streamId, startOffset, endOffset, maxBytes); - } - - TimerUtil throttleTimer = new TimerUtil(); - CompletableFuture throttleCf = inflightReadThrottle.acquire(traceContext, uuid, totalReserveSize); - return throttleCf.thenComposeAsync(nil -> { - // concurrently read all data blocks - StorageOperationStats.getInstance().readAheadLimiterQueueTimeStats.record(throttleTimer.elapsedAs(TimeUnit.NANOSECONDS)); - for (int i = 0; i < streamDataBlocksToRead.size(); i++) { - Pair pair = streamDataBlocksToRead.get(i); - ObjectReader objectReader = pair.getLeft(); - StreamDataBlock streamDataBlock = pair.getRight(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] sync ra, stream={}, {}-{}, read data block {} from {} [{}, {}), size={}", - streamId, startOffset, endOffset, streamDataBlock, objectReader.objectKey(), - streamDataBlock.getBlockStartPosition(), streamDataBlock.getBlockEndPosition(), streamDataBlock.getBlockSize()); - } - String dataBlockKey = streamDataBlock.getObjectId() + "-" + streamDataBlock.getBlockStartPosition(); - sortedDataBlockKeyList.add(dataBlockKey); - DataBlockReadAccumulator.ReserveResult reserveResult = reserveResults.get(i); - DefaultS3BlockCache.ReadAheadTaskKey taskKey = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, streamDataBlock.getStartOffset()); - if (context.taskKeySet.contains(taskKey)) { - setInflightReadAheadStatus(taskKey, DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); - } - boolean isNotAlignedFirstBlock = i == 0 && startOffset != streamDataBlock.getStartOffset(); - if (isNotAlignedFirstBlock && context.taskKeySet.contains(taskKey)) { - setInflightReadAheadStatus(new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), - DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); - } - try { - CompletableFuture> cf = TraceUtils.runWithSpanAsync(new TraceContext(traceContext), Attributes.empty(), "StreamReader::readDataBlock", - () -> reserveResult.cf().thenApplyAsync(dataBlock -> { - if (dataBlock.records().isEmpty()) { - return new ArrayList(); - } - // retain records to be returned - dataBlock.records().forEach(StreamRecordBatch::retain); - recordsMap.put(dataBlockKey, dataBlock.records()); - - // retain records to be put into block cache - dataBlock.records().forEach(StreamRecordBatch::retain); - blockCache.put(streamId, dataBlock.records()); - dataBlock.release(); - - return dataBlock.records(); - }, 
backgroundExecutor).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("[S3BlockCache] sync ra fail to read data block, stream={}, {}-{}, data block: {}", - streamId, startOffset, endOffset, streamDataBlock, ex); - } - completeInflightTask(context, taskKey, ex); - if (isNotAlignedFirstBlock) { - // in case of first data block and startOffset is not aligned with start of data block - completeInflightTask(context, new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), ex); - } - })); - cfList.add(cf); - } catch (Throwable e) { - throw new IllegalArgumentException(e); - } - if (reserveResult.reserveSize() > 0) { - dataBlockReadAccumulator.readDataBlock(objectReader, streamDataBlock.dataBlockIndex()); - } - } - return CompletableFuture.allOf(cfList.toArray(CompletableFuture[]::new)).thenApply(vv -> { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] sync read ahead complete, stream={}, {}-{}, maxBytes: {}, " + - "result: {}-{}, {}, cost: {} ms", streamId, startOffset, endOffset, maxBytes, - startOffset, context.lastOffset, context.totalReadSize, timer.elapsedAs(TimeUnit.MILLISECONDS)); - } - - List recordsToReturn = new LinkedList<>(); - List totalRecords = new ArrayList<>(); - for (String dataBlockKey : sortedDataBlockKeyList) { - List recordList = recordsMap.get(dataBlockKey); - if (recordList != null) { - totalRecords.addAll(recordList); - } - } - // collect records to return - int remainBytes = maxBytes; - for (StreamRecordBatch record : totalRecords) { - if (remainBytes <= 0 || record.getLastOffset() <= startOffset || record.getBaseOffset() >= endOffset) { - // release record that won't be returned - record.release(); - continue; - } - recordsToReturn.add(record); - remainBytes -= record.size(); - } - long lastReadOffset = recordsToReturn.isEmpty() ? 
totalRecords.get(0).getBaseOffset() - : recordsToReturn.get(recordsToReturn.size() - 1).getLastOffset(); - blockCache.setReadAheadRecord(streamId, lastReadOffset, context.lastOffset); - agent.updateReadAheadResult(context.lastOffset, context.totalReadSize); - return recordsToReturn; - }).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("[S3BlockCache] sync read ahead fail, stream={}, {}-{}, maxBytes: {}, cost: {} ms", - streamId, startOffset, endOffset, maxBytes, timer.elapsedAs(TimeUnit.MILLISECONDS), ex); - errorHandlerExecutor.execute(() -> cleanUpOnCompletion(cfList)); - } - context.releaseReader(); - }); - }, streamReaderExecutor); - } - - private void cleanUpOnCompletion(List>> cfList) { - cfList.forEach(cf -> cf.whenComplete((ret, ex) -> { - if (ex == null) { - // release records that won't be returned - ret.forEach(StreamRecordBatch::release); - } - })); - } - - public void asyncReadAhead(long streamId, long startOffset, long endOffset, int maxBytes, ReadAheadAgent agent) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] async read ahead, stream={}, {}-{}, maxBytes={}", streamId, startOffset, endOffset, maxBytes); - } - ReadContext context = new ReadContext(startOffset, maxBytes); - TimerUtil timer = new TimerUtil(); - DefaultS3BlockCache.ReadAheadTaskKey readAheadTaskKey = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset); - // put a placeholder task at start offset to prevent next cache miss request spawn duplicated read ahead task - DefaultS3BlockCache.ReadAheadTaskContext readAheadTaskContext = new DefaultS3BlockCache.ReadAheadTaskContext(new CompletableFuture<>(), - DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_DATA_INDEX); - inflightReadAheadTaskMap.putIfAbsent(readAheadTaskKey, readAheadTaskContext); - context.taskKeySet.add(readAheadTaskKey); - getDataBlockIndices(TraceContext.DEFAULT, streamId, endOffset, context) - .thenAcceptAsync(v -> - handleAsyncReadAhead(streamId, startOffset, endOffset, maxBytes, agent, timer, context), streamReaderExecutor) - .whenComplete((nil, ex) -> { - for (DefaultS3BlockCache.ReadAheadTaskKey key : context.taskKeySet) { - completeInflightTask0(key, ex); - } - context.taskKeySet.clear(); - StorageOperationStats.getInstance().blockCacheReadAheadStats(false).record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - }); - } - - CompletableFuture handleAsyncReadAhead(long streamId, long startOffset, long endOffset, int maxBytes, - ReadAheadAgent agent, - TimerUtil timer, ReadContext context) { - if (context.streamDataBlocksPair.isEmpty()) { - return CompletableFuture.completedFuture(null); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] stream={}, {}-{}, read data block indices cost: {} ms", streamId, startOffset, endOffset, - timer.elapsedAs(TimeUnit.MILLISECONDS)); - } - - List> cfList = new ArrayList<>(); - // collect all data blocks to read from S3 - List> streamDataBlocksToRead = collectStreamDataBlocksToRead(context); - - // concurrently read all data blocks - for (int i = 0; i < streamDataBlocksToRead.size(); i++) { - Pair pair = streamDataBlocksToRead.get(i); - ObjectReader objectReader = pair.getLeft(); - StreamDataBlock streamDataBlock = pair.getRight(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] async ra, stream={}, {}-{}, read data block {} from {} [{}, {}), size={}", - streamId, startOffset, endOffset, streamDataBlock, objectReader.objectKey(), - streamDataBlock.getBlockStartPosition(), streamDataBlock.getBlockEndPosition(), streamDataBlock.getBlockSize()); - } 
- UUID uuid = UUID.randomUUID(); - DefaultS3BlockCache.ReadAheadTaskKey taskKey = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, streamDataBlock.getStartOffset()); - DataBlockReadAccumulator.ReserveResult reserveResult = dataBlockReadAccumulator.reserveDataBlock(List.of(pair)).get(0); - int readIndex = i; - boolean isNotAlignedFirstBlock = i == 0 && startOffset != streamDataBlock.getStartOffset(); - CompletableFuture cf = reserveResult.cf().thenAcceptAsync(dataBlock -> { - if (dataBlock.records().isEmpty()) { - return; - } - // retain records to be put into block cache - dataBlock.records().forEach(StreamRecordBatch::retain); - if (readIndex == 0) { - long firstOffset = dataBlock.records().get(0).getBaseOffset(); - blockCache.put(streamId, firstOffset, context.lastOffset, dataBlock.records()); - } else { - blockCache.put(streamId, dataBlock.records()); - } - dataBlock.release(); - }, backgroundExecutor).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("[S3BlockCache] async ra fail to read data block, stream={}, {}-{}, data block: {}", - streamId, startOffset, endOffset, streamDataBlock, ex); - } - inflightReadThrottle.release(uuid); - completeInflightTask(context, taskKey, ex); - if (isNotAlignedFirstBlock) { - // in case of first data block and startOffset is not aligned with start of data block - completeInflightTask(context, new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), ex); - } - }); - cfList.add(cf); - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] async ra acquire size: {}, uuid={}, stream={}, {}-{}, {}", - reserveResult.reserveSize(), uuid, streamId, startOffset, endOffset, maxBytes); - } - if (reserveResult.reserveSize() > 0) { - TimerUtil throttleTimer = new TimerUtil(); - inflightReadThrottle.acquire(TraceContext.DEFAULT, uuid, reserveResult.reserveSize()).thenAcceptAsync(nil -> { - StorageOperationStats.getInstance().readAheadLimiterQueueTimeStats.record(throttleTimer.elapsedAs(TimeUnit.NANOSECONDS)); - // read data block - if (context.taskKeySet.contains(taskKey)) { - setInflightReadAheadStatus(taskKey, DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); - } - if (isNotAlignedFirstBlock && context.taskKeySet.contains(taskKey)) { - setInflightReadAheadStatus(new DefaultS3BlockCache.ReadAheadTaskKey(streamId, startOffset), - DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA); - } - dataBlockReadAccumulator.readDataBlock(objectReader, streamDataBlock.dataBlockIndex()); - }, streamReaderExecutor).exceptionally(ex -> { - cf.completeExceptionally(ex); - return null; - }); - } - } - return CompletableFuture.allOf(cfList.toArray(CompletableFuture[]::new)).whenComplete((ret, ex) -> { - if (ex != null) { - LOGGER.error("[S3BlockCache] async ra failed, stream={}, {}-{}, maxBytes: {}, " + - "result: {}-{}, {}, cost: {} ms, ", streamId, startOffset, endOffset, maxBytes, - startOffset, context.lastOffset, context.totalReadSize, timer.elapsedAs(TimeUnit.MILLISECONDS), ex); - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] async ra complete, stream={}, {}-{}, maxBytes: {}, " + - "result: {}-{}, {}, cost: {} ms", streamId, startOffset, endOffset, maxBytes, - startOffset, context.lastOffset, context.totalReadSize, timer.elapsedAs(TimeUnit.MILLISECONDS)); - } - } - context.releaseReader(); - agent.updateReadAheadResult(context.lastOffset, context.totalReadSize); - }); - } - - @WithSpan - CompletableFuture getDataBlockIndices(TraceContext traceContext, long streamId, long endOffset, - ReadContext context) 
{ - traceContext.currentContext(); - CompletableFuture getObjectsCf = CompletableFuture.completedFuture(false); - if (context.objectIndex >= context.objects.size()) { - getObjectsCf = objectManager - .getObjects(streamId, context.nextStartOffset, endOffset, READ_OBJECT_INDEX_STEP) - .thenApply(objects -> { - context.objects = objects; - context.objectIndex = 0; - if (context.objects.isEmpty()) { - if (endOffset == -1L) { // background read ahead - return true; - } else { - LOGGER.error("[BUG] fail to read, expect objects not empty, streamId={}, startOffset={}, endOffset={}", - streamId, context.nextStartOffset, endOffset); - throw new IllegalStateException("fail to read, expect objects not empty"); - } - } - return false; - }); - } - CompletableFuture findIndexCf = getObjectsCf.thenComposeAsync(emptyObjects -> { - if (emptyObjects) { - return CompletableFuture.completedFuture(null); - } - S3ObjectMetadata objectMetadata = context.objects.get(context.objectIndex); - ObjectReader reader = getObjectReader(objectMetadata); - context.objectReaderMap.put(objectMetadata.objectId(), reader); - return reader.find(streamId, context.nextStartOffset, endOffset, context.nextMaxBytes); - }, streamReaderExecutor); - - return findIndexCf.thenComposeAsync(findIndexResult -> { - if (findIndexResult == null) { - return CompletableFuture.completedFuture(null); - } - List streamDataBlocks = findIndexResult.streamDataBlocks(); - if (streamDataBlocks.isEmpty()) { - return CompletableFuture.completedFuture(null); - } - - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - DefaultS3BlockCache.ReadAheadTaskKey taskKey = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, streamDataBlock.getStartOffset()); - DefaultS3BlockCache.ReadAheadTaskContext readAheadContext = new DefaultS3BlockCache.ReadAheadTaskContext(new CompletableFuture<>(), - DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_THROTTLE); - if (inflightReadAheadTaskMap.putIfAbsent(taskKey, readAheadContext) == null) { - context.taskKeySet.add(taskKey); - } - if (context.isFirstDataBlock && streamDataBlock.getStartOffset() != context.nextStartOffset) { - context.isFirstDataBlock = false; - DefaultS3BlockCache.ReadAheadTaskKey key = new DefaultS3BlockCache.ReadAheadTaskKey(streamId, context.nextStartOffset); - if (context.taskKeySet.contains(key)) { - inflightReadAheadTaskMap.computeIfPresent(key, (k, v) -> { - v.status = DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_THROTTLE; - return v; - }); - } - } - } - - S3ObjectMetadata objectMetadata = context.objects.get(context.objectIndex); - long lastOffset = streamDataBlocks.get(streamDataBlocks.size() - 1).getEndOffset(); - context.lastOffset = Math.max(lastOffset, context.lastOffset); - context.streamDataBlocksPair.add(new ImmutablePair<>(objectMetadata.objectId(), streamDataBlocks)); - context.totalReadSize += streamDataBlocks.stream().mapToInt(StreamDataBlock::getBlockSize).sum(); - context.nextMaxBytes = findIndexResult.nextMaxBytes(); - context.nextStartOffset = findIndexResult.nextStartOffset(); - context.objectIndex++; - if (findIndexResult.isFulfilled()) { - return CompletableFuture.completedFuture(null); - } - return getDataBlockIndices(traceContext, streamId, endOffset, context); - }, streamReaderExecutor); - } - - private void setInflightReadAheadStatus(DefaultS3BlockCache.ReadAheadTaskKey key, - DefaultS3BlockCache.ReadBlockCacheStatus status) { - inflightReadAheadTaskMap.computeIfPresent(key, (k, readAheadContext) -> { - readAheadContext.status = status; - return readAheadContext; - }); - } - 
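The inflightReadAheadTaskMap manipulated above implements a dedup pattern: the first cache miss installs a placeholder CompletableFuture under putIfAbsent, and concurrent misses for the same key wait on it instead of spawning a duplicate read-ahead task (the real code additionally tracks status transitions WAIT_DATA_INDEX -> WAIT_THROTTLE -> WAIT_FETCH_DATA in a ReadAheadTaskContext). A generalized sketch of the pattern, with illustrative names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

class InflightTasks<K, V> {
    private final ConcurrentMap<K, CompletableFuture<V>> inflight = new ConcurrentHashMap<>();

    CompletableFuture<V> getOrStart(K key, Supplier<CompletableFuture<V>> loader) {
        CompletableFuture<V> placeholder = new CompletableFuture<>();
        CompletableFuture<V> existing = inflight.putIfAbsent(key, placeholder);
        if (existing != null) {
            return existing; // someone else is already loading this key: wait on theirs
        }
        loader.get().whenComplete((v, ex) -> {
            inflight.remove(key); // task is no longer inflight
            if (ex != null) placeholder.completeExceptionally(ex);
            else placeholder.complete(v);
        });
        return placeholder;
    }
}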
- private void completeInflightTask(ReadContext readContext, DefaultS3BlockCache.ReadAheadTaskKey key, Throwable ex) { - if (!readContext.taskKeySet.contains(key)) { - return; - } - completeInflightTask0(key, ex); - readContext.taskKeySet.remove(key); - } - - private void completeInflightTask0(DefaultS3BlockCache.ReadAheadTaskKey key, Throwable ex) { - DefaultS3BlockCache.ReadAheadTaskContext context = inflightReadAheadTaskMap.remove(key); - if (context != null) { - if (ex != null) { - context.cf.completeExceptionally(ex); - } else { - context.cf.complete(null); - } - } - } - - private List> collectStreamDataBlocksToRead(ReadContext context) { - List> result = new ArrayList<>(); - for (Pair> entry : context.streamDataBlocksPair) { - long objectId = entry.getKey(); - ObjectReader objectReader = context.objectReaderMap.get(objectId); - for (StreamDataBlock streamDataBlock : entry.getValue()) { - result.add(Pair.of(objectReader, streamDataBlock)); - } - } - return result; - } - - ObjectReader getObjectReader(S3ObjectMetadata metadata) { - synchronized (objectReaders) { - ObjectReader objectReader = objectReaders.get(metadata.objectId()); - if (objectReader == null) { - objectReader = new ObjectReader(metadata, s3Operator); - objectReaders.put(metadata.objectId(), objectReader); - } - return objectReader.retain(); - } - } - - static class ReadContext { - List objects; - List>> streamDataBlocksPair; - Map objectReaderMap; - Set taskKeySet; - boolean isFirstDataBlock = true; - int objectIndex; - long nextStartOffset; - int nextMaxBytes; - int totalReadSize; - long lastOffset; - - public ReadContext(long startOffset, int maxBytes) { - this.objects = new LinkedList<>(); - this.objectIndex = 0; - this.streamDataBlocksPair = new LinkedList<>(); - this.objectReaderMap = new HashMap<>(); - this.taskKeySet = new HashSet<>(); - this.nextStartOffset = startOffset; - this.nextMaxBytes = maxBytes; - } - - public void releaseReader() { - for (Map.Entry entry : objectReaderMap.entrySet()) { - entry.getValue().release(); - } - objectReaderMap.clear(); - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactResult.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactResult.java deleted file mode 100644 index f9f18489f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactResult.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -public enum CompactResult { - SUCCESS, - SKIPPED, - FAILED - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionAnalyzer.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionAnalyzer.java deleted file mode 100644 index 94ce4b9c7..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionAnalyzer.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactedObjectBuilder; -import com.automq.stream.s3.compact.objects.CompactionType; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.utils.LogContext; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; - -public class CompactionAnalyzer { - private final Logger logger; - private final long compactionCacheSize; - private final long streamSplitSize; - private final int maxStreamNumInStreamSet; - private final int maxStreamObjectNum; - - public CompactionAnalyzer(long compactionCacheSize, long streamSplitSize, int maxStreamNumInStreamSet, - int maxStreamObjectNum) { - this(compactionCacheSize, streamSplitSize, maxStreamNumInStreamSet, maxStreamObjectNum, new LogContext("[CompactionAnalyzer]")); - } - - public CompactionAnalyzer(long compactionCacheSize, long streamSplitSize, - int maxStreamNumInStreamSet, int maxStreamObjectNum, LogContext logContext) { - this.logger = logContext.logger(CompactionAnalyzer.class); - this.compactionCacheSize = compactionCacheSize; - this.streamSplitSize = streamSplitSize; - this.maxStreamNumInStreamSet = maxStreamNumInStreamSet; - this.maxStreamObjectNum = maxStreamObjectNum; - } - - public List analyze(Map> streamDataBlockMap, - Set excludedObjectIds) { - if (streamDataBlockMap.isEmpty()) { - return Collections.emptyList(); - } - streamDataBlockMap = filterBlocksToCompact(streamDataBlockMap); - this.logger.info("{} stream set objects to compact after filter", streamDataBlockMap.size()); - if (streamDataBlockMap.isEmpty()) { - return Collections.emptyList(); - } - try { - List compactedObjectBuilders = groupObjectWithLimits(streamDataBlockMap, excludedObjectIds); - return generatePlanWithCacheLimit(compactedObjectBuilders); - } catch (Exception e) { - logger.error("Error while analyzing compaction plan", e); - } - return Collections.emptyList(); - } - - /** - * Group stream data blocks into different compaction type ({@link CompactionType#COMPACT} & {@link CompactionType#SPLIT}) - * with compaction limitation ({@code maxStreamObjectNum} and {@code maxStreamNumInStreamSet}). 
- * - * @param streamDataBlockMap stream data blocks map, key: object id, value: stream data blocks - * @param excludedObjectIds objects that are excluded from compaction because of compaction limitation - * @return list of {@link CompactedObjectBuilder} - */ - List groupObjectWithLimits(Map> streamDataBlockMap, - Set excludedObjectIds) { - List sortedStreamDataBlocks = CompactionUtils.sortStreamRangePositions(streamDataBlockMap); - List compactedObjectBuilders = new ArrayList<>(); - CompactionStats stats = null; - int streamNumInStreamSet = -1; - int streamObjectNum = -1; - do { - final Set objectsToRemove = new HashSet<>(); - if (stats != null) { - if (streamObjectNum > maxStreamObjectNum) { - logger.warn("Stream object num {} exceeds limit {}, try to reduce number of objects to compact", streamObjectNum, maxStreamObjectNum); - addObjectsToRemove(CompactionType.SPLIT, compactedObjectBuilders, stats, objectsToRemove); - } else { - logger.warn("Stream number {} exceeds limit {}, try to reduce number of objects to compact", streamNumInStreamSet, maxStreamNumInStreamSet); - addObjectsToRemove(CompactionType.COMPACT, compactedObjectBuilders, stats, objectsToRemove); - } - if (objectsToRemove.isEmpty()) { - logger.error("Unable to derive objects to exclude, compaction failed"); - return new ArrayList<>(); - } - } - if (!objectsToRemove.isEmpty()) { - logger.info("Excluded objects {} for compaction", objectsToRemove); - excludedObjectIds.addAll(objectsToRemove); - } - sortedStreamDataBlocks.removeIf(e -> objectsToRemove.contains(e.getObjectId())); - objectsToRemove.forEach(streamDataBlockMap::remove); - streamDataBlockMap = filterBlocksToCompact(streamDataBlockMap); - if (streamDataBlockMap.isEmpty()) { - logger.warn("No viable objects to compact after exclusion"); - return new ArrayList<>(); - } - compactedObjectBuilders = compactObjects(sortedStreamDataBlocks); - stats = CompactionStats.of(compactedObjectBuilders); - streamNumInStreamSet = stats.getStreamRecord().streamNumInStreamSet(); - streamObjectNum = stats.getStreamRecord().streamObjectNum(); - logger.info("Current stream num in stream set: {}, max: {}, stream object num: {}, max: {}", streamNumInStreamSet, maxStreamNumInStreamSet, streamObjectNum, maxStreamObjectNum); - } - while (streamNumInStreamSet > maxStreamNumInStreamSet || streamObjectNum > maxStreamObjectNum); - - return compactedObjectBuilders; - } - - /** - * Find objects to exclude from compaction. 
- * - * @param compactionType {@link CompactionType#COMPACT} means to exclude objects to reduce stream number in stream set object; - * {@link CompactionType#SPLIT} means to exclude objects to reduce stream object number - * @param compactedObjectBuilders all compacted object builders - * @param stats compaction stats - * @param objectsToRemove objects to remove - */ - private void addObjectsToRemove(CompactionType compactionType, List compactedObjectBuilders, - CompactionStats stats, Set objectsToRemove) { - List sortedCompactedObjectIndexList = new ArrayList<>(); - for (CompactedObjectBuilder compactedObjectBuilder : compactedObjectBuilders) { - // find all compacted objects of the same type - if (compactedObjectBuilder.type() == compactionType) { - sortedCompactedObjectIndexList.add(compactedObjectBuilder); - } - } - if (compactionType == CompactionType.SPLIT) { - // try to find out one stream object to remove - sortedCompactedObjectIndexList.sort(new StreamObjectComparator(stats.getS3ObjectToCompactedObjectNumMap())); - // remove compacted object with the highest priority - CompactedObjectBuilder compactedObjectToRemove = sortedCompactedObjectIndexList.get(0); - // add all objects in the compacted object - objectsToRemove.addAll(compactedObjectToRemove.streamDataBlocks().stream() - .map(StreamDataBlock::getObjectId) - .collect(Collectors.toSet())); - } else { - // try to find out one stream to remove - // key: stream id, value: id of all objects that contains the stream - Map> streamObjectIdsMap = new HashMap<>(); - // key: object id, value: id of all streams from the object, used to describe the dispersion of streams in the object - Map> objectStreamIdsMap = new HashMap<>(); - for (CompactedObjectBuilder compactedObjectBuilder : sortedCompactedObjectIndexList) { - for (StreamDataBlock streamDataBlock : compactedObjectBuilder.streamDataBlocks()) { - Set objectIds = streamObjectIdsMap.computeIfAbsent(streamDataBlock.getStreamId(), k -> new HashSet<>()); - objectIds.add(streamDataBlock.getObjectId()); - Set streamIds = objectStreamIdsMap.computeIfAbsent(streamDataBlock.getObjectId(), k -> new HashSet<>()); - streamIds.add(streamDataBlock.getStreamId()); - } - } - List> sortedStreamObjectStatsList = new ArrayList<>(); - for (Map.Entry> entry : streamObjectIdsMap.entrySet()) { - long streamId = entry.getKey(); - Set objectIds = entry.getValue(); - int objectStreamNum = 0; - for (long objectId : objectIds) { - objectStreamNum += objectStreamIdsMap.get(objectId).size(); - } - sortedStreamObjectStatsList.add(new ImmutablePair<>(streamId, objectStreamNum)); - } - sortedStreamObjectStatsList.sort(Comparator.comparingInt(Pair::getRight)); - // remove stream with minimum object dispersion - objectsToRemove.addAll(streamObjectIdsMap.get(sortedStreamObjectStatsList.get(0).getKey())); - } - } - - /** - * Generate compaction plan with cache size limit. 
- * - * @param compactedObjectBuilders compacted object builders - * @return list of {@link CompactionPlan} with each plan's memory consumption is less than {@code compactionCacheSize} - */ - List generatePlanWithCacheLimit(List compactedObjectBuilders) { - List compactionPlans = new ArrayList<>(); - List compactedObjects = new ArrayList<>(); - CompactedObjectBuilder compactedStreamSetObjectBuilder = null; - long totalSize = 0L; - int compactionOrder = 0; - for (int i = 0; i < compactedObjectBuilders.size(); ) { - CompactedObjectBuilder compactedObjectBuilder = compactedObjectBuilders.get(i); - if (totalSize + compactedObjectBuilder.totalBlockSize() > compactionCacheSize) { - if (shouldSplitObject(compactedObjectBuilder)) { - // split object to fit into cache - int endOffset = 0; - long tmpSize = totalSize; - for (int j = 0; j < compactedObjectBuilder.streamDataBlocks().size(); j++) { - tmpSize += compactedObjectBuilder.streamDataBlocks().get(j).getBlockSize(); - if (tmpSize > compactionCacheSize) { - endOffset = j; - break; - } - } - if (endOffset != 0) { - CompactedObjectBuilder builder = compactedObjectBuilder.split(0, endOffset); - compactedStreamSetObjectBuilder = addOrMergeCompactedObject(builder, compactedObjects, compactedStreamSetObjectBuilder); - } - } - compactionPlans.add(generateCompactionPlan(compactionOrder++, compactedObjects, compactedStreamSetObjectBuilder)); - compactedObjects.clear(); - compactedStreamSetObjectBuilder = null; - totalSize = 0; - } else { - // object fits into cache size - compactedStreamSetObjectBuilder = addOrMergeCompactedObject(compactedObjectBuilder, compactedObjects, compactedStreamSetObjectBuilder); - totalSize += compactedObjectBuilder.totalBlockSize(); - i++; - } - - } - if (!compactedObjects.isEmpty() || compactedStreamSetObjectBuilder != null) { - compactionPlans.add(generateCompactionPlan(compactionOrder, compactedObjects, compactedStreamSetObjectBuilder)); - } - return compactionPlans; - } - - private CompactedObjectBuilder addOrMergeCompactedObject(CompactedObjectBuilder compactedObjectBuilder, - List compactedObjects, - CompactedObjectBuilder compactedStreamSetObjectBuilder) { - if (compactedObjectBuilder.type() == CompactionType.SPLIT) { - compactedObjects.add(compactedObjectBuilder.build()); - } else { - if (compactedStreamSetObjectBuilder == null) { - compactedStreamSetObjectBuilder = new CompactedObjectBuilder(); - } - compactedStreamSetObjectBuilder.merge(compactedObjectBuilder); - } - return compactedStreamSetObjectBuilder; - } - - private boolean shouldSplitObject(CompactedObjectBuilder compactedObjectBuilder) { - //TODO: split object depends on available cache size and current object size - //TODO: use multipart upload to upload split stream object - return true; - } - - private CompactionPlan generateCompactionPlan(int order, List compactedObjects, - CompactedObjectBuilder compactedStreamSetObject) { - if (compactedStreamSetObject != null) { - compactedObjects.add(compactedStreamSetObject.build()); - } - Map> streamDataBlockMap = new HashMap<>(); - for (CompactedObject compactedObject : compactedObjects) { - for (StreamDataBlock streamDataBlock : compactedObject.streamDataBlocks()) { - streamDataBlockMap.computeIfAbsent(streamDataBlock.getObjectId(), k -> new ArrayList<>()).add(streamDataBlock); - } - } - for (List dataBlocks : streamDataBlockMap.values()) { - dataBlocks.sort(StreamDataBlock.BLOCK_POSITION_COMPARATOR); - } - - return new CompactionPlan(order, new ArrayList<>(compactedObjects), streamDataBlockMap); - } - - /** - * 
Iterate through stream data blocks and group them into different {@link CompactionType}. - * - * @param streamDataBlocks stream data blocks - * @return list of {@link CompactedObjectBuilder} - */ - private List compactObjects(List streamDataBlocks) { - List compactedObjectBuilders = new ArrayList<>(); - CompactedObjectBuilder builder = new CompactedObjectBuilder(); - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - if (builder.lastStreamId() == -1L) { - // init state - builder.addStreamDataBlock(streamDataBlock); - } else if (builder.lastStreamId() == streamDataBlock.getStreamId()) { - // data range from same stream - if (streamDataBlock.getStartOffset() > builder.lastOffset()) { - // data range is not continuous, split current object as StreamObject - builder = splitObject(builder, compactedObjectBuilders); - builder.addStreamDataBlock(streamDataBlock); - } else if (streamDataBlock.getStartOffset() == builder.lastOffset()) { - builder.addStreamDataBlock(streamDataBlock); - } else { - // should not go there - logger.error("FATAL ERROR: illegal stream range position, last offset: {}, curr: {}", - builder.lastOffset(), streamDataBlock); - return new ArrayList<>(); - } - } else { - builder = splitAndAddBlock(builder, streamDataBlock, compactedObjectBuilders); - } - } - if (builder.currStreamBlockSize() > streamSplitSize) { - splitObject(builder, compactedObjectBuilders); - } else { - compactedObjectBuilders.add(builder); - } - return compactedObjectBuilders; - } - - /** - * Filter out objects that have at least one stream data block which can be compacted with other objects. - * - * @param streamDataBlocksMap stream data blocks map, key: object id, value: stream data blocks - * @return filtered stream data blocks map - */ - Map> filterBlocksToCompact(Map> streamDataBlocksMap) { - // group stream data blocks by stream id, key: stream id, value: ids of objects that contains this stream - Map> streamToObjectIds = streamDataBlocksMap.values().stream() - .flatMap(Collection::stream) - .collect(Collectors.groupingBy(StreamDataBlock::getStreamId, Collectors.mapping(StreamDataBlock::getObjectId, Collectors.toSet()))); - Set objectIdsToCompact = streamToObjectIds - .entrySet().stream() - .filter(e -> e.getValue().size() > 1) - .flatMap(e -> e.getValue().stream()) - .collect(Collectors.toSet()); - return streamDataBlocksMap.entrySet().stream() - .filter(e -> objectIdsToCompact.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - } - - private CompactedObjectBuilder splitAndAddBlock(CompactedObjectBuilder builder, - StreamDataBlock streamDataBlock, - List compactedObjectBuilders) { - if (builder.currStreamBlockSize() > streamSplitSize) { - builder = splitObject(builder, compactedObjectBuilders); - } - builder.addStreamDataBlock(streamDataBlock); - return builder; - } - - private CompactedObjectBuilder splitObject(CompactedObjectBuilder builder, - List compactedObjectBuilders) { - CompactedObjectBuilder splitBuilder = builder.splitCurrentStream(); - splitBuilder.setType(CompactionType.SPLIT); - if (builder.totalBlockSize() != 0) { - compactedObjectBuilders.add(builder); - } - compactedObjectBuilders.add(splitBuilder); - builder = new CompactedObjectBuilder(); - return builder; - } - - private static abstract class AbstractCompactedObjectComparator implements Comparator { - protected final Map objectStatsMap; - - public AbstractCompactedObjectComparator(Map objectStatsMap) { - this.objectStatsMap = objectStatsMap; - } - - protected int 
compareCompactedObject(CompactedObjectBuilder o1, CompactedObjectBuilder o2) {
-            return Integer.compare(CompactionUtils.getTotalObjectStats(o1, objectStatsMap),
-                CompactionUtils.getTotalObjectStats(o2, objectStatsMap));
-        }
-    }
-
-    private static class CompactObjectComparator extends AbstractCompactedObjectComparator {
-        public CompactObjectComparator(Map<Long, Integer> objectStatsMap) {
-            super(objectStatsMap);
-        }
-
-        @Override
-        public int compare(CompactedObjectBuilder o1, CompactedObjectBuilder o2) {
-            int compare = Integer.compare(o1.totalStreamNum(), o2.totalStreamNum());
-            if (compare == 0) {
-                return compareCompactedObject(o1, o2);
-            }
-            return compare;
-        }
-    }
-
-    private static class StreamObjectComparator extends AbstractCompactedObjectComparator {
-        public StreamObjectComparator(Map<Long, Integer> objectStatsMap) {
-            super(objectStatsMap);
-        }
-
-        @Override
-        public int compare(CompactedObjectBuilder o1, CompactedObjectBuilder o2) {
-            int compare = Integer.compare(o1.streamDataBlocks().size(), o2.streamDataBlocks().size());
-            if (compare == 0) {
-                return compareCompactedObject(o1, o2);
-            }
-            return compare;
-        }
-    }
-
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionConstants.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionConstants.java
deleted file mode 100644
index de900fbee..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionConstants.java
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.compact;
-
-public class CompactionConstants {
-    public static final int S3_OBJECT_TTL_MINUTES = 24 * 60;
-    public static final int S3_OBJECT_MAX_READ_BATCH = 16 * 1024 * 1024; // 16MB
-}
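CompactionConstants above caps a single read batch at 16 MiB; the CompactionManager that follows additionally throttles total compaction read throughput with a Bucket4j bucket whose capacity and per-second refill both equal the target byte rate, so sustained consumption cannot exceed it. A sketch under that assumption (ThrottledReader and the 50 MiB/s figure are illustrative; the real rate is derived from object sizes and the compaction interval, as the code below shows):

import io.github.bucket4j.Bucket;
import java.time.Duration;

class ThrottledReader {
    // Illustrative budget; not the AutoMQ default.
    private static final long BYTES_PER_SEC = 50L * 1024 * 1024;

    // Capacity == refill-per-second == target rate.
    private final Bucket bucket = Bucket.builder().addLimit(limit -> limit
        .capacity(BYTES_PER_SEC)
        .refillIntervally(BYTES_PER_SEC, Duration.ofSeconds(1))).build();

    void read(long len) throws InterruptedException {
        // Block until `len` tokens (bytes) are available before issuing the ranged S3 read.
        bucket.asBlocking().consume(len);
        // ... issue the read here ...
    }
}

diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionManager.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionManager.java
deleted file mode 100644
index 31473a115..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionManager.java
+++ /dev/null
@@ -1,735 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.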
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.Config; -import com.automq.stream.s3.S3ObjectLogger; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactionType; -import com.automq.stream.s3.compact.operator.DataBlockReader; -import com.automq.stream.s3.compact.operator.DataBlockWriter; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.s3.compact.utils.GroupByOffsetPredicate; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamOffsetRange; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.objects.CommitStreamSetObjectRequest; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.utils.LogContext; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import io.github.bucket4j.Bucket; -import io.netty.util.concurrent.DefaultThreadFactory; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CancellationException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; - -public class CompactionManager { - private static final int MIN_COMPACTION_DELAY_MS = 60000; - // Max refill rate for Bucket: 1 token per nanosecond - private static final int MAX_THROTTLE_BYTES_PER_SEC = 1000000000; - private final Logger logger; - private final Logger s3ObjectLogger; - private final ObjectManager objectManager; - private final StreamManager streamManager; - private final S3Operator s3Operator; - private final CompactionAnalyzer compactionAnalyzer; - private final ScheduledExecutorService compactionScheduledExecutor; - private final ScheduledExecutorService bucketCallbackScheduledExecutor; - private final ExecutorService compactThreadPool; - private final ExecutorService forceSplitThreadPool; - private final CompactionUploader uploader; - private final Config config; - private final int maxObjectNumToCompact; - private final int compactionInterval; - private final int forceSplitObjectPeriod; - private final int maxStreamNumPerStreamSetObject; - private final int maxStreamObjectNumPerCommit; - private final long networkBandwidth; - private final boolean s3ObjectLogEnable; - private final long 
compactionCacheSize; - private final AtomicBoolean running = new AtomicBoolean(false); - private volatile CompletableFuture forceSplitCf = null; - private volatile CompletableFuture compactionCf = null; - private Bucket compactionBucket = null; - - public CompactionManager(Config config, ObjectManager objectManager, StreamManager streamManager, - S3Operator s3Operator) { - String logPrefix = String.format("[CompactionManager id=%d] ", config.nodeId()); - this.logger = new LogContext(logPrefix).logger(CompactionManager.class); - this.s3ObjectLogger = S3ObjectLogger.logger(logPrefix); - this.config = config; - this.objectManager = objectManager; - this.streamManager = streamManager; - this.s3Operator = s3Operator; - this.compactionInterval = config.streamSetObjectCompactionInterval(); - this.forceSplitObjectPeriod = config.streamSetObjectCompactionForceSplitPeriod(); - this.maxObjectNumToCompact = config.streamSetObjectCompactionMaxObjectNum(); - this.s3ObjectLogEnable = config.objectLogEnable(); - this.networkBandwidth = config.networkBaselineBandwidth(); - this.uploader = new CompactionUploader(objectManager, s3Operator, config); - this.compactionCacheSize = config.streamSetObjectCompactionCacheSize(); - long streamSplitSize = config.streamSetObjectCompactionStreamSplitSize(); - maxStreamNumPerStreamSetObject = config.maxStreamNumPerStreamSetObject(); - maxStreamObjectNumPerCommit = config.maxStreamObjectNumPerCommit(); - this.compactionAnalyzer = new CompactionAnalyzer(compactionCacheSize, streamSplitSize, maxStreamNumPerStreamSetObject, - maxStreamObjectNumPerCommit, new LogContext(String.format("[CompactionAnalyzer id=%d] ", config.nodeId()))); - this.compactionScheduledExecutor = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("schedule-compact-executor-%d", true), logger, true, false); - this.bucketCallbackScheduledExecutor = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("s3-data-block-reader-bucket-cb-%d", true), logger, true, false); - this.compactThreadPool = Executors.newFixedThreadPool(1, new DefaultThreadFactory("object-compaction-manager")); - this.forceSplitThreadPool = Executors.newFixedThreadPool(1, new DefaultThreadFactory("force-split-executor")); - this.running.set(true); - this.logger.info("Compaction manager initialized with config: compactionInterval: {} min, compactionCacheSize: {} bytes, " + - "streamSplitSize: {} bytes, forceSplitObjectPeriod: {} min, maxObjectNumToCompact: {}, maxStreamNumInStreamSet: {}, maxStreamObjectNum: {}", - compactionInterval, compactionCacheSize, streamSplitSize, forceSplitObjectPeriod, maxObjectNumToCompact, maxStreamNumPerStreamSetObject, maxStreamObjectNumPerCommit); - } - - public void start() { - scheduleNextCompaction((long) this.compactionInterval * 60 * 1000); - } - - void scheduleNextCompaction(long delayMillis) { - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip scheduling next compaction"); - return; - } - logger.info("Next compaction scheduled in {} ms", delayMillis); - this.compactionScheduledExecutor.schedule(() -> { - TimerUtil timerUtil = new TimerUtil(); - try { - logger.info("Compaction started"); - this.compact() - .thenAccept(result -> logger.info("Compaction complete, total cost {} ms", timerUtil.elapsedAs(TimeUnit.MILLISECONDS))) - .exceptionally(ex -> { - logger.error("Compaction failed, cost {} ms, ", timerUtil.elapsedAs(TimeUnit.MILLISECONDS), ex); - return null; - }).join(); - } catch (Exception ex) { - logger.error("Error while 
compacting objects ", ex); - } - long nextDelay = Math.max(MIN_COMPACTION_DELAY_MS, (long) this.compactionInterval * 60 * 1000 - timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - scheduleNextCompaction(nextDelay); - }, delayMillis, TimeUnit.MILLISECONDS); - } - - public void shutdown() { - if (!running.compareAndSet(true, false)) { - logger.warn("Compaction manager is already shutdown"); - return; - } - logger.info("Shutting down compaction manager"); - synchronized (this) { - if (forceSplitCf != null) { - // prevent block-waiting for force splitting objects - forceSplitCf.cancel(true); - } - if (compactionCf != null) { - // prevent block-waiting for uploading compacted objects - compactionCf.cancel(true); - } - } - this.compactionScheduledExecutor.shutdown(); - try { - if (!this.compactionScheduledExecutor.awaitTermination(10, TimeUnit.SECONDS)) { - this.compactionScheduledExecutor.shutdownNow(); - } - } catch (InterruptedException ignored) { - } - this.bucketCallbackScheduledExecutor.shutdown(); - try { - if (!this.bucketCallbackScheduledExecutor.awaitTermination(10, TimeUnit.SECONDS)) { - this.bucketCallbackScheduledExecutor.shutdownNow(); - } - } catch (InterruptedException ignored) { - } - this.uploader.shutdown(); - logger.info("Compaction manager shutdown complete"); - } - - public CompletableFuture compact() { - return this.objectManager.getServerObjects().thenComposeAsync(objectMetadataList -> { - List streamIds = objectMetadataList.stream().flatMap(e -> e.getOffsetRanges().stream()) - .map(StreamOffsetRange::streamId).distinct().collect(Collectors.toList()); - return this.streamManager.getStreams(streamIds).thenAcceptAsync(streamMetadataList -> - this.compact(streamMetadataList, objectMetadataList), compactThreadPool); - }, compactThreadPool); - } - - private void compact(List streamMetadataList, - List objectMetadataList) throws CompletionException { - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip compaction"); - return; - } - logger.info("Get {} stream set objects from metadata", objectMetadataList.size()); - if (objectMetadataList.isEmpty()) { - return; - } - Map> objectMetadataFilterMap = convertS3Objects(objectMetadataList); - List objectsToForceSplit = objectMetadataFilterMap.get(true); - List objectsToCompact = objectMetadataFilterMap.get(false); - - long totalSize = objectsToForceSplit.stream().mapToLong(S3ObjectMetadata::objectSize).sum(); - totalSize += objectsToCompact.stream().mapToLong(S3ObjectMetadata::objectSize).sum(); - // throttle compaction read to half of compaction interval because of write overhead - int expectCompleteTime = compactionInterval / 2; - long expectReadBytesPerSec; - if (expectCompleteTime > 0) { - expectReadBytesPerSec = totalSize / expectCompleteTime / 60; - if (expectReadBytesPerSec < MAX_THROTTLE_BYTES_PER_SEC) { - compactionBucket = Bucket.builder().addLimit(limit -> limit - .capacity(expectReadBytesPerSec) - .refillIntervally(expectReadBytesPerSec, Duration.ofSeconds(1))).build(); - logger.info("Throttle compaction read to {} bytes/s, expect to complete in no less than {}min", - expectReadBytesPerSec, expectCompleteTime); - } else { - logger.warn("Compaction throttle rate {} bytes/s exceeds bucket refill limit, there will be no throttle for compaction this time", expectReadBytesPerSec); - compactionBucket = null; - } - } else { - logger.warn("Compaction interval {}min is too small, there will be no throttle for compaction this time", compactionInterval); - compactionBucket = null; - } - - if 
(!objectsToForceSplit.isEmpty()) { - // split stream set objects to separated stream objects - forceSplitObjects(streamMetadataList, objectsToForceSplit); - } - // compact stream set objects - compactObjects(streamMetadataList, objectsToCompact); - } - - void forceSplitObjects(List streamMetadataList, List objectsToForceSplit) { - logger.info("Force split {} stream set objects", objectsToForceSplit.size()); - TimerUtil timerUtil = new TimerUtil(); - for (int i = 0; i < objectsToForceSplit.size(); i++) { - if (!running.get()) { - logger.info("Compaction manager is shutdown, abort force split progress"); - return; - } - timerUtil.reset(); - S3ObjectMetadata objectToForceSplit = objectsToForceSplit.get(i); - logger.info("Force split progress {}/{}, splitting object {}, object size {}", i + 1, objectsToForceSplit.size(), - objectToForceSplit.objectId(), objectToForceSplit.objectSize()); - CommitStreamSetObjectRequest request; - try { - request = buildSplitRequest(streamMetadataList, objectToForceSplit); - } catch (Exception ex) { - logger.error("Build force split request for object {} failed, ex: ", objectToForceSplit.objectId(), ex); - continue; - } - if (request == null) { - continue; - } - logger.info("Build force split request for object {} complete, generated {} stream objects, time cost: {} ms, start committing objects", - objectToForceSplit.objectId(), request.getStreamObjects().size(), timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - timerUtil.reset(); - objectManager.commitStreamSetObject(request) - .thenAccept(resp -> { - logger.info("Commit force split request succeed, time cost: {} ms", timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - if (s3ObjectLogEnable) { - s3ObjectLogger.trace("[Compact] {}", request); - } - }) - .exceptionally(ex -> { - logger.error("Commit force split request failed, ex: ", ex); - return null; - }) - .join(); - } - } - - private void compactObjects(List streamMetadataList, List objectsToCompact) - throws CompletionException { - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip compacting objects"); - return; - } - if (objectsToCompact.isEmpty()) { - return; - } - // sort by S3 object data time in descending order - objectsToCompact.sort((o1, o2) -> Long.compare(o2.dataTimeInMs(), o1.dataTimeInMs())); - if (maxObjectNumToCompact < objectsToCompact.size()) { - // compact latest S3 objects first when number of objects to compact exceeds maxObjectNumToCompact - objectsToCompact = objectsToCompact.subList(0, maxObjectNumToCompact); - } - logger.info("Compact {} stream set objects", objectsToCompact.size()); - TimerUtil timerUtil = new TimerUtil(); - CommitStreamSetObjectRequest request = buildCompactRequest(streamMetadataList, objectsToCompact); - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip committing compaction request"); - return; - } - if (request == null) { - return; - } - if (request.getCompactedObjectIds().isEmpty()) { - logger.info("No stream set objects to compact"); - return; - } - logger.info("Build compact request for {} stream set objects complete, stream set object id: {}, stream set object size: {}, stream object num: {}, time cost: {} ms, start committing objects", - request.getCompactedObjectIds().size(), request.getObjectId(), request.getObjectSize(), request.getStreamObjects().size(), timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - timerUtil.reset(); - objectManager.commitStreamSetObject(request) - .thenAccept(resp -> { - logger.info("Commit compact request succeed, time cost: {} ms", 
timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - if (s3ObjectLogEnable) { - s3ObjectLogger.trace("[Compact] {}", request); - } - }) - .exceptionally(ex -> { - logger.error("Commit compact request failed, ex: ", ex); - return null; - }) - .join(); - } - - private void logCompactionPlans(List compactionPlans, Set excludedObjectIds) { - if (compactionPlans.isEmpty()) { - logger.info("No compaction plans to execute"); - return; - } - long streamObjectNum = compactionPlans.stream() - .mapToLong(p -> p.compactedObjects().stream() - .filter(o -> o.type() == CompactionType.SPLIT) - .count()) - .sum(); - long streamSetObjectSize = compactionPlans.stream() - .mapToLong(p -> p.compactedObjects().stream() - .filter(o -> o.type() == CompactionType.COMPACT) - .mapToLong(CompactedObject::size) - .sum()) - .sum(); - int streamSetObjectNum = streamSetObjectSize > 0 ? 1 : 0; - logger.info("Compaction plans: expect to generate {} Stream Object, {} stream set object with size {} in {} iterations, objects excluded: {}", - streamObjectNum, streamSetObjectNum, streamSetObjectSize, compactionPlans.size(), excludedObjectIds); - } - - public CompletableFuture forceSplitAll() { - CompletableFuture cf = new CompletableFuture<>(); - //TODO: deal with metadata delay - this.compactionScheduledExecutor.execute(() -> this.objectManager.getServerObjects().thenAcceptAsync(objectMetadataList -> { - List streamIds = objectMetadataList.stream().flatMap(e -> e.getOffsetRanges().stream()) - .map(StreamOffsetRange::streamId).distinct().collect(Collectors.toList()); - this.streamManager.getStreams(streamIds).thenAcceptAsync(streamMetadataList -> { - if (objectMetadataList.isEmpty()) { - logger.info("No stream set objects to force split"); - return; - } - forceSplitObjects(streamMetadataList, objectMetadataList); - cf.complete(null); - }, forceSplitThreadPool); - }, forceSplitThreadPool).exceptionally(ex -> { - logger.error("Error while force split all stream set objects ", ex); - cf.completeExceptionally(ex); - return null; - })); - - return cf; - } - - /** - * Split specified stream set object into stream objects. 
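The throttle sizing in compact() above is worth isolating: the read rate is chosen so reads finish in roughly half the interval, leaving headroom for writes, and throttling is skipped when the computed rate would exceed bucket4j's refill ceiling. A hedged sketch using the same bucket4j builder calls as the deleted code; the zero-rate guard is an addition for safety, not present in the original:

```java
import io.github.bucket4j.Bucket;
import java.time.Duration;

public class ThrottleSizing {
    // Mirrors CompactionManager.MAX_THROTTLE_BYTES_PER_SEC (bucket4j refill ceiling).
    static final long MAX_THROTTLE_BYTES_PER_SEC = 1_000_000_000L;

    // Returns a token bucket limiting compaction reads, or null when throttling
    // is skipped (interval too small, nothing to read, or rate above the ceiling).
    static Bucket buildCompactionBucket(long totalSize, int compactionIntervalMin) {
        int expectCompleteTime = compactionIntervalMin / 2; // leave half for writes
        if (expectCompleteTime <= 0) {
            return null;
        }
        long bytesPerSec = totalSize / expectCompleteTime / 60;
        if (bytesPerSec <= 0 || bytesPerSec >= MAX_THROTTLE_BYTES_PER_SEC) {
            return null; // zero-rate guard added here; not in the original
        }
        return Bucket.builder()
            .addLimit(limit -> limit.capacity(bytesPerSec)
                .refillIntervally(bytesPerSec, Duration.ofSeconds(1)))
            .build();
    }
}
```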
- * - * @param streamMetadataList metadata of opened streams - * @param objectMetadata stream set object to split - * @param cfs List of CompletableFuture of StreamObject - * @return true if split succeeds, false otherwise - */ - private boolean splitStreamSetObject(List streamMetadataList, - S3ObjectMetadata objectMetadata, Collection> cfs) { - if (objectMetadata == null) { - return false; - } - - Map> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, - Collections.singletonList(objectMetadata), s3Operator, logger); - if (streamDataBlocksMap.isEmpty()) { - logger.warn("Read index for object {} failed", objectMetadata.objectId()); - return false; - } - List streamDataBlocks = streamDataBlocksMap.get(objectMetadata.objectId()); - if (streamDataBlocks.isEmpty()) { - // object is empty, metadata is out of date - logger.info("Object {} is out of date, will be deleted after compaction", objectMetadata.objectId()); - return true; - } - - cfs.addAll(groupAndSplitStreamDataBlocks(objectMetadata, streamDataBlocks)); - return true; - } - - Collection> groupAndSplitStreamDataBlocks(S3ObjectMetadata objectMetadata, - List streamDataBlocks) { - List, CompletableFuture>> groupedDataBlocks = new ArrayList<>(); - List> groupedStreamDataBlocks = CompactionUtils.groupStreamDataBlocks(streamDataBlocks, new GroupByOffsetPredicate()); - for (List group : groupedStreamDataBlocks) { - groupedDataBlocks.add(new ImmutablePair<>(group, new CompletableFuture<>())); - } - logger.info("Force split object {}, expect to generate {} stream objects", objectMetadata.objectId(), groupedDataBlocks.size()); - - int index = 0; - while (index < groupedDataBlocks.size()) { - List, CompletableFuture>> batchGroup = new ArrayList<>(); - long readSize = 0; - while (index < groupedDataBlocks.size()) { - Pair, CompletableFuture> group = groupedDataBlocks.get(index); - List groupedStreamDataBlock = group.getLeft(); - long size = groupedStreamDataBlock.get(groupedStreamDataBlock.size() - 1).getBlockEndPosition() - - groupedStreamDataBlock.get(0).getBlockStartPosition(); - if (readSize + size > compactionCacheSize) { - break; - } - readSize += size; - batchGroup.add(group); - index++; - } - if (batchGroup.isEmpty()) { - logger.error("Force split object failed, unable to read any data block; compactionCacheSize may be too small"); - return new ArrayList<>(); - } - // prepare N stream objects at one time - objectManager.prepareObject(batchGroup.size(), TimeUnit.MINUTES.toMillis(CompactionConstants.S3_OBJECT_TTL_MINUTES)) - .thenComposeAsync(objectId -> { - List blocksToRead = batchGroup.stream().flatMap(p -> p.getLeft().stream()).collect(Collectors.toList()); - DataBlockReader reader = new DataBlockReader(objectMetadata, s3Operator, compactionBucket, bucketCallbackScheduledExecutor); - // batch read - reader.readBlocks(blocksToRead, Math.min(CompactionConstants.S3_OBJECT_MAX_READ_BATCH, networkBandwidth)); - - List> cfs = new ArrayList<>(); - for (Pair, CompletableFuture> pair : batchGroup) { - List blocks = pair.getLeft(); - DataBlockWriter writer = new DataBlockWriter(objectId, s3Operator, config.objectPartSize()); - CompletableFuture cf = CompactionUtils.chainWriteDataBlock(writer, blocks, forceSplitThreadPool); - long finalObjectId = objectId; - cfs.add(cf.thenAccept(nil -> writer.close()).whenComplete((ret, ex) -> { - if (ex != null) { - logger.error("write to stream object {} failed", finalObjectId, ex); - writer.release(); - blocks.forEach(StreamDataBlock::release); - return; - } - StreamObject 
streamObject = new StreamObject(); - streamObject.setObjectId(finalObjectId); - streamObject.setStreamId(blocks.get(0).getStreamId()); - streamObject.setStartOffset(blocks.get(0).getStartOffset()); - streamObject.setEndOffset(blocks.get(blocks.size() - 1).getEndOffset()); - streamObject.setObjectSize(writer.size()); - pair.getValue().complete(streamObject); - })); - objectId++; - } - return CompletableFuture.allOf(cfs.toArray(new CompletableFuture[0])); - }, forceSplitThreadPool) - .exceptionally(ex -> { - logger.error("Force split object {} failed", objectMetadata.objectId(), ex); - for (Pair, CompletableFuture> pair : groupedDataBlocks) { - pair.getValue().completeExceptionally(ex); - } - throw new IllegalStateException(String.format("Force split object %d failed", objectMetadata.objectId()), ex); - }).join(); - } - - return groupedDataBlocks.stream().map(Pair::getValue).collect(Collectors.toList()); - } - - CommitStreamSetObjectRequest buildSplitRequest(List streamMetadataList, - S3ObjectMetadata objectToSplit) throws CompletionException { - List> cfs = new ArrayList<>(); - boolean status = splitStreamSetObject(streamMetadataList, objectToSplit, cfs); - if (!status) { - logger.error("Force split object {} failed, no stream object generated", objectToSplit.objectId()); - return null; - } - - CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest(); - request.setObjectId(-1L); - - // wait for all force split objects to complete - synchronized (this) { - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip waiting for force splitting objects"); - return null; - } - forceSplitCf = CompletableFuture.allOf(cfs.toArray(new CompletableFuture[0])); - } - try { - forceSplitCf.join(); - } catch (CancellationException exception) { - logger.info("Force split objects cancelled"); - return null; - } - forceSplitCf = null; - cfs.stream().map(e -> { - try { - return e.join(); - } catch (Exception ignored) { - return null; - } - }).filter(Objects::nonNull).forEach(request::addStreamObject); - - request.setCompactedObjectIds(Collections.singletonList(objectToSplit.objectId())); - if (isSanityCheckFailed(streamMetadataList, Collections.singletonList(objectToSplit), request)) { - logger.error("Sanity check failed, force split result is illegal"); - return null; - } - - return request; - } - - CommitStreamSetObjectRequest buildCompactRequest(List streamMetadataList, - List objectsToCompact) - throws CompletionException { - CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest(); - request.setObjectId(-1L); - - Set compactedObjectIds = new HashSet<>(); - logger.info("{} stream set objects as compact candidates, total compaction size: {}", - objectsToCompact.size(), objectsToCompact.stream().mapToLong(S3ObjectMetadata::objectSize).sum()); - Map> streamDataBlockMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, - objectsToCompact, s3Operator, logger); - for (List blocks : streamDataBlockMap.values()) { - for (StreamDataBlock block : blocks) { - if (block.getBlockSize() > compactionCacheSize) { - logger.error("Block {} size exceeds compaction cache size {}, skip compaction", block, compactionCacheSize); - return null; - } - } - } - long now = System.currentTimeMillis(); - Set excludedObjectIds = new HashSet<>(); - List compactionPlans = this.compactionAnalyzer.analyze(streamDataBlockMap, excludedObjectIds); - logger.info("Analyze compaction plans complete, cost {}ms", System.currentTimeMillis() - now); - logCompactionPlans(compactionPlans, 
excludedObjectIds); - objectsToCompact = objectsToCompact.stream().filter(e -> !excludedObjectIds.contains(e.objectId())).collect(Collectors.toList()); - executeCompactionPlans(request, compactionPlans, objectsToCompact); - - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip constructing compaction request"); - return null; - } - - compactionPlans.forEach(c -> c.streamDataBlocksMap().values().forEach(v -> v.forEach(b -> compactedObjectIds.add(b.getObjectId())))); - - // compact out-dated objects directly - streamDataBlockMap.entrySet().stream().filter(e -> e.getValue().isEmpty()).forEach(e -> { - logger.info("Object {} is out of date, will be deleted after compaction", e.getKey()); - compactedObjectIds.add(e.getKey()); - }); - - request.setCompactedObjectIds(new ArrayList<>(compactedObjectIds)); - List compactedObjectMetadata = objectsToCompact.stream() - .filter(e -> compactedObjectIds.contains(e.objectId())).collect(Collectors.toList()); - if (isSanityCheckFailed(streamMetadataList, compactedObjectMetadata, request)) { - logger.error("Sanity check failed, compaction result is illegal"); - return null; - } - - return request; - } - - boolean isSanityCheckFailed(List streamMetadataList, List compactedObjects, - CommitStreamSetObjectRequest request) { - Map streamMetadataMap = streamMetadataList.stream() - .collect(Collectors.toMap(StreamMetadata::streamId, e -> e)); - Map objectMetadataMap = compactedObjects.stream() - .collect(Collectors.toMap(S3ObjectMetadata::objectId, e -> e)); - - List compactedStreamOffsetRanges = new ArrayList<>(); - request.getStreamRanges().forEach(o -> compactedStreamOffsetRanges.add(new StreamOffsetRange(o.getStreamId(), o.getStartOffset(), o.getEndOffset()))); - request.getStreamObjects().forEach(o -> compactedStreamOffsetRanges.add(new StreamOffsetRange(o.getStreamId(), o.getStartOffset(), o.getEndOffset()))); - Map> sortedStreamOffsetRanges = compactedStreamOffsetRanges.stream() - .collect(Collectors.groupingBy(StreamOffsetRange::streamId)); - sortedStreamOffsetRanges.replaceAll((k, v) -> sortAndMerge(v)); - for (long objectId : request.getCompactedObjectIds()) { - S3ObjectMetadata metadata = objectMetadataMap.get(objectId); - for (StreamOffsetRange streamOffsetRange : metadata.getOffsetRanges()) { - if (!streamMetadataMap.containsKey(streamOffsetRange.streamId())) { - // skip non-exist stream - continue; - } - long streamStartOffset = streamMetadataMap.get(streamOffsetRange.streamId()).startOffset(); - if (streamOffsetRange.endOffset() <= streamStartOffset) { - // skip stream offset range that has been trimmed - continue; - } - if (streamOffsetRange.startOffset() < streamStartOffset) { - // trim stream offset range - streamOffsetRange = new StreamOffsetRange(streamOffsetRange.streamId(), streamStartOffset, streamOffsetRange.endOffset()); - } - if (!sortedStreamOffsetRanges.containsKey(streamOffsetRange.streamId())) { - logger.error("Sanity check failed, stream {} is missing after compact", streamOffsetRange.streamId()); - return true; - } - boolean contained = false; - for (StreamOffsetRange compactedStreamOffsetRange : sortedStreamOffsetRanges.get(streamOffsetRange.streamId())) { - if (streamOffsetRange.startOffset() >= compactedStreamOffsetRange.startOffset() - && streamOffsetRange.endOffset() <= compactedStreamOffsetRange.endOffset()) { - contained = true; - break; - } - } - if (!contained) { - logger.error("Sanity check failed, object {} offset range {} is missing after compact", objectId, streamOffsetRange); - return true; - } - 
} - } - - return false; - } - - private List sortAndMerge(List streamOffsetRangeList) { - if (streamOffsetRangeList.size() < 2) { - return streamOffsetRangeList; - } - long streamId = streamOffsetRangeList.get(0).streamId(); - Collections.sort(streamOffsetRangeList); - List mergedList = new ArrayList<>(); - long start = -1L; - long end = -1L; - for (int i = 0; i < streamOffsetRangeList.size() - 1; i++) { - StreamOffsetRange curr = streamOffsetRangeList.get(i); - StreamOffsetRange next = streamOffsetRangeList.get(i + 1); - if (start == -1) { - start = curr.startOffset(); - end = curr.endOffset(); - } - if (curr.endOffset() < next.startOffset()) { - mergedList.add(new StreamOffsetRange(curr.streamId(), start, end)); - start = next.startOffset(); - } - end = next.endOffset(); - } - mergedList.add(new StreamOffsetRange(streamId, start, end)); - - return mergedList; - } - - Map> convertS3Objects(List streamSetObjectMetadata) { - return new HashMap<>(streamSetObjectMetadata.stream() - .collect(Collectors.partitioningBy(e -> (System.currentTimeMillis() - e.dataTimeInMs()) - >= TimeUnit.MINUTES.toMillis(this.forceSplitObjectPeriod)))); - } - - void executeCompactionPlans(CommitStreamSetObjectRequest request, List compactionPlans, - List s3ObjectMetadata) - throws CompletionException { - if (compactionPlans.isEmpty()) { - return; - } - Map s3ObjectMetadataMap = s3ObjectMetadata.stream() - .collect(Collectors.toMap(S3ObjectMetadata::objectId, e -> e)); - List sortedStreamDataBlocks = new ArrayList<>(); - for (int i = 0; i < compactionPlans.size(); i++) { - if (!running.get()) { - logger.info("Compaction manager is shutdown, abort compaction progress"); - return; - } - // iterate over each compaction plan - CompactionPlan compactionPlan = compactionPlans.get(i); - long totalSize = compactionPlan.streamDataBlocksMap().values().stream().flatMap(List::stream) - .mapToLong(StreamDataBlock::getBlockSize).sum(); - logger.info("Compaction progress {}/{}, read from {} stream set objects, total size: {}", i + 1, compactionPlans.size(), - compactionPlan.streamDataBlocksMap().size(), totalSize); - for (Map.Entry> streamDataBlocEntry : compactionPlan.streamDataBlocksMap().entrySet()) { - S3ObjectMetadata metadata = s3ObjectMetadataMap.get(streamDataBlocEntry.getKey()); - List streamDataBlocks = streamDataBlocEntry.getValue(); - DataBlockReader reader = new DataBlockReader(metadata, s3Operator, compactionBucket, bucketCallbackScheduledExecutor); - reader.readBlocks(streamDataBlocks, Math.min(CompactionConstants.S3_OBJECT_MAX_READ_BATCH, networkBandwidth)); - } - List> streamObjectCfList = new ArrayList<>(); - CompletableFuture streamSetObjectChainWriteCf = CompletableFuture.completedFuture(null); - for (CompactedObject compactedObject : compactionPlan.compactedObjects()) { - if (compactedObject.type() == CompactionType.COMPACT) { - sortedStreamDataBlocks.addAll(compactedObject.streamDataBlocks()); - streamSetObjectChainWriteCf = uploader.chainWriteStreamSetObject(streamSetObjectChainWriteCf, compactedObject); - } else { - streamObjectCfList.add(uploader.writeStreamObject(compactedObject)); - } - } - - List> cfList = new ArrayList<>(); - cfList.add(streamSetObjectChainWriteCf); - cfList.addAll(streamObjectCfList); - synchronized (this) { - if (!running.get()) { - logger.info("Compaction manager is shutdown, skip waiting for uploading objects"); - return; - } - // wait for all stream objects and stream set object part to be uploaded - compactionCf = CompletableFuture.allOf(cfList.toArray(new CompletableFuture[0])) 
- .thenCompose(v -> uploader.forceUploadStreamSetObject()) - .exceptionally(ex -> { - uploader.release().thenAccept(v -> { - for (CompactedObject compactedObject : compactionPlan.compactedObjects()) { - compactedObject.streamDataBlocks().forEach(StreamDataBlock::release); - } - }).join(); - throw new IllegalStateException("Error while uploading compaction objects", ex); - }); - } - try { - compactionCf.join(); - } catch (CancellationException ex) { - logger.warn("Compaction progress {}/{} is cancelled", i + 1, compactionPlans.size()); - return; - } - compactionCf = null; - - streamObjectCfList.stream().map(CompletableFuture::join).forEach(request::addStreamObject); - - List compactedObjects = compactionPlan.compactedObjects(); - for (CompactedObject compactedObject : compactedObjects) { - for (StreamDataBlock block : compactedObject.streamDataBlocks()) { - if (block.getDataCf().join().refCnt() > 0) { - logger.error("Block {} is not released after compaction, compact type: {}", block, compactedObject.type()); - } - } - } - } - List objectStreamRanges = CompactionUtils.buildObjectStreamRangeFromGroup( - CompactionUtils.groupStreamDataBlocks(sortedStreamDataBlocks, new GroupByOffsetPredicate())); - objectStreamRanges.forEach(request::addStreamRange); - request.setObjectId(uploader.getStreamSetObjectId()); - // set order id to be the first object id of compacted objects - request.setOrderId(s3ObjectMetadata.get(0).objectId()); - request.setObjectSize(uploader.complete()); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionPlan.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionPlan.java deleted file mode 100644 index 268a8412b..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionPlan.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObject; -import java.util.List; -import java.util.Map; - -public class CompactionPlan { - private final int order; - private final List compactedObjects; - private final Map> streamDataBlocksMap; - - public CompactionPlan(int order, List compactedObjects, - Map> streamDataBlocksMap) { - this.order = order; - this.compactedObjects = compactedObjects; - this.streamDataBlocksMap = streamDataBlocksMap; - } - - public int order() { - return order; - } - - public List compactedObjects() { - return compactedObjects; - } - - public Map> streamDataBlocksMap() { - return streamDataBlocksMap; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionStats.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionStats.java deleted file mode 100644 index 83b5a51ba..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionStats.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
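The sanity check above depends on sortAndMerge to coalesce the committed offset ranges of each stream before containment is tested. A self-contained sketch of that coalescing, with Range standing in for StreamOffsetRange; taking the max of the end offsets keeps the merge correct even when one range fully contains the next:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class RangeMerge {
    record Range(long start, long end) { }

    static List<Range> sortAndMerge(List<Range> ranges) {
        ranges.sort(Comparator.comparingLong(Range::start));
        List<Range> merged = new ArrayList<>();
        Range curr = null;
        for (Range r : ranges) {
            if (curr == null) {
                curr = r;
            } else if (r.start() <= curr.end()) {
                // overlapping or touching: extend, keeping the larger end offset
                curr = new Range(curr.start(), Math.max(curr.end(), r.end()));
            } else {
                merged.add(curr);
                curr = r;
            }
        }
        if (curr != null) {
            merged.add(curr);
        }
        return merged;
    }

    public static void main(String[] args) {
        List<Range> input = new ArrayList<>(List.of(
            new Range(0, 10), new Range(10, 20), new Range(25, 30)));
        System.out.println(sortAndMerge(input));
        // [Range[start=0, end=20], Range[start=25, end=30]]
    }
}
```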
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.compact.objects.CompactedObjectBuilder; -import com.automq.stream.s3.compact.objects.CompactionType; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -public class CompactionStats { - private final CompactionStreamRecord streamRecord; - private final Map s3ObjectToCompactedObjectNumMap; - - private CompactionStats(CompactionStreamRecord streamRecord, Map s3ObjectToCompactedObjectNumMap) { - this.streamRecord = streamRecord; - this.s3ObjectToCompactedObjectNumMap = s3ObjectToCompactedObjectNumMap; - } - - public static CompactionStats of(List compactedObjectBuilders) { - int streamNumInStreamSet = 0; - int streamObjectNum = 0; - Map tmpObjectRecordMap = new HashMap<>(); - for (CompactedObjectBuilder compactedObjectBuilder : compactedObjectBuilders) { - Set objectIdSet = compactedObjectBuilder.uniqueObjectIds(); - for (Long objectId : objectIdSet) { - tmpObjectRecordMap.putIfAbsent(objectId, 0); - tmpObjectRecordMap.put(objectId, tmpObjectRecordMap.get(objectId) + 1); - } - if (compactedObjectBuilder.type() == CompactionType.SPLIT && !compactedObjectBuilder.streamDataBlocks().isEmpty()) { - streamObjectNum++; - } else if (compactedObjectBuilder.type() == CompactionType.COMPACT) { - streamNumInStreamSet += compactedObjectBuilder.totalStreamNum(); - } - } - return new CompactionStats(new CompactionStreamRecord(streamNumInStreamSet, streamObjectNum), tmpObjectRecordMap); - } - - public CompactionStreamRecord getStreamRecord() { - return streamRecord; - } - - public Map getS3ObjectToCompactedObjectNumMap() { - return s3ObjectToCompactedObjectNumMap; - } - - public static final class CompactionStreamRecord { - private final int streamNumInStreamSet; - private final int streamObjectNum; - - public CompactionStreamRecord(int streamNumInStreamSet, int streamObjectNum) { - this.streamNumInStreamSet = streamNumInStreamSet; - this.streamObjectNum = streamObjectNum; - } - - public int streamNumInStreamSet() { - return streamNumInStreamSet; - } - - public int streamObjectNum() { - return streamObjectNum; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (CompactionStreamRecord) obj; - return this.streamNumInStreamSet == that.streamNumInStreamSet && - this.streamObjectNum == that.streamObjectNum; - } - - @Override - public int hashCode() { - return Objects.hash(streamNumInStreamSet, streamObjectNum); - } - - @Override - public String toString() { - return "CompactionStreamRecord[" + - "streamNumInStreamSet=" + streamNumInStreamSet + ", " + - "streamObjectNum=" + streamObjectNum + ']'; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionUploader.java b/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionUploader.java deleted file mode 100644 index dfa76c147..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/CompactionUploader.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
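CompactionStats.of above counts, for every source S3 object, how many compacted objects will draw data from it. A minimal equivalent of that counting loop; Map.merge is an idiomatic stand-in for the putIfAbsent/put pair used in the original:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ObjectRefCount {
    // Each inner list holds the unique source object ids of one compacted object.
    static Map<Long, Integer> countRefs(List<List<Long>> idsPerCompactedObject) {
        Map<Long, Integer> counts = new HashMap<>();
        for (List<Long> objectIds : idsPerCompactedObject) {
            for (Long id : objectIds) {
                counts.merge(id, 1, Integer::sum); // increment, starting from 1
            }
        }
        return counts;
    }

    public static void main(String[] args) {
        System.out.println(countRefs(List.of(List.of(1L, 2L), List.of(2L, 3L))));
        // {1=1, 2=2, 3=1}
    }
}
```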
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.Config; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactionType; -import com.automq.stream.s3.compact.operator.DataBlockWriter; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CompactionUploader { - private final static Logger LOGGER = LoggerFactory.getLogger(CompactionUploader.class); - private final ObjectManager objectManager; - private final ExecutorService streamObjectUploadPool; - private final ExecutorService streamSetObjectUploadPool; - private final S3Operator s3Operator; - private final Config config; - private CompletableFuture streamSetObjectIdCf = null; - private DataBlockWriter streamSetObjectWriter = null; - private volatile boolean isAborted = false; - private volatile boolean isShutdown = false; - - public CompactionUploader(ObjectManager objectManager, S3Operator s3Operator, Config config) { - this.objectManager = objectManager; - this.s3Operator = s3Operator; - this.config = config; - this.streamObjectUploadPool = Threads.newFixedThreadPool(config.streamSetObjectCompactionUploadConcurrency(), - ThreadUtils.createThreadFactory("compaction-stream-object-uploader-%d", true), LOGGER); - this.streamSetObjectUploadPool = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("compaction-stream-set-object-uploader-%d", true), LOGGER); - } - - public void shutdown() { - this.isShutdown = true; - this.streamSetObjectUploadPool.shutdown(); - try { - if (!this.streamSetObjectUploadPool.awaitTermination(10, TimeUnit.SECONDS)) { - this.streamSetObjectUploadPool.shutdownNow(); - } - } catch (InterruptedException ignored) { - } - - this.streamObjectUploadPool.shutdown(); - try { - if (!this.streamObjectUploadPool.awaitTermination(10, TimeUnit.SECONDS)) { - this.streamObjectUploadPool.shutdownNow(); - } - } catch (InterruptedException ignored) { - } - } - - public CompletableFuture chainWriteStreamSetObject(CompletableFuture prev, - CompactedObject compactedObject) { - if (compactedObject.type() != CompactionType.COMPACT) { - return CompletableFuture.failedFuture(new IllegalArgumentException("wrong compacted object type, expected COMPACT")); - } - if (compactedObject.streamDataBlocks().isEmpty()) { - return CompletableFuture.completedFuture(null); - } - if (prev == null) { - return prepareObjectAndWrite(compactedObject); - } - return prev.thenCompose(v -> prepareObjectAndWrite(compactedObject)); - } - - private CompletableFuture prepareObjectAndWrite(CompactedObject compactedObject) { - if (streamSetObjectIdCf == null) { - streamSetObjectIdCf = this.objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(CompactionConstants.S3_OBJECT_TTL_MINUTES)); 
- } - return streamSetObjectIdCf.thenComposeAsync(objectId -> { - if (streamSetObjectWriter == null) { - streamSetObjectWriter = new DataBlockWriter(objectId, s3Operator, config.objectPartSize()); - } - return CompactionUtils.chainWriteDataBlock(streamSetObjectWriter, compactedObject.streamDataBlocks(), streamSetObjectUploadPool); - }, streamSetObjectUploadPool); - } - - public CompletableFuture writeStreamObject(CompactedObject compactedObject) { - if (compactedObject.type() != CompactionType.SPLIT) { - return CompletableFuture.failedFuture(new IllegalArgumentException("wrong compacted object type, expected SPLIT")); - } - if (compactedObject.streamDataBlocks().isEmpty()) { - return CompletableFuture.completedFuture(null); - } - return objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(CompactionConstants.S3_OBJECT_TTL_MINUTES)) - .thenComposeAsync(objectId -> { - if (isAborted) { - // release data that has not been uploaded - compactedObject.streamDataBlocks().forEach(StreamDataBlock::release); - return CompletableFuture.completedFuture(null); - } - DataBlockWriter dataBlockWriter = new DataBlockWriter(objectId, s3Operator, config.objectPartSize()); - CompletableFuture cf = CompactionUtils.chainWriteDataBlock(dataBlockWriter, compactedObject.streamDataBlocks(), streamObjectUploadPool); - return cf.thenCompose(nil -> dataBlockWriter.close()).thenApply(nil -> { - StreamObject streamObject = new StreamObject(); - streamObject.setObjectId(objectId); - streamObject.setStreamId(compactedObject.streamDataBlocks().get(0).getStreamId()); - streamObject.setStartOffset(compactedObject.streamDataBlocks().get(0).getStartOffset()); - streamObject.setEndOffset(compactedObject.streamDataBlocks().get(compactedObject.streamDataBlocks().size() - 1).getEndOffset()); - streamObject.setObjectSize(dataBlockWriter.size()); - return streamObject; - }).whenComplete((ret, ex) -> { - if (ex != null) { - if (!isShutdown) { - LOGGER.error("write to stream object {} failed", objectId, ex); - } - dataBlockWriter.release(); - compactedObject.streamDataBlocks().forEach(StreamDataBlock::release); - } - }); - }, streamObjectUploadPool); - } - - public CompletableFuture forceUploadStreamSetObject() { - if (streamSetObjectWriter == null) { - return CompletableFuture.completedFuture(null); - } - return streamSetObjectWriter.forceUpload(); - } - - public long complete() { - if (streamSetObjectWriter == null) { - return 0L; - } - streamSetObjectWriter.close().join(); - long writeSize = streamSetObjectWriter.size(); - reset(); - return writeSize; - } - - public CompletableFuture release() { - isAborted = true; - CompletableFuture cf = CompletableFuture.completedFuture(null); - if (streamSetObjectWriter != null) { - cf = streamSetObjectWriter.release(); - } - return cf.thenAccept(nil -> reset()); - } - - private void reset() { - streamSetObjectIdCf = null; - streamSetObjectWriter = null; - isAborted = false; - } - - public long getStreamSetObjectId() { - if (streamSetObjectIdCf == null) { - return -1; - } - return streamSetObjectIdCf.getNow(-1L); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObject.java b/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObject.java deleted file mode 100644 index 5f40d4004..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObject.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
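chainWriteStreamSetObject above serializes stream set object writes by composing each upload onto the previous future, so data blocks land in order without explicit locking. A hedged sketch of that idiom; the runAsync bodies stand in for actual part uploads:

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public class ChainedWrites {
    // Start the write immediately if there is no predecessor, otherwise
    // only after the previous write has completed.
    static CompletableFuture<Void> chain(CompletableFuture<Void> prev,
        Supplier<CompletableFuture<Void>> write) {
        return prev == null ? write.get() : prev.thenCompose(v -> write.get());
    }

    public static void main(String[] args) {
        CompletableFuture<Void> cf = null;
        for (String part : List.of("part-a", "part-b", "part-c")) {
            cf = chain(cf, () -> CompletableFuture.runAsync(() -> System.out.println(part)));
        }
        cf.join(); // always prints part-a, part-b, part-c in order
    }
}
```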
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.objects; - -import com.automq.stream.s3.StreamDataBlock; -import java.util.List; - -public class CompactedObject { - private final CompactionType type; - private final List streamDataBlocks; - private final long size; - - public CompactedObject(CompactionType type, List streamDataBlocks) { - this.type = type; - this.streamDataBlocks = streamDataBlocks; - this.size = streamDataBlocks.stream().mapToLong(StreamDataBlock::getBlockSize).sum(); - } - - public CompactionType type() { - return type; - } - - public List streamDataBlocks() { - return this.streamDataBlocks; - } - - public long size() { - return size; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObjectBuilder.java b/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObjectBuilder.java deleted file mode 100644 index c2ef2a567..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactedObjectBuilder.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.objects; - -import com.automq.stream.s3.StreamDataBlock; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -public class CompactedObjectBuilder { - private final List streamDataBlocks; - private CompactionType type; - private int currStreamIndexHead; - private int currStreamIndexTail; - - public CompactedObjectBuilder() { - this.type = CompactionType.COMPACT; - this.streamDataBlocks = new ArrayList<>(); - this.currStreamIndexHead = -1; - this.currStreamIndexTail = -1; - } - - public CompactedObjectBuilder splitCurrentStream() { - return split(currStreamIndexHead, currStreamIndexTail); - } - - public CompactedObjectBuilder split(int start, int end) { - if (start < 0 || end > currStreamIndexTail) { - // split out of range - return new CompactedObjectBuilder(); - } - CompactedObjectBuilder builder = new CompactedObjectBuilder(); - List streamRangePositionsSubList = streamDataBlocks.subList(start, end); - for (StreamDataBlock streamRangePosition : streamRangePositionsSubList) { - builder.addStreamDataBlock(streamRangePosition); - } - builder.setType(type); - streamRangePositionsSubList.clear(); - resetCurrStreamPosition(); - return builder; - } - - private void resetCurrStreamPosition() { - currStreamIndexHead = -1; - currStreamIndexTail = -1; - long currStreamId = -1; - for (int i = 0; i < streamDataBlocks.size(); i++) { - StreamDataBlock streamDataBlock = streamDataBlocks.get(i); - if (currStreamId != streamDataBlock.getStreamId()) { - currStreamId = streamDataBlock.getStreamId(); - currStreamIndexHead = i; - } - currStreamIndexTail = i + 1; - } - } - - private List getCurrentStreamRangePositions() { - if (currStreamIndexHead == -1 || currStreamIndexTail == -1) { - return new ArrayList<>(); - } - return streamDataBlocks.subList(currStreamIndexHead, 
currStreamIndexTail); - } - - public CompactedObjectBuilder setType(CompactionType type) { - this.type = type; - return this; - } - - public CompactionType type() { - return this.type; - } - - public long lastStreamId() { - if (streamDataBlocks.isEmpty()) { - return -1; - } - return streamDataBlocks.get(streamDataBlocks.size() - 1).getStreamId(); - } - - public long lastOffset() { - if (streamDataBlocks.isEmpty()) { - return -1; - } - return streamDataBlocks.get(streamDataBlocks.size() - 1).getEndOffset(); - } - - public CompactedObjectBuilder addStreamDataBlock(StreamDataBlock streamDataBlock) { - if (streamDataBlock.getStreamId() != lastStreamId()) { - this.currStreamIndexHead = this.streamDataBlocks.size(); - } - this.streamDataBlocks.add(streamDataBlock); - this.currStreamIndexTail = this.streamDataBlocks.size(); - return this; - } - - public List streamDataBlocks() { - return this.streamDataBlocks; - } - - public int totalStreamNum() { - return this.streamDataBlocks.stream().map(StreamDataBlock::getStreamId).collect(Collectors.toSet()).size(); - } - - public long currStreamBlockSize() { - return getCurrentStreamRangePositions().stream().mapToLong(StreamDataBlock::getBlockSize).sum(); - } - - public Set uniqueObjectIds() { - return this.streamDataBlocks.stream().map(StreamDataBlock::getObjectId).collect(Collectors.toSet()); - } - - public long totalBlockSize() { - return streamDataBlocks.stream().mapToLong(StreamDataBlock::getBlockSize).sum(); - } - - public void merge(CompactedObjectBuilder other) { - if (other.type == CompactionType.SPLIT) { - // cannot merge compacted object of split type as split strategy is determined when constructing compacted objects - return; - } - this.streamDataBlocks.addAll(other.streamDataBlocks); - resetCurrStreamPosition(); - } - - public CompactedObject build() { - return new CompactedObject(type, streamDataBlocks); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactionType.java b/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactionType.java deleted file mode 100644 index 17a4da96a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/objects/CompactionType.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.objects; - -public enum CompactionType { - COMPACT, - SPLIT, -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockReader.java b/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockReader.java deleted file mode 100644 index 8c9db2b63..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockReader.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
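CompactedObjectBuilder above tracks where the current stream's blocks start and end; the same contiguity rule underlies GroupByOffsetPredicate, which both force split and the readers below rely on. A sketch of that rule under an assumed Block shape: a group extends only while the stream id matches and the offsets are gap-free.

```java
import java.util.ArrayList;
import java.util.List;

public class OffsetGrouping {
    record Block(long streamId, long startOffset, long endOffset) { }

    static List<List<Block>> group(List<Block> blocks) {
        List<List<Block>> groups = new ArrayList<>();
        List<Block> curr = new ArrayList<>();
        for (Block b : blocks) {
            if (!curr.isEmpty()) {
                Block last = curr.get(curr.size() - 1);
                // a gap or a stream switch closes the current group
                if (last.streamId() != b.streamId() || last.endOffset() != b.startOffset()) {
                    groups.add(curr);
                    curr = new ArrayList<>();
                }
            }
            curr.add(b);
        }
        if (!curr.isEmpty()) {
            groups.add(curr);
        }
        return groups;
    }

    public static void main(String[] args) {
        System.out.println(group(List.of(
            new Block(1, 0, 10), new Block(1, 10, 20), new Block(1, 25, 30),
            new Block(2, 0, 5))).size()); // 3 groups
    }
}
```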
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.operator; - -import com.automq.stream.ByteBufSeqAlloc; -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.stats.CompactionStats; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.s3.operator.S3Operator; -import io.github.bucket4j.Bucket; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledExecutorService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.ByteBufAlloc.STREAM_SET_OBJECT_COMPACTION_READ; - -//TODO: refactor to reduce duplicate code with ObjectWriter -public class DataBlockReader { - private static final Logger LOGGER = LoggerFactory.getLogger(DataBlockReader.class); - private static final ByteBufSeqAlloc DIRECT_ALLOC = new ByteBufSeqAlloc(STREAM_SET_OBJECT_COMPACTION_READ, 1); - private final S3ObjectMetadata metadata; - private final String objectKey; - private final S3Operator s3Operator; - private final CompletableFuture> indexBlockCf = new CompletableFuture<>(); - private final Bucket throttleBucket; - private final ScheduledExecutorService bucketCallbackExecutor; - - public DataBlockReader(S3ObjectMetadata metadata, S3Operator s3Operator) { - this(metadata, s3Operator, null, null); - } - - public DataBlockReader(S3ObjectMetadata metadata, S3Operator s3Operator, Bucket throttleBucket, - ScheduledExecutorService bucketCallbackExecutor) { - this.metadata = metadata; - this.objectKey = metadata.key(); - this.s3Operator = s3Operator; - this.throttleBucket = throttleBucket; - if (this.throttleBucket != null) { - this.bucketCallbackExecutor = Objects.requireNonNull(bucketCallbackExecutor); - } else { - this.bucketCallbackExecutor = null; - } - } - - public CompletableFuture> getDataBlockIndex() { - return indexBlockCf; - } - - public void parseDataBlockIndex() { - // TODO: throttle level - @SuppressWarnings("resource") ObjectReader objectReader = new ObjectReader(metadata, s3Operator); - objectReader.basicObjectInfo().thenAccept(info -> { - List blocks = new ArrayList<>(info.indexBlock().count()); - Iterator it = info.indexBlock().iterator(); - while (it.hasNext()) { - blocks.add(new StreamDataBlock(metadata.objectId(), it.next())); - } - indexBlockCf.complete(blocks); - }).exceptionally(ex -> { - // unrecoverable error, possibly read on a deleted object - LOGGER.warn("object {} index parse fail", objectKey, ex); - indexBlockCf.completeExceptionally(ex); - return null; - }).whenComplete((nil, ex) -> objectReader.release()); - } - - public void readBlocks(List streamDataBlocks) { - readBlocks(streamDataBlocks, -1); - } - - public void readBlocks(List streamDataBlocks, long maxReadBatchSize) { - if (streamDataBlocks.isEmpty()) { - 
return; - } - int start = 0; - int end = 0; - long offset = -1; - // split streamDataBlocks to blocks with continuous offset - while (end < streamDataBlocks.size()) { - if (offset != -1 && streamDataBlocks.get(end).getBlockStartPosition() != offset) { - readContinuousBlocks(streamDataBlocks.subList(start, end), maxReadBatchSize); - start = end; - } - offset = streamDataBlocks.get(end).getBlockEndPosition(); - end++; - } - if (end > start) { - readContinuousBlocks(streamDataBlocks.subList(start, end), maxReadBatchSize); - } - } - - public void readContinuousBlocks(List streamDataBlocks, long maxReadBatchSize) { - long objectId = metadata.objectId(); - if (maxReadBatchSize <= 0) { - readContinuousBlocks0(streamDataBlocks); - return; - } - - long currentReadSize = 0; - int start = 0; - int end = 0; - while (end < streamDataBlocks.size()) { - currentReadSize += streamDataBlocks.get(end).getBlockSize(); - if (currentReadSize >= maxReadBatchSize) { - final int finalStart = start; - if (start == end) { - // split single data block to multiple read - long remainBytes = streamDataBlocks.get(end).getBlockSize(); - long startPosition = streamDataBlocks.get(end).getBlockStartPosition(); - long endPosition; - List> cfList = new ArrayList<>(); - Map bufferMap = new ConcurrentHashMap<>(); - int cnt = 0; - while (remainBytes > 0) { - long readSize = Math.min(remainBytes, maxReadBatchSize); - endPosition = startPosition + readSize; - final int finalCnt = cnt; - cfList.add(rangeRead(startPosition, endPosition).thenAccept(buf -> bufferMap.put(finalCnt, buf))); - remainBytes -= readSize; - startPosition += readSize; - cnt++; - } - final int iterations = cnt; - final int finalEnd = end + 1; // include current block - CompletableFuture.allOf(cfList.toArray(new CompletableFuture[0])) - .thenAccept(v -> { - CompositeByteBuf compositeByteBuf = ByteBufAlloc.compositeByteBuffer(); - for (int j = 0; j < iterations; j++) { - compositeByteBuf.addComponent(true, bufferMap.get(j)); - } - parseDataBlocks(compositeByteBuf, streamDataBlocks.subList(finalStart, finalEnd)); - }) - .exceptionally(ex -> { - LOGGER.error("read data from object {} failed", objectId, ex); - failDataBlocks(streamDataBlocks, ex); - return null; - }); - end++; - } else { - // read before current block - readContinuousBlocks0(streamDataBlocks.subList(start, end)); - } - start = end; - currentReadSize = 0; - } else { - end++; - } - } - if (start < end) { - readContinuousBlocks0(streamDataBlocks.subList(start, end)); - } - } - - private void readContinuousBlocks0(List streamDataBlocks) { - rangeRead(streamDataBlocks.get(0).getBlockStartPosition(), - streamDataBlocks.get(streamDataBlocks.size() - 1).getBlockEndPosition()) - .thenAccept(buf -> parseDataBlocks(buf, streamDataBlocks)) - .exceptionally(ex -> { - LOGGER.error("read data from object {} failed", metadata.objectId(), ex); - failDataBlocks(streamDataBlocks, ex); - return null; - }); - } - - private CompletableFuture rangeRead(long start, long end) { - return rangeRead0(start, end).whenComplete((ret, ex) -> { - if (ex == null) { - CompactionStats.getInstance().compactionReadSizeStats.add(MetricsLevel.INFO, ret.readableBytes()); - } - }); - } - - private CompletableFuture rangeRead0(long start, long end) { - if (throttleBucket == null) { - return s3Operator.rangeRead(objectKey, start, end, ThrottleStrategy.THROTTLE_2).thenApply(buf -> { - // convert heap buffer to direct buffer - ByteBuf directBuf = DIRECT_ALLOC.byteBuffer(buf.readableBytes()); - directBuf.writeBytes(buf); - buf.release(); - return 
directBuf; - }); - } else { - return throttleBucket.asScheduler().consume(end - start + 1, bucketCallbackExecutor) - .thenCompose(v -> - s3Operator.rangeRead(objectKey, start, end, ThrottleStrategy.THROTTLE_2).thenApply(buf -> { - // convert heap buffer to direct buffer - ByteBuf directBuf = DIRECT_ALLOC.byteBuffer(buf.readableBytes()); - directBuf.writeBytes(buf); - buf.release(); - return directBuf; - })); - } - } - - private void parseDataBlocks(ByteBuf buf, List streamDataBlocks) { - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - int blockSize = streamDataBlock.getBlockSize(); - ByteBuf blockBuf = buf.retainedSlice(buf.readerIndex(), blockSize); - buf.skipBytes(blockSize); - streamDataBlock.getDataCf().complete(blockBuf); - } - buf.release(); - } - - private void failDataBlocks(List streamDataBlocks, Throwable ex) { - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - streamDataBlock.getDataCf().completeExceptionally(ex); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockWriter.java b/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockWriter.java deleted file mode 100644 index bb082540d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/operator/DataBlockWriter.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.operator; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.s3.compact.utils.GroupByLimitPredicate; -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.stats.CompactionStats; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.operator.Writer; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; - -import static com.automq.stream.s3.ByteBufAlloc.STREAM_OBJECT_COMPACTION_WRITE; -import static com.automq.stream.s3.ByteBufAlloc.STREAM_SET_OBJECT_COMPACTION_READ; -import static com.automq.stream.s3.operator.Writer.MIN_PART_SIZE; - -//TODO: refactor to reduce duplicate code with ObjectWriter -public class DataBlockWriter { - private final int partSizeThreshold; - private final List waitingUploadBlocks; - private final Map> waitingUploadBlockCfs; - private final List completedBlocks; - private final Writer writer; - private final long objectId; - private IndexBlock indexBlock; - private long nextDataBlockPosition; - private long size; - - public DataBlockWriter(long objectId, S3Operator s3Operator, int partSizeThreshold) { - this.objectId = objectId; - String objectKey = ObjectUtils.genKey(0, objectId); - this.partSizeThreshold = Math.max(MIN_PART_SIZE, partSizeThreshold); - waitingUploadBlocks = new LinkedList<>(); - waitingUploadBlockCfs = new ConcurrentHashMap<>(); - completedBlocks = new 
LinkedList<>(); - writer = s3Operator.writer(new Writer.Context(STREAM_SET_OBJECT_COMPACTION_READ), objectKey, ThrottleStrategy.THROTTLE_2); - } - - public long getObjectId() { - return objectId; - } - - public void write(StreamDataBlock dataBlock) { - CompletableFuture cf = new CompletableFuture<>(); - cf.whenComplete((nil, ex) -> CompactionStats.getInstance().compactionWriteSizeStats.add(MetricsLevel.INFO, dataBlock.getBlockSize())); - waitingUploadBlockCfs.put(dataBlock, cf); - waitingUploadBlocks.add(dataBlock); - long waitingUploadSize = waitingUploadBlocks.stream().mapToLong(StreamDataBlock::getBlockSize).sum(); - if (waitingUploadSize >= partSizeThreshold) { - uploadWaitingList(); - } - } - - public CompletableFuture forceUpload() { - uploadWaitingList(); - writer.copyOnWrite(); - return CompletableFuture.allOf(waitingUploadBlockCfs.values().toArray(new CompletableFuture[0])); - } - - private void uploadWaitingList() { - CompositeByteBuf buf = groupWaitingBlocks(); - List blocks = new LinkedList<>(waitingUploadBlocks); - writer.write(buf).thenAccept(v -> { - for (StreamDataBlock block : blocks) { - waitingUploadBlockCfs.computeIfPresent(block, (k, cf) -> { - cf.complete(null); - return null; - }); - } - }); - if (writer.hasBatchingPart()) { - // prevent blocking on part that's waiting for batch when force upload waiting list - for (StreamDataBlock block : blocks) { - waitingUploadBlockCfs.computeIfPresent(block, (k, cf) -> { - cf.complete(null); - return null; - }); - } - } - waitingUploadBlocks.clear(); - } - - public CompletableFuture close() { - CompositeByteBuf buf = groupWaitingBlocks(); - List blocks = new LinkedList<>(waitingUploadBlocks); - waitingUploadBlocks.clear(); - indexBlock = new IndexBlock(); - buf.addComponent(true, indexBlock.buffer()); - Footer footer = new Footer(); - buf.addComponent(true, footer.buffer()); - writer.write(buf.duplicate()); - size = indexBlock.position() + indexBlock.size() + footer.size(); - return writer.close().thenAccept(nil -> { - for (StreamDataBlock block : blocks) { - waitingUploadBlockCfs.computeIfPresent(block, (k, cf) -> { - cf.complete(null); - return null; - }); - } - }); - } - - private CompositeByteBuf groupWaitingBlocks() { - CompositeByteBuf buf = ByteBufAlloc.compositeByteBuffer(); - for (StreamDataBlock block : waitingUploadBlocks) { - buf.addComponent(true, block.getDataCf().join()); - block.releaseRef(); - completedBlocks.add(block); - nextDataBlockPosition += block.getBlockSize(); - } - return buf; - } - - public CompletableFuture release() { - // release buffer that is batching for upload - return writer.release(); - } - - public long objectId() { - return objectId; - } - - public long size() { - return size; - } - - class IndexBlock { - private static final int DEFAULT_DATA_BLOCK_GROUP_SIZE_THRESHOLD = 1024 * 1024; // 1MiB - private final ByteBuf buf; - private final long position; - - public IndexBlock() { - position = nextDataBlockPosition; - - List dataBlockIndices = CompactionUtils.buildDataBlockIndicesFromGroup( - CompactionUtils.groupStreamDataBlocks(completedBlocks, new GroupByLimitPredicate(DEFAULT_DATA_BLOCK_GROUP_SIZE_THRESHOLD))); - buf = ByteBufAlloc.byteBuffer(dataBlockIndices.size() * DataBlockIndex.BLOCK_INDEX_SIZE, ByteBufAlloc.STREAM_SET_OBJECT_COMPACTION_WRITE); - for (DataBlockIndex dataBlockIndex : dataBlockIndices) { - dataBlockIndex.encode(buf); - } - } - - public ByteBuf buffer() { - return buf.duplicate(); - } - - public long position() { - return position; - } - - public int size() { - return 
buf.readableBytes(); - } - } - - class Footer { - private static final int FOOTER_SIZE = 48; - private static final long MAGIC = 0x88e241b785f4cff7L; - private final ByteBuf buf; - - public Footer() { - buf = ByteBufAlloc.byteBuffer(FOOTER_SIZE, STREAM_OBJECT_COMPACTION_WRITE); - buf.writeLong(indexBlock.position()); - buf.writeInt(indexBlock.size()); - buf.writeZero(40 - 8 - 4); - buf.writeLong(MAGIC); - } - - public ByteBuf buffer() { - return buf.duplicate(); - } - - public int size() { - return FOOTER_SIZE; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/CompactionUtils.java b/s3stream/src/main/java/com/automq/stream/s3/compact/utils/CompactionUtils.java deleted file mode 100644 index c2ad818b4..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/CompactionUtils.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.utils; - -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObjectBuilder; -import com.automq.stream.s3.compact.operator.DataBlockReader; -import com.automq.stream.s3.compact.operator.DataBlockWriter; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.operator.S3Operator; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.TreeMap; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.function.Predicate; -import java.util.stream.Collectors; -import org.slf4j.Logger; - -public class CompactionUtils { - public static List buildObjectStreamRange(List streamDataBlocks) { - List objectStreamRanges = new ArrayList<>(); - ObjectStreamRange currObjectStreamRange = null; - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - if (currObjectStreamRange == null) { - currObjectStreamRange = new ObjectStreamRange(streamDataBlock.getStreamId(), -1L, - streamDataBlock.getStartOffset(), streamDataBlock.getEndOffset(), streamDataBlock.getBlockSize()); - } else { - if (currObjectStreamRange.getStreamId() == streamDataBlock.getStreamId()) { - currObjectStreamRange.setEndOffset(streamDataBlock.getEndOffset()); - currObjectStreamRange.setSize(currObjectStreamRange.getSize() + streamDataBlock.getBlockSize()); - } else { - objectStreamRanges.add(currObjectStreamRange); - currObjectStreamRange = new ObjectStreamRange(streamDataBlock.getStreamId(), -1L, - streamDataBlock.getStartOffset(), streamDataBlock.getEndOffset(), streamDataBlock.getBlockSize()); - } - } - } - if (currObjectStreamRange != null) { - objectStreamRanges.add(currObjectStreamRange); - } - return objectStreamRanges; - } - - public static Map> blockWaitObjectIndices(List streamMetadataList, - List objectMetadataList, - S3Operator s3Operator) { - return blockWaitObjectIndices(streamMetadataList, objectMetadataList, s3Operator, null); - } - - public static Map> blockWaitObjectIndices(List streamMetadataList, 
- List objectMetadataList, - S3Operator s3Operator, - Logger logger) { - Map streamMetadataMap = streamMetadataList.stream() - .collect(Collectors.toMap(StreamMetadata::streamId, s -> s)); - Map>> objectStreamRangePositionFutures = new HashMap<>(); - for (S3ObjectMetadata objectMetadata : objectMetadataList) { - DataBlockReader dataBlockReader = new DataBlockReader(objectMetadata, s3Operator); - dataBlockReader.parseDataBlockIndex(); - objectStreamRangePositionFutures.put(objectMetadata.objectId(), dataBlockReader.getDataBlockIndex()); - } - return objectStreamRangePositionFutures.entrySet().stream() - .map(f -> { - try { - List streamDataBlocks = f.getValue().join(); - List validStreamDataBlocks = new ArrayList<>(); - // filter out invalid stream data blocks in case metadata is inconsistent with S3 index block - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - if (!streamMetadataMap.containsKey(streamDataBlock.getStreamId())) { - // non-existent stream - continue; - } - if (streamDataBlock.getEndOffset() <= streamMetadataMap.get(streamDataBlock.getStreamId()).startOffset()) { - // trimmed stream data block - continue; - } - validStreamDataBlocks.add(streamDataBlock); - } - return new AbstractMap.SimpleEntry<>(f.getKey(), validStreamDataBlocks); - } catch (Exception ex) { - // continue compaction without invalid object - if (logger != null) { - logger.warn("failed to get data block index for object {}", f.getKey(), ex); - } - return null; - } - }) - .filter(Objects::nonNull) - .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue)); - } - - /** - * Sort stream data blocks by stream id and start offset. - * - * @param streamDataBlocksMap stream data blocks map, key: object id, value: stream data blocks - * @return sorted stream data blocks - */ - public static List sortStreamRangePositions(Map> streamDataBlocksMap) { - //TODO: use merge sort - Map> sortedStreamObjectMap = new TreeMap<>(); - for (List streamDataBlocks : streamDataBlocksMap.values()) { - streamDataBlocks.forEach(e -> sortedStreamObjectMap.computeIfAbsent(e.getStreamId(), k -> new ArrayList<>()).add(e)); - } - return sortedStreamObjectMap.values().stream().flatMap(list -> { - list.sort(StreamDataBlock.STREAM_OFFSET_COMPARATOR); - return list.stream(); - }).collect(Collectors.toList()); - } - - /** - * Group stream data blocks by certain conditions.
- * - * @param streamDataBlocks stream data blocks to be grouped - * @param predicate the predicate to check whether a stream data block should be grouped with the previous one - * @return grouped stream data blocks - */ - public static List> groupStreamDataBlocks(List streamDataBlocks, - Predicate predicate) { - List> groupedStreamDataBlocks = new ArrayList<>(); - List currGroup = new ArrayList<>(); - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - if (predicate.test(streamDataBlock)) { - currGroup.add(streamDataBlock); - } else if (!currGroup.isEmpty()) { - groupedStreamDataBlocks.add(currGroup); - currGroup = new ArrayList<>(); - currGroup.add(streamDataBlock); - } - } - if (!currGroup.isEmpty()) { - groupedStreamDataBlocks.add(currGroup); - } - return groupedStreamDataBlocks; - } - - public static List buildObjectStreamRangeFromGroup( - List> streamDataBlockGroup) { - List objectStreamRanges = new ArrayList<>(); - - for (List streamDataBlocks : streamDataBlockGroup) { - if (streamDataBlocks.isEmpty()) { - continue; - } - objectStreamRanges.add(new ObjectStreamRange( - streamDataBlocks.get(0).getStreamId(), - -1L, - streamDataBlocks.get(0).getStartOffset(), - streamDataBlocks.get(streamDataBlocks.size() - 1).getEndOffset(), - streamDataBlocks.stream().mapToInt(StreamDataBlock::getBlockSize).sum())); - } - - return objectStreamRanges; - } - - public static List buildDataBlockIndicesFromGroup( - List> streamDataBlockGroup) { - List dataBlockIndices = new ArrayList<>(); - - long blockStartPosition = 0; - for (List streamDataBlocks : streamDataBlockGroup) { - if (streamDataBlocks.isEmpty()) { - continue; - } - dataBlockIndices.add(new DataBlockIndex( - streamDataBlocks.get(0).getStreamId(), - streamDataBlocks.get(0).getStartOffset(), - (int) (streamDataBlocks.get(streamDataBlocks.size() - 1).getEndOffset() - streamDataBlocks.get(0).getStartOffset()), - streamDataBlocks.stream().map(StreamDataBlock::dataBlockIndex).mapToInt(DataBlockIndex::recordCount).sum(), - blockStartPosition, - streamDataBlocks.stream().mapToInt(StreamDataBlock::getBlockSize).sum())); - blockStartPosition += streamDataBlocks.stream().mapToInt(StreamDataBlock::getBlockSize).sum(); - } - - return dataBlockIndices; - } - - public static int getTotalObjectStats(CompactedObjectBuilder o, Map objectStatsMap) { - int totalCompactedObjects = 0; - for (Long objectId : o.uniqueObjectIds()) { - totalCompactedObjects += objectStatsMap.get(objectId); - } - return totalCompactedObjects; - } - - public static CompletableFuture chainWriteDataBlock(DataBlockWriter dataBlockWriter, - List streamDataBlocks, ExecutorService executorService) { - CompletableFuture cf = null; - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - if (cf == null) { - cf = streamDataBlock.getDataCf().thenAcceptAsync(data -> dataBlockWriter.write(streamDataBlock), executorService); - } else { - cf = cf.thenCompose(nil -> streamDataBlock.getDataCf().thenAcceptAsync(data -> dataBlockWriter.write(streamDataBlock), executorService)); - } - } - return cf; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByLimitPredicate.java b/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByLimitPredicate.java deleted file mode 100644 index 70c77fe1d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByLimitPredicate.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
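The grouping helper above and the two predicates that follow share one contract: the predicate returns true while a block may join the current group, and false to close the group and start a new one with that block. A minimal, self-contained sketch of that contract, where `Block` is a hypothetical stand-in for `StreamDataBlock` (the real predicates also track size and record-count limits):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class GroupingSketch {
    record Block(long streamId, long startOffset, long endOffset) { }

    // Mirrors the removed GroupByOffsetPredicate: a group continues only while
    // the stream id is unchanged and offsets stay contiguous; returning false
    // tells the grouping loop to close the group and start a new one.
    static class Contiguous implements Predicate<Block> {
        private long streamId = -1;
        private long nextStart = 0;

        @Override
        public boolean test(Block b) {
            boolean joins = streamId == -1
                || (streamId == b.streamId() && nextStart == b.startOffset());
            streamId = b.streamId();
            nextStart = b.endOffset();
            return joins;
        }
    }

    // Same loop shape as the removed CompactionUtils.groupStreamDataBlocks.
    static List<List<Block>> group(List<Block> blocks, Predicate<Block> joinsCurrentGroup) {
        List<List<Block>> groups = new ArrayList<>();
        List<Block> curr = new ArrayList<>();
        for (Block b : blocks) {
            if (joinsCurrentGroup.test(b)) {
                curr.add(b);
            } else if (!curr.isEmpty()) {
                groups.add(curr);
                curr = new ArrayList<>();
                curr.add(b);
            }
        }
        if (!curr.isEmpty()) {
            groups.add(curr);
        }
        return groups;
    }

    public static void main(String[] args) {
        List<Block> blocks = List.of(
            new Block(1, 0, 10), new Block(1, 10, 20), // contiguous: same group
            new Block(1, 30, 40),                      // offset gap: new group
            new Block(2, 0, 5));                       // new stream: new group
        System.out.println(group(blocks, new Contiguous()).size()); // prints 3
    }
}
```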
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.utils; - -import com.automq.stream.s3.StreamDataBlock; -import java.util.function.Predicate; - -public class GroupByLimitPredicate implements Predicate { - private final long blockSizeThreshold; - private long streamId = -1; - private long startOffset = 0; - private long nextStartOffset = 0; - private int blockSize = 0; - private int recordCnt = 0; - - public GroupByLimitPredicate(long blockSizeThreshold) { - this.blockSizeThreshold = blockSizeThreshold; - } - - @Override - public boolean test(StreamDataBlock block) { - boolean flag = true; - if (streamId == -1 // first block - || block.getStreamId() != streamId // iterate to next stream - || block.getStartOffset() != nextStartOffset // block start offset is not continuous for same stream (unlikely to happen) - || (long) blockSize + block.getBlockSize() >= blockSizeThreshold // group size exceeds threshold - || (long) recordCnt + block.dataBlockIndex().recordCount() > Integer.MAX_VALUE // group record count exceeds int32 - || (block.getEndOffset() - startOffset) > Integer.MAX_VALUE) { // group delta offset exceeds int32 - - if (streamId != -1) { - flag = false; - } - - streamId = block.getStreamId(); - startOffset = block.getStartOffset(); - blockSize = 0; - recordCnt = 0; - } - - nextStartOffset = block.getEndOffset(); - blockSize += block.getBlockSize(); - recordCnt += block.dataBlockIndex().recordCount(); - return flag; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByOffsetPredicate.java b/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByOffsetPredicate.java deleted file mode 100644 index cd8bb59d9..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/compact/utils/GroupByOffsetPredicate.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact.utils; - -import com.automq.stream.s3.StreamDataBlock; -import java.util.function.Predicate; - -public class GroupByOffsetPredicate implements Predicate { - - private long currStreamId = -1; - private long nextStartOffset = 0; - - @Override - public boolean test(StreamDataBlock block) { - if (currStreamId == -1) { - currStreamId = block.getStreamId(); - nextStartOffset = block.getEndOffset(); - return true; - } else { - if (currStreamId == block.getStreamId() && nextStartOffset == block.getStartOffset()) { - nextStartOffset = block.getEndOffset(); - return true; - } else { - currStreamId = block.getStreamId(); - nextStartOffset = block.getEndOffset(); - return false; - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java b/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java deleted file mode 100644 index 04a77e1ea..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/context/AppendContext.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
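GroupByLimitPredicate closes a group just before the accumulated size would reach the threshold, and also caps the record count and offset delta at int32 range because DataBlockIndex encodes both as 32-bit fields. A worked sketch of the size rule alone, with assumed block sizes:

```java
public class LimitRuleSketch {
    public static void main(String[] args) {
        long threshold = 1024 * 1024;   // 1 MiB, the index block's default group threshold
        long[] blockSizes = {400_000, 400_000, 400_000};
        long groupSize = 0;
        int groupIndex = 0;
        for (long size : blockSizes) {
            if (groupSize + size >= threshold) { // group would reach the limit: close it
                groupIndex++;
                groupSize = 0;
            }
            groupSize += size;
        }
        // Blocks 1 and 2 share group 0 (800,000 bytes); block 3 starts group 1.
        System.out.println("last block landed in group " + groupIndex);
    }
}
```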
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.context; - -import com.automq.stream.s3.trace.context.TraceContext; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Context; - -public class AppendContext extends TraceContext { - public static final AppendContext DEFAULT = new AppendContext(); - - public AppendContext() { - super(false, null, null); - } - - public AppendContext(TraceContext context) { - super(context); - } - - public AppendContext(boolean isTraceEnabled, Tracer tracer, Context currentContext) { - super(isTraceEnabled, tracer, currentContext); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java b/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java deleted file mode 100644 index 902020adb..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/context/FetchContext.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.context; - -import com.automq.stream.api.ReadOptions; -import com.automq.stream.s3.trace.context.TraceContext; -import io.opentelemetry.api.trace.Tracer; -import io.opentelemetry.context.Context; - -public class FetchContext extends TraceContext { - public static final FetchContext DEFAULT = new FetchContext(); - private ReadOptions readOptions = ReadOptions.DEFAULT; - - public FetchContext() { - super(false, null, null); - } - - public FetchContext(TraceContext context) { - super(context); - } - - public FetchContext(boolean isTraceEnabled, Tracer tracer, Context currentContext) { - super(isTraceEnabled, tracer, currentContext); - } - - public ReadOptions readOptions() { - return readOptions; - } - - public void setReadOptions(ReadOptions readOptions) { - this.readOptions = readOptions; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/exceptions/IndexBlockParseException.java b/s3stream/src/main/java/com/automq/stream/s3/exceptions/IndexBlockParseException.java deleted file mode 100644 index fd9dd2ada..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/exceptions/IndexBlockParseException.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
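Both context types default to tracing off; the three-argument constructor threads an OpenTelemetry tracer and parent context through. A small sketch, assuming only standard OpenTelemetry APIs:

```java
import com.automq.stream.s3.context.AppendContext;
import com.automq.stream.s3.context.FetchContext;
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Context;

public class ContextSketch {
    public static void main(String[] args) {
        // Default contexts: tracing disabled, no tracer, no parent context.
        AppendContext append = AppendContext.DEFAULT;
        FetchContext fetch = FetchContext.DEFAULT;

        // Traced variant: carry the current OTel context through a fetch.
        Tracer tracer = GlobalOpenTelemetry.getTracer("s3stream-sketch");
        FetchContext traced = new FetchContext(true, tracer, Context.current());
        System.out.println(traced.readOptions() != null); // defaults to ReadOptions.DEFAULT
    }
}
```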
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.exceptions; - -public class IndexBlockParseException extends Exception { - private final long indexBlockPosition; - - public IndexBlockParseException(long indexBlockPosition) { - this.indexBlockPosition = indexBlockPosition; - } - - public long getIndexBlockPosition() { - return indexBlockPosition; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/failover/Failover.java b/s3stream/src/main/java/com/automq/stream/s3/failover/Failover.java deleted file mode 100644 index d540690ba..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/failover/Failover.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.failover; - -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.s3.wal.BlockWALService; -import com.automq.stream.s3.wal.WALMetadata; -import com.automq.stream.s3.wal.WALNotInitializedException; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.LogContext; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.Constants.NOOP_EPOCH; -import static com.automq.stream.s3.Constants.NOOP_NODE_ID; - -/** - * To perform a Delta WAL failover, follow these steps: - * 1. Ensure the old node stops writing to the delta WAL. - * 2. Instruct the controller to reject all requests sent by the old node. - * 3. Upload the delta WAL to S3. - * 4. Lastly, close any streams that were opened by the old node and are currently active. 
- */ -public class Failover { - private static final Logger LOGGER = LoggerFactory.getLogger(Failover.class); - private final ExecutorService executor = Threads.newFixedThreadPool(1, ThreadUtils.createThreadFactory("wal-failover-%d", true), LOGGER); - private final FailoverFactory factory; - private final WALRecover walRecover; - - public Failover(FailoverFactory factory, WALRecover walRecover) { - this.factory = factory; - this.walRecover = walRecover; - } - - public CompletableFuture failover(FailoverRequest request) { - CompletableFuture cf = new CompletableFuture<>(); - executor.submit(() -> FutureUtil.exec(() -> { - try { - cf.complete(new FailoverTask(request).failover()); - } catch (Throwable e) { - LOGGER.error("failover {} fail", request, e); - cf.completeExceptionally(e); - } - }, cf, LOGGER, "failover")); - return cf; - } - - class FailoverTask { - private final FailoverRequest request; - private int nodeId = NOOP_NODE_ID; - private long epoch = NOOP_EPOCH; - - public FailoverTask(FailoverRequest request) { - this.request = request; - } - - public FailoverResponse failover() throws Throwable { - LOGGER.info("failover start {}", request); - FailoverResponse resp = new FailoverResponse(); - resp.setNodeId(request.getNodeId()); - // fence the device to ensure the old node stops writing to the delta WAL - // recover WAL data and upload to S3 - BlockWALService wal = BlockWALService.recoveryBuilder(request.getDevice()).build(); - try { - wal.start(); - } catch (WALNotInitializedException ex) { - LOGGER.info("fail over empty wal {}", request); - return resp; - } - try { - WALMetadata metadata = wal.metadata(); - this.nodeId = metadata.nodeId(); - this.epoch = metadata.epoch(); - if (nodeId != request.getNodeId()) { - throw new IllegalArgumentException(String.format("nodeId mismatch, request=%s, wal=%s", request, metadata)); - } - resp.setNodeId(nodeId); - resp.setEpoch(epoch); - Logger taskLogger = new LogContext(String.format("[Failover nodeId=%s epoch=%s]", nodeId, epoch)).logger(FailoverTask.class); - StreamManager streamManager = factory.getStreamManager(nodeId, epoch); - ObjectManager objectManager = factory.getObjectManager(nodeId, epoch); - LOGGER.info("failover recover {}", request); - walRecover.recover(wal, streamManager, objectManager, taskLogger); - } finally { - wal.shutdownGracefully(); - } - LOGGER.info("failover done {}", request); - return resp; - } - - @Override - public String toString() { - return "FailoverTask{" + - "request=" + request + - ", nodeId=" + nodeId + - ", epoch=" + epoch + - '}'; - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverFactory.java b/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverFactory.java deleted file mode 100644 index 77e793047..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverFactory.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
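A minimal driver sketch for the four-step flow documented above. The factory and WAL-recovery hook are assumed to be supplied by the host system, and the helper name here is illustrative only:

```java
import com.automq.stream.s3.failover.Failover;
import com.automq.stream.s3.failover.FailoverFactory;
import com.automq.stream.s3.failover.FailoverRequest;
import com.automq.stream.s3.failover.FailoverResponse;
import com.automq.stream.s3.failover.WALRecover;
import java.util.concurrent.CompletableFuture;

public class FailoverDriver {
    // Hypothetical helper: take over one failed node's delta WAL device.
    static CompletableFuture<FailoverResponse> failoverDevice(
        FailoverFactory factory, WALRecover walRecover, int nodeId, String device) {
        FailoverRequest request = new FailoverRequest();
        request.setNodeId(nodeId);   // node whose delta WAL is being taken over
        request.setDevice(device);   // block device holding that node's WAL
        return new Failover(factory, walRecover).failover(request);
    }
}
```

The response echoes the node id and epoch read back from the WAL header, which is how a caller can verify it fenced and recovered the intended node.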
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.failover; - -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.streams.StreamManager; - -public interface FailoverFactory { - - StreamManager getStreamManager(int nodeId, long epoch); - - ObjectManager getObjectManager(int nodeId, long epoch); - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverRequest.java b/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverRequest.java deleted file mode 100644 index 0f5cbd70e..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverRequest.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.failover; - -public class FailoverRequest { - private int nodeId; - private String volumeId; - private String device; - - public int getNodeId() { - return nodeId; - } - - public void setNodeId(int nodeId) { - this.nodeId = nodeId; - } - - public String getVolumeId() { - return volumeId; - } - - public void setVolumeId(String volumeId) { - this.volumeId = volumeId; - } - - public String getDevice() { - return device; - } - - public void setDevice(String device) { - this.device = device; - } - - @Override - public String toString() { - return "FailoverRequest{" + - "nodeId=" + nodeId + - ", volumeId='" + volumeId + '\'' + - ", device='" + device + '\'' + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverResponse.java b/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverResponse.java deleted file mode 100644 index 3e2a01c9d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/failover/FailoverResponse.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.failover; - -public class FailoverResponse { - private int nodeId; - private long epoch; - - public int getNodeId() { - return nodeId; - } - - public void setNodeId(int nodeId) { - this.nodeId = nodeId; - } - - public long getEpoch() { - return epoch; - } - - public void setEpoch(long epoch) { - this.epoch = epoch; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/failover/WALRecover.java b/s3stream/src/main/java/com/automq/stream/s3/failover/WALRecover.java deleted file mode 100644 index 80c9f6751..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/failover/WALRecover.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
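The factory SPI pairs naturally with the MemoryMetadataManager defined next, which implements both manager interfaces. A sketch that skips fencing entirely (a production factory would validate nodeId and epoch before handing out managers):

```java
import com.automq.stream.s3.failover.FailoverFactory;
import com.automq.stream.s3.memory.MemoryMetadataManager;
import com.automq.stream.s3.objects.ObjectManager;
import com.automq.stream.s3.streams.StreamManager;

public class InMemoryFailoverFactory implements FailoverFactory {
    private final MemoryMetadataManager manager = new MemoryMetadataManager();

    @Override
    public StreamManager getStreamManager(int nodeId, long epoch) {
        return manager; // sketch only: no epoch fencing
    }

    @Override
    public ObjectManager getObjectManager(int nodeId, long epoch) {
        return manager;
    }
}
```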
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.s3.failover; - -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.s3.wal.WriteAheadLog; -import org.slf4j.Logger; - -public interface WALRecover { - void recover(WriteAheadLog deltaWAL, StreamManager streamManager, ObjectManager objectManager, Logger logger); -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/memory/MemoryMetadataManager.java b/s3stream/src/main/java/com/automq/stream/s3/memory/MemoryMetadataManager.java deleted file mode 100644 index 911ae53b9..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/memory/MemoryMetadataManager.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.memory; - -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamOffsetRange; -import com.automq.stream.s3.metadata.StreamState; -import com.automq.stream.s3.objects.CommitStreamSetObjectRequest; -import com.automq.stream.s3.objects.CommitStreamSetObjectResponse; -import com.automq.stream.s3.objects.CompactStreamObjectRequest; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.streams.StreamManager; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import org.apache.commons.lang3.tuple.Pair; - -public class MemoryMetadataManager implements StreamManager, ObjectManager { - private final static AtomicLong NODE_ID_ALLOC = new AtomicLong(); - - // Data structure of stream metadata - private final AtomicLong streamIdAlloc = new AtomicLong(); - private final ConcurrentMap streams = new ConcurrentHashMap<>(); - - // Data structure of object metadata - private final AtomicLong objectIdAlloc = new AtomicLong(); - private final ConcurrentMap> streamObjects = new ConcurrentHashMap<>(); - private final ConcurrentMap> streamSetObjects = new ConcurrentHashMap<>(); - - public static void advanceNodeId() { - NODE_ID_ALLOC.getAndIncrement(); - } - - private static StreamOffsetRange to(ObjectStreamRange s) { - return new StreamOffsetRange(s.getStreamId(), s.getStartOffset(), s.getEndOffset()); - } - - @Override - public synchronized CompletableFuture prepareObject(int count, long ttl) { - return CompletableFuture.completedFuture(objectIdAlloc.getAndAdd(count)); - } - - @Override - public synchronized CompletableFuture 
commitStreamSetObject( - CommitStreamSetObjectRequest request) { - long dataTimeInMs = System.currentTimeMillis(); - if (!request.getCompactedObjectIds().isEmpty()) { - for (long id : request.getCompactedObjectIds()) { - dataTimeInMs = Math.min(streamSetObjects.get(id).getRight().dataTimeInMs(), dataTimeInMs); - streamSetObjects.remove(id); - } - } - long now = System.currentTimeMillis(); - if (request.getObjectId() != ObjectUtils.NOOP_OBJECT_ID) { - for (ObjectStreamRange range : request.getStreamRanges()) { - StreamMetadata stream = streams.get(range.getStreamId()); - assert stream != null; - if (request.getCompactedObjectIds().isEmpty()) { - // Commit new object. - if (stream.endOffset() != range.getStartOffset()) { - throw new IllegalArgumentException("stream " + range.getStreamId() + " end offset " + stream.endOffset() + " is not equal to start offset of request " + range.getStartOffset()); - } - stream.endOffset(range.getEndOffset()); - } else { - // Compact old object. - if (stream.endOffset() < range.getEndOffset()) { - throw new IllegalArgumentException("stream " + range.getStreamId() + " end offset " + stream.endOffset() + " is less than request " + range.getEndOffset()); - } - if (stream.startOffset() > range.getStartOffset()) { - throw new IllegalArgumentException("stream " + range.getStreamId() + " start offset " + stream.startOffset() + " is greater than request " + range.getStartOffset()); - } - } - } - - S3ObjectMetadata object = new S3ObjectMetadata( - request.getObjectId(), S3ObjectType.STREAM_SET, request.getStreamRanges().stream().map(MemoryMetadataManager::to).collect(Collectors.toList()), - dataTimeInMs, now, request.getObjectSize(), request.getOrderId()); - streamSetObjects.put(request.getObjectId(), Pair.of(NODE_ID_ALLOC.get(), object)); - } - - for (StreamObject streamObject : request.getStreamObjects()) { - long streamId = streamObject.getStreamId(); - StreamMetadata stream = streams.get(streamId); - assert stream != null; - if (request.getCompactedObjectIds().isEmpty()) { - // Commit new object. - if (stream.endOffset() != streamObject.getStartOffset()) { - throw new IllegalArgumentException("stream " + streamObject.getStreamId() + " end offset " + stream.endOffset() + " is not equal to start offset of request " + streamObject.getStartOffset()); - } - stream.endOffset(streamObject.getEndOffset()); - } else { - // Compact old object. - if (stream.endOffset() < streamObject.getEndOffset()) { - throw new IllegalArgumentException("stream " + streamObject.getStreamId() + " end offset " + stream.endOffset() + " is less than request " + streamObject.getEndOffset()); - } - if (stream.startOffset() > streamObject.getStartOffset()) { - throw new IllegalArgumentException("stream " + streamObject.getStreamId() + " start offset " + stream.startOffset() + " is greater than request " + streamObject.getStartOffset()); - } - } - - List metadataList = streamObjects.computeIfAbsent(streamId, id -> new LinkedList<>()); - metadataList.add( - new S3ObjectMetadata( - streamObject.getObjectId(), S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, streamObject.getStartOffset(), streamObject.getEndOffset())), - dataTimeInMs, now, streamObject.getObjectSize(), 0 - ) - ); - } - request.getCompactedObjectIds().forEach(streamSetObjects::remove); - return CompletableFuture.completedFuture(new CommitStreamSetObjectResponse()); - } - - @Override - public synchronized CompletableFuture compactStreamObject(CompactStreamObjectRequest request) { - long streamId = request.getStreamId(); - StreamMetadata stream = streams.get(streamId); - assert stream != null; - if (stream.epoch() != request.getStreamEpoch()) { - throw new IllegalArgumentException("stream " + streamId + " epoch " + stream.epoch() + " is not equal to request " + request.getStreamEpoch()); - } - if (stream.endOffset() < request.getEndOffset()) { - throw new IllegalArgumentException("stream " + streamId + " end offset " + stream.endOffset() + " is less than request " + request.getEndOffset()); - } - if (stream.startOffset() > request.getStartOffset()) { - throw new IllegalArgumentException("stream " + streamId + " start offset " + stream.startOffset() + " is greater than request " + request.getStartOffset()); - } - - streamObjects.computeIfAbsent(streamId, id -> new LinkedList<>()) - .add(new S3ObjectMetadata( - request.getObjectId(), S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, request.getStartOffset(), request.getEndOffset())), - System.currentTimeMillis(), System.currentTimeMillis(), request.getObjectSize(), 0 - )); - - HashSet idSet = new HashSet<>(request.getSourceObjectIds()); - streamObjects.get(streamId).removeIf(metadata -> idSet.contains(metadata.objectId())); - return CompletableFuture.completedFuture(null); - } - - @Override - public synchronized CompletableFuture> getObjects(long streamId, long startOffset, - long endOffset, int limit) { - List streamSetObjectList = streamSetObjects.values() - .stream() - .map(Pair::getRight) - .filter(o -> o.getOffsetRanges().stream().anyMatch(r -> r.streamId() == streamId && r.endOffset() > startOffset && (r.startOffset() < endOffset || endOffset == -1))) - .collect(Collectors.toList()); - List streamObjectList = streamObjects.computeIfAbsent(streamId, id -> new LinkedList<>()) - .stream() - .filter(o -> o.getOffsetRanges().stream().anyMatch(r -> r.streamId() == streamId && r.endOffset() > startOffset && (r.startOffset() < endOffset || endOffset == -1))) - .collect(Collectors.toList()); - - List result = new ArrayList<>(); - result.addAll(streamSetObjectList); - result.addAll(streamObjectList); - result.sort((o1, o2) -> { - long startOffset1 = o1.getOffsetRanges().stream().filter(r -> r.streamId() == streamId).findFirst().get().startOffset(); - long startOffset2 = o2.getOffsetRanges().stream().filter(r -> r.streamId() == streamId).findFirst().get().startOffset(); - return Long.compare(startOffset1, startOffset2); -
}); - - return CompletableFuture.completedFuture(result.stream().limit(limit).collect(Collectors.toList())); - } - - @Override - public synchronized CompletableFuture> getServerObjects() { - List result = streamSetObjects.values() - .stream() - .filter(pair -> pair.getLeft() == NODE_ID_ALLOC.get()) - .map(Pair::getRight).collect(Collectors.toList()); - return CompletableFuture.completedFuture(result); - } - - @Override - public synchronized CompletableFuture> getStreamObjects(long streamId, long startOffset, - long endOffset, int limit) { - List streamObjectList = streamObjects.computeIfAbsent(streamId, id -> new LinkedList<>()) - .stream() - .filter(o -> o.getOffsetRanges().stream().anyMatch(r -> r.streamId() == streamId && r.endOffset() > startOffset && (r.startOffset() < endOffset || endOffset == -1))) - .limit(limit) - .collect(Collectors.toList()); - return CompletableFuture.completedFuture(streamObjectList); - } - - @Override - public synchronized CompletableFuture> getOpeningStreams() { - return CompletableFuture.completedFuture(streams.values().stream().filter(stream -> stream.state() == StreamState.OPENED).collect(Collectors.toList())); - } - - @Override - public CompletableFuture> getStreams(List streamIds) { - return CompletableFuture.completedFuture(streamIds.stream().map(streams::get).filter(Objects::nonNull).collect(Collectors.toList())); - } - - @Override - public synchronized CompletableFuture createStream() { - long streamId = streamIdAlloc.getAndIncrement(); - streams.put(streamId, new StreamMetadata(streamId, -1, 0, 0, StreamState.CLOSED)); - return CompletableFuture.completedFuture(streamId); - } - - @Override - public synchronized CompletableFuture openStream(long streamId, long epoch) { - StreamMetadata stream = streams.get(streamId); - if (stream == null) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " not found")); - } - if (stream.state() == StreamState.OPENED) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " has been opened")); - } - if (stream.epoch() >= epoch) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " epoch " + epoch + " is not newer than current epoch " + stream.epoch())); - } - stream.epoch(epoch); - stream.state(StreamState.OPENED); - return CompletableFuture.completedFuture(stream); - } - - @Override - public synchronized CompletableFuture trimStream(long streamId, long epoch, long newStartOffset) { - StreamMetadata stream = streams.get(streamId); - if (stream == null) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " not found")); - } - if (stream.state() != StreamState.OPENED) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " is not opened")); - } - if (stream.epoch() != epoch) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " epoch " + epoch + " is not equal to current epoch " + stream.epoch())); - } - if (newStartOffset < stream.startOffset()) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " new start offset " + newStartOffset + " is less than current start offset " + stream.startOffset())); - } - if (newStartOffset > stream.endOffset()) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " new start offset " + newStartOffset + " is greater than current end offset " + 
stream.endOffset())); - } - stream.startOffset(newStartOffset); - return CompletableFuture.completedFuture(null); - } - - @Override - public synchronized CompletableFuture closeStream(long streamId, long epoch) { - StreamMetadata stream = streams.get(streamId); - if (stream == null) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " not found")); - } - if (stream.state() != StreamState.OPENED) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " is not opened")); - } - if (stream.epoch() != epoch) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " epoch " + epoch + " is not equal to current epoch " + stream.epoch())); - } - stream.state(StreamState.CLOSED); - return CompletableFuture.completedFuture(null); - } - - @Override - public synchronized CompletableFuture deleteStream(long streamId, long epoch) { - StreamMetadata stream = streams.get(streamId); - if (stream == null) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " not found")); - } - if (stream.state() != StreamState.CLOSED) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " is not closed")); - } - if (stream.epoch() != epoch) { - return CompletableFuture.failedFuture(new IllegalArgumentException("stream " + streamId + " epoch " + epoch + " is not equal to current epoch " + stream.epoch())); - } - streams.remove(streamId); - return CompletableFuture.completedFuture(null); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/ObjectUtils.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/ObjectUtils.java deleted file mode 100644 index 920fcf89f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/ObjectUtils.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
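A usage sketch of the in-memory manager above, exercising the stream lifecycle it enforces: createStream starts a stream CLOSED with epoch -1, openStream requires a strictly newer epoch, and deleteStream only accepts CLOSED streams:

```java
import com.automq.stream.s3.memory.MemoryMetadataManager;
import com.automq.stream.s3.metadata.StreamMetadata;

public class MemoryManagerSketch {
    public static void main(String[] args) {
        MemoryMetadataManager manager = new MemoryMetadataManager();
        long streamId = manager.createStream().join();                  // CLOSED, epoch -1
        StreamMetadata stream = manager.openStream(streamId, 0).join(); // epoch 0 > -1, so OPENED
        System.out.println(stream);
        manager.closeStream(streamId, 0).join();                        // epoch must match
        manager.deleteStream(streamId, 0).join();                       // only CLOSED streams delete
    }
}
```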
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -public class ObjectUtils { - public static final long NOOP_OBJECT_ID = -1L; - public static final long NOOP_OFFSET = -1L; - private static String namespace = "DEFAULT"; - - public static void setNamespace(String namespace) { - ObjectUtils.namespace = namespace; - } - - public static void main(String[] args) { - System.out.printf("%s%n", genKey(0, 11154)); - } - - public static String genKey(int version, long objectId) { - if (namespace.isEmpty()) { - throw new IllegalStateException("NAMESPACE is not set"); - } - return genKey(version, namespace, objectId); - } - - public static String genKey(int version, String namespace, long objectId) { - if (version == 0) { - String objectIdHex = String.format("%08x", objectId); - String hashPrefix = new StringBuilder(objectIdHex).reverse().toString(); - return hashPrefix + "/" + namespace + "/" + objectId; - } else { - throw new UnsupportedOperationException("Unsupported version: " + version); - } - } - - public static long parseObjectId(int version, String key) { - if (namespace.isEmpty()) { - throw new IllegalStateException("NAMESPACE is not set"); - } - return parseObjectId(version, key, namespace); - } - - public static long parseObjectId(int version, String key, String namespace) { - if (version == 0) { - String[] parts = key.split("/"); - if (parts.length != 3) { - throw new IllegalArgumentException("Invalid key: " + key); - } - return Long.parseLong(parts[2]); - } else { - throw new UnsupportedOperationException("Unsupported version: " + version); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectMetadata.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectMetadata.java deleted file mode 100644 index 57118e276..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectMetadata.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -public class S3ObjectMetadata { - - private final long objectId; - - /** - * order id of the object. - *
- * <ul> - * <li>stream set object: order id of the stream set object.</li> - * <li>STREAM object: meaningless.</li> - * </ul> - */ - private final long orderId; - private final S3ObjectType type; - /** - * stream offset ranges of the object. - * <ul> - * <li>stream set object: one or more stream offset ranges.</li> - * <li>STREAM object: only one stream offset range.</li> - * </ul>
- */ - private final List offsetRanges; - /** - * logical timestamp in ms of the data in the object. - */ - private final long dataTimeInMs; - private long objectSize; - /** - * real committed timestamp of the data in the object. - */ - private long committedTimestamp; - - // Only used for testing - public S3ObjectMetadata(long objectId, long objectSize, S3ObjectType type) { - this(objectId, type, Collections.emptyList(), S3StreamConstant.INVALID_TS, S3StreamConstant.INVALID_TS, objectSize, - S3StreamConstant.INVALID_ORDER_ID); - } - - public S3ObjectMetadata(long objectId, S3ObjectType type, List offsetRanges, long dataTimeInMs) { - this(objectId, type, offsetRanges, dataTimeInMs, S3StreamConstant.INVALID_TS, S3StreamConstant.INVALID_OBJECT_SIZE, - S3StreamConstant.INVALID_ORDER_ID); - } - - public S3ObjectMetadata(long objectId, S3ObjectType type, List offsetRanges, long dataTimeInMs, - long orderId) { - this(objectId, type, offsetRanges, dataTimeInMs, S3StreamConstant.INVALID_TS, S3StreamConstant.INVALID_OBJECT_SIZE, - orderId); - } - - public S3ObjectMetadata( - // these four params come from S3StreamSetObject or S3StreamObject - long objectId, S3ObjectType type, List offsetRanges, long dataTimeInMs, - // these two params come from S3Object - long committedTimestamp, long objectSize, - // this param only comes from S3StreamSetObject - long orderId) { - this.objectId = objectId; - this.orderId = orderId; - this.objectSize = objectSize; - this.type = type; - this.offsetRanges = offsetRanges; - this.dataTimeInMs = dataTimeInMs; - this.committedTimestamp = committedTimestamp; - } - - public void setObjectSize(long objectSize) { - this.objectSize = objectSize; - } - - public void setCommittedTimestamp(long committedTimestamp) { - this.committedTimestamp = committedTimestamp; - } - - public long objectId() { - return objectId; - } - - public long objectSize() { - return objectSize; - } - - public S3ObjectType getType() { - return type; - } - - public long getOrderId() { - return orderId; - } - - public long committedTimestamp() { - return committedTimestamp; - } - - public long dataTimeInMs() { - return dataTimeInMs; - } - - public List getOffsetRanges() { - return offsetRanges; - } - - public long startOffset() { - if (offsetRanges == null || offsetRanges.isEmpty()) { - return S3StreamConstant.INVALID_OFFSET; - } - return offsetRanges.get(0).startOffset(); - } - - public long endOffset() { - if (offsetRanges == null || offsetRanges.isEmpty()) { - return S3StreamConstant.INVALID_OFFSET; - } - return offsetRanges.get(offsetRanges.size() - 1).endOffset(); - } - - public boolean intersect(long streamId, long startOffset, long endOffset) { - if (offsetRanges == null || offsetRanges.isEmpty()) { - return false; - } - for (StreamOffsetRange offsetRange : offsetRanges) { - if (offsetRange.streamId() == streamId && offsetRange.intersect(startOffset, endOffset)) { - return true; - } - } - return false; - } - - public String toString() { - return "S3ObjectMetadata(objectId=" + objectId + ", objectSize=" + objectSize + ", type=" + type + ", offsetRanges=" + offsetRanges - + ", committedTimestamp=" + committedTimestamp + ", dataTimestamp=" + dataTimeInMs + ")"; - } - - public String key() { - return ObjectUtils.genKey(0, objectId); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - S3ObjectMetadata that = (S3ObjectMetadata) o; - return objectId == that.objectId && orderId == that.orderId && 
objectSize == that.objectSize && committedTimestamp == that.committedTimestamp - && dataTimeInMs == that.dataTimeInMs && type == that.type && offsetRanges.equals(that.offsetRanges); - } - - @Override - public int hashCode() { - return Objects.hash(objectId, orderId, objectSize, type, offsetRanges, committedTimestamp, dataTimeInMs); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectType.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectType.java deleted file mode 100644 index 9fe012276..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3ObjectType.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -public enum S3ObjectType { - /** - * STREAM_SET object which contains multiple streams' records - */ - STREAM_SET, - - /** - * STREAM object which only contains one stream's records. - */ - STREAM, - - /** - * UNKNOWN object type - */ - UNKNOWN; - - public static S3ObjectType fromByte(Byte b) { - int ordinal = b.intValue(); - if (ordinal < 0 || ordinal >= values().length) { - return UNKNOWN; - } - return values()[ordinal]; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3StreamConstant.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/S3StreamConstant.java deleted file mode 100644 index e64d9d783..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/S3StreamConstant.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -public class S3StreamConstant { - - public static final long INIT_EPOCH = -1L; - - public static final int INIT_RANGE_INDEX = -1; - - public static final long INIT_START_OFFSET = 0L; - - public static final long INIT_END_OFFSET = 0L; - - public static final long INVALID_STREAM_ID = -1L; - - public static final long INVALID_OBJECT_ID = -1L; - - public static final long INVALID_OFFSET = -1L; - - public static final int INVALID_BROKER_ID = -1; - - public static final long MAX_OBJECT_ID = Long.MAX_VALUE; - - public static final long INVALID_ORDER_ID = -1L; - - public static final long INVALID_TS = -1L; - - public static final long INVALID_OBJECT_SIZE = -1L; - - public static final long INVALID_BROKER_EPOCH = -1L; - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamMetadata.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamMetadata.java deleted file mode 100644 index c09d0c64d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamMetadata.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
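A worked example of the version-0 key layout behind key() and ObjectUtils.genKey above: the object id is formatted as eight hex digits and reversed, so that keys spread evenly across the S3 key space. For object id 11154 (hex 00002b92):

```java
import com.automq.stream.s3.metadata.ObjectUtils;

public class KeySketch {
    public static void main(String[] args) {
        // 11154 -> "00002b92" -> reversed prefix "29b20000"
        System.out.println(ObjectUtils.genKey(0, "DEFAULT", 11154));
        // prints: 29b20000/DEFAULT/11154
        System.out.println(ObjectUtils.parseObjectId(0, "29b20000/DEFAULT/11154", "DEFAULT"));
        // prints: 11154 (the id is the third path segment)
    }
}
```

The reversed-hex prefix is a common object-store technique: sequential ids would otherwise hash to the same partition and throttle under load.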
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -public class StreamMetadata { - private long streamId; - private long epoch; - private long startOffset; - private long endOffset; - private StreamState state; - - @SuppressWarnings("unused") - public StreamMetadata() { - } - - public StreamMetadata(long streamId, long epoch, long startOffset, long endOffset, StreamState state) { - this.streamId = streamId; - this.epoch = epoch; - this.startOffset = startOffset; - this.endOffset = endOffset; - this.state = state; - } - - public long streamId() { - return streamId; - } - - public void streamId(long streamId) { - this.streamId = streamId; - } - - public long epoch() { - return epoch; - } - - public void epoch(long epoch) { - this.epoch = epoch; - } - - public long startOffset() { - return startOffset; - } - - public void startOffset(long startOffset) { - this.startOffset = startOffset; - } - - public long endOffset() { - return endOffset; - } - - public void endOffset(long endOffset) { - this.endOffset = endOffset; - } - - public StreamState state() { - return state; - } - - public void state(StreamState state) { - this.state = state; - } - - @Override - public String toString() { - return "StreamMetadata{" + - "streamId=" + streamId + - ", epoch=" + epoch + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - ", state=" + state + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamOffsetRange.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamOffsetRange.java deleted file mode 100644 index e07dedf39..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamOffsetRange.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -import java.util.Objects; - -/** - * StreamOffsetRange represents [startOffset, endOffset) in the stream. 
- */ -public class StreamOffsetRange implements Comparable { - - public static final StreamOffsetRange INVALID = new StreamOffsetRange(S3StreamConstant.INVALID_STREAM_ID, - S3StreamConstant.INVALID_OFFSET, S3StreamConstant.INVALID_OFFSET); - - private final long streamId; - - private final long startOffset; - - private final long endOffset; - - public StreamOffsetRange(long streamId, long startOffset, long endOffset) { - this.streamId = streamId; - this.startOffset = startOffset; - this.endOffset = endOffset; - } - - public long streamId() { - return streamId; - } - - public long startOffset() { - return startOffset; - } - - public long endOffset() { - return endOffset; - } - - public boolean intersect(long startOffset, long endOffset) { - return startOffset <= endOffset - && startOffset >= this.startOffset && startOffset <= this.endOffset - && endOffset <= this.endOffset; - } - - @Override - public int compareTo(StreamOffsetRange o) { - int res = Long.compare(this.streamId, o.streamId); - if (res != 0) - return res; - res = Long.compare(this.startOffset, o.startOffset); - return res == 0 ? Long.compare(this.endOffset, o.endOffset) : res; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - StreamOffsetRange that = (StreamOffsetRange) o; - return streamId == that.streamId && startOffset == that.startOffset && endOffset == that.endOffset; - } - - @Override - public int hashCode() { - return Objects.hash(streamId, startOffset, endOffset); - } - - @Override - public String toString() { - return "StreamOffsetRange(streamId=" + streamId + ", startOffset=" + startOffset + ", endOffset=" + endOffset + ")"; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamState.java b/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamState.java deleted file mode 100644 index 70bf44544..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metadata/StreamState.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metadata; - -public enum StreamState { - CLOSED, - OPENED; - - public static StreamState fromByte(byte b) { - return values()[b]; - } - - public byte toByte() { - return (byte) ordinal(); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/AttributesUtils.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/AttributesUtils.java deleted file mode 100644 index e4583d978..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/AttributesUtils.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
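Note that intersect(), as written in the removed class, effectively tests containment of the queried range within this range rather than true overlap. A quick check against a [100, 200) range:

```java
import com.automq.stream.s3.metadata.StreamOffsetRange;

public class RangeSketch {
    public static void main(String[] args) {
        StreamOffsetRange range = new StreamOffsetRange(1L, 100L, 200L);
        System.out.println(range.intersect(100, 200)); // true: fully inside
        System.out.println(range.intersect(150, 250)); // false: end exceeds the range
        System.out.println(range.intersect(50, 150));  // false: start precedes the range
    }
}
```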
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import com.automq.stream.s3.metrics.operations.S3ObjectStage; -import com.automq.stream.s3.metrics.operations.S3Operation; -import com.automq.stream.s3.metrics.operations.S3Stage; -import io.opentelemetry.api.common.Attributes; - -public class AttributesUtils { - - public static Attributes buildAttributes(S3Operation operation) { - return Attributes.builder() - .put(S3StreamMetricsConstant.LABEL_OPERATION_TYPE, operation.getType().getName()) - .put(S3StreamMetricsConstant.LABEL_OPERATION_NAME, operation.getName()) - .build(); - } - - public static Attributes buildAttributes(S3Operation operation, String status) { - return Attributes.builder() - .putAll(buildAttributes(operation)) - .put(S3StreamMetricsConstant.LABEL_STATUS, status) - .build(); - } - - public static Attributes buildAttributes(S3Stage stage) { - return Attributes.builder() - .putAll(buildAttributes(stage.getOperation())) - .put(S3StreamMetricsConstant.LABEL_STAGE, stage.getName()) - .build(); - } - - public static Attributes buildAttributes(S3Operation operation, String status, String sizeLabelName) { - return Attributes.builder() - .putAll(buildAttributes(operation, status)) - .put(S3StreamMetricsConstant.LABEL_SIZE_NAME, sizeLabelName) - .build(); - } - - public static Attributes buildAttributes(S3ObjectStage objectStage) { - return Attributes.builder() - .put(S3StreamMetricsConstant.LABEL_STAGE, objectStage.getName()) - .build(); - } - - public static String getObjectBucketLabel(long objectSize) { - int index = (int) Math.ceil(Math.log((double) objectSize / (16 * 1024)) / Math.log(2)); - index = Math.min(S3StreamMetricsConstant.OBJECT_SIZE_BUCKET_NAMES.length - 1, Math.max(0, index)); - return S3StreamMetricsConstant.OBJECT_SIZE_BUCKET_NAMES[index]; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsConfig.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsConfig.java deleted file mode 100644 index 1bcc5a54e..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsConfig.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
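[Editor's illustration, not part of the patch] getObjectBucketLabel above maps a byte count onto OBJECT_SIZE_BUCKET_NAMES with a base-2 logarithm. A worked example of the arithmetic, purely illustrative:

    long objectSize = 100 * 1024; // 100 KiB
    // index = ceil(log2(objectSize / 16KiB)) = ceil(log2(6.25)) = ceil(2.64...) = 3
    int index = (int) Math.ceil(Math.log((double) objectSize / (16 * 1024)) / Math.log(2));
    // After clamping to [0, 14], OBJECT_SIZE_BUCKET_NAMES[3] == "128KB": every size in
    // (64KB, 128KB] shares the "128KB" label. Sizes <= 16KiB clamp to "16KB",
    // and sizes above 128MiB clamp to "inf".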
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import io.opentelemetry.api.common.Attributes; - -public class MetricsConfig { - private MetricsLevel metricsLevel; - private Attributes baseAttributes; - - public MetricsConfig() { - this.metricsLevel = MetricsLevel.INFO; - this.baseAttributes = Attributes.empty(); - } - - public MetricsConfig(MetricsLevel metricsLevel, Attributes baseAttributes) { - this.metricsLevel = metricsLevel; - this.baseAttributes = baseAttributes; - } - - public MetricsLevel getMetricsLevel() { - return metricsLevel; - } - - public Attributes getBaseAttributes() { - return baseAttributes; - } - - public void setMetricsLevel(MetricsLevel metricsLevel) { - this.metricsLevel = metricsLevel; - } - - public void setBaseAttributes(Attributes baseAttributes) { - this.baseAttributes = baseAttributes; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsLevel.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsLevel.java deleted file mode 100644 index 6125288ee..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/MetricsLevel.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -public enum MetricsLevel { - INFO, - DEBUG; - - public boolean isWithin(MetricsLevel level) { - return this.ordinal() <= level.ordinal(); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/MultiAttributes.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/MultiAttributes.java deleted file mode 100644 index 7adfaff47..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/MultiAttributes.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
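[Editor's illustration, not part of the patch] MetricsLevel.isWithin above is the gate used by every recording site in this package: a metric declared at level L is emitted only when L.isWithin(configuredLevel) holds, i.e. when the declared ordinal does not exceed the configured one. A small truth table as code:

    boolean a = MetricsLevel.INFO.isWithin(MetricsLevel.INFO);  // true:  0 <= 0
    boolean b = MetricsLevel.INFO.isWithin(MetricsLevel.DEBUG); // true:  INFO metrics still emit at DEBUG
    boolean c = MetricsLevel.DEBUG.isWithin(MetricsLevel.INFO); // false: DEBUG metrics are dropped at INFO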
- */
-
-package com.automq.stream.s3.metrics;
-
-import com.automq.stream.s3.metrics.wrapper.ConfigListener;
-import io.opentelemetry.api.common.AttributeKey;
-import io.opentelemetry.api.common.Attributes;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class MultiAttributes<K> implements ConfigListener {
-    private final Map<K, Attributes> attributesMap = new ConcurrentHashMap<>();
-    private final AttributeKey<K> keyName;
-    private Attributes baseAttributes;
-
-    public MultiAttributes(Attributes baseAttributes, AttributeKey<K> keyName) {
-        this.baseAttributes = baseAttributes;
-        this.keyName = keyName;
-    }
-
-    public Attributes get(K key) {
-        return attributesMap.computeIfAbsent(key, k -> buildAttributes(baseAttributes, Attributes.of(keyName, key)));
-    }
-
-    private Attributes buildAttributes(Attributes baseAttributes, Attributes attributes) {
-        return Attributes.builder().putAll(baseAttributes).putAll(attributes).build();
-    }
-
-    private void reBuildAttributes(Attributes baseAttributes) {
-        for (Map.Entry<K, Attributes> entry : attributesMap.entrySet()) {
-            attributesMap.replace(entry.getKey(), buildAttributes(baseAttributes, entry.getValue()));
-        }
-    }
-
-    @Override
-    public void onConfigChange(MetricsConfig metricsConfig) {
-        this.baseAttributes = metricsConfig.getBaseAttributes();
-        reBuildAttributes(metricsConfig.getBaseAttributes());
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongCounter.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongCounter.java
deleted file mode 100644
index e1b6779dc..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongCounter.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.metrics;
-
-import io.opentelemetry.api.common.Attributes;
-import io.opentelemetry.api.metrics.LongCounter;
-import io.opentelemetry.context.Context;
-
-public class NoopLongCounter implements LongCounter {
-    @Override
-    public void add(long l) {
-
-    }
-
-    @Override
-    public void add(long l, Attributes attributes) {
-
-    }
-
-    @Override
-    public void add(long l, Attributes attributes, Context context) {
-
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongHistogram.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongHistogram.java
deleted file mode 100644
index 7d3fe568d..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopLongHistogram.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
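[Editor's illustration, not part of the patch] MultiAttributes exists so that hot recording paths reuse one interned Attributes instance per label value instead of rebuilding it on every call. A minimal usage sketch; the label value "write_record" is hypothetical:

    MultiAttributes<String> byAllocType =
        new MultiAttributes<>(Attributes.empty(), S3StreamMetricsConstant.LABEL_ALLOC_TYPE);
    Attributes first = byAllocType.get("write_record");  // built and cached on first use
    Attributes second = byAllocType.get("write_record"); // served from the ConcurrentHashMap
    assert first == second;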
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.LongHistogram; -import io.opentelemetry.context.Context; - -public class NoopLongHistogram implements LongHistogram { - @Override - public void record(long l) { - - } - - @Override - public void record(long l, Attributes attributes) { - - } - - @Override - public void record(long l, Attributes attributes, Context context) { - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopObservableLongGauge.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopObservableLongGauge.java deleted file mode 100644 index c9677ad09..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/NoopObservableLongGauge.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import io.opentelemetry.api.metrics.ObservableLongGauge; - -public class NoopObservableLongGauge implements ObservableLongGauge { -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsConstant.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsConstant.java deleted file mode 100644 index 5d5dbf460..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsConstant.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.metrics;
-
-import io.opentelemetry.api.common.AttributeKey;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-public class S3StreamMetricsConstant {
-    // value = 16KB * 2^i
-    public static final String[] OBJECT_SIZE_BUCKET_NAMES = {
-        "16KB",
-        "32KB",
-        "64KB",
-        "128KB",
-        "256KB",
-        "512KB",
-        "1MB",
-        "2MB",
-        "4MB",
-        "8MB",
-        "16MB",
-        "32MB",
-        "64MB",
-        "128MB",
-        "inf"};
-    public static final List<Long> LATENCY_BOUNDARIES = List.of(
-        TimeUnit.MICROSECONDS.toNanos(1),
-        TimeUnit.MICROSECONDS.toNanos(10),
-        TimeUnit.MICROSECONDS.toNanos(100),
-        TimeUnit.MILLISECONDS.toNanos(1),
-        TimeUnit.MILLISECONDS.toNanos(3),
-        TimeUnit.MILLISECONDS.toNanos(5),
-        TimeUnit.MILLISECONDS.toNanos(7),
-        TimeUnit.MILLISECONDS.toNanos(10),
-        TimeUnit.MILLISECONDS.toNanos(20),
-        TimeUnit.MILLISECONDS.toNanos(30),
-        TimeUnit.MILLISECONDS.toNanos(40),
-        TimeUnit.MILLISECONDS.toNanos(50),
-        TimeUnit.MILLISECONDS.toNanos(60),
-        TimeUnit.MILLISECONDS.toNanos(70),
-        TimeUnit.MILLISECONDS.toNanos(80),
-        TimeUnit.MILLISECONDS.toNanos(90),
-        TimeUnit.MILLISECONDS.toNanos(100),
-        TimeUnit.MILLISECONDS.toNanos(200),
-        TimeUnit.MILLISECONDS.toNanos(500),
-        TimeUnit.SECONDS.toNanos(1),
-        TimeUnit.SECONDS.toNanos(3),
-        TimeUnit.SECONDS.toNanos(5),
-        TimeUnit.SECONDS.toNanos(10),
-        TimeUnit.SECONDS.toNanos(30),
-        TimeUnit.MINUTES.toNanos(1),
-        TimeUnit.MINUTES.toNanos(3),
-        TimeUnit.MINUTES.toNanos(5)
-    );
-
-    public static final String UPLOAD_SIZE_METRIC_NAME = "upload_size";
-    public static final String DOWNLOAD_SIZE_METRIC_NAME = "download_size";
-    public static final String OPERATION_COUNT_METRIC_NAME = "operation_count";
-    public static final String OPERATION_LATENCY_METRIC_NAME = "operation_latency";
-    public static final String OBJECT_COUNT_METRIC_NAME = "object_count";
-    public static final String OBJECT_STAGE_COST_METRIC_NAME = "object_stage_cost";
-    public static final String NETWORK_INBOUND_USAGE_METRIC_NAME = "network_inbound_usage";
-    public static final String NETWORK_OUTBOUND_USAGE_METRIC_NAME = "network_outbound_usage";
-    public static final String NETWORK_INBOUND_AVAILABLE_BANDWIDTH_METRIC_NAME = "network_inbound_available_bandwidth";
-    public static final String NETWORK_OUTBOUND_AVAILABLE_BANDWIDTH_METRIC_NAME = "network_outbound_available_bandwidth";
-    public static final String NETWORK_INBOUND_LIMITER_QUEUE_SIZE_METRIC_NAME = "network_inbound_limiter_queue_size";
-    public static final String NETWORK_OUTBOUND_LIMITER_QUEUE_SIZE_METRIC_NAME = "network_outbound_limiter_queue_size";
-    public static final String NETWORK_INBOUND_LIMITER_QUEUE_TIME_METRIC_NAME = "network_inbound_limiter_queue_time";
-    public static final String NETWORK_OUTBOUND_LIMITER_QUEUE_TIME_METRIC_NAME = "network_outbound_limiter_queue_time";
-    public static final String READ_AHEAD_SIZE_METRIC_NAME = "read_ahead_size";
-    public static final String SUM_METRIC_NAME_SUFFIX = "_sum";
-    public static final String COUNT_METRIC_NAME_SUFFIX = "_count";
-    public static final String P50_METRIC_NAME_SUFFIX = "_50p";
-    public static final String P99_METRIC_NAME_SUFFIX = "_99p";
-    public static final String MEAN_METRIC_NAME_SUFFIX = "_mean";
-    public static final String MAX_METRIC_NAME_SUFFIX = "_max";
-    public static final String WAL_START_OFFSET = "wal_start_offset";
-    public static final String WAL_TRIMMED_OFFSET = "wal_trimmed_offset";
-    public static final String DELTA_WAL_CACHE_SIZE = "delta_wal_cache_size";
-    public static final String BLOCK_CACHE_SIZE = "block_cache_size";
-    public static final String AVAILABLE_INFLIGHT_READ_AHEAD_SIZE_METRIC_NAME = "available_inflight_read_ahead_size";
-    public static final String READ_AHEAD_QUEUE_TIME_METRIC_NAME = "read_ahead_limiter_queue_time";
-    public static final String AVAILABLE_S3_INFLIGHT_READ_QUOTA_METRIC_NAME = "available_s3_inflight_read_quota";
-    public static final String AVAILABLE_S3_INFLIGHT_WRITE_QUOTA_METRIC_NAME = "available_s3_inflight_write_quota";
-    public static final String INFLIGHT_WAL_UPLOAD_TASKS_COUNT_METRIC_NAME = "inflight_wal_upload_tasks_count";
-    public static final String COMPACTION_READ_SIZE_METRIC_NAME = "compaction_read_size";
-    public static final String COMPACTION_WRITE_SIZE_METRIC_NAME = "compaction_write_size";
-    public static final String BUFFER_ALLOCATED_MEMORY_SIZE_METRIC_NAME = "buffer_allocated_memory_size";
-    public static final String BUFFER_USED_MEMORY_SIZE_METRIC_NAME = "buffer_used_memory_size";
-    public static final AttributeKey<String> LABEL_OPERATION_TYPE = AttributeKey.stringKey("operation_type");
-    public static final AttributeKey<String> LABEL_OPERATION_NAME = AttributeKey.stringKey("operation_name");
-    public static final AttributeKey<String> LABEL_SIZE_NAME = AttributeKey.stringKey("size");
-    public static final AttributeKey<String> LABEL_STAGE = AttributeKey.stringKey("stage");
-    public static final AttributeKey<String> LABEL_STATUS = AttributeKey.stringKey("status");
-    public static final AttributeKey<String> LABEL_ALLOC_TYPE = AttributeKey.stringKey("type");
-    public static final String LABEL_STATUS_SUCCESS = "success";
-    public static final String LABEL_STATUS_FAILED = "failed";
-    public static final String LABEL_STATUS_HIT = "hit";
-    public static final String LABEL_STATUS_MISS = "miss";
-    public static final String LABEL_STATUS_SYNC = "sync";
-    public static final String LABEL_STATUS_ASYNC = "async";
-
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsManager.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsManager.java
deleted file mode 100644
index c50bda24d..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/metrics/S3StreamMetricsManager.java
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.metrics;
-
-import com.automq.stream.s3.ByteBufAlloc;
-import com.automq.stream.s3.metrics.operations.S3ObjectStage;
-import com.automq.stream.s3.metrics.operations.S3Operation;
-import com.automq.stream.s3.metrics.operations.S3Stage;
-import com.automq.stream.s3.metrics.wrapper.ConfigListener;
-import com.automq.stream.s3.metrics.wrapper.CounterMetric;
-import com.automq.stream.s3.metrics.wrapper.HistogramInstrument;
-import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric;
-import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter;
-import com.yammer.metrics.core.MetricName;
-import com.yammer.metrics.core.MetricsRegistry;
-import io.opentelemetry.api.common.Attributes;
-import io.opentelemetry.api.metrics.LongCounter;
-import io.opentelemetry.api.metrics.Meter;
-import io.opentelemetry.api.metrics.ObservableLongGauge;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.function.Supplier;
-
-public class S3StreamMetricsManager {
-    private static final List<ConfigListener> BASE_ATTRIBUTES_LISTENERS = new ArrayList<>();
-    public static final MetricsRegistry METRICS_REGISTRY = new MetricsRegistry();
-    public static final List<YammerHistogramMetric> OPERATION_LATENCY_METRICS = new CopyOnWriteArrayList<>();
-    public static final List<YammerHistogramMetric> OBJECT_STAGE_METRICS = new CopyOnWriteArrayList<>();
-    public static final List<YammerHistogramMetric> NETWORK_INBOUND_LIMITER_QUEUE_TIME_METRICS = new CopyOnWriteArrayList<>();
-    public static final List<YammerHistogramMetric> NETWORK_OUTBOUND_LIMITER_QUEUE_TIME_METRICS = new CopyOnWriteArrayList<>();
-    public static final List<YammerHistogramMetric> READ_AHEAD_SIZE_METRICS = new CopyOnWriteArrayList<>();
-    public static final List<YammerHistogramMetric> READ_AHEAD_LIMITER_QUEUE_TIME_METRICS = new CopyOnWriteArrayList<>();
-    private static LongCounter s3DownloadSizeInTotal = new NoopLongCounter();
-    private static LongCounter s3UploadSizeInTotal = new NoopLongCounter();
-    private static HistogramInstrument operationLatency;
-    private static LongCounter objectNumInTotal = new NoopLongCounter();
-    private static HistogramInstrument objectStageCost;
-    private static LongCounter networkInboundUsageInTotal = new NoopLongCounter();
-    private static LongCounter networkOutboundUsageInTotal = new NoopLongCounter();
-    private static ObservableLongGauge networkInboundAvailableBandwidth = new NoopObservableLongGauge();
-    private static ObservableLongGauge networkOutboundAvailableBandwidth = new NoopObservableLongGauge();
-    private static ObservableLongGauge networkInboundLimiterQueueSize = new NoopObservableLongGauge();
-    private static ObservableLongGauge networkOutboundLimiterQueueSize = new NoopObservableLongGauge();
-    private static HistogramInstrument networkInboundLimiterQueueTime;
-    private static HistogramInstrument networkOutboundLimiterQueueTime;
-    private static HistogramInstrument readAheadSize;
-    private static HistogramInstrument readAheadLimierQueueTime;
-    private static ObservableLongGauge deltaWalStartOffset = new NoopObservableLongGauge();
-    private static ObservableLongGauge deltaWalTrimmedOffset = new NoopObservableLongGauge();
-    private static ObservableLongGauge deltaWalCacheSize = new NoopObservableLongGauge();
-    private static ObservableLongGauge blockCacheSize = new NoopObservableLongGauge();
-    private static ObservableLongGauge availableInflightReadAheadSize = new NoopObservableLongGauge();
-    private static ObservableLongGauge availableInflightS3ReadQuota = new NoopObservableLongGauge();
-    private static ObservableLongGauge availableInflightS3WriteQuota = new NoopObservableLongGauge();
-    private static ObservableLongGauge inflightWALUploadTasksCount = new NoopObservableLongGauge();
-    private static ObservableLongGauge allocatedMemorySize = new NoopObservableLongGauge();
-    private static ObservableLongGauge usedMemorySize = new NoopObservableLongGauge();
-    private static LongCounter compactionReadSizeInTotal = new NoopLongCounter();
-    private static LongCounter compactionWriteSizeInTotal = new NoopLongCounter();
-    private static Supplier<Long> networkInboundAvailableBandwidthSupplier = () -> 0L;
-    private static Supplier<Long> networkOutboundAvailableBandwidthSupplier = () -> 0L;
-    private static Supplier<Integer> networkInboundLimiterQueueSizeSupplier = () -> 0;
-    private static Supplier<Integer> networkOutboundLimiterQueueSizeSupplier = () -> 0;
-    private static Supplier<Integer> availableInflightReadAheadSizeSupplier = () -> 0;
-    private static Supplier<Long> deltaWalStartOffsetSupplier = () -> 0L;
-    private static Supplier<Long> deltaWalTrimmedOffsetSupplier = () -> 0L;
-    private static Supplier<Long> deltaWALCacheSizeSupplier = () -> 0L;
-    private static Supplier<Long> blockCacheSizeSupplier = () -> 0L;
-    private static Supplier<Integer> availableInflightS3ReadQuotaSupplier = () -> 0;
-    private static Supplier<Integer> availableInflightS3WriteQuotaSupplier = () -> 0;
-    private static Supplier<Integer> inflightWALUploadTasksCountSupplier = () -> 0;
-    private static MetricsConfig metricsConfig = new MetricsConfig(MetricsLevel.INFO, Attributes.empty());
-    private static final MultiAttributes<String> ALLOC_TYPE_ATTRIBUTES = new MultiAttributes<>(Attributes.empty(),
-        S3StreamMetricsConstant.LABEL_ALLOC_TYPE);
-
-    static {
-        BASE_ATTRIBUTES_LISTENERS.add(ALLOC_TYPE_ATTRIBUTES);
-    }
-
-    public static void configure(MetricsConfig metricsConfig) {
-        synchronized (BASE_ATTRIBUTES_LISTENERS) {
-            S3StreamMetricsManager.metricsConfig = metricsConfig;
-            for (ConfigListener listener : BASE_ATTRIBUTES_LISTENERS) {
-                listener.onConfigChange(metricsConfig);
-            }
-        }
-    }
-
-    public static void initMetrics(Meter meter) {
-        initMetrics(meter, "");
-    }
-
-    public static void initMetrics(Meter meter, String prefix) {
-        s3DownloadSizeInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.DOWNLOAD_SIZE_METRIC_NAME)
-            .setDescription("S3 download size")
-            .setUnit("bytes")
-            .build();
-        s3UploadSizeInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.UPLOAD_SIZE_METRIC_NAME)
-            .setDescription("S3 upload size")
-            .setUnit("bytes")
-            .build();
-        operationLatency = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.OPERATION_LATENCY_METRIC_NAME,
-            "Operation latency", "nanoseconds", () -> OPERATION_LATENCY_METRICS);
-        objectNumInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.OBJECT_COUNT_METRIC_NAME)
-            .setDescription("Objects count")
-            .build();
-        objectStageCost = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.OBJECT_STAGE_COST_METRIC_NAME,
-            "Objects stage cost", "nanoseconds", () -> OBJECT_STAGE_METRICS);
-        networkInboundUsageInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.NETWORK_INBOUND_USAGE_METRIC_NAME)
-            .setDescription("Network inbound usage")
-            .setUnit("bytes")
-            .build();
-        networkOutboundUsageInTotal = meter.counterBuilder(prefix +
S3StreamMetricsConstant.NETWORK_OUTBOUND_USAGE_METRIC_NAME) - .setDescription("Network outbound usage") - .setUnit("bytes") - .build(); - networkInboundAvailableBandwidth = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.NETWORK_INBOUND_AVAILABLE_BANDWIDTH_METRIC_NAME) - .setDescription("Network inbound available bandwidth") - .setUnit("bytes") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.INFO.isWithin(metricsConfig.getMetricsLevel())) { - result.record(networkInboundAvailableBandwidthSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - networkOutboundAvailableBandwidth = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.NETWORK_OUTBOUND_AVAILABLE_BANDWIDTH_METRIC_NAME) - .setDescription("Network outbound available bandwidth") - .setUnit("bytes") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.INFO.isWithin(metricsConfig.getMetricsLevel())) { - result.record(networkOutboundAvailableBandwidthSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - networkInboundLimiterQueueSize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.NETWORK_INBOUND_LIMITER_QUEUE_SIZE_METRIC_NAME) - .setDescription("Network inbound limiter queue size") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) { - result.record((long) networkInboundLimiterQueueSizeSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - networkOutboundLimiterQueueSize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.NETWORK_OUTBOUND_LIMITER_QUEUE_SIZE_METRIC_NAME) - .setDescription("Network outbound limiter queue size") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) { - result.record((long) networkOutboundLimiterQueueSizeSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - networkInboundLimiterQueueTime = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.NETWORK_INBOUND_LIMITER_QUEUE_TIME_METRIC_NAME, - "Network inbound limiter queue time", "nanoseconds", () -> NETWORK_INBOUND_LIMITER_QUEUE_TIME_METRICS); - networkOutboundLimiterQueueTime = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.NETWORK_OUTBOUND_LIMITER_QUEUE_TIME_METRIC_NAME, - "Network outbound limiter queue time", "nanoseconds", () -> NETWORK_OUTBOUND_LIMITER_QUEUE_TIME_METRICS); - readAheadSize = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.READ_AHEAD_SIZE_METRIC_NAME, - "Read ahead size", "bytes", () -> READ_AHEAD_SIZE_METRICS); - readAheadLimierQueueTime = new HistogramInstrument(meter, prefix + S3StreamMetricsConstant.READ_AHEAD_QUEUE_TIME_METRIC_NAME, - "Read ahead limiter queue time", "nanoseconds", () -> READ_AHEAD_LIMITER_QUEUE_TIME_METRICS); - deltaWalStartOffset = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.WAL_START_OFFSET) - .setDescription("Delta WAL start offset") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) { - result.record(deltaWalStartOffsetSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - deltaWalTrimmedOffset = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.WAL_TRIMMED_OFFSET) - .setDescription("Delta WAL trimmed offset") - .ofLongs() - .buildWithCallback(result -> { - if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) { - result.record(deltaWalTrimmedOffsetSupplier.get(), metricsConfig.getBaseAttributes()); - } - }); - deltaWalCacheSize = meter.gaugeBuilder(prefix + 
S3StreamMetricsConstant.DELTA_WAL_CACHE_SIZE)
-            .setDescription("Delta WAL cache size")
-            .setUnit("bytes")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record(deltaWALCacheSizeSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        blockCacheSize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.BLOCK_CACHE_SIZE)
-            .setDescription("Block cache size")
-            .setUnit("bytes")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record(blockCacheSizeSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        availableInflightReadAheadSize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.AVAILABLE_INFLIGHT_READ_AHEAD_SIZE_METRIC_NAME)
-            .setDescription("Available inflight read ahead size")
-            .setUnit("bytes")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.INFO.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record((long) availableInflightReadAheadSizeSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        availableInflightS3ReadQuota = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.AVAILABLE_S3_INFLIGHT_READ_QUOTA_METRIC_NAME)
-            .setDescription("Available inflight S3 read quota")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record((long) availableInflightS3ReadQuotaSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        availableInflightS3WriteQuota = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.AVAILABLE_S3_INFLIGHT_WRITE_QUOTA_METRIC_NAME)
-            .setDescription("Available inflight S3 write quota")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record((long) availableInflightS3WriteQuotaSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        inflightWALUploadTasksCount = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.INFLIGHT_WAL_UPLOAD_TASKS_COUNT_METRIC_NAME)
-            .setDescription("Inflight upload WAL tasks count")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel())) {
-                    result.record((long) inflightWALUploadTasksCountSupplier.get(), metricsConfig.getBaseAttributes());
-                }
-            });
-        compactionReadSizeInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.COMPACTION_READ_SIZE_METRIC_NAME)
-            .setDescription("Compaction read size")
-            .setUnit("bytes")
-            .build();
-        compactionWriteSizeInTotal = meter.counterBuilder(prefix + S3StreamMetricsConstant.COMPACTION_WRITE_SIZE_METRIC_NAME)
-            .setDescription("Compaction write size")
-            .setUnit("bytes")
-            .build();
-        allocatedMemorySize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.BUFFER_ALLOCATED_MEMORY_SIZE_METRIC_NAME)
-            .setDescription("Buffer allocated memory size")
-            .setUnit("bytes")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.INFO.isWithin(metricsConfig.getMetricsLevel()) && ByteBufAlloc.byteBufAllocMetric != null) {
-                    Map<String, Long> allocateSizeMap = ByteBufAlloc.byteBufAllocMetric.getDetailedMap();
-                    for (Map.Entry<String, Long> entry : allocateSizeMap.entrySet()) {
-                        result.record(entry.getValue(), ALLOC_TYPE_ATTRIBUTES.get(entry.getKey()));
-                    }
-                }
-            });
-        usedMemorySize = meter.gaugeBuilder(prefix + S3StreamMetricsConstant.BUFFER_USED_MEMORY_SIZE_METRIC_NAME)
-            .setDescription("Buffer used memory size")
-            .setUnit("bytes")
-            .ofLongs()
-            .buildWithCallback(result -> {
-                if (MetricsLevel.DEBUG.isWithin(metricsConfig.getMetricsLevel()) && ByteBufAlloc.byteBufAllocMetric != null) {
-                    result.record(ByteBufAlloc.byteBufAllocMetric.getUsedMemory(), metricsConfig.getBaseAttributes());
-                }
-            });
-    }
-
-    public static void registerNetworkLimiterSupplier(AsyncNetworkBandwidthLimiter.Type type,
-        Supplier<Long> networkAvailableBandwidthSupplier,
-        Supplier<Integer> networkLimiterQueueSizeSupplier) {
-        switch (type) {
-            case INBOUND:
-                S3StreamMetricsManager.networkInboundAvailableBandwidthSupplier = networkAvailableBandwidthSupplier;
-                S3StreamMetricsManager.networkInboundLimiterQueueSizeSupplier = networkLimiterQueueSizeSupplier;
-                break;
-            case OUTBOUND:
-                S3StreamMetricsManager.networkOutboundAvailableBandwidthSupplier = networkAvailableBandwidthSupplier;
-                S3StreamMetricsManager.networkOutboundLimiterQueueSizeSupplier = networkLimiterQueueSizeSupplier;
-                break;
-        }
-    }
-
-    public static void registerDeltaWalOffsetSupplier(Supplier<Long> deltaWalStartOffsetSupplier,
-        Supplier<Long> deltaWalTrimmedOffsetSupplier) {
-        S3StreamMetricsManager.deltaWalStartOffsetSupplier = deltaWalStartOffsetSupplier;
-        S3StreamMetricsManager.deltaWalTrimmedOffsetSupplier = deltaWalTrimmedOffsetSupplier;
-    }
-
-    public static void registerDeltaWalCacheSizeSupplier(Supplier<Long> deltaWalCacheSizeSupplier) {
-        S3StreamMetricsManager.deltaWALCacheSizeSupplier = deltaWalCacheSizeSupplier;
-    }
-
-    public static void registerBlockCacheSizeSupplier(Supplier<Long> blockCacheSizeSupplier) {
-        S3StreamMetricsManager.blockCacheSizeSupplier = blockCacheSizeSupplier;
-    }
-
-    public static void registerInflightS3ReadQuotaSupplier(Supplier<Integer> inflightS3ReadQuotaSupplier) {
-        S3StreamMetricsManager.availableInflightS3ReadQuotaSupplier = inflightS3ReadQuotaSupplier;
-    }
-
-    public static void registerInflightS3WriteQuotaSupplier(Supplier<Integer> inflightS3WriteQuotaSupplier) {
-        S3StreamMetricsManager.availableInflightS3WriteQuotaSupplier = inflightS3WriteQuotaSupplier;
-    }
-
-    public static void registerInflightReadSizeLimiterSupplier(
-        Supplier<Integer> availableInflightReadAheadSizeSupplier) {
-        S3StreamMetricsManager.availableInflightReadAheadSizeSupplier = availableInflightReadAheadSizeSupplier;
-    }
-
-    public static void registerInflightWALUploadTasksCountSupplier(
-        Supplier<Integer> inflightWALUploadTasksCountSupplier) {
-        S3StreamMetricsManager.inflightWALUploadTasksCountSupplier = inflightWALUploadTasksCountSupplier;
-    }
-
-    public static CounterMetric buildS3UploadSizeMetric() {
-        synchronized (BASE_ATTRIBUTES_LISTENERS) {
-            CounterMetric metric = new CounterMetric(metricsConfig, s3UploadSizeInTotal);
-            BASE_ATTRIBUTES_LISTENERS.add(metric);
-            return metric;
-        }
-    }
-
-    public static CounterMetric buildS3DownloadSizeMetric() {
-        synchronized (BASE_ATTRIBUTES_LISTENERS) {
-            CounterMetric metric = new CounterMetric(metricsConfig, s3DownloadSizeInTotal);
-            BASE_ATTRIBUTES_LISTENERS.add(metric);
-            return metric;
-        }
-    }
-
-    public static YammerHistogramMetric buildStageOperationMetric(MetricName metricName, MetricsLevel metricsLevel, S3Stage stage) {
-        synchronized (BASE_ATTRIBUTES_LISTENERS) {
-            YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel,
-                metricsConfig, AttributesUtils.buildAttributes(stage));
-            BASE_ATTRIBUTES_LISTENERS.add(metric);
-            OPERATION_LATENCY_METRICS.add(metric);
-            return metric;
-        }
-    }
-
-    public static YammerHistogramMetric buildOperationMetric(MetricName metricName, MetricsLevel metricsLevel, S3Operation operation) {
-        synchronized (BASE_ATTRIBUTES_LISTENERS) {
-            YammerHistogramMetric metric = new
YammerHistogramMetric(metricName, metricsLevel, - metricsConfig, AttributesUtils.buildAttributes(operation)); - BASE_ATTRIBUTES_LISTENERS.add(metric); - OPERATION_LATENCY_METRICS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildOperationMetric(MetricName metricName, MetricsLevel metricsLevel, - S3Operation operation, String status, String sizeLabelName) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig, - AttributesUtils.buildAttributes(operation, status, sizeLabelName)); - BASE_ATTRIBUTES_LISTENERS.add(metric); - OPERATION_LATENCY_METRICS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildOperationMetric(MetricName metricName, MetricsLevel metricsLevel, S3Operation operation, String status) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig, - AttributesUtils.buildAttributes(operation, status)); - BASE_ATTRIBUTES_LISTENERS.add(metric); - OPERATION_LATENCY_METRICS.add(metric); - return metric; - } - } - - public static CounterMetric buildObjectNumMetric() { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - CounterMetric metric = new CounterMetric(metricsConfig, objectNumInTotal); - BASE_ATTRIBUTES_LISTENERS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildObjectStageCostMetric(MetricName metricName, MetricsLevel metricsLevel, S3ObjectStage stage) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig, - AttributesUtils.buildAttributes(stage)); - BASE_ATTRIBUTES_LISTENERS.add(metric); - OBJECT_STAGE_METRICS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildObjectUploadSizeMetric(MetricName metricName, MetricsLevel metricsLevel) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig); - BASE_ATTRIBUTES_LISTENERS.add(metric); - OBJECT_STAGE_METRICS.add(metric); - return metric; - } - } - - public static CounterMetric buildNetworkInboundUsageMetric() { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - CounterMetric metric = new CounterMetric(metricsConfig, networkInboundUsageInTotal); - BASE_ATTRIBUTES_LISTENERS.add(metric); - return metric; - } - } - - public static CounterMetric buildNetworkOutboundUsageMetric() { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - CounterMetric metric = new CounterMetric(metricsConfig, networkOutboundUsageInTotal); - BASE_ATTRIBUTES_LISTENERS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildNetworkInboundLimiterQueueTimeMetric(MetricName metricName, MetricsLevel metricsLevel) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig); - BASE_ATTRIBUTES_LISTENERS.add(metric); - NETWORK_INBOUND_LIMITER_QUEUE_TIME_METRICS.add(metric); - return metric; - } - } - - public static YammerHistogramMetric buildNetworkOutboundLimiterQueueTimeMetric(MetricName metricName, MetricsLevel metricsLevel) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig); - BASE_ATTRIBUTES_LISTENERS.add(metric); - NETWORK_OUTBOUND_LIMITER_QUEUE_TIME_METRICS.add(metric); - return metric; - } - 
} - - public static YammerHistogramMetric buildReadAheadSizeMetric(MetricName metricName, MetricsLevel metricsLevel) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig); - BASE_ATTRIBUTES_LISTENERS.add(metric); - READ_AHEAD_SIZE_METRICS.add(metric); - return metric; - } - - } - - public static YammerHistogramMetric buildReadAheadLimiterQueueTimeMetric(MetricName metricName, MetricsLevel metricsLevel) { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - YammerHistogramMetric metric = new YammerHistogramMetric(metricName, metricsLevel, metricsConfig); - BASE_ATTRIBUTES_LISTENERS.add(metric); - READ_AHEAD_LIMITER_QUEUE_TIME_METRICS.add(metric); - return metric; - } - } - - public static CounterMetric buildCompactionReadSizeMetric() { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - CounterMetric metric = new CounterMetric(metricsConfig, compactionReadSizeInTotal); - BASE_ATTRIBUTES_LISTENERS.add(metric); - return metric; - } - } - - public static CounterMetric buildCompactionWriteSizeMetric() { - synchronized (BASE_ATTRIBUTES_LISTENERS) { - CounterMetric metric = new CounterMetric(metricsConfig, compactionWriteSizeInTotal); - BASE_ATTRIBUTES_LISTENERS.add(metric); - return metric; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/TimerUtil.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/TimerUtil.java deleted file mode 100644 index 9059b2d76..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/TimerUtil.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -public class TimerUtil { - private final AtomicLong last = new AtomicLong(System.nanoTime()); - - public TimerUtil() { - reset(); - } - - public void reset() { - last.set(System.nanoTime()); - } - - public long elapsedAs(TimeUnit timeUnit) { - return timeUnit.convert(System.nanoTime() - last.get(), TimeUnit.NANOSECONDS); - } - - public long elapsedAndResetAs(TimeUnit timeUnit) { - long now = System.nanoTime(); - return timeUnit.convert(now - last.getAndSet(now), TimeUnit.NANOSECONDS); - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3MetricsType.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3MetricsType.java deleted file mode 100644 index 0670d6307..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3MetricsType.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
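[Editor's illustration, not part of the patch] TimerUtil above pairs with the histogram wrappers: callers snapshot a timer, do the work, and feed the elapsed nanoseconds to a metric built by S3StreamMetricsManager. A sketch of that pattern; the workload call is hypothetical:

    TimerUtil timer = new TimerUtil();
    uploadPartToS3(); // hypothetical unit of work being measured
    long elapsedNs = timer.elapsedAs(TimeUnit.NANOSECONDS);
    // elapsedNs would then be fed to a YammerHistogramMetric obtained from
    // S3StreamMetricsManager.buildStageOperationMetric(...), while the ObservableLongGauges
    // above pull their values on collection through the register*Supplier hooks.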
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.metrics.operations;
-
-import java.util.Map;
-
-public enum S3MetricsType {
-    S3Stream("S3Stream"),
-    S3Storage("S3Storage"),
-    S3Request("S3Request"),
-    S3Object("S3Object"),
-    S3Network("S3Network");
-
-    private static final Map<String, S3MetricsType> MAP = Map.of(
-        "S3Stream", S3Stream,
-        "S3Storage", S3Storage,
-        "S3Request", S3Request,
-        "S3Object", S3Object,
-        "S3Network", S3Network
-    );
-
-    private final String name;
-
-    S3MetricsType(String name) {
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public static S3MetricsType of(String name) {
-        return MAP.get(name);
-    }
-
-    @Override
-    public String toString() {
-        return "S3MetricsType{" +
-            "name='" + name + '\'' +
-            '}';
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3ObjectStage.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3ObjectStage.java
deleted file mode 100644
index 22270feb0..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3ObjectStage.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.metrics.operations;
-
-/**
- * TODO: Maybe merge into {@link S3Stage}
- */
-public enum S3ObjectStage {
-    UPLOAD_PART("upload_part"),
-    READY_CLOSE("ready_close"),
-    TOTAL("total");
-
-    private final String name;
-
-    S3ObjectStage(String name) {
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public String getUniqueKey() {
-        return "s3_object_stage_" + name;
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Operation.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Operation.java
deleted file mode 100644
index f5608e4c5..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Operation.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.operations; - -public enum S3Operation { - /* S3 stream operations start */ - CREATE_STREAM(S3MetricsType.S3Stream, "create"), - OPEN_STREAM(S3MetricsType.S3Stream, "open"), - APPEND_STREAM(S3MetricsType.S3Stream, "append"), - FETCH_STREAM(S3MetricsType.S3Stream, "fetch"), - TRIM_STREAM(S3MetricsType.S3Stream, "trim"), - CLOSE_STREAM(S3MetricsType.S3Stream, "close"), - /* S3 stream operations end */ - - /* S3 storage operations start */ - APPEND_STORAGE(S3MetricsType.S3Storage, "append"), - APPEND_STORAGE_WAL(S3MetricsType.S3Storage, "append_wal"), - APPEND_STORAGE_APPEND_CALLBACK(S3MetricsType.S3Storage, "append_callback"), - APPEND_STORAGE_WAL_FULL(S3MetricsType.S3Storage, "append_wal_full"), - APPEND_STORAGE_LOG_CACHE(S3MetricsType.S3Storage, "append_log_cache"), - APPEND_STORAGE_LOG_CACHE_FULL(S3MetricsType.S3Storage, "append_log_cache_full"), - UPLOAD_STORAGE_WAL(S3MetricsType.S3Storage, "upload_wal"), - FORCE_UPLOAD_STORAGE_WAL_AWAIT(S3MetricsType.S3Storage, "force_upload_wal_await"), - FORCE_UPLOAD_STORAGE_WAL(S3MetricsType.S3Storage, "force_upload_wal"), - READ_STORAGE(S3MetricsType.S3Storage, "read"), - READ_STORAGE_LOG_CACHE(S3MetricsType.S3Storage, "read_log_cache"), - READ_STORAGE_BLOCK_CACHE(S3MetricsType.S3Storage, "read_block_cache"), - BLOCK_CACHE_READ_AHEAD(S3MetricsType.S3Storage, "read_ahead"), - /* S3 storage operations end */ - - /* S3 request operations start */ - GET_OBJECT(S3MetricsType.S3Request, "get_object"), - PUT_OBJECT(S3MetricsType.S3Request, "put_object"), - DELETE_OBJECT(S3MetricsType.S3Request, "delete_object"), - DELETE_OBJECTS(S3MetricsType.S3Request, "delete_objects"), - CREATE_MULTI_PART_UPLOAD(S3MetricsType.S3Request, "create_multi_part_upload"), - UPLOAD_PART(S3MetricsType.S3Request, "upload_part"), - UPLOAD_PART_COPY(S3MetricsType.S3Request, "upload_part_copy"), - COMPLETE_MULTI_PART_UPLOAD(S3MetricsType.S3Request, "complete_multi_part_upload"), - /* S3 request operations end */ - - /* S3 object operations start */ - PREPARE_OBJECT(S3MetricsType.S3Object, "prepare"), - COMMIT_STREAM_SET_OBJECT(S3MetricsType.S3Object, "commit_stream_set_object"), - COMPACTED_OBJECT(S3MetricsType.S3Object, "compacted_object"), - COMMIT_STREAM_OBJECT(S3MetricsType.S3Object, "commit_stream_object"), - GET_OBJECTS(S3MetricsType.S3Object, "get_objects"), - GET_SERVER_OBJECTS(S3MetricsType.S3Object, "get_server_objects"), - GET_STREAM_OBJECTS(S3MetricsType.S3Object, "get_stream_objects"), - /* S3 object operations end */ - - ALLOC_BUFFER(S3MetricsType.S3Storage, "alloc_buffer"); - - private final S3MetricsType type; - private final String name; - private final String uniqueKey; - - S3Operation(S3MetricsType type, String name) { - this.type = type; - this.name = name; - uniqueKey = type.getName() + "-" + name; - } - - public String getName() { - return name; - } - - public S3MetricsType getType() { - return type; - } - - public String getUniqueKey() { - return uniqueKey; - } - - @Override - public String toString() { - return "Operation{" + - "type='" + type.getName() + '\'' + - ", name='" + name + '\'' + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Stage.java 
b/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Stage.java deleted file mode 100644 index 34aab3977..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/operations/S3Stage.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.operations; - -public enum S3Stage { - - /* Append WAL stages start */ - APPEND_WAL_BEFORE(S3Operation.APPEND_STORAGE_WAL, "before"), - APPEND_WAL_BLOCK_POLLED(S3Operation.APPEND_STORAGE_WAL, "block_polled"), - APPEND_WAL_AWAIT(S3Operation.APPEND_STORAGE_WAL, "await"), - APPEND_WAL_WRITE(S3Operation.APPEND_STORAGE_WAL, "write"), - APPEND_WAL_AFTER(S3Operation.APPEND_STORAGE_WAL, "after"), - APPEND_WAL_COMPLETE(S3Operation.APPEND_STORAGE_WAL, "complete"), - /* Append WAL stages end */ - - /* Force upload WAL start */ - FORCE_UPLOAD_WAL_AWAIT(S3Operation.FORCE_UPLOAD_STORAGE_WAL, "await"), - FORCE_UPLOAD_WAL_COMPLETE(S3Operation.FORCE_UPLOAD_STORAGE_WAL, "complete"), - /* Force upload WAL end */ - - /* Upload WAL start */ - UPLOAD_WAL_PREPARE(S3Operation.UPLOAD_STORAGE_WAL, "prepare"), - UPLOAD_WAL_UPLOAD(S3Operation.UPLOAD_STORAGE_WAL, "upload"), - UPLOAD_WAL_COMMIT(S3Operation.UPLOAD_STORAGE_WAL, "commit"), - UPLOAD_WAL_COMPLETE(S3Operation.UPLOAD_STORAGE_WAL, "complete"); - /* Upload WAL end */ - - private final S3Operation operation; - private final String name; - - S3Stage(S3Operation operation, String name) { - this.operation = operation; - this.name = name; - } - - public S3Operation getOperation() { - return operation; - } - - public String getName() { - return name; - } - - public String getUniqueKey() { - return operation.getUniqueKey() + "-" + name; - } - - @Override - public String toString() { - return "S3Stage{" + - "operation=" + operation.getName() + - ", name='" + name + '\'' + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/CompactionStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/CompactionStats.java deleted file mode 100644 index dc168815c..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/CompactionStats.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
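[Editor's illustration, not part of the patch] The unique keys of these enums nest: an S3Operation key is the metrics type name plus the operation name, and an S3Stage key appends the stage name to its operation's key. Derived directly from the definitions above:

    String opKey = S3Operation.UPLOAD_STORAGE_WAL.getUniqueKey(); // "S3Storage-upload_wal"
    String stageKey = S3Stage.UPLOAD_WAL_COMMIT.getUniqueKey();   // "S3Storage-upload_wal-commit"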
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.wrapper.CounterMetric; - -public class CompactionStats { - private volatile static CompactionStats instance = null; - - public final CounterMetric compactionReadSizeStats = S3StreamMetricsManager.buildCompactionReadSizeMetric(); - public final CounterMetric compactionWriteSizeStats = S3StreamMetricsManager.buildCompactionWriteSizeMetric(); - - private CompactionStats() { - } - - public static CompactionStats getInstance() { - if (instance == null) { - synchronized (CompactionStats.class) { - if (instance == null) { - instance = new CompactionStats(); - } - } - } - return instance; - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/NetworkStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/NetworkStats.java deleted file mode 100644 index ef2d07313..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/NetworkStats.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.wrapper.CounterMetric; -import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric; -import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter; -import com.yammer.metrics.core.MetricName; - -public class NetworkStats { - private volatile static NetworkStats instance = null; - - private final CounterMetric networkInboundUsageStats = S3StreamMetricsManager.buildNetworkInboundUsageMetric(); - private final CounterMetric networkOutboundUsageStats = S3StreamMetricsManager.buildNetworkOutboundUsageMetric(); - private final YammerHistogramMetric networkInboundLimiterQueueTimeStats = S3StreamMetricsManager.buildNetworkInboundLimiterQueueTimeMetric( - new MetricName(NetworkStats.class, "NetworkInboundLimiterQueueTime"), MetricsLevel.INFO); - private final YammerHistogramMetric networkOutboundLimiterQueueTimeStats = S3StreamMetricsManager.buildNetworkOutboundLimiterQueueTimeMetric( - new MetricName(NetworkStats.class, "NetworkOutboundLimiterQueueTime"), MetricsLevel.INFO); - - private NetworkStats() { - } - - public static NetworkStats getInstance() { - if (instance == null) { - synchronized (NetworkStats.class) { - if (instance == null) { - instance = new NetworkStats(); - } - } - } - return instance; - } - - public CounterMetric networkUsageStats(AsyncNetworkBandwidthLimiter.Type type) { - return type == AsyncNetworkBandwidthLimiter.Type.INBOUND ? networkInboundUsageStats : networkOutboundUsageStats; - } - - public YammerHistogramMetric networkLimiterQueueTimeStats(AsyncNetworkBandwidthLimiter.Type type) { - return type == AsyncNetworkBandwidthLimiter.Type.INBOUND ? 
networkInboundLimiterQueueTimeStats : networkOutboundLimiterQueueTimeStats; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3ObjectStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3ObjectStats.java deleted file mode 100644 index 4187a9ccf..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3ObjectStats.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.operations.S3ObjectStage; -import com.automq.stream.s3.metrics.wrapper.CounterMetric; -import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric; -import com.yammer.metrics.core.MetricName; - -public class S3ObjectStats { - private volatile static S3ObjectStats instance = null; - - public final CounterMetric objectNumInTotalStats = S3StreamMetricsManager.buildObjectNumMetric(); - public final YammerHistogramMetric objectStageUploadPartStats = S3StreamMetricsManager.buildObjectStageCostMetric( - new MetricName(S3ObjectStats.class, S3ObjectStage.UPLOAD_PART.getUniqueKey()), MetricsLevel.DEBUG, S3ObjectStage.UPLOAD_PART); - public final YammerHistogramMetric objectStageReadyCloseStats = S3StreamMetricsManager.buildObjectStageCostMetric( - new MetricName(S3ObjectStats.class, S3ObjectStage.READY_CLOSE.getUniqueKey()), MetricsLevel.DEBUG, S3ObjectStage.READY_CLOSE); - public final YammerHistogramMetric objectStageTotalStats = S3StreamMetricsManager.buildObjectStageCostMetric( - new MetricName(S3ObjectStats.class, S3ObjectStage.TOTAL.getUniqueKey()), MetricsLevel.DEBUG, S3ObjectStage.TOTAL); - public final YammerHistogramMetric objectUploadSizeStats = S3StreamMetricsManager.buildObjectUploadSizeMetric( - new MetricName(S3ObjectStats.class, "ObjectUploadSize"), MetricsLevel.DEBUG); - - private S3ObjectStats() { - } - - public static S3ObjectStats getInstance() { - if (instance == null) { - synchronized (S3ObjectStats.class) { - if (instance == null) { - instance = new S3ObjectStats(); - } - } - } - return instance; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3OperationStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3OperationStats.java deleted file mode 100644 index 1d6d8d719..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/S3OperationStats.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
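[Editor's illustration, not part of the patch] CompactionStats, NetworkStats, S3ObjectStats and the classes that follow all share the same lazily initialized singleton shape; the volatile field is what makes the unlocked first read safe. Reduced to its skeleton (class name hypothetical):

    public final class ExampleStats {
        private static volatile ExampleStats instance;

        private ExampleStats() {
        }

        public static ExampleStats getInstance() {
            if (instance == null) {              // fast path, no lock once initialized
                synchronized (ExampleStats.class) {
                    if (instance == null) {      // re-check under the lock
                        instance = new ExampleStats();
                    }
                }
            }
            return instance;
        }
    }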
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.AttributesUtils; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsConstant; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.operations.S3Operation; -import com.automq.stream.s3.metrics.wrapper.CounterMetric; -import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric; -import com.yammer.metrics.core.MetricName; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class S3OperationStats { - private volatile static S3OperationStats instance = null; - public final CounterMetric uploadSizeTotalStats = S3StreamMetricsManager.buildS3UploadSizeMetric(); - public final CounterMetric downloadSizeTotalStats = S3StreamMetricsManager.buildS3DownloadSizeMetric(); - private final Map getObjectSuccessStats = new ConcurrentHashMap<>(); - private final Map getObjectFailedStats = new ConcurrentHashMap<>(); - private final Map putObjectSuccessStats = new ConcurrentHashMap<>(); - private final Map putObjectFailedStats = new ConcurrentHashMap<>(); - private final YammerHistogramMetric deleteObjectSuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.DELETE_OBJECT.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.DELETE_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric deleteObjectFailedStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.DELETE_OBJECT.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.DELETE_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - private final YammerHistogramMetric deleteObjectsSuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.DELETE_OBJECTS.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.DELETE_OBJECTS, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric deleteObjectsFailedStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.DELETE_OBJECTS.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.DELETE_OBJECTS, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - private final YammerHistogramMetric createMultiPartUploadSuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.CREATE_MULTI_PART_UPLOAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.CREATE_MULTI_PART_UPLOAD, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric createMultiPartUploadFailedStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.CREATE_MULTI_PART_UPLOAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.CREATE_MULTI_PART_UPLOAD, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - private final Map 
uploadPartSuccessStats = new ConcurrentHashMap<>(); - private final Map<String, YammerHistogramMetric> uploadPartFailedStats = new ConcurrentHashMap<>(); - private final YammerHistogramMetric uploadPartCopySuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.UPLOAD_PART_COPY.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.UPLOAD_PART_COPY, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric uploadPartCopyFailedStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.UPLOAD_PART_COPY.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.UPLOAD_PART_COPY, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - private final YammerHistogramMetric completeMultiPartUploadSuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.COMPLETE_MULTI_PART_UPLOAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.COMPLETE_MULTI_PART_UPLOAD, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric completeMultiPartUploadFailedStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.COMPLETE_MULTI_PART_UPLOAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.COMPLETE_MULTI_PART_UPLOAD, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - - private S3OperationStats() { - } - - public static S3OperationStats getInstance() { - if (instance == null) { - synchronized (S3OperationStats.class) { - if (instance == null) { - instance = new S3OperationStats(); - } - } - } - return instance; - } - - public YammerHistogramMetric getObjectStats(long size, boolean isSuccess) { - String label = AttributesUtils.getObjectBucketLabel(size); - if (isSuccess) { - return getObjectSuccessStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.GET_OBJECT.getUniqueKey() - + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS + label), - MetricsLevel.INFO, S3Operation.GET_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS, label)); - } else { - return getObjectFailedStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.GET_OBJECT.getUniqueKey() - + S3StreamMetricsConstant.LABEL_STATUS_FAILED + label), - MetricsLevel.INFO, S3Operation.GET_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_FAILED, label)); - } - } - - public YammerHistogramMetric putObjectStats(long size, boolean isSuccess) { - String label = AttributesUtils.getObjectBucketLabel(size); - if (isSuccess) { - return putObjectSuccessStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.PUT_OBJECT.getUniqueKey() - + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS + label), - MetricsLevel.INFO, S3Operation.PUT_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS, label)); - } else { - return putObjectFailedStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.PUT_OBJECT.getUniqueKey() + - S3StreamMetricsConstant.LABEL_STATUS_FAILED + label), - MetricsLevel.INFO, S3Operation.PUT_OBJECT, S3StreamMetricsConstant.LABEL_STATUS_FAILED, label)); - } - } - - public 
YammerHistogramMetric uploadPartStats(long size, boolean isSuccess) { - String label = AttributesUtils.getObjectBucketLabel(size); - if (isSuccess) { - return uploadPartSuccessStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.UPLOAD_PART.getUniqueKey() + - S3StreamMetricsConstant.LABEL_STATUS_SUCCESS + label), - MetricsLevel.INFO, S3Operation.UPLOAD_PART, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS, label)); - } else { - return uploadPartFailedStats.computeIfAbsent(label, name -> S3StreamMetricsManager.buildOperationMetric( - new MetricName(S3OperationStats.class, S3Operation.UPLOAD_PART.getUniqueKey() + - S3StreamMetricsConstant.LABEL_STATUS_FAILED + label), - MetricsLevel.INFO, S3Operation.UPLOAD_PART, S3StreamMetricsConstant.LABEL_STATUS_FAILED, label)); - } - } - - public YammerHistogramMetric deleteObjectStats(boolean isSuccess) { - return isSuccess ? deleteObjectSuccessStats : deleteObjectFailedStats; - } - - public YammerHistogramMetric deleteObjectsStats(boolean isSuccess) { - return isSuccess ? deleteObjectsSuccessStats : deleteObjectsFailedStats; - } - - public YammerHistogramMetric uploadPartCopyStats(boolean isSuccess) { - return isSuccess ? uploadPartCopySuccessStats : uploadPartCopyFailedStats; - } - - public YammerHistogramMetric createMultiPartUploadStats(boolean isSuccess) { - return isSuccess ? createMultiPartUploadSuccessStats : createMultiPartUploadFailedStats; - } - - public YammerHistogramMetric completeMultiPartUploadStats(boolean isSuccess) { - return isSuccess ? completeMultiPartUploadSuccessStats : completeMultiPartUploadFailedStats; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StorageOperationStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StorageOperationStats.java deleted file mode 100644 index 8e46c592f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StorageOperationStats.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
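The get/put/uploadPart stats above lazily create one histogram per object-size bucket, keyed by a label string, so the metric set stays bounded while latencies are still split by payload size. A self-contained sketch of that caching shape (the power-of-two bucketing is assumed for illustration; the real AttributesUtils.getObjectBucketLabel may bucket differently):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;

    public class SizeBucketedCounters {
        private final Map<String, LongAdder> buckets = new ConcurrentHashMap<>();

        // Hypothetical bucketing: round the size up to the next power of two.
        static String bucketLabel(long size) {
            long bucket = Long.highestOneBit(Math.max(1, size));
            if (bucket < size) {
                bucket <<= 1;
            }
            return bucket + "B";
        }

        public void record(long size) {
            // computeIfAbsent creates the per-bucket counter on first use only.
            buckets.computeIfAbsent(bucketLabel(size), label -> new LongAdder()).increment();
        }
    }
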
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsConstant; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.operations.S3Operation; -import com.automq.stream.s3.metrics.operations.S3Stage; -import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric; -import com.yammer.metrics.core.MetricName; - -public class StorageOperationStats { - private volatile static StorageOperationStats instance = null; - - public final YammerHistogramMetric appendStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.APPEND_STORAGE.getUniqueKey()), MetricsLevel.INFO, S3Operation.APPEND_STORAGE); - public final YammerHistogramMetric appendWALBeforeStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_BEFORE.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.APPEND_WAL_BEFORE); - public final YammerHistogramMetric appendWALBlockPolledStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_BLOCK_POLLED.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.APPEND_WAL_BLOCK_POLLED); - public final YammerHistogramMetric appendWALAwaitStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_AWAIT.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.APPEND_WAL_AWAIT); - public final YammerHistogramMetric appendWALWriteStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_WRITE.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.APPEND_WAL_WRITE); - public final YammerHistogramMetric appendWALAfterStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_AFTER.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.APPEND_WAL_AFTER); - public final YammerHistogramMetric appendWALCompleteStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.APPEND_WAL_COMPLETE.getUniqueKey()), MetricsLevel.INFO, S3Stage.APPEND_WAL_COMPLETE); - public final YammerHistogramMetric appendCallbackStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.APPEND_STORAGE_APPEND_CALLBACK.getUniqueKey()), MetricsLevel.DEBUG, S3Operation.APPEND_STORAGE_APPEND_CALLBACK); - public final YammerHistogramMetric appendWALFullStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.APPEND_STORAGE_WAL_FULL.getUniqueKey()), MetricsLevel.INFO, S3Operation.APPEND_STORAGE_WAL_FULL); - public final YammerHistogramMetric appendLogCacheStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.APPEND_STORAGE_LOG_CACHE.getUniqueKey()), MetricsLevel.INFO, S3Operation.APPEND_STORAGE_LOG_CACHE); - public final YammerHistogramMetric appendLogCacheFullStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, 
S3Operation.APPEND_STORAGE_LOG_CACHE_FULL.getUniqueKey()), MetricsLevel.INFO, S3Operation.APPEND_STORAGE_LOG_CACHE_FULL); - public final YammerHistogramMetric uploadWALPrepareStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.UPLOAD_WAL_PREPARE.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.UPLOAD_WAL_PREPARE); - public final YammerHistogramMetric uploadWALUploadStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.UPLOAD_WAL_UPLOAD.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.UPLOAD_WAL_UPLOAD); - public final YammerHistogramMetric uploadWALCommitStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.UPLOAD_WAL_COMMIT.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.UPLOAD_WAL_COMMIT); - public final YammerHistogramMetric uploadWALCompleteStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.UPLOAD_WAL_COMPLETE.getUniqueKey()), MetricsLevel.INFO, S3Stage.UPLOAD_WAL_COMPLETE); - public final YammerHistogramMetric forceUploadWALAwaitStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.FORCE_UPLOAD_WAL_AWAIT.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.FORCE_UPLOAD_WAL_AWAIT); - public final YammerHistogramMetric forceUploadWALCompleteStats = S3StreamMetricsManager.buildStageOperationMetric( - new MetricName(StorageOperationStats.class, S3Stage.FORCE_UPLOAD_WAL_COMPLETE.getUniqueKey()), MetricsLevel.DEBUG, S3Stage.FORCE_UPLOAD_WAL_COMPLETE); - public final YammerHistogramMetric readStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.READ_STORAGE.getUniqueKey()), MetricsLevel.INFO, S3Operation.READ_STORAGE); - private final YammerHistogramMetric readLogCacheHitStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.READ_STORAGE_LOG_CACHE.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_HIT), - MetricsLevel.INFO, S3Operation.READ_STORAGE_LOG_CACHE, S3StreamMetricsConstant.LABEL_STATUS_HIT); - private final YammerHistogramMetric readLogCacheMissStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.READ_STORAGE_LOG_CACHE.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_MISS), - MetricsLevel.INFO, S3Operation.READ_STORAGE_LOG_CACHE, S3StreamMetricsConstant.LABEL_STATUS_MISS); - private final YammerHistogramMetric readBlockCacheHitStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.READ_STORAGE_BLOCK_CACHE.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_HIT), - MetricsLevel.INFO, S3Operation.READ_STORAGE_BLOCK_CACHE, S3StreamMetricsConstant.LABEL_STATUS_HIT); - private final YammerHistogramMetric readBlockCacheMissStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.READ_STORAGE_BLOCK_CACHE.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_MISS), - MetricsLevel.INFO, S3Operation.READ_STORAGE_BLOCK_CACHE, S3StreamMetricsConstant.LABEL_STATUS_MISS); - private final YammerHistogramMetric blockCacheReadAheadSyncStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.BLOCK_CACHE_READ_AHEAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SYNC), - 
MetricsLevel.INFO, S3Operation.BLOCK_CACHE_READ_AHEAD, S3StreamMetricsConstant.LABEL_STATUS_SYNC); - private final YammerHistogramMetric blockCacheReadAheadAsyncStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StorageOperationStats.class, S3Operation.BLOCK_CACHE_READ_AHEAD.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_ASYNC), - MetricsLevel.INFO, S3Operation.BLOCK_CACHE_READ_AHEAD, S3StreamMetricsConstant.LABEL_STATUS_ASYNC); - public final YammerHistogramMetric readAheadSizeStats = S3StreamMetricsManager.buildReadAheadSizeMetric( - new MetricName(StorageOperationStats.class, "ReadAheadSize"), MetricsLevel.INFO); - public final YammerHistogramMetric readAheadLimiterQueueTimeStats = S3StreamMetricsManager.buildReadAheadLimiterQueueTimeMetric( - new MetricName(StorageOperationStats.class, "ReadAheadLimitQueueTime"), MetricsLevel.INFO); - - private StorageOperationStats() { - } - - public static StorageOperationStats getInstance() { - if (instance == null) { - synchronized (StorageOperationStats.class) { - if (instance == null) { - instance = new StorageOperationStats(); - } - } - } - return instance; - } - - public YammerHistogramMetric readLogCacheStats(boolean isCacheHit) { - return isCacheHit ? readLogCacheHitStats : readLogCacheMissStats; - } - - public YammerHistogramMetric readBlockCacheStats(boolean isCacheHit) { - return isCacheHit ? readBlockCacheHitStats : readBlockCacheMissStats; - } - - public YammerHistogramMetric blockCacheReadAheadStats(boolean isSync) { - return isSync ? blockCacheReadAheadSyncStats : blockCacheReadAheadAsyncStats; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StreamOperationStats.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StreamOperationStats.java deleted file mode 100644 index f2f168b2c..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/stats/StreamOperationStats.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
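The hit/miss selectors above let a call site keep a single code path and pick the appropriate histogram at record time. A plausible call-site fragment (hedged: TimerUtil is used this way elsewhere in this patch, for example in DefaultS3Operator, but readFromBlockCache and this exact snippet are illustrative):

    TimerUtil timer = new TimerUtil();
    boolean hit = readFromBlockCache(streamId, offset);   // hypothetical cache lookup
    StorageOperationStats.getInstance()
        .readBlockCacheStats(hit)                         // hit or miss histogram
        .record(timer.elapsedAs(TimeUnit.NANOSECONDS));
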
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.stats; - -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsConstant; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.operations.S3Operation; -import com.automq.stream.s3.metrics.wrapper.YammerHistogramMetric; -import com.yammer.metrics.core.MetricName; - -public class StreamOperationStats { - private volatile static StreamOperationStats instance = null; - public final YammerHistogramMetric createStreamStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.CREATE_STREAM.getUniqueKey()), MetricsLevel.INFO, S3Operation.CREATE_STREAM); - public final YammerHistogramMetric openStreamStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.OPEN_STREAM.getUniqueKey()), MetricsLevel.INFO, S3Operation.OPEN_STREAM); - public final YammerHistogramMetric appendStreamStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.APPEND_STREAM.getUniqueKey()), MetricsLevel.INFO, S3Operation.APPEND_STREAM); - public final YammerHistogramMetric fetchStreamStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.FETCH_STREAM.getUniqueKey()), MetricsLevel.INFO, S3Operation.FETCH_STREAM); - public final YammerHistogramMetric trimStreamStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.TRIM_STREAM.getUniqueKey()), MetricsLevel.INFO, S3Operation.TRIM_STREAM); - private final YammerHistogramMetric closeStreamSuccessStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.CLOSE_STREAM.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_SUCCESS), - MetricsLevel.INFO, S3Operation.CLOSE_STREAM, S3StreamMetricsConstant.LABEL_STATUS_SUCCESS); - private final YammerHistogramMetric closeStreamFailStats = S3StreamMetricsManager.buildOperationMetric( - new MetricName(StreamOperationStats.class, S3Operation.CLOSE_STREAM.getUniqueKey() + S3StreamMetricsConstant.LABEL_STATUS_FAILED), - MetricsLevel.INFO, S3Operation.CLOSE_STREAM, S3StreamMetricsConstant.LABEL_STATUS_FAILED); - - private StreamOperationStats() { - } - - public static StreamOperationStats getInstance() { - if (instance == null) { - synchronized (StreamOperationStats.class) { - if (instance == null) { - instance = new StreamOperationStats(); - } - } - } - return instance; - } - - public YammerHistogramMetric closeStreamStats(boolean isSuccess) { - return isSuccess ? closeStreamSuccessStats : closeStreamFailStats; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigListener.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigListener.java deleted file mode 100644 index 11c7e93f2..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigListener.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.MetricsConfig; - -public interface ConfigListener { - void onConfigChange(MetricsConfig metricsConfig); -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigurableMetrics.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigurableMetrics.java deleted file mode 100644 index a82cffe1e..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/ConfigurableMetrics.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.MetricsConfig; -import com.automq.stream.s3.metrics.MetricsLevel; -import io.opentelemetry.api.common.Attributes; - -public class ConfigurableMetrics implements ConfigListener { - private final Attributes extraAttributes; - Attributes attributes; - MetricsLevel metricsLevel; - - public ConfigurableMetrics(MetricsConfig metricsConfig, Attributes extraAttributes) { - this.metricsLevel = metricsConfig.getMetricsLevel(); - this.extraAttributes = extraAttributes; - this.attributes = buildAttributes(metricsConfig.getBaseAttributes()); - } - - Attributes buildAttributes(Attributes baseAttributes) { - return Attributes.builder() - .putAll(baseAttributes) - .putAll(this.extraAttributes).build(); - } - - @Override - public void onConfigChange(MetricsConfig metricsConfig) { - this.metricsLevel = metricsConfig.getMetricsLevel(); - this.attributes = buildAttributes(metricsConfig.getBaseAttributes()); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/CounterMetric.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/CounterMetric.java deleted file mode 100644 index 51d167340..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/CounterMetric.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
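ConfigurableMetrics rebuilds its attribute set whenever the shared MetricsConfig changes; presumably the metrics manager keeps every wrapper registered as a ConfigListener and fans a new config out to all of them. A minimal sketch of that fan-out (the registry class here is hypothetical, not the actual S3StreamMetricsManager field):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class MetricsConfigRegistry {
        private final List<ConfigListener> listeners = new CopyOnWriteArrayList<>();

        public void register(ConfigListener listener) {
            listeners.add(listener);
        }

        // Push a new level / base-attribute set to every wrapper metric.
        public void onConfigChange(MetricsConfig newConfig) {
            listeners.forEach(l -> l.onConfigChange(newConfig));
        }
    }
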
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.MetricsConfig; -import com.automq.stream.s3.metrics.MetricsLevel; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.LongCounter; - -public class CounterMetric extends ConfigurableMetrics { - private final LongCounter longCounter; - - public CounterMetric(MetricsConfig metricsConfig, LongCounter longCounter) { - super(metricsConfig, Attributes.empty()); - this.longCounter = longCounter; - } - - public CounterMetric(MetricsConfig metricsConfig, Attributes extraAttributes, LongCounter longCounter) { - super(metricsConfig, extraAttributes); - this.longCounter = longCounter; - } - - public boolean add(MetricsLevel metricsLevel, long value) { - if (metricsLevel.isWithin(this.metricsLevel)) { - longCounter.add(value, attributes); - return true; - } - return false; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/HistogramInstrument.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/HistogramInstrument.java deleted file mode 100644 index 2b9066020..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/HistogramInstrument.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
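CounterMetric.add records only when the level passed at the call site is within the currently configured level, so hot-path instrumentation can stay compiled in and be gated at runtime. A sketch of how such a check could look (assumption: isWithin compares enum ordinals, with INFO coarser than DEBUG; the real MetricsLevel may define it differently):

    public enum Level {
        INFO, DEBUG;

        // A metric emitted at `this` level is recorded when the configured
        // level is at least as verbose.
        public boolean isWithin(Level configured) {
            return this.ordinal() <= configured.ordinal();
        }
    }

    // With a configured level of INFO:
    //   Level.INFO.isWithin(Level.INFO)  -> true  (recorded)
    //   Level.DEBUG.isWithin(Level.INFO) -> false (dropped)
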
- */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.S3StreamMetricsConstant; -import io.opentelemetry.api.metrics.Meter; -import io.opentelemetry.api.metrics.ObservableDoubleGauge; -import io.opentelemetry.api.metrics.ObservableLongGauge; -import java.util.List; -import java.util.function.Supplier; - -public class HistogramInstrument { - private final ObservableLongGauge count; - private final ObservableLongGauge sum; - private final ObservableDoubleGauge histP50Value; - private final ObservableDoubleGauge histP99Value; - private final ObservableDoubleGauge histMeanValue; - private final ObservableDoubleGauge histMaxValue; - - public HistogramInstrument(Meter meter, String name, String desc, String unit, Supplier<List<YammerHistogramMetric>> histogramsSupplier) { - this.count = meter.gaugeBuilder(name + S3StreamMetricsConstant.COUNT_METRIC_NAME_SUFFIX) - .setDescription(desc + " (count)") - .ofLongs() - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.count(), histogram.attributes); - } - }); - }); - this.sum = meter.gaugeBuilder(name + S3StreamMetricsConstant.SUM_METRIC_NAME_SUFFIX) - .setDescription(desc + " (sum)") - .ofLongs() - .setUnit(unit) - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.sum(), histogram.attributes); - } - }); - }); - this.histP50Value = meter.gaugeBuilder(name + S3StreamMetricsConstant.P50_METRIC_NAME_SUFFIX) - .setDescription(desc + " (50th percentile)") - .setUnit(unit) - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.p50(), histogram.attributes); - } - }); - }); - this.histP99Value = meter.gaugeBuilder(name + S3StreamMetricsConstant.P99_METRIC_NAME_SUFFIX) - .setDescription(desc + " (99th percentile)") - .setUnit(unit) - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.p99(), histogram.attributes); - } - }); - }); - this.histMeanValue = meter.gaugeBuilder(name + S3StreamMetricsConstant.MEAN_METRIC_NAME_SUFFIX) - .setDescription(desc + " (mean)") - .setUnit(unit) - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.mean(), histogram.attributes); - } - }); - }); - this.histMaxValue = meter.gaugeBuilder(name + S3StreamMetricsConstant.MAX_METRIC_NAME_SUFFIX) - .setDescription(desc + " (max)") - .setUnit(unit) - .buildWithCallback(result -> { - List<YammerHistogramMetric> histograms = histogramsSupplier.get(); - histograms.forEach(histogram -> { - if (histogram.shouldRecord()) { - result.record(histogram.max(), histogram.attributes); - } - }); - }); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/YammerHistogramMetric.java b/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/YammerHistogramMetric.java deleted file mode 100644 index a7b62d50d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/metrics/wrapper/YammerHistogramMetric.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
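HistogramInstrument bridges pull-based Yammer histograms into OpenTelemetry by registering one observable gauge per statistic and reading the histogram snapshot inside the gauge callback. A trimmed sketch of that bridge for a single p99 gauge, assumed to live in the same package so the package-visible attributes field is reachable (names and suffix are illustrative; the OpenTelemetry callback API is used as in the deleted class):

    import io.opentelemetry.api.metrics.Meter;
    import io.opentelemetry.api.metrics.ObservableDoubleGauge;
    import java.util.List;
    import java.util.function.Supplier;

    static ObservableDoubleGauge registerP99(Meter meter, String name,
            Supplier<List<YammerHistogramMetric>> histograms) {
        return meter.gaugeBuilder(name + "_p99")
            .setDescription(name + " (99th percentile)")
            .buildWithCallback(measurement -> histograms.get().forEach(h -> {
                if (h.shouldRecord()) {          // respect the configured metrics level
                    measurement.record(h.p99(), h.attributes);
                }
            }));
    }
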
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.MetricsConfig; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.yammer.metrics.core.Histogram; -import com.yammer.metrics.core.MetricName; -import io.opentelemetry.api.common.Attributes; - -public class YammerHistogramMetric extends ConfigurableMetrics { - private final Histogram histogram; - private final MetricsLevel currentMetricsLevel; - - public YammerHistogramMetric(MetricName metricName, MetricsLevel currentMetricsLevel, MetricsConfig metricsConfig) { - this(metricName, currentMetricsLevel, metricsConfig, Attributes.empty()); - } - - public YammerHistogramMetric(MetricName metricName, MetricsLevel currentMetricsLevel, MetricsConfig metricsConfig, Attributes extraAttributes) { - super(metricsConfig, extraAttributes); - this.histogram = S3StreamMetricsManager.METRICS_REGISTRY.newHistogram(metricName, true); - this.currentMetricsLevel = currentMetricsLevel; - } - - public long count() { - return histogram.count(); - } - - public long sum() { - return (long) histogram.sum(); - } - - public double p50() { - return histogram.getSnapshot().getMedian(); - } - - public double p99() { - return histogram.getSnapshot().get99thPercentile(); - } - - public double mean() { - return histogram.mean(); - } - - public double max() { - return histogram.max(); - } - - public void record(long value) { - histogram.update(value); - } - - public boolean shouldRecord() { - return currentMetricsLevel.isWithin(metricsLevel); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/model/StreamRecordBatch.java b/s3stream/src/main/java/com/automq/stream/s3/model/StreamRecordBatch.java deleted file mode 100644 index 801346f30..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/model/StreamRecordBatch.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.model; - -import com.automq.stream.s3.StreamRecordBatchCodec; -import com.automq.stream.utils.biniarysearch.ComparableItem; -import io.netty.buffer.ByteBuf; - -public class StreamRecordBatch implements Comparable, ComparableItem { - public static final int OBJECT_OVERHEAD = 52; - private final long streamId; - private final long epoch; - private final long baseOffset; - private final int count; - private ByteBuf payload; - private ByteBuf encoded; - - public StreamRecordBatch(long streamId, long epoch, long baseOffset, int count, ByteBuf payload) { - this.streamId = streamId; - this.epoch = epoch; - this.baseOffset = baseOffset; - this.count = count; - this.payload = payload; - } - - public ByteBuf encoded() { - // TODO: keep the ref count - if (encoded == null) { - encoded = StreamRecordBatchCodec.encode(this); - ByteBuf oldPayload = payload; - payload = encoded.slice(encoded.readerIndex() + encoded.readableBytes() - payload.readableBytes(), payload.readableBytes()); - oldPayload.release(); - } - return encoded.duplicate(); - } - - public long getStreamId() { - return streamId; - } - - public long getEpoch() { - return epoch; - } - - public long getBaseOffset() { - return baseOffset; - } - - public long getLastOffset() { - return baseOffset + count; - } - - public int getCount() { - return count; - } - - public ByteBuf getPayload() { - return payload; - } - - public int size() { - return payload.readableBytes(); - } - - public void retain() { - if (encoded != null) { - encoded.retain(); - } else { - payload.retain(); - } - } - - public void release() { - if (encoded != null) { - encoded.release(); - } else { - payload.release(); - } - } - - @Override - public int compareTo(StreamRecordBatch o) { - int rst = Long.compare(streamId, o.streamId); - if (rst != 0) { - return rst; - } - rst = Long.compare(epoch, o.epoch); - if (rst != 0) { - return rst; - } - return Long.compare(baseOffset, o.baseOffset); - } - - @Override - public String toString() { - return "StreamRecordBatch{" + - "streamId=" + streamId + - ", epoch=" + epoch + - ", baseOffset=" + baseOffset + - ", count=" + count + - ", size=" + size() + '}'; - } - - @Override - public boolean isLessThan(Long value) { - return getLastOffset() <= value; - } - - @Override - public boolean isGreaterThan(Long value) { - return getBaseOffset() > value; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/network/AsyncNetworkBandwidthLimiter.java b/s3stream/src/main/java/com/automq/stream/s3/network/AsyncNetworkBandwidthLimiter.java deleted file mode 100644 index 6f0b74402..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/network/AsyncNetworkBandwidthLimiter.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
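StreamRecordBatch orders itself by (streamId, epoch, baseOffset), and the ComparableItem methods define the half-open offset containment used by the binary search utilities: a batch is "less than" an offset once its last offset is at or below it, and "greater than" once its base offset lies beyond it. A small illustrative check (the empty payload is just for the example):

    import io.netty.buffer.Unpooled;

    StreamRecordBatch batch = new StreamRecordBatch(1L, 0L, 100L, 10, Unpooled.EMPTY_BUFFER);

    // Covers offsets [100, 110): lastOffset = baseOffset + count = 110.
    assert !batch.isLessThan(105L) && !batch.isGreaterThan(105L); // 105 falls inside
    assert batch.isLessThan(110L);                                // entirely below 110
    assert batch.isGreaterThan(99L);                              // entirely above 99
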
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.network; - -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.stats.NetworkStats; -import io.netty.util.concurrent.DefaultThreadFactory; -import java.util.Objects; -import java.util.PriorityQueue; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -public class AsyncNetworkBandwidthLimiter { - private final Lock lock = new ReentrantLock(); - private final Condition condition = lock.newCondition(); - private final long maxTokens; - private final ScheduledExecutorService refillThreadPool; - private final ExecutorService callbackThreadPool; - private final Queue queuedCallbacks; - private final Type type; - private long availableTokens; - - public AsyncNetworkBandwidthLimiter(Type type, long tokenSize, int refillIntervalMs, long maxTokenSize) { - this.type = type; - this.availableTokens = tokenSize; - this.maxTokens = maxTokenSize; - this.queuedCallbacks = new PriorityQueue<>(); - this.refillThreadPool = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("refill-bucket-thread")); - this.callbackThreadPool = Executors.newFixedThreadPool(1, new DefaultThreadFactory("callback-thread")); - this.callbackThreadPool.execute(() -> { - while (true) { - lock.lock(); - try { - while (queuedCallbacks.isEmpty() || availableTokens <= 0) { - condition.await(); - } - while (!queuedCallbacks.isEmpty() && availableTokens > 0) { - BucketItem head = queuedCallbacks.poll(); - availableTokens -= head.size; - logMetrics(head.size); - head.cf.complete(null); - } - } catch (InterruptedException ignored) { - break; - } finally { - lock.unlock(); - } - } - }); - this.refillThreadPool.scheduleAtFixedRate(() -> { - lock.lock(); - try { - availableTokens = Math.min(availableTokens + tokenSize, maxTokenSize); - condition.signalAll(); - } finally { - lock.unlock(); - } - }, refillIntervalMs, refillIntervalMs, TimeUnit.MILLISECONDS); - S3StreamMetricsManager.registerNetworkLimiterSupplier(type, this::getAvailableTokens, this::getQueueSize); - } - - public void shutdown() { - this.callbackThreadPool.shutdown(); - this.refillThreadPool.shutdown(); - } - - public long getMaxTokens() { - return maxTokens; - } - - public long getAvailableTokens() { - lock.lock(); - try { - return availableTokens; - } finally { - lock.unlock(); - } - } - - public int getQueueSize() { - lock.lock(); - try { - return queuedCallbacks.size(); - } finally { - lock.unlock(); - } - } - - public void forceConsume(long size) { - lock.lock(); - try { - availableTokens -= size; - logMetrics(size); - } finally { - lock.unlock(); - } - } - - public CompletableFuture consume(ThrottleStrategy throttleStrategy, long size) { - CompletableFuture cf = new CompletableFuture<>(); - if (Objects.requireNonNull(throttleStrategy) == ThrottleStrategy.BYPASS) { - forceConsume(size); - cf.complete(null); - } 
else { - cf = consume(throttleStrategy.priority(), size); - } - return cf; - } - - private CompletableFuture consume(int priority, long size) { - CompletableFuture cf = new CompletableFuture<>(); - lock.lock(); - try { - if (availableTokens < 0 || !queuedCallbacks.isEmpty()) { - queuedCallbacks.add(new BucketItem(priority, size, cf)); - condition.signalAll(); - } else { - availableTokens -= size; - cf.complete(null); - logMetrics(size); - } - } finally { - lock.unlock(); - } - return cf; - } - - private void logMetrics(long size) { - NetworkStats.getInstance().networkUsageStats(type).add(MetricsLevel.INFO, size); - } - - public enum Type { - INBOUND("Inbound"), - OUTBOUND("Outbound"); - - private final String name; - - Type(String name) { - this.name = name; - } - - public String getName() { - return name; - } - } - - static final class BucketItem implements Comparable { - private final int priority; - private final long size; - private final CompletableFuture cf; - - BucketItem(int priority, long size, CompletableFuture cf) { - this.priority = priority; - this.size = size; - this.cf = cf; - } - - @Override - public int compareTo(BucketItem o) { - return Long.compare(priority, o.priority); - } - - public int priority() { - return priority; - } - - public long size() { - return size; - } - - public CompletableFuture cf() { - return cf; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (BucketItem) obj; - return this.priority == that.priority && - this.size == that.size && - Objects.equals(this.cf, that.cf); - } - - @Override - public int hashCode() { - return Objects.hash(priority, size, cf); - } - - @Override - public String toString() { - return "BucketItem[" + - "priority=" + priority + ", " + - "size=" + size + ", " + - "cf=" + cf + ']'; - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/network/ThrottleStrategy.java b/s3stream/src/main/java/com/automq/stream/s3/network/ThrottleStrategy.java deleted file mode 100644 index 7a3547d25..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/network/ThrottleStrategy.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.network; - -public enum ThrottleStrategy { - BYPASS(0), - THROTTLE_1(1), - THROTTLE_2(2); - - private final int priority; - - ThrottleStrategy(int priority) { - this.priority = priority; - } - - public int priority() { - return priority; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectRequest.java b/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectRequest.java deleted file mode 100644 index 71d206955..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectRequest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
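The limiter above is a token bucket: a scheduled task refills tokens every refillIntervalMs up to the cap, BYPASS traffic debits immediately (the balance may go negative), and throttled requests queue in priority order until the balance turns positive again. A usage sketch with assumed sizing (10 MiB refill per 100 ms tick, 100 MiB burst; the numbers are illustrative):

    AsyncNetworkBandwidthLimiter inbound = new AsyncNetworkBandwidthLimiter(
        AsyncNetworkBandwidthLimiter.Type.INBOUND,
        10L * 1024 * 1024,   // tokens added per refill tick
        100,                 // refill interval in ms
        100L * 1024 * 1024); // token cap

    // BYPASS: debit now, never queue (for traffic that must not stall).
    inbound.consume(ThrottleStrategy.BYPASS, 4096);

    // THROTTLE_1: completes once enough tokens are available, in priority order.
    inbound.consume(ThrottleStrategy.THROTTLE_1, 1024 * 1024)
        .thenRun(() -> { /* proceed with the S3 range read */ });
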
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -public class CommitStreamSetObjectRequest { - - /** - * The object id of the stream set object. - */ - private long objectId; - - /** - * The order id of the stream set object. - *

- * When the stream set object is generated by compacting, the order id is the first compacted object's order id. - */ - private long orderId; - - /** - * The real size of the stream set object in Storage. - */ - private long objectSize; - /** - * The stream ranges of the stream set object. - *

- * The stream ranges are sorted by [stream][epoch][startOffset] - */ - private List streamRanges; - - /** - * The stream objects which split from the stream set object. - *

- * The stream objects are sorted by [stream][startOffset] - */ - private List streamObjects; - - /** - * The object ids which are compacted by the stream set object. - */ - private List compactedObjectIds; - - public long getObjectId() { - return objectId; - } - - public void setObjectId(long objectId) { - this.objectId = objectId; - } - - public long getObjectSize() { - return objectSize; - } - - public void setObjectSize(long objectSize) { - this.objectSize = objectSize; - } - - public List getCompactedObjectIds() { - if (compactedObjectIds == null) { - return Collections.emptyList(); - } - return compactedObjectIds; - } - - public void setCompactedObjectIds(List compactedObjectIds) { - this.compactedObjectIds = compactedObjectIds; - } - - public List getStreamRanges() { - if (streamRanges == null) { - return Collections.emptyList(); - } - return streamRanges; - } - - public void setStreamRanges(List streamRanges) { - this.streamRanges = streamRanges; - } - - public void addStreamRange(ObjectStreamRange streamRange) { - if (streamRanges == null) { - streamRanges = new LinkedList<>(); - } - streamRanges.add(streamRange); - } - - public List getStreamObjects() { - if (streamObjects == null) { - return Collections.emptyList(); - } - return streamObjects; - } - - public void setStreamObjects(List streamObjects) { - this.streamObjects = streamObjects; - } - - public void addStreamObject(StreamObject streamObject) { - if (streamObjects == null) { - streamObjects = new LinkedList<>(); - } - streamObjects.add(streamObject); - } - - public long getOrderId() { - return orderId; - } - - public void setOrderId(long orderId) { - this.orderId = orderId; - } - - @Override - public String toString() { - return "CommitStreamSetObjectRequest{" + - "objectId=" + objectId + - ", orderId=" + orderId + - ", objectSize=" + objectSize + - ", streamRanges=" + streamRanges + - ", streamObjects=" + streamObjects + - ", compactedObjectIds=" + compactedObjectIds + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectResponse.java b/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectResponse.java deleted file mode 100644 index 60a95b1f9..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/CommitStreamSetObjectResponse.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -/** - * Commit stream set object response. - */ -public class CommitStreamSetObjectResponse { -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/CompactStreamObjectRequest.java b/s3stream/src/main/java/com/automq/stream/s3/objects/CompactStreamObjectRequest.java deleted file mode 100644 index 508a89d3f..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/CompactStreamObjectRequest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
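A commit request is assembled incrementally during upload: stream ranges and split stream objects are appended as they are produced, and the compacted object ids record what the new stream set object replaces. An illustrative sketch (all field values are made up):

    import java.util.List;

    CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest();
    request.setObjectId(42L);
    request.setOrderId(42L);            // first compacted object's order id when compacting
    request.setObjectSize(64L * 1024 * 1024);
    // Ranges must stay sorted by [stream][epoch][startOffset].
    request.addStreamRange(new ObjectStreamRange(1L, 0L, 0L, 1000L, 8 * 1024 * 1024));
    request.addStreamRange(new ObjectStreamRange(2L, 0L, 500L, 900L, 4 * 1024 * 1024));
    request.setCompactedObjectIds(List.of(40L, 41L));
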
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -import java.util.List; - -public class CompactStreamObjectRequest { - private long objectId; - private long objectSize; - private long streamId; - private long startOffset; - private long endOffset; - private final long streamEpoch; - /** - * The source objects' id of the stream object. - */ - private List sourceObjectIds; - - public CompactStreamObjectRequest(long objectId, long objectSize, long streamId, long streamEpoch, long startOffset, - long endOffset, List sourceObjectIds) { - this.objectId = objectId; - this.objectSize = objectSize; - this.streamId = streamId; - this.startOffset = startOffset; - this.endOffset = endOffset; - this.streamEpoch = streamEpoch; - this.sourceObjectIds = sourceObjectIds; - } - - public long getObjectId() { - return objectId; - } - - public void setObjectId(long objectId) { - this.objectId = objectId; - } - - public long getObjectSize() { - return objectSize; - } - - public void setObjectSize(long objectSize) { - this.objectSize = objectSize; - } - - public long getStreamId() { - return streamId; - } - - public void setStreamId(long streamId) { - this.streamId = streamId; - } - - public long getStartOffset() { - return startOffset; - } - - public void setStartOffset(long startOffset) { - this.startOffset = startOffset; - } - - public long getEndOffset() { - return endOffset; - } - - public void setEndOffset(long endOffset) { - this.endOffset = endOffset; - } - - public long getStreamEpoch() { - return streamEpoch; - } - - public List getSourceObjectIds() { - return sourceObjectIds; - } - - public void setSourceObjectIds(List sourceObjectIds) { - this.sourceObjectIds = sourceObjectIds; - } - - @Override - public String toString() { - return "CommitStreamObjectRequest{" + - "objectId=" + objectId + - ", objectSize=" + objectSize + - ", streamId=" + streamId + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - ", streamEpoch=" + streamEpoch + - ", sourceObjectIds=" + sourceObjectIds + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectManager.java b/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectManager.java deleted file mode 100644 index 27a3f1a9a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectManager.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -/** - * Object metadata registry. - */ -public interface ObjectManager { - - /** - * Prepare object id for write, if the objects is not committed in ttl, then delete it. - * - * @param count object id count. - * @param ttl ttl in milliseconds. - * @return object id range start. - */ - CompletableFuture prepareObject(int count, long ttl); - - /** - * Commit stream set object. 
- * - * @param request {@link CommitStreamSetObjectRequest} - * @return {@link CommitStreamSetObjectResponse} - */ - CompletableFuture commitStreamSetObject(CommitStreamSetObjectRequest request); - - /** - * Compact stream object. When the source object has no reference, then delete it. - * - * @param request {@link CompactStreamObjectRequest} - */ - CompletableFuture compactStreamObject(CompactStreamObjectRequest request); - - /** - * Get objects by stream range. - * When obj1 contains stream0 [0, 100) [200, 300) and obj2 contains stream1 [100, 200), - * expect getObjects(streamId, 0, 300) return [obj1, obj2, obj1] - *

    - *
  • Concern two types of objects: stream object and stream set object. - *
  • Returned objects must be continuous of stream range. - *
  • Returned objects aren't physical object concept, they are logical object concept. - * (regard each returned object-metadata as a slice of object) - *
- * - * @param streamId stream id. - * @param startOffset get range start offset. - * @param endOffset get range end offset. NOOP_OFFSET represent endOffset is unlimited. - * @param limit max object range count. - * @return {@link S3ObjectMetadata} - */ - CompletableFuture> getObjects(long streamId, long startOffset, long endOffset, int limit); - - /** - * Get current server stream set objects. - * When server is starting, server need server stream set objects to recover. - */ - CompletableFuture> getServerObjects(); - - /** - * Get stream objects by stream range. - *
    - *
  • Only concern about stream objects, ignore stream set objects. - *
  • Returned stream objects can be discontinuous of stream range. - *
  • Ranges of the returned stream objects are in ascending order. - *
- * - * @param streamId stream id. - * @param startOffset get range start offset. - * @param endOffset get range end offset. - * @param limit max object count. - * @return {@link S3ObjectMetadata} - */ - CompletableFuture> getStreamObjects(long streamId, long startOffset, long endOffset, - int limit); -} - diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectStreamRange.java b/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectStreamRange.java deleted file mode 100644 index 09df0c5fa..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/ObjectStreamRange.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -public class ObjectStreamRange { - private long streamId; - private long epoch; - private long startOffset; - private long endOffset; - private int size; - - public ObjectStreamRange() { - } - - public ObjectStreamRange(long streamId, long epoch, long startOffset, long endOffset, int size) { - this.streamId = streamId; - this.epoch = epoch; - this.startOffset = startOffset; - this.endOffset = endOffset; - this.size = size; - } - - public long getStreamId() { - return streamId; - } - - public void setStreamId(long streamId) { - this.streamId = streamId; - } - - public long getEpoch() { - return epoch; - } - - public void setEpoch(long epoch) { - this.epoch = epoch; - } - - public long getStartOffset() { - return startOffset; - } - - public void setStartOffset(long startOffset) { - this.startOffset = startOffset; - } - - public long getEndOffset() { - return endOffset; - } - - public void setEndOffset(long endOffset) { - this.endOffset = endOffset; - } - - public int getSize() { - return size; - } - - public void setSize(int size) { - this.size = size; - } - - @Override - public String toString() { - return "(" + streamId + "-" + epoch + "," + startOffset + "-" + endOffset + "-" + size + ")"; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/objects/StreamObject.java b/s3stream/src/main/java/com/automq/stream/s3/objects/StreamObject.java deleted file mode 100644 index 778101313..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/objects/StreamObject.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -import java.util.List; - -public class StreamObject { - private long objectId; - private long objectSize; - private long streamId; - private long startOffset; - private long endOffset; - - /** - * The source objects' id of the stream object. 
- */ - private List sourceObjectIds; - - public long getObjectId() { - return objectId; - } - - public void setObjectId(long objectId) { - this.objectId = objectId; - } - - public long getObjectSize() { - return objectSize; - } - - public void setObjectSize(long objectSize) { - this.objectSize = objectSize; - } - - public long getStreamId() { - return streamId; - } - - public void setStreamId(long streamId) { - this.streamId = streamId; - } - - public long getStartOffset() { - return startOffset; - } - - public void setStartOffset(long startOffset) { - this.startOffset = startOffset; - } - - public long getEndOffset() { - return endOffset; - } - - public void setEndOffset(long endOffset) { - this.endOffset = endOffset; - } - - public List getSourceObjectIds() { - return sourceObjectIds; - } - - public void setSourceObjectIds(List sourceObjectIds) { - this.sourceObjectIds = sourceObjectIds; - } - - @Override - public String toString() { - return "StreamObject{" + - "objectId=" + objectId + - ", objectSize=" + objectSize + - ", streamId=" + streamId + - ", startOffset=" + startOffset + - ", endOffset=" + endOffset + - ", sourceObjectIds=" + sourceObjectIds + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/DefaultS3Operator.java b/s3stream/src/main/java/com/automq/stream/s3/operator/DefaultS3Operator.java deleted file mode 100644 index 43c274d3b..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/operator/DefaultS3Operator.java +++ /dev/null @@ -1,931 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.S3StreamMetricsManager; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.NetworkStats; -import com.automq.stream.s3.metrics.stats.S3OperationStats; -import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.S3Utils; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import com.automq.stream.utils.Utils; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.util.HashedWheelTimer; -import io.netty.util.Timeout; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.exception.ExceptionUtils; 
-import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; -import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.async.AsyncResponseTransformer; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.http.HttpStatusCode; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; -import software.amazon.awssdk.services.s3.model.CompletedPart; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.Delete; -import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.DeletedObject; -import software.amazon.awssdk.services.s3.model.GetObjectRequest; -import software.amazon.awssdk.services.s3.model.NoSuchBucketException; -import software.amazon.awssdk.services.s3.model.ObjectIdentifier; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.S3Exception; -import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.services.s3.model.UploadPartResponse; - -import static com.automq.stream.utils.FutureUtil.cause; - -public class DefaultS3Operator implements S3Operator { - private static final Logger LOGGER = LoggerFactory.getLogger(DefaultS3Operator.class); - public final float maxMergeReadSparsityRate; - private final String bucket; - private final S3AsyncClient writeS3Client; - private final S3AsyncClient readS3Client; - private final Semaphore inflightWriteLimiter; - private final Semaphore inflightReadLimiter; - private final List waitingReadTasks = new LinkedList<>(); - private final AsyncNetworkBandwidthLimiter networkInboundBandwidthLimiter; - private final AsyncNetworkBandwidthLimiter networkOutboundBandwidthLimiter; - private final ScheduledExecutorService scheduler = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("s3operator", true), LOGGER); - private final ExecutorService readLimiterCallbackExecutor = Threads.newFixedThreadPoolWithMonitor(1, - "s3-read-limiter-cb-executor", true, LOGGER); - private final ExecutorService writeLimiterCallbackExecutor = Threads.newFixedThreadPoolWithMonitor(1, - "s3-write-limiter-cb-executor", true, LOGGER); - private final ExecutorService readCallbackExecutor = Threads.newFixedThreadPoolWithMonitor(1, - "s3-read-cb-executor", true, LOGGER); - private final ExecutorService writeCallbackExecutor = Threads.newFixedThreadPoolWithMonitor(1, - "s3-write-cb-executor", true, LOGGER); - private final HashedWheelTimer timeoutDetect = new HashedWheelTimer( - ThreadUtils.createThreadFactory("s3-timeout-detect", true), 1, TimeUnit.SECONDS, 100); - - public DefaultS3Operator(String endpoint, String region, String bucket, boolean 
forcePathStyle, - List credentialsProviders) { - this(endpoint, region, bucket, forcePathStyle, credentialsProviders, null, null, false); - } - - public DefaultS3Operator(String endpoint, String region, String bucket, boolean forcePathStyle, - List credentialsProviders, - AsyncNetworkBandwidthLimiter networkInboundBandwidthLimiter, - AsyncNetworkBandwidthLimiter networkOutboundBandwidthLimiter, boolean readWriteIsolate) { - this.maxMergeReadSparsityRate = Utils.getMaxMergeReadSparsityRate(); - this.networkInboundBandwidthLimiter = networkInboundBandwidthLimiter; - this.networkOutboundBandwidthLimiter = networkOutboundBandwidthLimiter; - this.writeS3Client = newS3Client(endpoint, region, forcePathStyle, credentialsProviders); - this.readS3Client = readWriteIsolate ? newS3Client(endpoint, region, forcePathStyle, credentialsProviders) : writeS3Client; - this.inflightWriteLimiter = new Semaphore(50); - this.inflightReadLimiter = readWriteIsolate ? new Semaphore(50) : inflightWriteLimiter; - this.bucket = bucket; - scheduler.scheduleWithFixedDelay(this::tryMergeRead, 1, 1, TimeUnit.MILLISECONDS); - checkConfig(); - S3Utils.S3Context s3Context = S3Utils.S3Context.builder() - .setEndpoint(endpoint) - .setRegion(region) - .setBucketName(bucket) - .setForcePathStyle(forcePathStyle) - .setCredentialsProviders(credentialsProviders) - .build(); - LOGGER.info("You are using s3Context: {}", s3Context); - checkAvailable(s3Context); - S3StreamMetricsManager.registerInflightS3ReadQuotaSupplier(inflightReadLimiter::availablePermits); - S3StreamMetricsManager.registerInflightS3WriteQuotaSupplier(inflightWriteLimiter::availablePermits); - } - - // used for test only. - public DefaultS3Operator(S3AsyncClient s3Client, String bucket) { - this(s3Client, bucket, false); - } - - // used for test only. - DefaultS3Operator(S3AsyncClient s3Client, String bucket, boolean manualMergeRead) { - this.maxMergeReadSparsityRate = Utils.getMaxMergeReadSparsityRate(); - this.writeS3Client = s3Client; - this.readS3Client = s3Client; - this.bucket = bucket; - this.networkInboundBandwidthLimiter = null; - this.networkOutboundBandwidthLimiter = null; - this.inflightWriteLimiter = new Semaphore(50); - this.inflightReadLimiter = new Semaphore(50); - if (!manualMergeRead) { - scheduler.scheduleWithFixedDelay(this::tryMergeRead, 1, 1, TimeUnit.MILLISECONDS); - } - } - - public static Builder builder() { - return new Builder(); - } - - private static boolean checkPartNumbers(CompletedMultipartUpload multipartUpload) { - Optional maxOpt = multipartUpload.parts().stream().map(CompletedPart::partNumber).max(Integer::compareTo); - return maxOpt.isPresent() && maxOpt.get() == multipartUpload.parts().size(); - } - - private static boolean isUnrecoverable(Throwable ex) { - ex = cause(ex); - if (ex instanceof S3Exception) { - S3Exception s3Ex = (S3Exception) ex; - return s3Ex.statusCode() == HttpStatusCode.FORBIDDEN || s3Ex.statusCode() == HttpStatusCode.NOT_FOUND; - } - return false; - } - - @Override - public void close() { - // TODO: complete in-flight CompletableFuture with ClosedException. 
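// A minimal sketch of the TODO above (editorial illustration, not in the original
// source; the field name `inflightCfs` and the exception type are assumptions):
// track every in-flight CompletableFuture in a concurrent set, then fail the
// stragglers before shutting the clients and executors down, e.g.
//
//     private final Set<CompletableFuture<?>> inflightCfs = ConcurrentHashMap.newKeySet();
//     ...
//     inflightCfs.forEach(cf -> cf.completeExceptionally(
//         new IllegalStateException("DefaultS3Operator is closed")));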
-        writeS3Client.close();
-        if (readS3Client != writeS3Client) {
-            readS3Client.close();
-        }
-        scheduler.shutdown();
-        readLimiterCallbackExecutor.shutdown();
-        writeLimiterCallbackExecutor.shutdown();
-        readCallbackExecutor.shutdown();
-        writeCallbackExecutor.shutdown();
-    }
-
-    @Override
-    public CompletableFuture<ByteBuf> rangeRead(String path, long start, long end, ThrottleStrategy throttleStrategy) {
-        CompletableFuture<ByteBuf> cf = new CompletableFuture<>();
-        if (start > end) {
-            IllegalArgumentException ex = new IllegalArgumentException();
-            LOGGER.error("[UNEXPECTED] rangeRead [{}, {})", start, end, ex);
-            cf.completeExceptionally(ex);
-            return cf;
-        } else if (start == end) {
-            cf.complete(Unpooled.EMPTY_BUFFER);
-            return cf;
-        }
-
-        if (networkInboundBandwidthLimiter != null) {
-            TimerUtil timerUtil = new TimerUtil();
-            networkInboundBandwidthLimiter.consume(throttleStrategy, end - start).whenCompleteAsync((v, ex) -> {
-                NetworkStats.getInstance().networkLimiterQueueTimeStats(AsyncNetworkBandwidthLimiter.Type.INBOUND)
-                    .record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS));
-                if (ex != null) {
-                    cf.completeExceptionally(ex);
-                } else {
-                    rangeRead0(path, start, end, cf);
-                }
-            }, readLimiterCallbackExecutor);
-        } else {
-            rangeRead0(path, start, end, cf);
-        }
-
-        Timeout timeout = timeoutDetect.newTimeout((t) -> LOGGER.warn("rangeRead {} {}-{} timeout", path, start, end), 3, TimeUnit.MINUTES);
-        return cf.whenComplete((rst, ex) -> timeout.cancel());
-    }
-
-    private void rangeRead0(String path, long start, long end, CompletableFuture<ByteBuf> cf) {
-        synchronized (waitingReadTasks) {
-            waitingReadTasks.add(new ReadTask(path, start, end, cf));
-        }
-    }
-
-    void tryMergeRead() {
-        try {
-            tryMergeRead0();
-        } catch (Throwable e) {
-            LOGGER.error("[UNEXPECTED] tryMergeRead fail", e);
-        }
-    }
-
-    /**
-     * Get adjacent read tasks and merge them into one read task whose read range does not exceed {@link MergedReadTask#MAX_MERGE_READ_SIZE} (32 MiB).
- */ - private void tryMergeRead0() { - List mergedReadTasks = new ArrayList<>(); - synchronized (waitingReadTasks) { - if (waitingReadTasks.isEmpty()) { - return; - } - int readPermit = availableReadPermit(); - while (readPermit > 0 && !waitingReadTasks.isEmpty()) { - Iterator it = waitingReadTasks.iterator(); - Map mergingReadTasks = new HashMap<>(); - while (it.hasNext()) { - ReadTask readTask = it.next(); - MergedReadTask mergedReadTask = mergingReadTasks.get(readTask.path); - if (mergedReadTask == null) { - if (readPermit > 0) { - readPermit -= 1; - mergedReadTask = new MergedReadTask(readTask, maxMergeReadSparsityRate); - mergingReadTasks.put(readTask.path, mergedReadTask); - mergedReadTasks.add(mergedReadTask); - it.remove(); - } - } else { - if (mergedReadTask.tryMerge(readTask)) { - it.remove(); - } - } - } - } - } - mergedReadTasks.forEach( - mergedReadTask -> { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] merge read: {}, {}-{}, size: {}, sparsityRate: {}", - mergedReadTask.path, mergedReadTask.start, mergedReadTask.end, - mergedReadTask.end - mergedReadTask.start, mergedReadTask.dataSparsityRate); - } - mergedRangeRead(mergedReadTask.path, mergedReadTask.start, mergedReadTask.end) - .whenComplete((rst, ex) -> FutureUtil.suppress(() -> mergedReadTask.handleReadCompleted(rst, ex), LOGGER)); - } - ); - } - - private int availableReadPermit() { - return inflightReadLimiter.availablePermits(); - } - - CompletableFuture mergedRangeRead(String path, long start, long end) { - end = end - 1; - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture retCf = acquireReadPermit(cf); - if (retCf.isDone()) { - return retCf; - } - mergedRangeRead0(path, start, end, cf); - return retCf; - } - - void mergedRangeRead0(String path, long start, long end, CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - long size = end - start + 1; - GetObjectRequest request = GetObjectRequest.builder().bucket(bucket).key(path).range(range(start, end)).build(); - Consumer failHandler = (ex) -> { - if (isUnrecoverable(ex)) { - LOGGER.error("GetObject for object {} [{}, {}) fail", path, start, end, ex); - cf.completeExceptionally(ex); - } else { - LOGGER.warn("GetObject for object {} [{}, {}) fail, retry later", path, start, end, ex); - scheduler.schedule(() -> mergedRangeRead0(path, start, end, cf), 100, TimeUnit.MILLISECONDS); - } - S3OperationStats.getInstance().getObjectStats(size, false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - }; - - readS3Client.getObject(request, AsyncResponseTransformer.toPublisher()) - .thenAccept(responsePublisher -> { - CompositeByteBuf buf = ByteBufAlloc.compositeByteBuffer(); - responsePublisher.subscribe((bytes) -> { - // the aws client will copy DefaultHttpContent to heap ByteBuffer - buf.addComponent(true, Unpooled.wrappedBuffer(bytes)); - }).thenAccept(v -> { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("[S3BlockCache] getObject from path: {}, {}-{}, size: {}, cost: {} ms", - path, start, end, size, timerUtil.elapsedAs(TimeUnit.MILLISECONDS)); - } - S3OperationStats.getInstance().downloadSizeTotalStats.add(MetricsLevel.INFO, size); - S3OperationStats.getInstance().getObjectStats(size, true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - cf.complete(buf); - }).exceptionally(ex -> { - buf.release(); - failHandler.accept(ex); - return null; - }); - }) - .exceptionally(ex -> { - failHandler.accept(ex); - return null; - }); - } - - @Override - public CompletableFuture write(String path, ByteBuf data, ThrottleStrategy 
throttleStrategy) { - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture retCf = acquireWritePermit(cf); - if (retCf.isDone()) { - return retCf; - } - if (networkOutboundBandwidthLimiter != null) { - TimerUtil timerUtil = new TimerUtil(); - networkOutboundBandwidthLimiter.consume(throttleStrategy, data.readableBytes()).whenCompleteAsync((v, ex) -> { - NetworkStats.getInstance().networkLimiterQueueTimeStats(AsyncNetworkBandwidthLimiter.Type.OUTBOUND) - .record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (ex != null) { - cf.completeExceptionally(ex); - } else { - write0(path, data, cf); - } - }, writeLimiterCallbackExecutor); - } else { - write0(path, data, cf); - } - return retCf; - } - - private void write0(String path, ByteBuf data, CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - int objectSize = data.readableBytes(); - PutObjectRequest request = PutObjectRequest.builder().bucket(bucket).key(path).build(); - AsyncRequestBody body = AsyncRequestBody.fromByteBuffersUnsafe(data.nioBuffers()); - writeS3Client.putObject(request, body).thenAccept(putObjectResponse -> { - S3OperationStats.getInstance().uploadSizeTotalStats.add(MetricsLevel.INFO, objectSize); - S3OperationStats.getInstance().putObjectStats(objectSize, true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - LOGGER.debug("put object {} with size {}, cost {}ms", path, objectSize, timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - data.release(); - cf.complete(null); - }).exceptionally(ex -> { - S3OperationStats.getInstance().putObjectStats(objectSize, false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (isUnrecoverable(ex)) { - LOGGER.error("PutObject for object {} fail", path, ex); - cf.completeExceptionally(ex); - data.release(); - } else { - LOGGER.warn("PutObject for object {} fail, retry later", path, ex); - scheduler.schedule(() -> write0(path, data, cf), 100, TimeUnit.MILLISECONDS); - } - return null; - }); - } - - @Override - public Writer writer(Writer.Context context, String path, ThrottleStrategy throttleStrategy) { - return new ProxyWriter(context, this, path, throttleStrategy); - } - - @Override - public CompletableFuture delete(String path) { - TimerUtil timerUtil = new TimerUtil(); - DeleteObjectRequest request = DeleteObjectRequest.builder().bucket(bucket).key(path).build(); - return writeS3Client.deleteObject(request).thenAccept(deleteObjectResponse -> { - S3OperationStats.getInstance().deleteObjectStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - LOGGER.info("[ControllerS3Operator]: Delete object finished, path: {}, cost: {}", path, timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - }).exceptionally(ex -> { - S3OperationStats.getInstance().deleteObjectsStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - LOGGER.info("[ControllerS3Operator]: Delete object failed, path: {}, cost: {}, ex: {}", path, timerUtil.elapsedAs(TimeUnit.NANOSECONDS), ex.getMessage()); - return null; - }); - } - - @Override - public CompletableFuture> delete(List objectKeys) { - TimerUtil timerUtil = new TimerUtil(); - ObjectIdentifier[] toDeleteKeys = objectKeys.stream().map(key -> - ObjectIdentifier.builder() - .key(key) - .build() - ).toArray(ObjectIdentifier[]::new); - DeleteObjectsRequest request = DeleteObjectsRequest.builder() - .bucket(bucket) - .delete(Delete.builder().objects(toDeleteKeys).build()) - .build(); - // TODO: handle not exist object, should we regard it as deleted or ignore it. 
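// Context for the TODO above: on AWS S3 the DeleteObjects call is idempotent, so a
// key that no longer exists is reported in the response's deleted() list rather
// than in errors(); treating it as deleted is therefore safe there (other
// S3-compatible stores may differ). A per-key error check, placed inside the
// thenApply(resp -> ...) callback below, could look like the following
// (editorial sketch, not in the original):
//
//     resp.errors().forEach(err ->
//         LOGGER.warn("[ControllerS3Operator]: delete {} failed: {} {}",
//             err.key(), err.code(), err.message()));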
- return this.writeS3Client.deleteObjects(request).thenApply(resp -> { - S3OperationStats.getInstance().deleteObjectsStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - LOGGER.info("[ControllerS3Operator]: Delete objects finished, count: {}, cost: {}", resp.deleted().size(), timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - return resp.deleted().stream().map(DeletedObject::key).collect(Collectors.toList()); - }).exceptionally(ex -> { - S3OperationStats.getInstance().deleteObjectsStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - LOGGER.info("[ControllerS3Operator]: Delete objects failed, count: {}, cost: {}, ex: {}", objectKeys.size(), timerUtil.elapsedAs(TimeUnit.NANOSECONDS), ex.getMessage()); - return Collections.emptyList(); - }); - } - - @Override - public CompletableFuture createMultipartUpload(String path) { - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture retCf = acquireWritePermit(cf); - if (retCf.isDone()) { - return retCf; - } - createMultipartUpload0(path, cf); - return retCf; - } - - void createMultipartUpload0(String path, CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - CreateMultipartUploadRequest request = CreateMultipartUploadRequest.builder().bucket(bucket).key(path).build(); - writeS3Client.createMultipartUpload(request).thenAccept(createMultipartUploadResponse -> { - S3OperationStats.getInstance().createMultiPartUploadStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - cf.complete(createMultipartUploadResponse.uploadId()); - }).exceptionally(ex -> { - S3OperationStats.getInstance().createMultiPartUploadStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (isUnrecoverable(ex)) { - LOGGER.error("CreateMultipartUpload for object {} fail", path, ex); - cf.completeExceptionally(ex); - } else { - LOGGER.warn("CreateMultipartUpload for object {} fail, retry later", path, ex); - scheduler.schedule(() -> createMultipartUpload0(path, cf), 100, TimeUnit.MILLISECONDS); - } - return null; - }); - } - - @Override - public CompletableFuture uploadPart(String path, String uploadId, int partNumber, ByteBuf data, - ThrottleStrategy throttleStrategy) { - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture refCf = acquireWritePermit(cf); - if (refCf.isDone()) { - return refCf; - } - if (networkOutboundBandwidthLimiter != null) { - networkOutboundBandwidthLimiter.consume(throttleStrategy, data.readableBytes()).whenCompleteAsync((v, ex) -> { - if (ex != null) { - cf.completeExceptionally(ex); - } else { - uploadPart0(path, uploadId, partNumber, data, cf); - } - }, writeLimiterCallbackExecutor); - } else { - uploadPart0(path, uploadId, partNumber, data, cf); - } - return refCf; - } - - private void uploadPart0(String path, String uploadId, int partNumber, ByteBuf part, - CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - int size = part.readableBytes(); - AsyncRequestBody body = AsyncRequestBody.fromByteBuffersUnsafe(part.nioBuffers()); - UploadPartRequest request = UploadPartRequest.builder().bucket(bucket).key(path).uploadId(uploadId) - .partNumber(partNumber).build(); - CompletableFuture uploadPartCf = writeS3Client.uploadPart(request, body); - uploadPartCf.thenAccept(uploadPartResponse -> { - S3OperationStats.getInstance().uploadSizeTotalStats.add(MetricsLevel.INFO, size); - S3OperationStats.getInstance().uploadPartStats(size, true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - part.release(); - CompletedPart completedPart = 
CompletedPart.builder().partNumber(partNumber).eTag(uploadPartResponse.eTag()).build(); - cf.complete(completedPart); - }).exceptionally(ex -> { - S3OperationStats.getInstance().uploadPartStats(size, false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (isUnrecoverable(ex)) { - LOGGER.error("UploadPart for object {}-{} fail", path, partNumber, ex); - part.release(); - cf.completeExceptionally(ex); - } else { - LOGGER.warn("UploadPart for object {}-{} fail, retry later", path, partNumber, ex); - scheduler.schedule(() -> uploadPart0(path, uploadId, partNumber, part, cf), 100, TimeUnit.MILLISECONDS); - } - return null; - }); - } - - @Override - public CompletableFuture uploadPartCopy(String sourcePath, String path, long start, long end, - String uploadId, int partNumber) { - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture retCf = acquireWritePermit(cf); - if (retCf.isDone()) { - return retCf; - } - uploadPartCopy0(sourcePath, path, start, end, uploadId, partNumber, cf); - return retCf; - } - - private void uploadPartCopy0(String sourcePath, String path, long start, long end, String uploadId, int partNumber, - CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - long inclusiveEnd = end - 1; - UploadPartCopyRequest request = UploadPartCopyRequest.builder().sourceBucket(bucket).sourceKey(sourcePath) - .destinationBucket(bucket).destinationKey(path).copySourceRange(range(start, inclusiveEnd)).uploadId(uploadId).partNumber(partNumber).build(); - writeS3Client.uploadPartCopy(request).thenAccept(uploadPartCopyResponse -> { - S3OperationStats.getInstance().uploadPartCopyStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - CompletedPart completedPart = CompletedPart.builder().partNumber(partNumber) - .eTag(uploadPartCopyResponse.copyPartResult().eTag()).build(); - cf.complete(completedPart); - }).exceptionally(ex -> { - S3OperationStats.getInstance().uploadPartCopyStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if (isUnrecoverable(ex)) { - LOGGER.warn("UploadPartCopy for object {}-{} fail", path, partNumber, ex); - cf.completeExceptionally(ex); - } else { - LOGGER.warn("UploadPartCopy for object {}-{} fail, retry later", path, partNumber, ex); - scheduler.schedule(() -> uploadPartCopy0(sourcePath, path, start, end, uploadId, partNumber, cf), 100, TimeUnit.MILLISECONDS); - } - return null; - }); - } - - @Override - public CompletableFuture completeMultipartUpload(String path, String uploadId, List parts) { - CompletableFuture cf = new CompletableFuture<>(); - CompletableFuture retCf = acquireWritePermit(cf); - if (retCf.isDone()) { - return retCf; - } - completeMultipartUpload0(path, uploadId, parts, cf); - return retCf; - } - - public void completeMultipartUpload0(String path, String uploadId, List parts, - CompletableFuture cf) { - TimerUtil timerUtil = new TimerUtil(); - CompletedMultipartUpload multipartUpload = CompletedMultipartUpload.builder().parts(parts).build(); - CompleteMultipartUploadRequest request = CompleteMultipartUploadRequest.builder().bucket(bucket).key(path).uploadId(uploadId).multipartUpload(multipartUpload).build(); - - writeS3Client.completeMultipartUpload(request).thenAccept(completeMultipartUploadResponse -> { - S3OperationStats.getInstance().completeMultiPartUploadStats(true).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - cf.complete(null); - }).exceptionally(ex -> { - S3OperationStats.getInstance().completeMultiPartUploadStats(false).record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - if 
(isUnrecoverable(ex)) { - LOGGER.error("CompleteMultipartUpload for object {} fail", path, ex); - cf.completeExceptionally(ex); - } else if (!checkPartNumbers(request.multipartUpload())) { - LOGGER.error("CompleteMultipartUpload for object {} fail, part numbers are not continuous", path); - cf.completeExceptionally(new IllegalArgumentException("Part numbers are not continuous")); - } else { - LOGGER.warn("CompleteMultipartUpload for object {} fail, retry later", path, ex); - scheduler.schedule(() -> completeMultipartUpload0(path, uploadId, parts, cf), 100, TimeUnit.MILLISECONDS); - } - return null; - }); - } - - private String range(long start, long end) { - if (end == -1L) { - return "bytes=" + start + "-"; - } - return "bytes=" + start + "-" + end; - } - - private void checkConfig() { - if (this.networkInboundBandwidthLimiter != null) { - if (this.networkInboundBandwidthLimiter.getMaxTokens() < Writer.MIN_PART_SIZE) { - throw new IllegalArgumentException(String.format("Network inbound burst bandwidth limit %d must be no less than min part size %d", - this.networkInboundBandwidthLimiter.getMaxTokens(), Writer.MIN_PART_SIZE)); - } - } - if (this.networkOutboundBandwidthLimiter != null) { - if (this.networkOutboundBandwidthLimiter.getMaxTokens() < Writer.MIN_PART_SIZE) { - throw new IllegalArgumentException(String.format("Network outbound burst bandwidth limit %d must be no less than min part size %d", - this.networkOutboundBandwidthLimiter.getMaxTokens(), Writer.MIN_PART_SIZE)); - } - } - } - - private void checkAvailable(S3Utils.S3Context s3Context) { - byte[] content = new Date().toString().getBytes(StandardCharsets.UTF_8); - String path = String.format("check_available/%d", System.nanoTime()); - String multipartPath = String.format("check_available_multipart/%d", System.nanoTime()); - try { - // Check network and bucket -// readS3Client.getBucketAcl(b -> b.bucket(bucket)).get(3, TimeUnit.SECONDS); - - // Simple write/read/delete - this.write(path, Unpooled.wrappedBuffer(content)).get(30, TimeUnit.SECONDS); - ByteBuf read = this.rangeRead(path, 0, content.length).get(30, TimeUnit.SECONDS); - read.release(); - this.delete(path).get(30, TimeUnit.SECONDS); - - // Multipart write/read/delete - Writer writer = this.writer(multipartPath); - writer.write(Unpooled.wrappedBuffer(content)); - writer.close().get(30, TimeUnit.SECONDS); - read = this.rangeRead(multipartPath, 0, content.length).get(30, TimeUnit.SECONDS); - read.release(); - this.delete(multipartPath).get(30, TimeUnit.SECONDS); - } catch (Throwable e) { - LOGGER.error("Failed to write/read/delete object on S3 ", e); - String exceptionMsg = String.format("Failed to write/read/delete object on S3. You are using s3Context: %s.", s3Context); - - Throwable cause = e.getCause() != null ? e.getCause() : e; - if (cause instanceof SdkClientException) { - if (cause.getMessage().contains("UnknownHostException")) { - Throwable rootCause = ExceptionUtils.getRootCause(cause); - exceptionMsg += "\nUnable to resolve Host \"" + rootCause.getMessage() + "\". Please check your S3 endpoint."; - } else if (cause.getMessage().startsWith("Unable to execute HTTP request")) { - exceptionMsg += "\nUnable to execute HTTP request. Please check your network connection and make sure you can access S3."; - } - } - - if (e instanceof TimeoutException || cause instanceof TimeoutException) { - exceptionMsg += "\nConnection timeout. 
Please check your network connection and make sure you can access S3."; - } - - if (cause instanceof NoSuchBucketException) { - exceptionMsg += "\nBucket \"" + bucket + "\" not found. Please check your bucket name."; - } - - List advices = s3Context.advices(); - if (!advices.isEmpty()) { - exceptionMsg += "\nHere are some advices: \n" + String.join("\n", advices); - } - throw new RuntimeException(exceptionMsg, e); - } - } - - public S3AsyncClient newS3Client(String endpoint, String region, boolean forcePathStyle, - List credentialsProviders) { - S3AsyncClientBuilder builder = S3AsyncClient.builder().region(Region.of(region)); - if (StringUtils.isNotBlank(endpoint)) { - builder.endpointOverride(URI.create(endpoint)); - } - builder.serviceConfiguration(c -> c.pathStyleAccessEnabled(forcePathStyle)); - builder.credentialsProvider(newCredentialsProviderChain(credentialsProviders)); - builder.overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(2)) - .apiCallAttemptTimeout(Duration.ofSeconds(60))); - return builder.build(); - } - - private AwsCredentialsProvider newCredentialsProviderChain(List credentialsProviders) { - List providers = new ArrayList<>(credentialsProviders); - // Add default providers to the end of the chain - providers.add(InstanceProfileCredentialsProvider.create()); - providers.add(AnonymousCredentialsProvider.create()); - return AwsCredentialsProviderChain.builder() - .reuseLastProviderEnabled(true) - .credentialsProviders(providers) - .build(); - } - - /** - * Acquire read permit, permit will auto release when cf complete. - * - * @return retCf the retCf should be used as method return value to ensure release before following operations. - */ - CompletableFuture acquireReadPermit(CompletableFuture cf) { - // TODO: async acquire? - try { - inflightReadLimiter.acquire(); - CompletableFuture newCf = new CompletableFuture<>(); - cf.whenComplete((rst, ex) -> { - inflightReadLimiter.release(); - readCallbackExecutor.execute(() -> { - if (ex != null) { - newCf.completeExceptionally(ex); - } else { - newCf.complete(rst); - } - }); - }); - return newCf; - } catch (InterruptedException e) { - cf.completeExceptionally(e); - return cf; - } - } - - /** - * Acquire write permit, permit will auto release when cf complete. - * - * @return retCf the retCf should be used as method return value to ensure release before following operations. 
- */ - CompletableFuture acquireWritePermit(CompletableFuture cf) { - try { - inflightWriteLimiter.acquire(); - CompletableFuture newCf = new CompletableFuture<>(); - cf.whenComplete((rst, ex) -> { - inflightWriteLimiter.release(); - writeCallbackExecutor.execute(() -> { - if (ex != null) { - newCf.completeExceptionally(ex); - } else { - newCf.complete(rst); - } - }); - }); - return newCf; - } catch (InterruptedException e) { - cf.completeExceptionally(e); - return cf; - } - } - - static class MergedReadTask { - static final int MAX_MERGE_READ_SIZE = 32 * 1024 * 1024; - final String path; - final List readTasks = new ArrayList<>(); - long start; - long end; - long uniqueDataSize; - float dataSparsityRate = 0f; - float maxMergeReadSparsityRate; - - MergedReadTask(ReadTask readTask, float maxMergeReadSparsityRate) { - this.path = readTask.path; - this.start = readTask.start; - this.end = readTask.end; - this.readTasks.add(readTask); - this.uniqueDataSize = readTask.end - readTask.start; - this.maxMergeReadSparsityRate = maxMergeReadSparsityRate; - } - - boolean tryMerge(ReadTask readTask) { - if (!path.equals(readTask.path) || dataSparsityRate > this.maxMergeReadSparsityRate) { - return false; - } - - long newStart = Math.min(start, readTask.start); - long newEnd = Math.max(end, readTask.end); - boolean merge = newEnd - newStart <= MAX_MERGE_READ_SIZE; - if (merge) { - // insert read task in order - int i = 0; - long overlap = 0L; - for (; i < readTasks.size(); i++) { - ReadTask task = readTasks.get(i); - if (task.start >= readTask.start) { - readTasks.add(i, readTask); - // calculate data overlap - ReadTask prev = i > 0 ? readTasks.get(i - 1) : null; - ReadTask next = readTasks.get(i + 1); - - if (prev != null && readTask.start < prev.end) { - overlap += prev.end - readTask.start; - } - if (readTask.end > next.start) { - overlap += readTask.end - next.start; - } - break; - } - } - if (i == readTasks.size()) { - readTasks.add(readTask); - ReadTask prev = i >= 1 ? 
readTasks.get(i - 1) : null; - if (prev != null && readTask.start < prev.end) { - overlap += prev.end - readTask.start; - } - } - long uniqueSize = readTask.end - readTask.start - overlap; - long tmpUniqueSize = uniqueDataSize + uniqueSize; - float tmpSparsityRate = 1 - (float) tmpUniqueSize / (newEnd - newStart); - if (tmpSparsityRate > maxMergeReadSparsityRate) { - // remove read task - readTasks.remove(i); - return false; - } - uniqueDataSize = tmpUniqueSize; - dataSparsityRate = tmpSparsityRate; - start = newStart; - end = newEnd; - } - return merge; - } - - void handleReadCompleted(ByteBuf rst, Throwable ex) { - if (ex != null) { - readTasks.forEach(readTask -> readTask.cf.completeExceptionally(ex)); - } else { - for (ReadTask readTask : readTasks) { - readTask.cf.complete(rst.retainedSlice((int) (readTask.start - start), (int) (readTask.end - readTask.start))); - } - rst.release(); - } - } - } - - static final class ReadTask { - private final String path; - private final long start; - private final long end; - private final CompletableFuture cf; - - ReadTask(String path, long start, long end, CompletableFuture cf) { - this.path = path; - this.start = start; - this.end = end; - this.cf = cf; - } - - public String path() { - return path; - } - - public long start() { - return start; - } - - public long end() { - return end; - } - - public CompletableFuture cf() { - return cf; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (ReadTask) obj; - return Objects.equals(this.path, that.path) && - this.start == that.start && - this.end == that.end && - Objects.equals(this.cf, that.cf); - } - - @Override - public int hashCode() { - return Objects.hash(path, start, end, cf); - } - - @Override - public String toString() { - return "ReadTask[" + - "path=" + path + ", " + - "start=" + start + ", " + - "end=" + end + ", " + - "cf=" + cf + ']'; - } - } - - public static class Builder { - private String endpoint; - private String region; - private String bucket; - private boolean forcePathStyle; - private List credentialsProviders; - private AsyncNetworkBandwidthLimiter inboundLimiter; - private AsyncNetworkBandwidthLimiter outboundLimiter; - private boolean readWriteIsolate; - - public Builder endpoint(String endpoint) { - this.endpoint = endpoint; - return this; - } - - public Builder region(String region) { - this.region = region; - return this; - } - - public Builder bucket(String bucket) { - this.bucket = bucket; - return this; - } - - public Builder forcePathStyle(boolean forcePathStyle) { - this.forcePathStyle = forcePathStyle; - return this; - } - - public Builder credentialsProviders(List credentialsProviders) { - this.credentialsProviders = credentialsProviders; - return this; - } - - public Builder inboundLimiter(AsyncNetworkBandwidthLimiter inboundLimiter) { - this.inboundLimiter = inboundLimiter; - return this; - } - - public Builder outboundLimiter(AsyncNetworkBandwidthLimiter outboundLimiter) { - this.outboundLimiter = outboundLimiter; - return this; - } - - public Builder readWriteIsolate(boolean readWriteIsolate) { - this.readWriteIsolate = readWriteIsolate; - return this; - } - - public DefaultS3Operator build() { - return new DefaultS3Operator(endpoint, region, bucket, forcePathStyle, credentialsProviders, - inboundLimiter, outboundLimiter, readWriteIsolate); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/MemoryS3Operator.java 
b/s3stream/src/main/java/com/automq/stream/s3/operator/MemoryS3Operator.java deleted file mode 100644 index 8ab57b818..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/operator/MemoryS3Operator.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.utils.FutureUtil; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import software.amazon.awssdk.services.s3.model.CompletedPart; - -public class MemoryS3Operator implements S3Operator { - private final Map storage = new ConcurrentHashMap<>(); - - @Override - public void close() { - } - - @Override - public CompletableFuture rangeRead(String path, long start, long end, ThrottleStrategy throttleStrategy) { - ByteBuf value = storage.get(path); - if (value == null) { - return FutureUtil.failedFuture(new IllegalArgumentException("object not exist")); - } - int length = (int) (end - start); - return CompletableFuture.completedFuture(value.retainedSlice(value.readerIndex() + (int) start, length)); - } - - @Override - public CompletableFuture write(String path, ByteBuf data, ThrottleStrategy throttleStrategy) { - ByteBuf buf = Unpooled.buffer(data.readableBytes()); - buf.writeBytes(data.duplicate()); - storage.put(path, buf); - return CompletableFuture.completedFuture(null); - } - - @Override - public Writer writer(Writer.Context context, String path, ThrottleStrategy throttleStrategy) { - ByteBuf buf = Unpooled.buffer(); - storage.put(path, buf); - return new Writer() { - @Override - public CompletableFuture write(ByteBuf part) { - buf.writeBytes(part); - // Keep the same behavior as a real S3Operator - // Release the part after write - part.release(); - return CompletableFuture.completedFuture(null); - } - - @Override - public void copyOnWrite() { - - } - - @Override - public boolean hasBatchingPart() { - return false; - } - - @Override - public void copyWrite(String sourcePath, long start, long end) { - ByteBuf source = storage.get(sourcePath); - if (source == null) { - throw new IllegalArgumentException("object not exist"); - } - buf.writeBytes(source.slice(source.readerIndex() + (int) start, (int) (end - start))); - } - - @Override - public CompletableFuture close() { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture release() { - return CompletableFuture.completedFuture(null); - } - }; - } - - @Override - public CompletableFuture delete(String path) { - storage.remove(path); - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture> delete(List objectKeys) { - objectKeys.forEach(storage::remove); - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture createMultipartUpload(String path) { - return FutureUtil.failedFuture(new UnsupportedOperationException()); - } - - @Override - public CompletableFuture uploadPart(String path, String uploadId, int partNumber, ByteBuf data, - ThrottleStrategy throttleStrategy) { - return 
FutureUtil.failedFuture(new UnsupportedOperationException()); - } - - @Override - public CompletableFuture uploadPartCopy(String sourcePath, String path, long start, long end, - String uploadId, int partNumber) { - return FutureUtil.failedFuture(new UnsupportedOperationException()); - } - - @Override - public CompletableFuture completeMultipartUpload(String path, String uploadId, List parts) { - return FutureUtil.failedFuture(new UnsupportedOperationException()); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/MultiPartWriter.java b/s3stream/src/main/java/com/automq/stream/s3/operator/MultiPartWriter.java deleted file mode 100644 index 76103c951..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/operator/MultiPartWriter.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.S3ObjectStats; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.utils.FutureUtil; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import software.amazon.awssdk.services.s3.model.CompletedPart; - - -public class MultiPartWriter implements Writer { - private static final long MAX_MERGE_WRITE_SIZE = 16L * 1024 * 1024; - final CompletableFuture uploadIdCf = new CompletableFuture<>(); - private final Context context; - private final S3Operator operator; - private final String path; - private final List> parts = new LinkedList<>(); - private final AtomicInteger nextPartNumber = new AtomicInteger(1); - /** - * The minPartSize represents the minimum size of a part for a multipart object. - */ - private final long minPartSize; - private final TimerUtil timerUtil = new TimerUtil(); - private final ThrottleStrategy throttleStrategy; - private final AtomicLong totalWriteSize = new AtomicLong(0L); - private String uploadId; - private CompletableFuture closeCf; - private ObjectPart objectPart = null; - - public MultiPartWriter(Context context, S3Operator operator, String path, long minPartSize, ThrottleStrategy throttleStrategy) { - this.context = context; - this.operator = operator; - this.path = path; - this.minPartSize = minPartSize; - this.throttleStrategy = throttleStrategy; - init(); - } - - private void init() { - FutureUtil.propagate( - operator.createMultipartUpload(path).thenApply(uploadId -> { - this.uploadId = uploadId; - return uploadId; - }), - uploadIdCf - ); - } - - @Override - public CompletableFuture write(ByteBuf data) { - totalWriteSize.addAndGet(data.readableBytes()); - - if (objectPart == null) { - objectPart = new ObjectPart(throttleStrategy); - } - ObjectPart objectPart = this.objectPart; - - objectPart.write(data); - if (objectPart.size() > minPartSize) { - objectPart.upload(); - // finish current part. 
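// Worked example of the sizing policy (editorial note): with the default
// minPartSize of MIN_PART_SIZE (5 MiB), three successive 3 MiB writes behave as
// follows: write #1 opens a part and buffers 3 MiB; write #2 grows it to 6 MiB,
// which exceeds minPartSize, so the part is uploaded and the slot is reset;
// write #3 starts a fresh 3 MiB part that close() later flushes as the final,
// legitimately undersized, part.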
- this.objectPart = null; - } - return objectPart.getFuture(); - } - - @Override - public void copyOnWrite() { - if (objectPart != null) { - objectPart.copyOnWrite(); - } - } - - @Override - public boolean hasBatchingPart() { - return objectPart != null; - } - - @Override - public void copyWrite(String sourcePath, long start, long end) { - long nextStart = start; - for (; ; ) { - long currentEnd = Math.min(nextStart + Writer.MAX_PART_SIZE, end); - copyWrite0(sourcePath, nextStart, currentEnd); - nextStart = currentEnd; - if (currentEnd == end) { - break; - } - } - } - - public void copyWrite0(String sourcePath, long start, long end) { - long targetSize = end - start; - if (objectPart == null) { - if (targetSize < minPartSize) { - this.objectPart = new ObjectPart(throttleStrategy); - objectPart.readAndWrite(sourcePath, start, end); - } else { - new CopyObjectPart(sourcePath, start, end); - } - } else { - if (objectPart.size() + targetSize > MAX_MERGE_WRITE_SIZE) { - long readAndWriteCopyEnd = start + minPartSize - objectPart.size(); - objectPart.readAndWrite(sourcePath, start, readAndWriteCopyEnd); - objectPart.upload(); - this.objectPart = null; - new CopyObjectPart(sourcePath, readAndWriteCopyEnd, end); - } else { - objectPart.readAndWrite(sourcePath, start, end); - if (objectPart.size() > minPartSize) { - objectPart.upload(); - this.objectPart = null; - } - } - } - } - - @Override - public CompletableFuture close() { - if (closeCf != null) { - return closeCf; - } - - if (objectPart != null) { - // force upload the last part which can be smaller than minPartSize. - objectPart.upload(); - objectPart = null; - } - - S3ObjectStats.getInstance().objectStageReadyCloseStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - closeCf = new CompletableFuture<>(); - CompletableFuture uploadDoneCf = uploadIdCf.thenCompose(uploadId -> CompletableFuture.allOf(parts.toArray(new CompletableFuture[0]))); - FutureUtil.propagate(uploadDoneCf.thenCompose(nil -> operator.completeMultipartUpload(path, uploadId, genCompleteParts())), closeCf); - closeCf.whenComplete((nil, ex) -> { - S3ObjectStats.getInstance().objectStageTotalStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - S3ObjectStats.getInstance().objectNumInTotalStats.add(MetricsLevel.DEBUG, 1); - S3ObjectStats.getInstance().objectUploadSizeStats.record(totalWriteSize.get()); - }); - return closeCf; - } - - @Override - public CompletableFuture release() { - // wait for all ongoing uploading parts to finish and release pending part - return CompletableFuture.allOf(parts.toArray(new CompletableFuture[0])).whenComplete((nil, ex) -> { - if (objectPart != null) { - objectPart.release(); - } - }); - } - - private List genCompleteParts() { - return this.parts.stream().map(cf -> { - try { - return cf.get(); - } catch (Throwable e) { - // won't happen. 
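// More precisely, "won't happen" because genCompleteParts() is only invoked from
// uploadDoneCf.thenCompose(...), i.e. after CompletableFuture.allOf(parts) has
// completed successfully, so every part future here is already done with a value
// and cf.get() can neither block nor throw.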
- throw new RuntimeException(e); - } - }).collect(Collectors.toList()); - } - - class ObjectPart { - private final int partNumber = nextPartNumber.getAndIncrement(); - private final CompletableFuture partCf = new CompletableFuture<>(); - private final ThrottleStrategy throttleStrategy; - private CompositeByteBuf partBuf = ByteBufAlloc.compositeByteBuffer(); - private CompletableFuture lastRangeReadCf = CompletableFuture.completedFuture(null); - private long size; - - public ObjectPart(ThrottleStrategy throttleStrategy) { - this.throttleStrategy = throttleStrategy; - parts.add(partCf); - } - - public void write(ByteBuf data) { - size += data.readableBytes(); - // ensure addComponent happen before following write or copyWrite. - this.lastRangeReadCf = lastRangeReadCf.thenAccept(nil -> partBuf.addComponent(true, data)); - } - - public void copyOnWrite() { - int size = partBuf.readableBytes(); - if (size > 0) { - ByteBuf buf = ByteBufAlloc.byteBuffer(size, context.allocType()); - buf.writeBytes(partBuf.duplicate()); - CompositeByteBuf copy = ByteBufAlloc.compositeByteBuffer().addComponent(true, buf); - this.partBuf.release(); - this.partBuf = copy; - } - } - - public void readAndWrite(String sourcePath, long start, long end) { - size += end - start; - // TODO: parallel read and sequence add. - this.lastRangeReadCf = lastRangeReadCf - .thenCompose(nil -> operator.rangeRead(sourcePath, start, end, throttleStrategy)) - .thenAccept(buf -> partBuf.addComponent(true, buf)); - } - - public void upload() { - this.lastRangeReadCf.whenComplete((nil, ex) -> { - if (ex != null) { - partCf.completeExceptionally(ex); - } else { - upload0(); - } - }); - } - - private void upload0() { - TimerUtil timerUtil = new TimerUtil(); - FutureUtil.propagate(uploadIdCf.thenCompose(uploadId -> operator.uploadPart(path, uploadId, partNumber, partBuf, throttleStrategy)), partCf); - partCf.whenComplete((nil, ex) -> { - S3ObjectStats.getInstance().objectStageUploadPartStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - }); - } - - public long size() { - return size; - } - - public CompletableFuture getFuture() { - return partCf.thenApply(nil -> null); - } - - public void release() { - partBuf.release(); - } - } - - class CopyObjectPart { - private final CompletableFuture partCf = new CompletableFuture<>(); - - public CopyObjectPart(String sourcePath, long start, long end) { - int partNumber = nextPartNumber.getAndIncrement(); - parts.add(partCf); - FutureUtil.propagate(uploadIdCf.thenCompose(uploadId -> operator.uploadPartCopy(sourcePath, path, start, end, uploadId, partNumber)), partCf); - } - - public CompletableFuture getFuture() { - return partCf.thenApply(nil -> null); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/ProxyWriter.java b/s3stream/src/main/java/com/automq/stream/s3/operator/ProxyWriter.java deleted file mode 100644 index 8821586a3..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/operator/ProxyWriter.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.S3ObjectStats; -import com.automq.stream.s3.network.ThrottleStrategy; -import com.automq.stream.utils.FutureUtil; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; - -/** - * If object data size is less than ObjectWriter.MAX_UPLOAD_SIZE, we should use single upload to upload it. - * Else, we should use multi-part upload to upload it. - */ -class ProxyWriter implements Writer { - final ObjectWriter objectWriter = new ObjectWriter(); - private final Context context; - private final S3Operator operator; - private final String path; - private final long minPartSize; - private final ThrottleStrategy throttleStrategy; - Writer multiPartWriter = null; - - public ProxyWriter(Context context, S3Operator operator, String path, long minPartSize, - ThrottleStrategy throttleStrategy) { - this.context = context; - this.operator = operator; - this.path = path; - this.minPartSize = minPartSize; - this.throttleStrategy = throttleStrategy; - } - - public ProxyWriter(Context context, S3Operator operator, String path, ThrottleStrategy throttleStrategy) { - this(context, operator, path, MIN_PART_SIZE, throttleStrategy); - } - - @Override - public CompletableFuture write(ByteBuf part) { - if (multiPartWriter != null) { - return multiPartWriter.write(part); - } else { - objectWriter.write(part); - if (objectWriter.isFull()) { - newMultiPartWriter(); - } - return objectWriter.cf; - } - } - - @Override - public void copyOnWrite() { - if (multiPartWriter != null) { - multiPartWriter.copyOnWrite(); - } else { - objectWriter.copyOnWrite(); - } - } - - @Override - public void copyWrite(String sourcePath, long start, long end) { - if (multiPartWriter == null) { - newMultiPartWriter(); - } - multiPartWriter.copyWrite(sourcePath, start, end); - } - - @Override - public boolean hasBatchingPart() { - if (multiPartWriter != null) { - return multiPartWriter.hasBatchingPart(); - } else { - return objectWriter.hasBatchingPart(); - } - } - - @Override - public CompletableFuture close() { - if (multiPartWriter != null) { - return multiPartWriter.close(); - } else { - return objectWriter.close(); - } - } - - @Override - public CompletableFuture release() { - if (multiPartWriter != null) { - return multiPartWriter.release(); - } else { - return objectWriter.release(); - } - } - - private void newMultiPartWriter() { - this.multiPartWriter = new MultiPartWriter(context, operator, path, minPartSize, throttleStrategy); - if (objectWriter.data.readableBytes() > 0) { - FutureUtil.propagate(multiPartWriter.write(objectWriter.data), objectWriter.cf); - } else { - objectWriter.data.release(); - objectWriter.cf.complete(null); - } - } - - class ObjectWriter implements Writer { - // max upload size, when object data size is larger MAX_UPLOAD_SIZE, we should use multi-part upload to upload it. 
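// Worked example of the switch (editorial note): a 10 MiB object written through
// ProxyWriter never leaves this ObjectWriter and goes out as a single PutObject;
// once the buffered data exceeds MAX_UPLOAD_SIZE (32 MiB), isFull() trips,
// newMultiPartWriter() replays the buffered bytes as the first part, and all
// subsequent write/copyWrite calls are served by MultiPartWriter.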
- static final long MAX_UPLOAD_SIZE = 32L * 1024 * 1024; - CompletableFuture cf = new CompletableFuture<>(); - CompositeByteBuf data = ByteBufAlloc.compositeByteBuffer(); - TimerUtil timerUtil = new TimerUtil(); - - @Override - public CompletableFuture write(ByteBuf part) { - data.addComponent(true, part); - return cf; - } - - @Override - public void copyOnWrite() { - int size = data.readableBytes(); - if (size > 0) { - ByteBuf buf = ByteBufAlloc.byteBuffer(size, context.allocType()); - buf.writeBytes(data.duplicate()); - CompositeByteBuf copy = ByteBufAlloc.compositeByteBuffer().addComponent(true, buf); - this.data.release(); - this.data = copy; - } - } - - @Override - public void copyWrite(String sourcePath, long start, long end) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasBatchingPart() { - return true; - } - - @Override - public CompletableFuture close() { - S3ObjectStats.getInstance().objectStageReadyCloseStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - int size = data.readableBytes(); - FutureUtil.propagate(operator.write(path, data, throttleStrategy), cf); - cf.whenComplete((nil, e) -> { - S3ObjectStats.getInstance().objectStageTotalStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - S3ObjectStats.getInstance().objectNumInTotalStats.add(MetricsLevel.DEBUG, 1); - S3ObjectStats.getInstance().objectUploadSizeStats.record(size); - }); - return cf; - } - - @Override - public CompletableFuture release() { - data.release(); - return CompletableFuture.completedFuture(null); - } - - public boolean isFull() { - return data.readableBytes() > MAX_UPLOAD_SIZE; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/S3Operator.java b/s3stream/src/main/java/com/automq/stream/s3/operator/S3Operator.java deleted file mode 100644 index 31ec18ec6..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/operator/S3Operator.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.network.ThrottleStrategy; -import io.netty.buffer.ByteBuf; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import software.amazon.awssdk.services.s3.model.CompletedPart; - -public interface S3Operator { - - void close(); - - /** - * Range read from object. - * - * @param path object path. - * @param start range start. - * @param end range end. - * @param throttleStrategy throttle strategy. - * @return data. - */ - CompletableFuture rangeRead(String path, long start, long end, ThrottleStrategy throttleStrategy); - - default CompletableFuture rangeRead(String path, long start, long end) { - return rangeRead(path, start, end, ThrottleStrategy.BYPASS); - } - - /** - * Write data to object. - * - * @param path object path. The path should not start with '/' since Aliyun OSS does not support it. - * @param data data. - * @param throttleStrategy throttle strategy. - */ - CompletableFuture write(String path, ByteBuf data, ThrottleStrategy throttleStrategy); - - default CompletableFuture write(String path, ByteBuf data) { - return write(path, data, ThrottleStrategy.BYPASS); - } - - /** - * New multipart object writer. 
-     *
-     * @param ctx              writer context.
-     * @param path             object path.
-     * @param throttleStrategy throttle strategy.
-     * @return {@link Writer}
-     */
-    Writer writer(Writer.Context ctx, String path, ThrottleStrategy throttleStrategy);
-
-    default Writer writer(String path) {
-        return writer(Writer.Context.DEFAULT, path, ThrottleStrategy.BYPASS);
-    }
-
-    CompletableFuture<Void> delete(String path);
-
-    /**
-     * Delete a list of objects.
-     *
-     * @param objectKeys object keys to delete.
-     * @return deleted object keys.
-     */
-    CompletableFuture<List<String>> delete(List<String> objectKeys);
-
-    // low level API
-
-    /**
-     * Create multipart upload.
-     *
-     * @param path object path.
-     * @return upload id.
-     */
-    CompletableFuture<String> createMultipartUpload(String path);
-
-    /**
-     * Upload part.
-     *
-     * @return {@link CompletedPart}
-     */
-    CompletableFuture<CompletedPart> uploadPart(String path, String uploadId, int partNumber, ByteBuf data,
-        ThrottleStrategy throttleStrategy);
-
-    default CompletableFuture<CompletedPart> uploadPart(String path, String uploadId, int partNumber, ByteBuf data) {
-        return uploadPart(path, uploadId, partNumber, data, ThrottleStrategy.BYPASS);
-    }
-
-    /**
-     * Upload part copy.
-     *
-     * @return {@link CompletedPart}
-     */
-    CompletableFuture<CompletedPart> uploadPartCopy(String sourcePath, String path, long start, long end,
-        String uploadId, int partNumber);
-
-    CompletableFuture<Void> completeMultipartUpload(String path, String uploadId, List<CompletedPart> parts);
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/operator/Writer.java b/s3stream/src/main/java/com/automq/stream/s3/operator/Writer.java
deleted file mode 100644
index cdafae314..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/operator/Writer.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.operator;
-
-import com.automq.stream.s3.ByteBufAlloc;
-import io.netty.buffer.ByteBuf;
-import java.util.concurrent.CompletableFuture;
-
-/**
- * Multipart object writer.
- * <p>
- * Writer should ensure that a part, even with a size smaller than {@link Writer#MIN_PART_SIZE}, can still be uploaded.
- * For other S3 limits, it is the upper layer's responsibility to prevent reaching them.
- */
-public interface Writer {
-    /**
-     * The max number of parts. It comes from the limit of S3 multipart upload.
-     */
-    int MAX_PART_COUNT = 10000;
-    /**
-     * The max size of a part, i.e. 5GB. It comes from the limit of S3 multipart upload.
-     */
-    long MAX_PART_SIZE = 5L * 1024 * 1024 * 1024;
-    /**
-     * The min size of a part, i.e. 5MB. It comes from the limit of S3 multipart upload.
-     * Note that the last part can be smaller than this size.
-     */
-    int MIN_PART_SIZE = 5 * 1024 * 1024;
-    /**
-     * The max size of an object, i.e. 5TB. It comes from the limit of S3 object size.
-     */
-    long MAX_OBJECT_SIZE = 5L * 1024 * 1024 * 1024 * 1024;
-
-    /**
-     * Write a part of the object. The parts are uploaded to S3 in parallel.
-     *
-     * @param part object part.
-     */
-    CompletableFuture<Void> write(ByteBuf part);
-
-    /**
-     * Make a copy of all cached buffers and release the old ones, to prevent outside modification of the underlying data
-     * and to avoid holding buffer references for too long.
-     */
-    void copyOnWrite();
-
-    /**
-     * Copy a part of the object.
-     *
-     * @param sourcePath source object path.
-     * @param start      start position of the source object.
-     * @param end        end position of the source object.
-     */
-    void copyWrite(String sourcePath, long start, long end);
-
-    boolean hasBatchingPart();
-
-    /**
-     * Complete the object.
-     */
-    CompletableFuture<Void> close();
-
-    /**
-     * Release all resources held by this writer.
-     */
-    CompletableFuture<Void> release();
-
-    class Context {
-        public static final Context DEFAULT = new Context(ByteBufAlloc.DEFAULT);
-
-        private final int allocType;
-
-        public Context(int allocType) {
-            this.allocType = allocType;
-        }
-
-        public int allocType() {
-            return allocType;
-        }
-
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/streams/StreamManager.java b/s3stream/src/main/java/com/automq/stream/s3/streams/StreamManager.java
deleted file mode 100644
index 63a3e8b71..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/streams/StreamManager.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.streams;
-
-import com.automq.stream.s3.metadata.StreamMetadata;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-
-public interface StreamManager {
-
-    /**
-     * Get the streams currently opened by this server.
-     * When the server is starting or recovering, the WAL on EBS needs the stream offsets to determine the recovery point.
-     *
-     * @return list of {@link StreamMetadata}
-     */
-    CompletableFuture<List<StreamMetadata>> getOpeningStreams();
-
-    /**
-     * Get streams metadata by stream id.
-     *
-     * @param streamIds stream ids.
-     * @return list of {@link StreamMetadata}
-     */
-    CompletableFuture<List<StreamMetadata>> getStreams(List<Long> streamIds);
-
-    /**
-     * Create a new stream.
-     *
-     * @return stream id.
-     */
-    CompletableFuture<Long> createStream();
-
-    /**
-     * Open stream with a newer epoch. The controller will:
-     * 1. update the stream epoch to fence the old stream writer from committing objects.
-     * 2. calculate the last range endOffset.
-     * 3.
create a new range with serverId = current serverId, startOffset = last range endOffset. - * - * @param streamId stream id. - * @param epoch stream epoch. - * @return {@link StreamMetadata} - */ - CompletableFuture openStream(long streamId, long epoch); - - /** - * Trim stream to new start offset. - * - * @param streamId stream id. - * @param epoch stream epoch. - * @param newStartOffset new start offset. - */ - CompletableFuture trimStream(long streamId, long epoch, long newStartOffset); - - /** - * Close stream. Other server can open stream with newer epoch. - * - * @param streamId stream id. - * @param epoch stream epoch. - */ - CompletableFuture closeStream(long streamId, long epoch); - - /** - * Delete stream. - * - * @param streamId stream id. - * @param epoch stream epoch. - */ - CompletableFuture deleteStream(long streamId, long epoch); -} - diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java b/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java deleted file mode 100644 index 03f7dda48..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.trace; - -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.api.common.AttributesBuilder; -import io.opentelemetry.instrumentation.annotations.SpanAttribute; -import java.lang.reflect.Method; -import java.lang.reflect.Parameter; -import java.lang.reflect.Type; -import java.util.function.BiFunction; - -public class AttributeBindings { - private final BiFunction[] bindings; - - private AttributeBindings(BiFunction[] bindings) { - this.bindings = bindings; - } - - public static AttributeBindings bind(Method method, String[] parametersNames) { - Parameter[] parameters = method.getParameters(); - if (parameters.length != parametersNames.length) { - return new AttributeBindings(null); - } - - BiFunction[] bindings = new BiFunction[parametersNames.length]; - for (int i = 0; i < parametersNames.length; i++) { - Parameter parameter = parameters[i]; - - SpanAttribute spanAttribute = parameter.getAnnotation(SpanAttribute.class); - if (spanAttribute == null) { - bindings[i] = emptyBinding(); - } else { - String attributeName = spanAttribute.value().isEmpty() ? 
diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java b/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java
deleted file mode 100644
index 03f7dda48..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/trace/AttributeBindings.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.trace;
-
-import io.opentelemetry.api.common.AttributeKey;
-import io.opentelemetry.api.common.AttributesBuilder;
-import io.opentelemetry.instrumentation.annotations.SpanAttribute;
-import java.lang.reflect.Method;
-import java.lang.reflect.Parameter;
-import java.lang.reflect.Type;
-import java.util.function.BiFunction;
-
-public class AttributeBindings {
-    private final BiFunction<AttributesBuilder, Object, AttributesBuilder>[] bindings;
-
-    private AttributeBindings(BiFunction<AttributesBuilder, Object, AttributesBuilder>[] bindings) {
-        this.bindings = bindings;
-    }
-
-    public static AttributeBindings bind(Method method, String[] parametersNames) {
-        Parameter[] parameters = method.getParameters();
-        if (parameters.length != parametersNames.length) {
-            return new AttributeBindings(null);
-        }
-
-        BiFunction<AttributesBuilder, Object, AttributesBuilder>[] bindings = new BiFunction[parametersNames.length];
-        for (int i = 0; i < parametersNames.length; i++) {
-            Parameter parameter = parameters[i];
-
-            SpanAttribute spanAttribute = parameter.getAnnotation(SpanAttribute.class);
-            if (spanAttribute == null) {
-                bindings[i] = emptyBinding();
-            } else {
-                String attributeName = spanAttribute.value().isEmpty() ? parametersNames[i] : spanAttribute.value();
-                bindings[i] = createBinding(attributeName, parameter.getParameterizedType());
-            }
-        }
-        return new AttributeBindings(bindings);
-    }
-
-    static BiFunction<AttributesBuilder, Object, AttributesBuilder> emptyBinding() {
-        return (builder, arg) -> builder;
-    }
-
-    static BiFunction<AttributesBuilder, Object, AttributesBuilder> createBinding(String name, Type type) {
-        // Simple scalar parameter types
-        if (type == String.class) {
-            AttributeKey<String> key = AttributeKey.stringKey(name);
-            return (builder, arg) -> builder.put(key, (String) arg);
-        }
-        if (type == long.class || type == Long.class) {
-            AttributeKey<Long> key = AttributeKey.longKey(name);
-            return (builder, arg) -> builder.put(key, (Long) arg);
-        }
-        if (type == double.class || type == Double.class) {
-            AttributeKey<Double> key = AttributeKey.doubleKey(name);
-            return (builder, arg) -> builder.put(key, (Double) arg);
-        }
-        if (type == boolean.class || type == Boolean.class) {
-            AttributeKey<Boolean> key = AttributeKey.booleanKey(name);
-            return (builder, arg) -> builder.put(key, (Boolean) arg);
-        }
-        if (type == int.class || type == Integer.class) {
-            AttributeKey<Long> key = AttributeKey.longKey(name);
-            return (builder, arg) -> builder.put(key, ((Integer) arg).longValue());
-        }
-        if (type == float.class || type == Float.class) {
-            AttributeKey<Double> key = AttributeKey.doubleKey(name);
-            return (builder, arg) -> builder.put(key, ((Float) arg).doubleValue());
-        }
-
-        // Default parameter types
-        AttributeKey<String> key = AttributeKey.stringKey(name);
-        return (builder, arg) -> builder.put(key, arg.toString());
-    }
-
-    public boolean isEmpty() {
-        return bindings == null || bindings.length == 0;
-    }
-
-    public void apply(AttributesBuilder target, Object[] args) {
-        if (args.length != bindings.length) {
-            return;
-        }
-
-        for (int i = 0; i < args.length; i++) {
-            bindings[i].apply(target, args[i]);
-        }
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java b/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java
deleted file mode 100644
index 73ffd5b06..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/trace/MethodCache.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.trace;
-
-import java.lang.reflect.Method;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Function;
-
-final class MethodCache<V> extends ClassValue<Map<Method, V>> {
-    public V computeIfAbsent(Method key, Function<? super Method, ? extends V> mappingFunction) {
-        return this.get(key.getDeclaringClass()).computeIfAbsent(key, mappingFunction);
-    }
-
-    @Override
-    protected Map<Method, V> computeValue(Class<?> type) {
-        return new ConcurrentHashMap<>();
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java b/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java
deleted file mode 100644
index 33fcc5ff1..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/trace/SpanAttributesExtractor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.trace;
-
-import io.opentelemetry.api.common.Attributes;
-import io.opentelemetry.api.common.AttributesBuilder;
-import java.lang.reflect.Method;
-
-public final class SpanAttributesExtractor {
-
-    private final MethodCache<AttributeBindings> cache;
-
-    SpanAttributesExtractor(MethodCache<AttributeBindings> cache) {
-        this.cache = cache;
-    }
-
-    public static SpanAttributesExtractor create() {
-        return new SpanAttributesExtractor(new MethodCache<>());
-    }
-
-    public Attributes extract(Method method, String[] parametersNames, Object[] args) {
-        AttributesBuilder attributes = Attributes.builder();
-        AttributeBindings bindings =
-            cache.computeIfAbsent(method, (Method m) -> AttributeBindings.bind(m, parametersNames));
-        if (!bindings.isEmpty()) {
-            bindings.apply(attributes, args);
-        }
-        return attributes.build();
-    }
-}
\ No newline at end of file
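// Editorial example (not part of the original patch): an illustrative sketch of what the
// extractor above produces. The Example class, method, and parameter names are invented;
// checked reflection exceptions are omitted for brevity.
class Example {
    void fetch(@SpanAttribute("streamId") long streamId, @SpanAttribute String tag) {
    }
}
SpanAttributesExtractor extractor = SpanAttributesExtractor.create();
Method method = Example.class.getDeclaredMethod("fetch", long.class, String.class);
Attributes attrs = extractor.extract(method, new String[] {"streamId", "tag"}, new Object[] {42L, "hot"});
// attrs now holds {streamId=42, tag="hot"}; the bindings are computed once per method and cached.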
className + "::" + method.getName() : withSpan.value(); - - TraceContext.Scope scope = createAndStartSpan(context, spanName); - if (scope == null) { - return joinPoint.proceed(); - } - Span span = scope.getSpan(); - Attributes attributes = EXTRACTOR.extract(method, signature.getParameterNames(), args); - span.setAllAttributes(attributes); - - try { - if (method.getReturnType() == CompletableFuture.class) { - return doTraceWhenReturnCompletableFuture(scope, joinPoint); - } else { - return doTraceWhenReturnObject(scope, joinPoint); - } - } catch (Throwable t) { - endSpan(scope, t); - throw t; - } - } - - public static T runWithSpanSync(TraceContext context, Attributes attributes, String spanName, - Callable callable) throws Throwable { - TraceContext.Scope scope = createAndStartSpan(context, spanName); - if (scope == null) { - return callable.call(); - } - scope.getSpan().setAllAttributes(attributes); - try (scope) { - T ret = callable.call(); - endSpan(scope, null); - return ret; - } catch (Throwable t) { - endSpan(scope, t); - throw t; - } - } - - public static CompletableFuture runWithSpanAsync(TraceContext context, Attributes attributes, - String spanName, - Callable> callable) throws Throwable { - TraceContext.Scope scope = createAndStartSpan(context, spanName); - if (scope == null) { - return callable.call(); - } - scope.getSpan().setAllAttributes(attributes); - try (scope) { - CompletableFuture cf = callable.call(); - cf.whenComplete((nil, ex) -> endSpan(scope, ex)); - return cf; - } catch (Throwable t) { - endSpan(scope, t); - throw t; - } - } - - public static TraceContext.Scope createAndStartSpan(TraceContext context, String name) { - if (context.isTraceDisabled()) { - return null; - } - Tracer tracer = context.tracer(); - Context parentContext = context.currentContext(); - Span span = tracer.spanBuilder(name) - .setParent(parentContext) - .startSpan(); - - return context.attachContext(parentContext.with(span)); - } - - public static void endSpan(TraceContext.Scope scope, Throwable t) { - if (scope == null) { - return; - } - if (t != null) { - scope.getSpan().recordException(t); - scope.getSpan().setStatus(StatusCode.ERROR, t.getMessage()); - } else { - scope.getSpan().setStatus(StatusCode.OK); - } - scope.getSpan().end(); - scope.close(); - } - - private static CompletableFuture doTraceWhenReturnCompletableFuture(TraceContext.Scope scope, - ProceedingJoinPoint joinPoint) throws Throwable { - CompletableFuture future = (CompletableFuture) joinPoint.proceed(); - return future.whenComplete((r, t) -> endSpan(scope, t)); - } - - private static Object doTraceWhenReturnObject(TraceContext.Scope scope, - ProceedingJoinPoint joinPoint) throws Throwable { - Object result = joinPoint.proceed(); - endSpan(scope, null); - return result; - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java b/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java deleted file mode 100644 index 3a44d439a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java b/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java
deleted file mode 100644
index 3a44d439a..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/trace/aop/S3StreamTraceAspect.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.trace.aop;
-
-import org.aspectj.lang.annotation.Aspect;
-
-@Aspect
-public class S3StreamTraceAspect {
-
-    // Commented out because it's costly to trace
-    //
-    // @Pointcut("@annotation(withSpan)")
-    // public void trace(WithSpan withSpan) {
-    // }
-    //
-    // @Around(value = "trace(withSpan) && execution(* com.automq.stream..*(..))", argNames = "joinPoint,withSpan")
-    // public Object createSpan(ProceedingJoinPoint joinPoint, WithSpan withSpan) throws Throwable {
-    //     Object[] args = joinPoint.getArgs();
-    //     if (args.length > 0 && args[0] instanceof TraceContext) {
-    //         TraceContext context = (TraceContext) args[0];
-    //         return TraceUtils.trace(context, joinPoint, withSpan);
-    //     }
-    //
-    //     return joinPoint.proceed();
-    // }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java b/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java
deleted file mode 100644
index 3ab87dddd..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/trace/context/TraceContext.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.trace.context;
-
-import io.opentelemetry.api.GlobalOpenTelemetry;
-import io.opentelemetry.api.trace.Span;
-import io.opentelemetry.api.trace.Tracer;
-import io.opentelemetry.context.Context;
-import javax.annotation.concurrent.NotThreadSafe;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Trace context that holds the current trace context. This class is not thread safe and should be copied before
- * asynchronous usage.
- */
-@NotThreadSafe
-public class TraceContext {
-    public static final TraceContext DEFAULT = new TraceContext(false, null, null);
-    private static final Logger LOGGER = LoggerFactory.getLogger(TraceContext.class);
-    private final boolean isTraceEnabled;
-    private final Tracer tracer;
-    private Context currContext;
-
-    public TraceContext(boolean isTraceEnabled, Tracer tracer, Context currContext) {
-        this.isTraceEnabled = isTraceEnabled;
-        if (isTraceEnabled && tracer == null) {
-            this.tracer = GlobalOpenTelemetry.getTracer("s3stream");
-        } else {
-            this.tracer = tracer;
-        }
-        if (isTraceEnabled && currContext == null) {
-            this.currContext = Context.current();
-        } else {
-            this.currContext = currContext;
-        }
-    }
-
-    public TraceContext(TraceContext traceContext) {
-        this(traceContext.isTraceEnabled, traceContext.tracer, traceContext.currContext);
-    }
-
-    public boolean isTraceDisabled() {
-        return !isTraceEnabled;
-    }
-
-    public Tracer tracer() {
-        return tracer;
-    }
-
-    public Context currentContext() {
-        return currContext;
-    }
-
-    public Scope attachContext(Context contextToAttach) {
-        return new Scope(contextToAttach);
-    }
-
-    public class Scope implements AutoCloseable {
-        private final Context prevContext;
-        private final Span span;
-
-        private Scope(Context contextToAttach) {
-            this.prevContext = currContext;
-            this.span = Span.fromContext(contextToAttach);
-            currContext = contextToAttach;
-        }
-
-        public Span getSpan() {
-            return span;
-        }
-
-        @Override
-        public void close() {
-            currContext = prevContext;
-        }
-    }
-
-}
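// Editorial example (not part of the original patch): the attach/restore contract above in
// miniature. createAndStartSpan attaches a new current context; endSpan ends the span and
// restores the previous context, so scopes must be ended in reverse order on one logical flow.
TraceContext ctx = new TraceContext(true, null, null);
TraceContext.Scope scope = TraceUtils.createAndStartSpan(ctx, "example");
try {
    // ... work that runs under the span; nested spans would attach further contexts ...
} finally {
    TraceUtils.endSpan(scope, null); // ends the span and pops back to the previous context
}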
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/Block.java b/s3stream/src/main/java/com/automq/stream/s3/wal/Block.java
deleted file mode 100644
index 8d35841db..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/Block.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal;
-
-import com.automq.stream.s3.wal.util.WALUtil;
-import io.netty.buffer.ByteBuf;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.function.Function;
-
-import static com.automq.stream.s3.wal.WriteAheadLog.AppendResult;
-
-/**
- * A Block contains multiple records, and will be written to the WAL in one batch.
- */
-public interface Block {
-    /**
-     * The start offset of this block.
-     * Aligned to {@link WALUtil#BLOCK_SIZE}.
-     */
-    long startOffset();
-
-    /**
-     * Append a record to this block.
-     * Cannot be called after {@link #data()} is called.
-     *
-     * @param recordSize     The size of this record.
-     * @param recordSupplier The supplier of this record which receives the start offset of this record as the parameter.
-     * @param future         The future of this record, which will be completed when the record is written to the WAL.
-     * @return The start offset of this record. If the size of this block exceeds the limit, return -1.
-     */
-    long addRecord(long recordSize, Function<Long, ByteBuf> recordSupplier,
-        CompletableFuture<AppendResult.CallbackResult> future);
-
-    /**
-     * Futures of all records in this block.
-     */
-    List<CompletableFuture<AppendResult.CallbackResult>> futures();
-
-    default boolean isEmpty() {
-        return futures().isEmpty();
-    }
-
-    /**
-     * The content of this block, which contains multiple records.
-     * The first call of this method will marshal all records in this block to a ByteBuf. It will be cached for later calls.
-     * It returns null if this block is empty.
-     */
-    ByteBuf data();
-
-    /**
-     * The size of this block.
-     */
-    long size();
-
-    default void release() {
-        ByteBuf data = data();
-        if (null != data) {
-            data.release();
-        }
-    }
-
-    /**
-     * Called when this block is polled and sent to the writer.
-     * Used for metrics.
-     */
-    void polled();
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockBatch.java b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockBatch.java
deleted file mode 100644
index 2bb6b1903..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockBatch.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.concurrent.CompletableFuture;
-
-public class BlockBatch {
-
-    private final Collection<Block> blocks;
-    private final long startOffset;
-    private final long endOffset;
-
-    public BlockBatch(Collection<Block> blocks) {
-        assert !blocks.isEmpty();
-        this.blocks = blocks;
-        this.startOffset = blocks.stream()
-            .map(Block::startOffset)
-            .min(Long::compareTo)
-            .orElseThrow();
-        this.endOffset = blocks.stream()
-            .map(b -> b.startOffset() + b.size())
-            .max(Long::compareTo)
-            .orElseThrow();
-    }
-
-    public long startOffset() {
-        return startOffset;
-    }
-
-    public long endOffset() {
-        return endOffset;
-    }
-
-    public Collection<Block> blocks() {
-        return Collections.unmodifiableCollection(blocks);
-    }
-
-    public Iterator<CompletableFuture<WriteAheadLog.AppendResult.CallbackResult>> futures() {
-        return new Iterator<>() {
-            private final Iterator<Block> blockIterator = blocks.iterator();
-            private Iterator<CompletableFuture<WriteAheadLog.AppendResult.CallbackResult>> futureIterator = blockIterator.next().futures().iterator();
-
-            @Override
-            public boolean hasNext() {
-                if (futureIterator.hasNext()) {
-                    return true;
-                } else {
-                    if (blockIterator.hasNext()) {
-                        futureIterator = blockIterator.next().futures().iterator();
-                        return hasNext();
-                    } else {
-                        return false;
-                    }
-                }
-            }
-
-            @Override
-            public CompletableFuture<WriteAheadLog.AppendResult.CallbackResult> next() {
-                return futureIterator.next();
-            }
-        };
-    }
-
-    public void release() {
-        blocks.forEach(Block::release);
-    }
-}
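// Editorial example (not part of the original patch): an illustrative sketch of the Block
// contract under assumed sizes. Records are staged with addRecord(); data() later marshals
// them into a single ByteBuf so the whole block is written to the device in one I/O. The
// recordBufferAt() supplier is hypothetical.
Block block = new BlockImpl(0L, 4096L, 1024L); // startOffset, maxSize, softLimit
CompletableFuture<WriteAheadLog.AppendResult.CallbackResult> cf = new CompletableFuture<>();
long recordOffset = block.addRecord(128L, offset -> recordBufferAt(offset), cf);
if (recordOffset < 0) {
    // The block is full: seal it, queue it for writing, and retry on a fresh block.
}
ByteBuf batched = block.data(); // marshals all staged records once, then caches the result
// ... write `batched` to the WAL channel, complete the futures, then call block.release()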
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockImpl.java b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockImpl.java
deleted file mode 100644
index c5d2c7ec8..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockImpl.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal;
-
-import com.automq.stream.s3.ByteBufAlloc;
-import com.automq.stream.s3.metrics.TimerUtil;
-import com.automq.stream.s3.metrics.stats.StorageOperationStats;
-import com.automq.stream.s3.wal.util.WALUtil;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.CompositeByteBuf;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.Supplier;
-
-public class BlockImpl implements Block {
-
-    private final long startOffset;
-    /**
-     * The max size of this block.
-     * Any attempt to add a record to this block will fail if the size of this block exceeds this limit.
-     */
-    private final long maxSize;
-    /**
-     * The soft limit of this block.
-     * Any attempt to add a record to this block will fail if the size of this block exceeds this limit,
-     * unless the block is empty.
-     */
-    private final long softLimit;
-    private final List<CompletableFuture<WriteAheadLog.AppendResult.CallbackResult>> futures = new LinkedList<>();
-    private final List<Supplier<ByteBuf>> records = new LinkedList<>();
-    private final TimerUtil timer;
-    /**
-     * The next offset to write in this block.
-     * Aligned to {@link WALUtil#BLOCK_SIZE}.
-     */
-    private long nextOffset = 0;
-    private CompositeByteBuf data = null;
-
-    /**
-     * Create a block.
-     * {@link #release()} must be called when this block is no longer used.
-     */
-    public BlockImpl(long startOffset, long maxSize, long softLimit) {
-        this.startOffset = startOffset;
-        this.maxSize = maxSize;
-        this.softLimit = softLimit;
-        this.timer = new TimerUtil();
-    }
-
-    @Override
-    public long startOffset() {
-        return startOffset;
-    }
-
-    /**
-     * Note: this method is NOT thread safe.
-     */
-    @Override
-    public long addRecord(long recordSize, Function<Long, ByteBuf> recordSupplier,
-        CompletableFuture<WriteAheadLog.AppendResult.CallbackResult> future) {
-        assert data == null;
-        long requiredCapacity = nextOffset + recordSize;
-        if (requiredCapacity > maxSize) {
-            return -1;
-        }
-        // if there is no record in this block, we can write a record larger than the soft limit
-        if (requiredCapacity > softLimit && !futures.isEmpty()) {
-            return -1;
-        }
-
-        long recordOffset = startOffset + nextOffset;
-        records.add(() -> recordSupplier.apply(recordOffset));
-        nextOffset += recordSize;
-        futures.add(future);
-
-        return recordOffset;
-    }
-
-    @Override
-    public List<CompletableFuture<WriteAheadLog.AppendResult.CallbackResult>> futures() {
-        return futures;
-    }
-
-    @Override
-    public ByteBuf data() {
-        if (null != data) {
-            return data;
-        }
-        if (records.isEmpty()) {
-            return null;
-        }
-
-        data = ByteBufAlloc.compositeByteBuffer();
-        for (Supplier<ByteBuf> supplier : records) {
-            ByteBuf record = supplier.get();
-            data.addComponent(true, record);
-        }
-        return data;
-    }
-
-    @Override
-    public long size() {
-        return nextOffset;
-    }
-
-    @Override
-    public void polled() {
-        StorageOperationStats.getInstance().appendWALBlockPolledStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS));
-    }
-}
diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java b/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
deleted file mode 100644
index edbe4450b..000000000
--- a/s3stream/src/main/java/com/automq/stream/s3/wal/BlockWALService.java
+++ /dev/null
@@ -1,888 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal;
-
-import com.automq.stream.s3.ByteBufAlloc;
-import com.automq.stream.s3.Config;
-import com.automq.stream.s3.metrics.S3StreamMetricsManager;
-import com.automq.stream.s3.metrics.TimerUtil;
-import com.automq.stream.s3.metrics.stats.StorageOperationStats;
-import com.automq.stream.s3.trace.TraceUtils;
-import com.automq.stream.s3.trace.context.TraceContext;
-import com.automq.stream.s3.wal.util.WALCachedChannel;
-import com.automq.stream.s3.wal.util.WALChannel;
-import com.automq.stream.s3.wal.util.WALUtil;
-import com.automq.stream.utils.ThreadUtils;
-import com.automq.stream.utils.Threads;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.CompositeByteBuf;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.Objects;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.function.Function;
-import org.apache.commons.lang3.time.StopWatch;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static com.automq.stream.s3.Constants.CAPACITY_NOT_SET;
-import static com.automq.stream.s3.Constants.NOOP_EPOCH;
-import static com.automq.stream.s3.Constants.NOOP_NODE_ID;
-
-/**
- * BlockWALService provides an infinite WAL, which is implemented based on block devices.
- * The capacity of the block device is configured by the application and may be smaller than the system allocation.
- * <p>
- * Usage:
- * <p>
- * 1. Call {@link BlockWALService#start} to start the service. Any other methods will throw an
- * {@link IllegalStateException} if called before {@link BlockWALService#start}.
- * <p>
- * 2. Call {@link BlockWALService#recover} to recover all untrimmed records if any.
- * <p>
- * 3. Call {@link BlockWALService#reset} to reset the service. This will clear all records, so make sure
- * all recovered records are processed before calling this method.
- * <p>
- * 4. Call {@link BlockWALService#append} to append records. As records are written in a circular way similar to
- * a RingBuffer, if the caller does not call {@link BlockWALService#trim} in time, an {@link OverCapacityException}
- * will be thrown when calling {@link BlockWALService#append}.
- * <p>
- * 5. Call {@link BlockWALService#shutdownGracefully} to shut down the service gracefully, which will wait for
- * all pending writes to complete.
- * <p>
- * Implementation:
- * <p>
- * WAL Header
- * <p>
- * There are {@link BlockWALService#WAL_HEADER_COUNT} WAL headers, each of which is {@link WALUtil#BLOCK_SIZE} bytes.
- * The WAL header records the meta information of the WAL and is used to recover the WAL when the service is restarted.
- * <p>
- * Sliding Window
- * <p>
- * The sliding window contains all records that have not been successfully written to the block device.
- * So when recovering, we only need to try to recover the records in the sliding window.
- * <p>
- * Record Header
- * <p>
- * Layout:
- * <p>
- * 0 - [4B] {@link SlidingWindowService.RecordHeaderCoreData#getMagicCode} Magic code of the record header,
- * used to verify the start of the record header
- * <p>
- * 1 - [4B] {@link SlidingWindowService.RecordHeaderCoreData#getRecordBodyLength} The length of the record body
- * <p>
- * 2 - [8B] {@link SlidingWindowService.RecordHeaderCoreData#getRecordBodyOffset} The logical start offset of the record body
- * <p>
- * 3 - [4B] {@link SlidingWindowService.RecordHeaderCoreData#getRecordBodyCRC} CRC of the record body, used to verify
- * the correctness of the record body
- * <p>
- * 4 - [4B] {@link SlidingWindowService.RecordHeaderCoreData#getRecordHeaderCRC} CRC of the rest of the record header,
- * used to verify the correctness of the record header
- */
-public class BlockWALService implements WriteAheadLog {
-    public static final int RECORD_HEADER_SIZE = 4 + 4 + 8 + 4 + 4;
-    public static final int RECORD_HEADER_WITHOUT_CRC_SIZE = RECORD_HEADER_SIZE - 4;
-    public static final int RECORD_HEADER_MAGIC_CODE = 0x87654321;
-    public static final int WAL_HEADER_COUNT = 2;
-    public static final int WAL_HEADER_CAPACITY = WALUtil.BLOCK_SIZE;
-    public static final int WAL_HEADER_TOTAL_CAPACITY = WAL_HEADER_CAPACITY * WAL_HEADER_COUNT;
-    private static final Logger LOGGER = LoggerFactory.getLogger(BlockWALService.class);
-    private final AtomicBoolean started = new AtomicBoolean(false);
-    private final AtomicBoolean resetFinished = new AtomicBoolean(false);
-    private final AtomicLong writeHeaderRoundTimes = new AtomicLong(0);
-    private final ExecutorService walHeaderFlusher = Threads.newFixedThreadPool(1, ThreadUtils.createThreadFactory("flush-wal-header-thread-%d", true), LOGGER);
-    private long initialWindowSize;
-    private WALCachedChannel walChannel;
-    private SlidingWindowService slidingWindowService;
-    private WALHeader walHeader;
-    private boolean recoveryMode;
-    private boolean firstStart;
-    private int nodeId = NOOP_NODE_ID;
-    private long epoch = NOOP_EPOCH;
-    /**
-     * The offset at which the recovery is complete. It is safe to write records at this offset.
-     * It is always aligned to the {@link WALUtil#BLOCK_SIZE}.
-     */
-    private long recoveryCompleteOffset = -1;
-
-    private BlockWALService() {
-    }
-
-    /**
-     * A protected constructor for testing purposes.
-     */
-    protected BlockWALService(BlockWALServiceBuilder builder) {
-        BlockWALService that = builder.build();
-        this.initialWindowSize = that.initialWindowSize;
-        this.walChannel = that.walChannel;
-        this.slidingWindowService = that.slidingWindowService;
-        this.walHeader = that.walHeader;
-        this.recoveryMode = that.recoveryMode;
-        this.nodeId = that.nodeId;
-        this.epoch = that.epoch;
-    }
-
-    public static BlockWALServiceBuilder builder(String path, long capacity) {
-        return new BlockWALServiceBuilder(path, capacity);
-    }
-
-    public static BlockWALServiceBuilder recoveryBuilder(String path) {
-        return new BlockWALServiceBuilder(path);
-    }
-
-    private void flushWALHeader(ShutdownType shutdownType) {
-        walHeader.setShutdownType(shutdownType);
-        flushWALHeader();
-    }
-
-    private synchronized void flushWALHeader() {
-        long position = writeHeaderRoundTimes.getAndIncrement() % WAL_HEADER_COUNT * WAL_HEADER_CAPACITY;
-        walHeader.setLastWriteTimestamp(System.nanoTime());
-        long trimOffset = walHeader.getTrimOffset();
-        ByteBuf buf = walHeader.marshal();
-        this.walChannel.retryWriteAndFlush(buf, position);
-        buf.release();
-        walHeader.updateFlushedTrimOffset(trimOffset);
-        LOGGER.debug("WAL header flushed, position: {}, header: {}", position, walHeader);
-    }
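// Editorial example (not part of the original patch): a hedged end-to-end sketch of the
// lifecycle described in the class javadoc (start -> recover -> reset -> append -> trim ->
// shutdown). The device path, capacity, and trim timing are illustrative; IOException,
// OverCapacityException, and error handling are omitted for brevity.
WriteAheadLog wal = BlockWALService.builder("/dev/nvme1n1", 1L << 30).build().start();
for (Iterator<RecoverResult> it = wal.recover(); it.hasNext(); ) {
    RecoverResult r = it.next();
    // re-apply r.record() to the upper layer, then release the buffer
    r.record().release();
}
wal.reset().join(); // clears recovered records; only safe once they have been processed
AppendResult result = wal.append(TraceContext.DEFAULT, Unpooled.wrappedBuffer(new byte[128]), 0); // crc == 0 -> computed internally
result.future().whenComplete((cb, ex) -> wal.trim(result.recordOffset())); // trim once the record is durable upstream
wal.shutdownGracefully();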
-
-    /**
-     * Try to read a record at the given offset.
-     * The returned record should be released by the caller.
-     *
-     * @throws ReadRecordException if the record is not found or the record is corrupted
-     */
-    private ByteBuf readRecord(long recoverStartOffset,
-        Function<Long, Long> logicalToPhysical) throws ReadRecordException {
-        final ByteBuf recordHeader = ByteBufAlloc.byteBuffer(RECORD_HEADER_SIZE);
-        SlidingWindowService.RecordHeaderCoreData readRecordHeader;
-        try {
-            readRecordHeader = parseRecordHeader(recoverStartOffset, recordHeader, logicalToPhysical);
-        } finally {
-            recordHeader.release();
-        }
-
-        int recordBodyLength = readRecordHeader.getRecordBodyLength();
-        ByteBuf recordBody = ByteBufAlloc.byteBuffer(recordBodyLength);
-        try {
-            parseRecordBody(recoverStartOffset, readRecordHeader, recordBody, logicalToPhysical);
-        } catch (ReadRecordException e) {
-            recordBody.release();
-            throw e;
-        }
-
-        return recordBody;
-    }
-
-    private SlidingWindowService.RecordHeaderCoreData parseRecordHeader(long recoverStartOffset, ByteBuf recordHeader,
-        Function<Long, Long> logicalToPhysical) throws ReadRecordException {
-        final long position = logicalToPhysical.apply(recoverStartOffset);
-        int read = walChannel.retryRead(recordHeader, position);
-        if (read != RECORD_HEADER_SIZE) {
-            throw new ReadRecordException(
-                WALUtil.alignNextBlock(recoverStartOffset),
-                String.format("failed to read record header: expected %d bytes, actual %d bytes, recoverStartOffset: %d", RECORD_HEADER_SIZE, read, recoverStartOffset)
-            );
-        }
-
-        SlidingWindowService.RecordHeaderCoreData readRecordHeader = SlidingWindowService.RecordHeaderCoreData.unmarshal(recordHeader);
-        if (readRecordHeader.getMagicCode() != RECORD_HEADER_MAGIC_CODE) {
-            throw new ReadRecordException(
-                WALUtil.alignNextBlock(recoverStartOffset),
-                String.format("magic code mismatch: expected %d, actual %d, recoverStartOffset: %d", RECORD_HEADER_MAGIC_CODE, readRecordHeader.getMagicCode(), recoverStartOffset)
-            );
-        }
-
-        int recordHeaderCRC = readRecordHeader.getRecordHeaderCRC();
-        int calculatedRecordHeaderCRC = WALUtil.crc32(recordHeader, RECORD_HEADER_WITHOUT_CRC_SIZE);
-        if (recordHeaderCRC != calculatedRecordHeaderCRC) {
-            throw new ReadRecordException(
-                WALUtil.alignNextBlock(recoverStartOffset),
-                String.format("record header crc mismatch: expected %d, actual %d, recoverStartOffset: %d", calculatedRecordHeaderCRC, recordHeaderCRC, recoverStartOffset)
-            );
-        }
-
-        int recordBodyLength = readRecordHeader.getRecordBodyLength();
-        if (recordBodyLength <= 0) {
-            throw new ReadRecordException(
-                WALUtil.alignNextBlock(recoverStartOffset),
-                String.format("invalid record body length: %d, recoverStartOffset: %d", recordBodyLength, recoverStartOffset)
-            );
-        }
-
-        long recordBodyOffset = readRecordHeader.getRecordBodyOffset();
-        if (recordBodyOffset != recoverStartOffset + RECORD_HEADER_SIZE) {
-            throw new ReadRecordException(
-                WALUtil.alignNextBlock(recoverStartOffset),
-                String.format("invalid record body offset: expected %d, actual %d, recoverStartOffset: %d", recoverStartOffset + RECORD_HEADER_SIZE, recordBodyOffset, recoverStartOffset)
-            );
-        }
-        return readRecordHeader;
-    }
-
-    private void parseRecordBody(long recoverStartOffset, SlidingWindowService.RecordHeaderCoreData readRecordHeader,
-        ByteBuf recordBody, Function<Long, Long> logicalToPhysical) throws ReadRecordException {
-        long recordBodyOffset = readRecordHeader.getRecordBodyOffset();
-        int recordBodyLength = readRecordHeader.getRecordBodyLength();
-        long position = logicalToPhysical.apply(recordBodyOffset);
-        int read = walChannel.retryRead(recordBody, position);
-        if (read != recordBodyLength) {
-            throw new
ReadRecordException( - WALUtil.alignNextBlock(recoverStartOffset + RECORD_HEADER_SIZE + recordBodyLength), - String.format("failed to read record body: expected %d bytes, actual %d bytes, recoverStartOffset: %d", recordBodyLength, read, recoverStartOffset) - ); - } - - int recordBodyCRC = readRecordHeader.getRecordBodyCRC(); - int calculatedRecordBodyCRC = WALUtil.crc32(recordBody); - if (recordBodyCRC != calculatedRecordBodyCRC) { - throw new ReadRecordException( - WALUtil.alignNextBlock(recoverStartOffset + RECORD_HEADER_SIZE + recordBodyLength), - String.format("record body crc mismatch: expected %d, actual %d, recoverStartOffset: %d", calculatedRecordBodyCRC, recordBodyCRC, recoverStartOffset) - ); - } - } - - @Override - public WriteAheadLog start() throws IOException { - StopWatch stopWatch = StopWatch.createStarted(); - - walChannel.open(channel -> Optional.ofNullable(tryReadWALHeader(walChannel)) - .map(WALHeader::getCapacity) - .orElse(null)); - - WALHeader header = tryReadWALHeader(walChannel); - if (null == header) { - assert !recoveryMode; - header = newWALHeader(); - firstStart = true; - LOGGER.info("no available WALHeader, create a new one: {}", header); - } else { - LOGGER.info("read WALHeader from WAL: {}", header); - } - walHeaderReady(header); - - started.set(true); - LOGGER.info("block WAL service started, cost: {} ms", stopWatch.getTime(TimeUnit.MILLISECONDS)); - return this; - } - - private void registerMetrics() { - S3StreamMetricsManager.registerDeltaWalOffsetSupplier(() -> { - try { - return this.getCurrentStartOffset(); - } catch (Exception e) { - LOGGER.error("failed to get current start offset", e); - return 0L; - } - }, () -> walHeader.getFlushedTrimOffset()); - } - - private long getCurrentStartOffset() { - Lock lock = slidingWindowService.getBlockLock(); - lock.lock(); - try { - Block block = slidingWindowService.getCurrentBlockLocked(); - return block.startOffset() + block.size(); - } finally { - lock.unlock(); - } - } - - /** - * Protected method for testing purpose. - */ - protected WALHeader tryReadWALHeader() { - return tryReadWALHeader(walChannel); - } - - /** - * Try to read the header from WAL, return the latest one. 
- */ - private WALHeader tryReadWALHeader(WALChannel walChannel) { - WALHeader header = null; - for (int i = 0; i < WAL_HEADER_COUNT; i++) { - ByteBuf buf = ByteBufAlloc.byteBuffer(WALHeader.WAL_HEADER_SIZE); - try { - int read = walChannel.retryRead(buf, i * WAL_HEADER_CAPACITY); - if (read != WALHeader.WAL_HEADER_SIZE) { - continue; - } - WALHeader tmpHeader = WALHeader.unmarshal(buf); - if (header == null || header.getLastWriteTimestamp() < tmpHeader.getLastWriteTimestamp()) { - header = tmpHeader; - } - } catch (UnmarshalException ignored) { - // failed to parse WALHeader, ignore - } finally { - buf.release(); - } - } - return header; - } - - private WALHeader newWALHeader() { - return new WALHeader(walChannel.capacity(), initialWindowSize); - } - - private void walHeaderReady(WALHeader header) { - if (nodeId != NOOP_NODE_ID) { - header.setNodeId(nodeId); - header.setEpoch(epoch); - } - this.walHeader = header; - flushWALHeader(); - } - - @Override - public void shutdownGracefully() { - StopWatch stopWatch = StopWatch.createStarted(); - - if (!started.getAndSet(false)) { - LOGGER.warn("block WAL service already shutdown or not started yet"); - return; - } - walHeaderFlusher.shutdown(); - try { - if (!walHeaderFlusher.awaitTermination(5, TimeUnit.SECONDS)) { - walHeaderFlusher.shutdownNow(); - } - } catch (InterruptedException e) { - walHeaderFlusher.shutdownNow(); - } - - boolean gracefulShutdown = Optional.ofNullable(slidingWindowService) - .map(s -> s.shutdown(1, TimeUnit.DAYS)) - .orElse(true); - flushWALHeader(gracefulShutdown ? ShutdownType.GRACEFULLY : ShutdownType.UNGRACEFULLY); - - walChannel.close(); - - LOGGER.info("block WAL service shutdown gracefully: {}, cost: {} ms", gracefulShutdown, stopWatch.getTime(TimeUnit.MILLISECONDS)); - } - - @Override - public WALMetadata metadata() { - return new WALMetadata(walHeader.getNodeId(), walHeader.getEpoch()); - } - - @Override - public AppendResult append(TraceContext context, ByteBuf buf, int crc) throws OverCapacityException { - // get current method name - TraceContext.Scope scope = TraceUtils.createAndStartSpan(context, "BlockWALService::append"); - TimerUtil timerUtil = new TimerUtil(); - try { - AppendResult result = append0(buf, crc); - result.future().whenComplete((nil, ex) -> TraceUtils.endSpan(scope, ex)); - return result; - } catch (OverCapacityException ex) { - buf.release(); - StorageOperationStats.getInstance().appendWALFullStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - TraceUtils.endSpan(scope, ex); - throw ex; - } - } - - private AppendResult append0(ByteBuf body, int crc) throws OverCapacityException { - TimerUtil timerUtil = new TimerUtil(); - checkStarted(); - checkWriteMode(); - checkResetFinished(); - - final long recordSize = RECORD_HEADER_SIZE + body.readableBytes(); - final CompletableFuture appendResultFuture = new CompletableFuture<>(); - long expectedWriteOffset; - - Lock lock = slidingWindowService.getBlockLock(); - lock.lock(); - try { - Block block = slidingWindowService.getCurrentBlockLocked(); - expectedWriteOffset = block.addRecord(recordSize, (offset) -> record(body, crc, offset), appendResultFuture); - if (expectedWriteOffset < 0) { - // this block is full, create a new one - block = slidingWindowService.sealAndNewBlockLocked(block, recordSize, walHeader.getFlushedTrimOffset(), walHeader.getCapacity() - WAL_HEADER_TOTAL_CAPACITY); - expectedWriteOffset = block.addRecord(recordSize, (offset) -> record(body, crc, offset), appendResultFuture); - } - } finally { - lock.unlock(); - } - 
slidingWindowService.tryWriteBlock(); - - final AppendResult appendResult = new AppendResultImpl(expectedWriteOffset, appendResultFuture); - appendResult.future().whenComplete((nil, ex) -> StorageOperationStats.getInstance().appendWALCompleteStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS))); - StorageOperationStats.getInstance().appendWALBeforeStats.record(timerUtil.elapsedAs(TimeUnit.NANOSECONDS)); - return appendResult; - } - - private ByteBuf recordHeader(ByteBuf body, int crc, long start) { - return new SlidingWindowService.RecordHeaderCoreData() - .setMagicCode(RECORD_HEADER_MAGIC_CODE) - .setRecordBodyLength(body.readableBytes()) - .setRecordBodyOffset(start + RECORD_HEADER_SIZE) - .setRecordBodyCRC(crc) - .marshal(); - } - - private ByteBuf record(ByteBuf body, int crc, long start) { - CompositeByteBuf record = ByteBufAlloc.compositeByteBuffer(); - crc = 0 == crc ? WALUtil.crc32(body) : crc; - record.addComponents(true, recordHeader(body, crc, start), body); - return record; - } - - @Override - public Iterator recover() { - checkStarted(); - if (firstStart) { - recoveryCompleteOffset = 0; - return Collections.emptyIterator(); - } - - long trimmedOffset = walHeader.getTrimOffset(); - long recoverStartOffset = trimmedOffset; - if (recoverStartOffset < 0) { - recoverStartOffset = 0; - } - long windowLength = walHeader.getSlidingWindowMaxLength(); - return new RecoverIterator(recoverStartOffset, windowLength, trimmedOffset); - } - - @Override - public CompletableFuture reset() { - checkStarted(); - checkRecoverFinished(); - - if (!recoveryMode) { - // in recovery mode, no need to start sliding window service - slidingWindowService.start(walHeader.getAtomicSlidingWindowMaxLength(), recoveryCompleteOffset); - } - LOGGER.info("reset sliding window to offset: {}", recoveryCompleteOffset); - CompletableFuture cf = trim(recoveryCompleteOffset - 1, true) - .thenRun(() -> resetFinished.set(true)); - - if (!recoveryMode) { - // Only register metrics when not in recovery mode - return cf.thenRun(this::registerMetrics); - } - return cf; - } - - @Override - public CompletableFuture trim(long offset) { - return trim(offset, false); - } - - private CompletableFuture trim(long offset, boolean internal) { - checkStarted(); - if (!internal) { - checkWriteMode(); - checkResetFinished(); - if (offset >= slidingWindowService.getWindowCoreData().getStartOffset()) { - throw new IllegalArgumentException("failed to trim: record at offset " + offset + " has not been flushed yet"); - } - } - - walHeader.updateTrimOffset(offset); - return CompletableFuture.runAsync(this::flushWALHeader, walHeaderFlusher); - } - - private void checkStarted() { - if (!started.get()) { - throw new IllegalStateException("WriteAheadLog has not been started yet"); - } - } - - private void checkWriteMode() { - if (recoveryMode) { - throw new IllegalStateException("WriteAheadLog is in recovery mode"); - } - } - - private void checkRecoverFinished() { - if (recoveryCompleteOffset < 0) { - throw new IllegalStateException("WriteAheadLog has not been completely recovered yet"); - } - } - - private void checkResetFinished() { - if (!resetFinished.get()) { - throw new IllegalStateException("WriteAheadLog has not been reset yet"); - } - } - - private SlidingWindowService.WALHeaderFlusher flusher() { - return () -> flushWALHeader(ShutdownType.UNGRACEFULLY); - } - - public static class BlockWALServiceBuilder { - private final String blockDevicePath; - private long blockDeviceCapacityWant = CAPACITY_NOT_SET; - private Boolean direct = null; 
- private int initBufferSize = 1 << 20; // 1MiB - private int maxBufferSize = 1 << 24; // 16MiB - private int ioThreadNums = 8; - private long slidingWindowInitialSize = 1 << 20; // 1MiB - private long slidingWindowUpperLimit = 1 << 29; // 512MiB - private long slidingWindowScaleUnit = 1 << 22; // 4MiB - private long blockSoftLimit = 1 << 18; // 256KiB - private int writeRateLimit = 3000; - private int nodeId = NOOP_NODE_ID; - private long epoch = NOOP_EPOCH; - private boolean recoveryMode = false; - - public BlockWALServiceBuilder(String blockDevicePath, long capacity) { - this.blockDevicePath = blockDevicePath; - this.blockDeviceCapacityWant = capacity; - } - - public BlockWALServiceBuilder(String blockDevicePath) { - this.blockDevicePath = blockDevicePath; - this.recoveryMode = true; - } - - public BlockWALServiceBuilder capacity(long capacity) { - this.blockDeviceCapacityWant = capacity; - return this; - } - - public BlockWALServiceBuilder config(Config config) { - return this - .capacity(config.walCapacity()) - .initBufferSize(config.walInitBufferSize()) - .maxBufferSize(config.walMaxBufferSize()) - .ioThreadNums(config.walThread()) - .slidingWindowInitialSize(config.walWindowInitial()) - .slidingWindowScaleUnit(config.walWindowIncrement()) - .slidingWindowUpperLimit(config.walWindowMax()) - .blockSoftLimit(config.walBlockSoftLimit()) - .writeRateLimit(config.walWriteRateLimit()) - .nodeId(config.nodeId()) - .epoch(config.nodeEpoch()); - } - - public BlockWALServiceBuilder direct(boolean direct) { - this.direct = direct; - return this; - } - - public BlockWALServiceBuilder initBufferSize(int initBufferSize) { - this.initBufferSize = initBufferSize; - return this; - } - - public BlockWALServiceBuilder maxBufferSize(int maxBufferSize) { - this.maxBufferSize = maxBufferSize; - return this; - } - - public BlockWALServiceBuilder ioThreadNums(int ioThreadNums) { - this.ioThreadNums = ioThreadNums; - return this; - } - - public BlockWALServiceBuilder slidingWindowInitialSize(long slidingWindowInitialSize) { - this.slidingWindowInitialSize = slidingWindowInitialSize; - return this; - } - - public BlockWALServiceBuilder slidingWindowUpperLimit(long slidingWindowUpperLimit) { - this.slidingWindowUpperLimit = slidingWindowUpperLimit; - return this; - } - - public BlockWALServiceBuilder slidingWindowScaleUnit(long slidingWindowScaleUnit) { - this.slidingWindowScaleUnit = slidingWindowScaleUnit; - return this; - } - - public BlockWALServiceBuilder blockSoftLimit(long blockSoftLimit) { - this.blockSoftLimit = blockSoftLimit; - return this; - } - - public BlockWALServiceBuilder writeRateLimit(int writeRateLimit) { - this.writeRateLimit = writeRateLimit; - return this; - } - - public BlockWALServiceBuilder nodeId(int nodeId) { - this.nodeId = nodeId; - return this; - } - - public BlockWALServiceBuilder epoch(long epoch) { - this.epoch = epoch; - return this; - } - - public BlockWALService build() { - if (recoveryMode) { - assert blockDeviceCapacityWant == CAPACITY_NOT_SET; - assert nodeId == NOOP_NODE_ID; - assert epoch == NOOP_EPOCH; - } else { - // make blockDeviceCapacityWant align to BLOCK_SIZE - blockDeviceCapacityWant = blockDeviceCapacityWant / WALUtil.BLOCK_SIZE * WALUtil.BLOCK_SIZE; - } - - BlockWALService blockWALService = new BlockWALService(); - - WALChannel.WALChannelBuilder walChannelBuilder = WALChannel.builder(blockDevicePath) - .capacity(blockDeviceCapacityWant) - .initBufferSize(initBufferSize) - .maxBufferSize(maxBufferSize) - .recoveryMode(recoveryMode); - if (direct != null) { - 
walChannelBuilder.direct(direct); - } - WALChannel channel = walChannelBuilder.build(); - blockWALService.walChannel = WALCachedChannel.of(channel); - if (!blockWALService.walChannel.useDirectIO()) { - LOGGER.warn("block wal not using direct IO"); - } - - if (!recoveryMode) { - // in recovery mode, no need to create sliding window service - // make sure window size is less than capacity - slidingWindowInitialSize = Math.min(slidingWindowInitialSize, blockDeviceCapacityWant - WAL_HEADER_TOTAL_CAPACITY); - slidingWindowUpperLimit = Math.min(slidingWindowUpperLimit, blockDeviceCapacityWant - WAL_HEADER_TOTAL_CAPACITY); - blockWALService.initialWindowSize = slidingWindowInitialSize; - blockWALService.slidingWindowService = new SlidingWindowService( - channel, - ioThreadNums, - slidingWindowUpperLimit, - slidingWindowScaleUnit, - blockSoftLimit, - writeRateLimit, - blockWALService.flusher() - ); - } - - blockWALService.recoveryMode = recoveryMode; - - if (nodeId != NOOP_NODE_ID) { - blockWALService.nodeId = nodeId; - blockWALService.epoch = epoch; - } - - LOGGER.info("build BlockWALService: {}", this); - - return blockWALService; - } - - @Override - public String toString() { - return "BlockWALServiceBuilder{" - + "blockDevicePath='" + blockDevicePath - + ", blockDeviceCapacityWant=" + blockDeviceCapacityWant - + ", direct=" + direct - + ", initBufferSize=" + initBufferSize - + ", maxBufferSize=" + maxBufferSize - + ", ioThreadNums=" + ioThreadNums - + ", slidingWindowInitialSize=" + slidingWindowInitialSize - + ", slidingWindowUpperLimit=" + slidingWindowUpperLimit - + ", slidingWindowScaleUnit=" + slidingWindowScaleUnit - + ", blockSoftLimit=" + blockSoftLimit - + ", writeRateLimit=" + writeRateLimit - + ", nodeId=" + nodeId - + ", epoch=" + epoch - + ", recoveryMode=" + recoveryMode - + '}'; - } - } - - static final class AppendResultImpl implements AppendResult { - private final long recordOffset; - private final CompletableFuture future; - - AppendResultImpl(long recordOffset, CompletableFuture future) { - this.recordOffset = recordOffset; - this.future = future; - } - - @Override - public String toString() { - return "AppendResultImpl{" + "recordOffset=" + recordOffset + '}'; - } - - @Override - public long recordOffset() { - return recordOffset; - } - - @Override - public CompletableFuture future() { - return future; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - if (obj == null || obj.getClass() != this.getClass()) { - return false; - } - var that = (AppendResultImpl) obj; - return this.recordOffset == that.recordOffset && - Objects.equals(this.future, that.future); - } - - @Override - public int hashCode() { - return Objects.hash(recordOffset, future); - } - - } - - static final class RecoverResultImpl implements RecoverResult { - private final ByteBuf record; - private final long recordOffset; - - RecoverResultImpl(ByteBuf record, long recordOffset) { - this.record = record; - this.recordOffset = recordOffset; - } - - @Override - public String toString() { - return "RecoverResultImpl{" - + "record=" + record - + ", recordOffset=" + recordOffset - + '}'; - } - - @Override - public ByteBuf record() { - return record; - } - - @Override - public long recordOffset() { - return recordOffset; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - if (obj == null || obj.getClass() != this.getClass()) { - return false; - } - var that = (RecoverResultImpl) obj; - return Objects.equals(this.record, 
that.record) && - this.recordOffset == that.recordOffset; - } - - @Override - public int hashCode() { - return Objects.hash(record, recordOffset); - } - - } - - static class ReadRecordException extends Exception { - long jumpNextRecoverOffset; - - public ReadRecordException(long offset, String message) { - super(message); - this.jumpNextRecoverOffset = offset; - } - - public long getJumpNextRecoverOffset() { - return jumpNextRecoverOffset; - } - } - - /** - * Protected for testing purpose. - */ - protected class RecoverIterator implements Iterator { - private final long windowLength; - private final long skipRecordAtOffset; - private long nextRecoverOffset; - private long firstInvalidOffset = -1; - private RecoverResult next; - - public RecoverIterator(long nextRecoverOffset, long windowLength, long skipRecordAtOffset) { - this.nextRecoverOffset = nextRecoverOffset; - this.skipRecordAtOffset = skipRecordAtOffset; - this.windowLength = windowLength; - } - - @Override - public boolean hasNext() { - boolean hasNext = tryReadNextRecord(); - if (!hasNext) { - // recovery complete - recoveryCompleteOffset = WALUtil.alignLargeByBlockSize(nextRecoverOffset); - walChannel.releaseCache(); - } - return hasNext; - } - - @Override - public RecoverResult next() { - if (!tryReadNextRecord()) { - throw new NoSuchElementException(); - } - - RecoverResult rst = next; - this.next = null; - return rst; - } - - /** - * Try to read next record. - * - * @return true if read success, false if no more record. {@link #next} will be null if and only if return false. - */ - private boolean tryReadNextRecord() { - if (next != null) { - return true; - } - while (firstInvalidOffset == -1 || nextRecoverOffset < firstInvalidOffset + windowLength) { - try { - boolean skip = nextRecoverOffset == skipRecordAtOffset; - ByteBuf nextRecordBody = readRecord(nextRecoverOffset, (offset) -> WALUtil.recordOffsetToPosition(offset, walHeader.getCapacity(), WAL_HEADER_TOTAL_CAPACITY)); - RecoverResultImpl recoverResult = new RecoverResultImpl(nextRecordBody, nextRecoverOffset); - nextRecoverOffset += RECORD_HEADER_SIZE + nextRecordBody.readableBytes(); - if (skip) { - nextRecordBody.release(); - continue; - } - next = recoverResult; - return true; - } catch (ReadRecordException e) { - if (firstInvalidOffset == -1 && WALUtil.isAligned(nextRecoverOffset) && nextRecoverOffset != skipRecordAtOffset) { - // first invalid offset - LOGGER.info("meet the first invalid offset during recovery. offset: {}, window: {}, detail: '{}'", - nextRecoverOffset, windowLength, e.getMessage()); - firstInvalidOffset = nextRecoverOffset; - } - nextRecoverOffset = e.getJumpNextRecoverOffset(); - } - } - return false; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java b/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java deleted file mode 100644 index 890084105..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/MemoryWriteAheadLog.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import com.automq.stream.s3.trace.context.TraceContext; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicLong; - -public class MemoryWriteAheadLog implements WriteAheadLog { - private final AtomicLong offsetAlloc = new AtomicLong(); - - @Override - public WriteAheadLog start() throws IOException { - return this; - } - - @Override - public void shutdownGracefully() { - - } - - @Override - public WALMetadata metadata() { - return new WALMetadata(0, 0); - } - - @Override - public AppendResult append(TraceContext traceContext, ByteBuf data, int crc) { - data.release(); - long offset = offsetAlloc.getAndIncrement(); - return new AppendResult() { - @Override - public long recordOffset() { - return offset; - } - - @Override - public CompletableFuture future() { - return CompletableFuture.completedFuture(null); - } - }; - } - - @Override - public Iterator recover() { - List l = Collections.emptyList(); - return l.iterator(); - } - - @Override - public CompletableFuture reset() { - return CompletableFuture.completedFuture(null); - } - - @Override - public CompletableFuture trim(long offset) { - return CompletableFuture.completedFuture(null); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/ShutdownType.java b/s3stream/src/main/java/com/automq/stream/s3/wal/ShutdownType.java deleted file mode 100644 index 2962f3084..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/ShutdownType.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import java.util.Objects; - -public enum ShutdownType { - /** - * shutdown gracefully - */ - GRACEFULLY(0), - - /** - * shutdown ungracefully - */ - UNGRACEFULLY(1); - - private final Integer code; - - ShutdownType(Integer code) { - this.code = code; - } - - public static ShutdownType fromCode(Integer code) { - for (ShutdownType type : ShutdownType.values()) { - if (Objects.equals(type.getCode(), code)) { - return type; - } - } - return null; - } - - public Integer getCode() { - return code; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/SlidingWindowService.java b/s3stream/src/main/java/com/automq/stream/s3/wal/SlidingWindowService.java deleted file mode 100644 index ceb15991c..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/SlidingWindowService.java +++ /dev/null @@ -1,569 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.metrics.stats.StorageOperationStats; -import com.automq.stream.s3.wal.util.WALChannel; -import com.automq.stream.s3.wal.util.WALUtil; -import com.automq.stream.utils.FutureUtil; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import io.netty.buffer.ByteBuf; -import java.util.Collection; -import java.util.LinkedList; -import java.util.PriorityQueue; -import java.util.Queue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.wal.BlockWALService.RECORD_HEADER_MAGIC_CODE; -import static com.automq.stream.s3.wal.BlockWALService.RECORD_HEADER_SIZE; -import static com.automq.stream.s3.wal.BlockWALService.RECORD_HEADER_WITHOUT_CRC_SIZE; -import static com.automq.stream.s3.wal.BlockWALService.WAL_HEADER_TOTAL_CAPACITY; -import static com.automq.stream.s3.wal.WriteAheadLog.AppendResult; -import static com.automq.stream.s3.wal.WriteAheadLog.OverCapacityException; - -/** - * The sliding window contains all records that have not been flushed to the disk yet. - * All records are written to the disk asynchronously by the AIO thread pool. - * When the sliding window is full, the current thread will be blocked until the sliding window is expanded. - * When the asynchronous write is completed, the start offset of the sliding window will be updated. - */ -public class SlidingWindowService { - private static final Logger LOGGER = LoggerFactory.getLogger(SlidingWindowService.class.getSimpleName()); - /** - * The minimum interval between two scheduled write operations. At most 1000 per second. - * - * @see this#pollBlockScheduler - */ - private static final long MIN_SCHEDULED_WRITE_INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(1) / 1000; - private final int ioThreadNums; - private final long upperLimit; - private final long scaleUnit; - private final long blockSoftLimit; - private final long minWriteIntervalNanos; - private final WALChannel walChannel; - private final WALHeaderFlusher walHeaderFlusher; - - /** - * The lock of {@link #pendingBlocks}, {@link #writingBlocks}, {@link #currentBlock}. - */ - private final Lock blockLock = new ReentrantLock(); - /** - * Blocks that are being written. - */ - private final Queue writingBlocks = new PriorityQueue<>(); - /** - * Whether the service is initialized. - * After the service is initialized, data in {@link #windowCoreData} is valid. - */ - private final AtomicBoolean initialized = new AtomicBoolean(false); - - /** - * The core data of the sliding window. Initialized when the service is started. - */ - private WindowCoreData windowCoreData; - /** - * Blocks that are waiting to be written. - * All blocks in this queue are ordered by the start offset. 
- */ - private Queue pendingBlocks = new LinkedList<>(); - /** - * The current block, records are added to this block. - */ - private Block currentBlock; - - /** - * The thread pool for write operations. - */ - private ExecutorService ioExecutor; - /** - * The scheduler for polling blocks and sending them to @{@link #ioExecutor}. - */ - private ScheduledExecutorService pollBlockScheduler; - - /** - * The last time when a batch of blocks is written to the disk. - */ - private long lastWriteTimeNanos = 0; - - public SlidingWindowService(WALChannel walChannel, int ioThreadNums, long upperLimit, long scaleUnit, - long blockSoftLimit, int writeRateLimit, WALHeaderFlusher flusher) { - this.walChannel = walChannel; - this.ioThreadNums = ioThreadNums; - this.upperLimit = upperLimit; - this.scaleUnit = scaleUnit; - this.blockSoftLimit = blockSoftLimit; - this.minWriteIntervalNanos = TimeUnit.SECONDS.toNanos(1) / writeRateLimit; - this.walHeaderFlusher = flusher; - } - - public WindowCoreData getWindowCoreData() { - assert initialized(); - return windowCoreData; - } - - public void start(AtomicLong windowMaxLength, long windowStartOffset) { - this.windowCoreData = new WindowCoreData(windowMaxLength, windowStartOffset, windowStartOffset); - this.ioExecutor = Threads.newFixedThreadPoolWithMonitor(ioThreadNums, - "block-wal-io-thread", false, LOGGER); - - long scheduledInterval = Math.max(MIN_SCHEDULED_WRITE_INTERVAL_NANOS, minWriteIntervalNanos); - this.pollBlockScheduler = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("wal-poll-block-thread-%d", false), LOGGER); - pollBlockScheduler.scheduleAtFixedRate(this::tryWriteBlock, 0, scheduledInterval, TimeUnit.NANOSECONDS); - - initialized.set(true); - } - - public boolean initialized() { - return initialized.get(); - } - - public boolean shutdown(long timeout, TimeUnit unit) { - if (this.ioExecutor == null) { - return true; - } - - boolean gracefulShutdown; - this.ioExecutor.shutdown(); - this.pollBlockScheduler.shutdownNow(); - try { - gracefulShutdown = this.ioExecutor.awaitTermination(timeout, unit); - } catch (InterruptedException e) { - this.ioExecutor.shutdownNow(); - gracefulShutdown = false; - } - return gracefulShutdown; - } - - /** - * Try to write a block. If it exceeds the rate limit, it will return immediately. - */ - public void tryWriteBlock() { - assert initialized(); - if (!tryAcquireWriteRateLimit()) { - return; - } - BlockBatch blocks = pollBlocks(); - if (blocks != null) { - blocks.blocks().forEach(Block::polled); - ioExecutor.submit(new WriteBlockProcessor(blocks)); - } - } - - /** - * Try to acquire the write rate limit. - */ - synchronized private boolean tryAcquireWriteRateLimit() { - long now = System.nanoTime(); - if (now - lastWriteTimeNanos < minWriteIntervalNanos) { - return false; - } - lastWriteTimeNanos = now; - return true; - } - - public Lock getBlockLock() { - assert initialized(); - return blockLock; - } - - /** - * Seal and create a new block. It - * - puts the previous block to the write queue - * - creates a new block, sets it as the current block and returns it - * Note: this method is NOT thread safe, and it should be called with {@link #blockLock} locked. 
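The rate limiting in tryAcquireWriteRateLimit above is a token-free interval check: a write is permitted only when at least minWriteIntervalNanos have elapsed since the last permitted write. A standalone sketch of the same idea, with a hypothetical class name and assuming a positive ratePerSecond:

import java.util.concurrent.TimeUnit;

// Interval-based rate limiter: permits at most `ratePerSecond` writes per
// second by spacing permitted writes at least one interval apart.
class IntervalRateLimiterSketch {
    private final long minIntervalNanos;
    private long lastAcquiredNanos;

    IntervalRateLimiterSketch(int ratePerSecond) {
        this.minIntervalNanos = TimeUnit.SECONDS.toNanos(1) / ratePerSecond;
        this.lastAcquiredNanos = System.nanoTime() - minIntervalNanos; // let the first acquire pass
    }

    synchronized boolean tryAcquire() {
        long now = System.nanoTime();
        if (now - lastAcquiredNanos < minIntervalNanos) {
            return false; // too soon since the last permitted write
        }
        lastAcquiredNanos = now;
        return true;
    }
}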
- */ - public Block sealAndNewBlockLocked(Block previousBlock, long minSize, long trimOffset, - long recordSectionCapacity) throws OverCapacityException { - assert initialized(); - long startOffset = nextBlockStartOffset(previousBlock); - - // If the end of the physical device is insufficient for this block, jump to the start of the physical device - if ((recordSectionCapacity - startOffset % recordSectionCapacity) < minSize) { - startOffset = startOffset + recordSectionCapacity - startOffset % recordSectionCapacity; - } - - // Not enough space for this block - if (startOffset + minSize - trimOffset > recordSectionCapacity) { - LOGGER.warn("failed to allocate write offset as the ring buffer is full: startOffset: {}, minSize: {}, trimOffset: {}, recordSectionCapacity: {}", - startOffset, minSize, trimOffset, recordSectionCapacity); - throw new OverCapacityException(String.format("failed to allocate write offset: ring buffer is full: startOffset: %d, minSize: %d, trimOffset: %d, recordSectionCapacity: %d", - startOffset, minSize, trimOffset, recordSectionCapacity)); - } - - long maxSize = upperLimit; - // The size of the block should not be larger than writable size of the ring buffer - // Let capacity=100, start=148, trim=49, then maxSize=100-148+49=1 - maxSize = Math.min(recordSectionCapacity - startOffset + trimOffset, maxSize); - // The size of the block should not be larger than the end of the physical device - // Let capacity=100, start=198, trim=198, then maxSize=100-198%100=2 - maxSize = Math.min(recordSectionCapacity - startOffset % recordSectionCapacity, maxSize); - - Block newBlock = new BlockImpl(startOffset, maxSize, blockSoftLimit); - if (!previousBlock.isEmpty()) { - // There are some records to be written in the previous block - pendingBlocks.add(previousBlock); - } else { - // The previous block is empty, so it can be released directly - previousBlock.release(); - } - setCurrentBlockLocked(newBlock); - return newBlock; - } - - /** - * Get the current block. - * Note: this method is NOT thread safe, and it should be called with {@link #blockLock} locked. - */ - public Block getCurrentBlockLocked() { - assert initialized(); - // The current block is null only when no record has been written - if (null == currentBlock) { - currentBlock = nextBlock(windowCoreData.getNextWriteOffset()); - } - return currentBlock; - } - - /** - * Set the current block. - * Note: this method is NOT thread safe, and it should be called with {@link #blockLock} locked. - */ - private void setCurrentBlockLocked(Block block) { - this.currentBlock = block; - } - - /** - * Get the start offset of the next block. - */ - private long nextBlockStartOffset(Block block) { - return block.startOffset() + WALUtil.alignLargeByBlockSize(block.size()); - } - - /** - * Create a new block with the given start offset. - * This method is only used when we don't know the maximum length of the new block. - */ - private Block nextBlock(long startOffset) { - // Trick: we cannot determine the maximum length of the block here, so we set it to 0 first. - // When we try to write a record, this block will be found full, and then a new block will be created. - return new BlockImpl(startOffset, 0, 0); - } - - /** - * Create a new block with the given previous block. - * This method is only used when we don't know the maximum length of the new block. - */ - private Block nextBlock(Block previousBlock) { - return nextBlock(nextBlockStartOffset(previousBlock)); - } - - /** - * Get all blocks to be written. 
If there is no non-empty block, return null. - */ - private BlockBatch pollBlocks() { - blockLock.lock(); - try { - return pollBlocksLocked(); - } finally { - blockLock.unlock(); - } - } - - /** - * Get all blocks to be written. If there is no non-empty block, return null. - * Note: this method is NOT thread safe, and it should be called with {@link #blockLock} locked. - */ - private BlockBatch pollBlocksLocked() { - Block currentBlock = getCurrentBlockLocked(); - - boolean isPendingBlockEmpty = pendingBlocks.isEmpty(); - boolean isCurrentBlockEmpty = currentBlock == null || currentBlock.isEmpty(); - if (isPendingBlockEmpty && isCurrentBlockEmpty) { - // No record to be written - return null; - } - - Collection blocks; - if (!isPendingBlockEmpty) { - blocks = pendingBlocks; - pendingBlocks = new LinkedList<>(); - } else { - blocks = new LinkedList<>(); - } - if (!isCurrentBlockEmpty) { - blocks.add(currentBlock); - setCurrentBlockLocked(nextBlock(currentBlock)); - } - - BlockBatch blockBatch = new BlockBatch(blocks); - writingBlocks.add(blockBatch.startOffset()); - - return blockBatch; - } - - /** - * Finish the given block batch, and return the start offset of the first block which has not been flushed yet. - */ - private long wroteBlocks(BlockBatch wroteBlocks) { - blockLock.lock(); - try { - return wroteBlocksLocked(wroteBlocks); - } finally { - blockLock.unlock(); - } - } - - /** - * Finish the given block batch, and return the start offset of the first block which has not been flushed yet. - * Note: this method is NOT thread safe, and it should be called with {@link #blockLock} locked. - */ - private long wroteBlocksLocked(BlockBatch wroteBlocks) { - boolean removed = writingBlocks.remove(wroteBlocks.startOffset()); - assert removed; - if (writingBlocks.isEmpty()) { - return getCurrentBlockLocked().startOffset(); - } - return writingBlocks.peek(); - } - - private void writeBlockData(BlockBatch blocks) { - TimerUtil timer = new TimerUtil(); - for (Block block : blocks.blocks()) { - long position = WALUtil.recordOffsetToPosition(block.startOffset(), walChannel.capacity(), WAL_HEADER_TOTAL_CAPACITY); - walChannel.retryWrite(block.data(), position); - } - walChannel.retryFlush(); - StorageOperationStats.getInstance().appendWALWriteStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - } - - private void makeWriteOffsetMatchWindow(long newWindowEndOffset) { - // align to block size - newWindowEndOffset = WALUtil.alignLargeByBlockSize(newWindowEndOffset); - long windowStartOffset = windowCoreData.getStartOffset(); - long windowMaxLength = windowCoreData.getMaxLength(); - if (newWindowEndOffset > windowStartOffset + windowMaxLength) { - // endOffset - startOffset <= block.maxSize <= upperLimit in {@link #sealAndNewBlockLocked} - assert newWindowEndOffset - windowStartOffset <= upperLimit; - long newWindowMaxLength = Math.min(newWindowEndOffset - windowStartOffset + scaleUnit, upperLimit); - windowCoreData.scaleOutWindow(walHeaderFlusher, newWindowMaxLength); - } - } - - public interface WALHeaderFlusher { - void flush(); - } - - public static class RecordHeaderCoreData { - private int magicCode0 = RECORD_HEADER_MAGIC_CODE; - private int recordBodyLength1; - private long recordBodyOffset2; - private int recordBodyCRC3; - private int recordHeaderCRC4; - - public static RecordHeaderCoreData unmarshal(ByteBuf byteBuf) { - RecordHeaderCoreData recordHeaderCoreData = new RecordHeaderCoreData(); - byteBuf.markReaderIndex(); - recordHeaderCoreData.magicCode0 = byteBuf.readInt(); - 
recordHeaderCoreData.recordBodyLength1 = byteBuf.readInt(); - recordHeaderCoreData.recordBodyOffset2 = byteBuf.readLong(); - recordHeaderCoreData.recordBodyCRC3 = byteBuf.readInt(); - recordHeaderCoreData.recordHeaderCRC4 = byteBuf.readInt(); - byteBuf.resetReaderIndex(); - return recordHeaderCoreData; - } - - public int getMagicCode() { - return magicCode0; - } - - public RecordHeaderCoreData setMagicCode(int magicCode) { - this.magicCode0 = magicCode; - return this; - } - - public int getRecordBodyLength() { - return recordBodyLength1; - } - - public RecordHeaderCoreData setRecordBodyLength(int recordBodyLength) { - this.recordBodyLength1 = recordBodyLength; - return this; - } - - public long getRecordBodyOffset() { - return recordBodyOffset2; - } - - public RecordHeaderCoreData setRecordBodyOffset(long recordBodyOffset) { - this.recordBodyOffset2 = recordBodyOffset; - return this; - } - - public int getRecordBodyCRC() { - return recordBodyCRC3; - } - - public RecordHeaderCoreData setRecordBodyCRC(int recordBodyCRC) { - this.recordBodyCRC3 = recordBodyCRC; - return this; - } - - public int getRecordHeaderCRC() { - return recordHeaderCRC4; - } - - @Override - public String toString() { - return "RecordHeaderCoreData{" + - "magicCode=" + magicCode0 + - ", recordBodyLength=" + recordBodyLength1 + - ", recordBodyOffset=" + recordBodyOffset2 + - ", recordBodyCRC=" + recordBodyCRC3 + - ", recordHeaderCRC=" + recordHeaderCRC4 + - '}'; - } - - private ByteBuf marshalHeaderExceptCRC() { - ByteBuf buf = ByteBufAlloc.byteBuffer(RECORD_HEADER_SIZE); - buf.writeInt(magicCode0); - buf.writeInt(recordBodyLength1); - buf.writeLong(recordBodyOffset2); - buf.writeInt(recordBodyCRC3); - return buf; - } - - public ByteBuf marshal() { - ByteBuf buf = marshalHeaderExceptCRC(); - buf.writeInt(WALUtil.crc32(buf, RECORD_HEADER_WITHOUT_CRC_SIZE)); - return buf; - } - } - - public static class WindowCoreData { - private final Lock scaleOutLock = new ReentrantLock(); - private final AtomicLong maxLength; - /** - * Next write offset of sliding window, always aligned to the {@link WALUtil#BLOCK_SIZE}. - */ - private final AtomicLong nextWriteOffset; - /** - * Start offset of sliding window, always aligned to the {@link WALUtil#BLOCK_SIZE}. - * The data before this offset has already been written to the disk. - */ - private final AtomicLong startOffset; - - public WindowCoreData(AtomicLong maxLength, long nextWriteOffset, long startOffset) { - this.maxLength = maxLength; - this.nextWriteOffset = new AtomicLong(nextWriteOffset); - this.startOffset = new AtomicLong(startOffset); - } - - public long getMaxLength() { - return maxLength.get(); - } - - public void setMaxLength(long maxLength) { - this.maxLength.set(maxLength); - } - - public long getNextWriteOffset() { - return nextWriteOffset.get(); - } - - public long getStartOffset() { - return startOffset.get(); - } - - public void updateWindowStartOffset(long offset) { - this.startOffset.accumulateAndGet(offset, Math::max); - } - - public void scaleOutWindow(WALHeaderFlusher flusher, long newMaxLength) { - boolean scaleWindowHappened = false; - scaleOutLock.lock(); - try { - if (newMaxLength < getMaxLength()) { - // Another thread has already scaled out the window. 
- return; - } - - setMaxLength(newMaxLength); - flusher.flush(); - scaleWindowHappened = true; - } finally { - scaleOutLock.unlock(); - if (scaleWindowHappened) { - LOGGER.info("window scale out to {}", newMaxLength); - } else { - LOGGER.debug("window already scale out, ignore"); - } - } - } - } - - class WriteBlockProcessor implements Runnable { - private final BlockBatch blocks; - private final TimerUtil timer; - - public WriteBlockProcessor(BlockBatch blocks) { - this.blocks = blocks; - this.timer = new TimerUtil(); - } - - @Override - public void run() { - StorageOperationStats.getInstance().appendWALAwaitStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - try { - writeBlock(this.blocks); - } catch (Exception e) { - // should not happen, but just in case - FutureUtil.completeExceptionally(blocks.futures(), e); - LOGGER.error(String.format("failed to write blocks, startOffset: %s", blocks.startOffset()), e); - } finally { - blocks.release(); - } - } - - private void writeBlock(BlockBatch blocks) { - makeWriteOffsetMatchWindow(blocks.endOffset()); - writeBlockData(blocks); - - TimerUtil timer = new TimerUtil(); - // Update the start offset of the sliding window after finishing writing the record. - windowCoreData.updateWindowStartOffset(wroteBlocks(blocks)); - - FutureUtil.complete(blocks.futures(), new AppendResult.CallbackResult() { - @Override - public long flushedOffset() { - return windowCoreData.getStartOffset(); - } - - @Override - public String toString() { - return "CallbackResult{" + "flushedOffset=" + flushedOffset() + '}'; - } - }); - StorageOperationStats.getInstance().appendWALAfterStats.record(timer.elapsedAs(TimeUnit.NANOSECONDS)); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/UnmarshalException.java b/s3stream/src/main/java/com/automq/stream/s3/wal/UnmarshalException.java deleted file mode 100644 index 469e7d8ba..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/UnmarshalException.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -public class UnmarshalException extends Exception { - public UnmarshalException(String message) { - super(message); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WALCapacityMismatchException.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WALCapacityMismatchException.java deleted file mode 100644 index 7dd1d0a32..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/WALCapacityMismatchException.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import java.io.IOException; - -public class WALCapacityMismatchException extends IOException { - - public WALCapacityMismatchException(String path, long expected, long actual) { - super(String.format("WAL capacity mismatch for %s: expected %d, actual %d", path, expected, actual)); - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WALHeader.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WALHeader.java deleted file mode 100644 index d006de683..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/WALHeader.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.wal.util.WALUtil; -import io.netty.buffer.ByteBuf; -import java.util.concurrent.atomic.AtomicLong; - -/** - *

- * Layout:
- * 0 - [4B] {@link WALHeader#magicCode0} Magic code of the WAL header, used to verify the start of the WAL header
- * 1 - [8B] {@link WALHeader#capacity1} Capacity of the block device, which is configured by the application
- * and should not be modified after the first start of the service
- * 2 - [8B] {@link WALHeader#trimOffset2} The logical start offset of the WAL, records before which are
- * considered useless and have been deleted
- * 3 - [8B] {@link WALHeader#lastWriteTimestamp3} The timestamp of the last write to the WAL header, used to
- * determine which WAL header is the latest when recovering
- * 4 - [8B] {@link WALHeader#slidingWindowMaxLength4} The maximum size of the sliding window, which can be
- * scaled up when needed, and is used to determine when to stop recovering
- * 5 - [4B] {@link WALHeader#shutdownType5} The shutdown type of the service, {@link ShutdownType#GRACEFULLY} or
- * {@link ShutdownType#UNGRACEFULLY}
- * 6 - [4B] {@link WALHeader#nodeId6} the node id of the WAL
- * 7 - [8B] {@link WALHeader#epoch7} the epoch id of the node
- * 8 - [4B] {@link WALHeader#crc8} CRC of the rest of the WAL header, used to verify the correctness of the - * WAL header - */ -public class WALHeader { - public static final int WAL_HEADER_MAGIC_CODE = 0x12345678; - public static final int WAL_HEADER_SIZE = 4 // magic code - + 8 // capacity - + 8 // trim offset - + 8 // last write timestamp - + 8 // sliding window max length - + 4 // shutdown type - + 4 // node id - + 4 // node epoch - + 8; // crc - public static final int WAL_HEADER_WITHOUT_CRC_SIZE = WAL_HEADER_SIZE - 4; - private final AtomicLong trimOffset2 = new AtomicLong(-1); - private final AtomicLong flushedTrimOffset = new AtomicLong(0); - private final AtomicLong slidingWindowMaxLength4 = new AtomicLong(0); - private int magicCode0 = WAL_HEADER_MAGIC_CODE; - private long capacity1; - private long lastWriteTimestamp3 = System.nanoTime(); - private ShutdownType shutdownType5 = ShutdownType.UNGRACEFULLY; - private int nodeId6; - private long epoch7; - private int crc8; - - public WALHeader(long capacity, long windowMaxLength) { - this.capacity1 = capacity; - this.slidingWindowMaxLength4.set(windowMaxLength); - } - - public static WALHeader unmarshal(ByteBuf buf) throws UnmarshalException { - WALHeader walHeader = new WALHeader(0, 0); - buf.markReaderIndex(); - walHeader.magicCode0 = buf.readInt(); - walHeader.capacity1 = buf.readLong(); - long trimOffset = buf.readLong(); - walHeader.trimOffset2.set(trimOffset); - walHeader.flushedTrimOffset.set(trimOffset); - walHeader.lastWriteTimestamp3 = buf.readLong(); - walHeader.slidingWindowMaxLength4.set(buf.readLong()); - walHeader.shutdownType5 = ShutdownType.fromCode(buf.readInt()); - walHeader.nodeId6 = buf.readInt(); - walHeader.epoch7 = buf.readLong(); - walHeader.crc8 = buf.readInt(); - buf.resetReaderIndex(); - - if (walHeader.magicCode0 != WAL_HEADER_MAGIC_CODE) { - throw new UnmarshalException(String.format("WALHeader MagicCode not match, Recovered: [%d] expect: [%d]", walHeader.magicCode0, WAL_HEADER_MAGIC_CODE)); - } - - int crc = WALUtil.crc32(buf, WAL_HEADER_WITHOUT_CRC_SIZE); - if (crc != walHeader.crc8) { - throw new UnmarshalException(String.format("WALHeader CRC not match, Recovered: [%d] expect: [%d]", walHeader.crc8, crc)); - } - - return walHeader; - } - - public long getCapacity() { - return capacity1; - } - - public long getTrimOffset() { - return trimOffset2.get(); - } - - // Update the trim offset if the given trim offset is larger than the current one. 
- public WALHeader updateTrimOffset(long trimOffset) { - trimOffset2.accumulateAndGet(trimOffset, Math::max); - return this; - } - - public long getFlushedTrimOffset() { - return flushedTrimOffset.get(); - } - - public void updateFlushedTrimOffset(long flushedTrimOffset) { - this.flushedTrimOffset.accumulateAndGet(flushedTrimOffset, Math::max); - } - - public long getLastWriteTimestamp() { - return lastWriteTimestamp3; - } - - public WALHeader setLastWriteTimestamp(long lastWriteTimestamp) { - this.lastWriteTimestamp3 = lastWriteTimestamp; - return this; - } - - public long getSlidingWindowMaxLength() { - return slidingWindowMaxLength4.get(); - } - - public AtomicLong getAtomicSlidingWindowMaxLength() { - return slidingWindowMaxLength4; - } - - public ShutdownType getShutdownType() { - return shutdownType5; - } - - public WALHeader setShutdownType(ShutdownType shutdownType) { - this.shutdownType5 = shutdownType; - return this; - } - - public int getNodeId() { - return nodeId6; - } - - public WALHeader setNodeId(int nodeId) { - this.nodeId6 = nodeId; - return this; - } - - public long getEpoch() { - return epoch7; - } - - public WALHeader setEpoch(long epoch) { - this.epoch7 = epoch; - return this; - } - - @Override - public String toString() { - return "WALHeader{" - + "magicCode=" + magicCode0 - + ", capacity=" + capacity1 - + ", trimOffset=" + trimOffset2 - + ", lastWriteTimestamp=" + lastWriteTimestamp3 - + ", slidingWindowMaxLength=" + slidingWindowMaxLength4 - + ", shutdownType=" + shutdownType5 - + ", nodeId=" + nodeId6 - + ", epoch=" + epoch7 - + ", crc=" + crc8 - + '}'; - } - - private ByteBuf marshalHeaderExceptCRC() { - ByteBuf buf = ByteBufAlloc.byteBuffer(WAL_HEADER_SIZE); - buf.writeInt(magicCode0); - buf.writeLong(capacity1); - buf.writeLong(trimOffset2.get()); - buf.writeLong(lastWriteTimestamp3); - buf.writeLong(slidingWindowMaxLength4.get()); - buf.writeInt(shutdownType5.getCode()); - buf.writeInt(nodeId6); - buf.writeLong(epoch7); - return buf; - } - - ByteBuf marshal() { - ByteBuf buf = marshalHeaderExceptCRC(); - this.crc8 = WALUtil.crc32(buf, WAL_HEADER_WITHOUT_CRC_SIZE); - buf.writeInt(crc8); - return buf; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WALMetadata.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WALMetadata.java deleted file mode 100644 index 884e37b78..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/WALMetadata.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -public class WALMetadata { - private final int nodeId; - private final long epoch; - - public WALMetadata(int nodeId, long epoch) { - this.nodeId = nodeId; - this.epoch = epoch; - } - - public int nodeId() { - return nodeId; - } - - public long epoch() { - return epoch; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WALNotInitializedException.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WALNotInitializedException.java deleted file mode 100644 index 5a95a5edf..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/WALNotInitializedException.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import java.io.IOException; - -public class WALNotInitializedException extends IOException { - - public WALNotInitializedException(String message) { - super(message); - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java b/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java deleted file mode 100644 index ad20a8748..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/WriteAheadLog.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import com.automq.stream.s3.trace.context.TraceContext; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.util.Iterator; -import java.util.concurrent.CompletableFuture; - -public interface WriteAheadLog { - - WriteAheadLog start() throws IOException; - - void shutdownGracefully(); - - /** - * Get write ahead log metadata - * - * @return {@link WALMetadata} - */ - WALMetadata metadata(); - - /** - * Append data to log, note append may be out of order. - * ex. when sequence append R1 R2 , R2 maybe complete before R1. - * {@link ByteBuf#release()} will be called whatever append success or not. - * - * @return The data position will be written. - */ - AppendResult append(TraceContext context, ByteBuf data, int crc) throws OverCapacityException; - - default AppendResult append(TraceContext context, ByteBuf data) throws OverCapacityException { - return append(context, data, 0); - } - - default AppendResult append(ByteBuf data, int crc) throws OverCapacityException { - return append(TraceContext.DEFAULT, data, crc); - } - - default AppendResult append(ByteBuf data) throws OverCapacityException { - return append(TraceContext.DEFAULT, data, 0); - } - - Iterator recover(); - - /** - * Reset all data in log. - * Equivalent to trim to the end of the log. - * - * @return future complete when reset done. - */ - CompletableFuture reset(); - - /** - * Trim data <= offset in log. - * - * @param offset inclusive trim offset. - * @return future complete when trim done. 
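Taken together, the contract above implies a simple lifecycle: append returns at once with a pre-allocated offset, the future completes once the record reaches disk, and trim reclaims space for records that are durable elsewhere. A usage sketch against this interface follows; the wrapper class name is hypothetical, and the calls mirror how the WriteBench tool later in this patch drives the WAL:

import com.automq.stream.s3.wal.WriteAheadLog;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

// Usage sketch: append a record, wait for it to be flushed, then trim it.
class WalUsageSketch {
    static void appendAndTrim(WriteAheadLog wal) throws WriteAheadLog.OverCapacityException {
        WriteAheadLog.AppendResult result =
            wal.append(Unpooled.wrappedBuffer("hello".getBytes(StandardCharsets.UTF_8)));
        long offset = result.recordOffset(); // pre-allocated; records may complete out of order
        result.future().thenAccept(cb -> {
            // Every record before cb.flushedOffset() is durable on disk.
            if (cb.flushedOffset() > offset) {
                wal.trim(offset); // this record is flushed, so it may be trimmed
            }
        });
    }
}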
- */ - CompletableFuture trim(long offset); - - interface AppendResult { - // The pre-allocated starting offset of the record - long recordOffset(); - - CompletableFuture future(); - - interface CallbackResult { - // The record before this offset has been flushed to disk - long flushedOffset(); - } - } - - interface RecoverResult { - ByteBuf record(); - - /** - * @see AppendResult#recordOffset() - */ - long recordOffset(); - } - - class OverCapacityException extends Exception { - public OverCapacityException(String message) { - super(message); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/BenchTool.java b/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/BenchTool.java deleted file mode 100644 index 1cf658ca4..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/BenchTool.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.benchmark; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.wal.BlockWALService; -import com.automq.stream.s3.wal.WriteAheadLog; -import com.automq.stream.s3.wal.util.WALChannel; -import io.netty.buffer.ByteBuf; -import java.io.File; -import java.io.IOException; -import java.util.Iterator; -import net.sourceforge.argparse4j.helper.HelpScreenException; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.ArgumentParserException; -import net.sourceforge.argparse4j.inf.Namespace; - -import static com.automq.stream.s3.wal.util.WALUtil.isBlockDevice; - -public class BenchTool { - - public static Namespace parseArgs(ArgumentParser parser, String[] args) { - Namespace ns = null; - try { - ns = parser.parseArgs(args); - } catch (HelpScreenException e) { - System.exit(0); - } catch (ArgumentParserException e) { - parser.handleError(e); - System.exit(1); - } - return ns; - } - - public static int recoverAndReset(WriteAheadLog wal) { - int recovered = 0; - for (Iterator it = wal.recover(); it.hasNext(); ) { - it.next().record().release(); - recovered++; - } - wal.reset().join(); - return recovered; - } - - public static void resetWALHeader(String path) throws IOException { - System.out.println("Resetting WAL header"); - if (isBlockDevice(path)) { - // block device - int capacity = BlockWALService.WAL_HEADER_TOTAL_CAPACITY; - WALChannel channel = WALChannel.builder(path).capacity(capacity).build(); - channel.open(); - ByteBuf buf = ByteBufAlloc.byteBuffer(capacity); - buf.writeZero(capacity); - channel.write(buf, 0); - buf.release(); - channel.close(); - } else { - // normal file - File file = new File(path); - if (file.isFile() && !file.delete()) { - throw new IOException("Failed to delete existing file " + file); - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoverTool.java b/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoverTool.java deleted file mode 100644 index d306410ee..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoverTool.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.benchmark; - -import com.automq.stream.s3.StreamRecordBatchCodec; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.wal.BlockWALService; -import com.automq.stream.s3.wal.WALHeader; -import io.netty.buffer.ByteBuf; -import java.io.IOException; -import java.util.Iterator; -import java.util.function.Function; -import java.util.stream.StreamSupport; -import net.sourceforge.argparse4j.ArgumentParsers; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.Namespace; - -import static com.automq.stream.s3.wal.benchmark.BenchTool.parseArgs; - -/** - * RecoverTool is a tool to recover records in a WAL manually. - * It extends {@link BlockWALService} to use tools provided by {@link BlockWALService} - */ -public class RecoverTool extends BlockWALService implements AutoCloseable { - - public RecoverTool(Config config) throws IOException { - super(BlockWALService.recoveryBuilder(config.path)); - super.start(); - } - - public static void main(String[] args) throws IOException { - Namespace ns = parseArgs(Config.parser(), args); - Config config = new Config(ns); - - try (RecoverTool tool = new RecoverTool(config)) { - tool.run(config); - } - } - - private void run(Config config) { - WALHeader header = super.tryReadWALHeader(); - System.out.println(header); - - Iterable recordsSupplier = () -> recover(header, config); - Function decoder = StreamRecordBatchCodec::decode; - Function stringer = decoder.andThen(StreamRecordBatch::toString); - StreamSupport.stream(recordsSupplier.spliterator(), false) - .map(it -> new RecoverResultWrapper(it, stringer)) - .peek(System.out::println) - .forEach(RecoverResultWrapper::release); - } - - private Iterator recover(WALHeader header, Config config) { - long recoverOffset = config.offset != null ? config.offset : header.getTrimOffset(); - long windowLength = header.getSlidingWindowMaxLength(); - long skipRecordAtOffset = config.skipTrimmed ? 
header.getTrimOffset() : -1; - return new RecoverIterator(recoverOffset, windowLength, skipRecordAtOffset); - } - - @Override - public void close() { - super.shutdownGracefully(); - } - - /** - * A wrapper for {@link RecoverResult} to provide a function to convert {@link RecoverResult#record} to string - */ - public static class RecoverResultWrapper { - private final RecoverResult inner; - /** - * A function to convert {@link RecoverResult#record} to string - */ - private final Function stringer; - - public RecoverResultWrapper(RecoverResult inner, Function stringer) { - this.inner = inner; - this.stringer = stringer; - } - - public void release() { - inner.record().release(); - } - - @Override - public String toString() { - return String.format("%s{", inner.getClass().getSimpleName()) - + String.format("record=(%d)", inner.record().readableBytes()) + stringer.apply(inner.record()) - + ", offset=" + inner.recordOffset() - + '}'; - } - } - - public static class Config { - final String path; - final Long offset; - final Boolean skipTrimmed; - - Config(Namespace ns) { - this.path = ns.getString("path"); - this.offset = ns.getLong("offset"); - this.skipTrimmed = ns.getBoolean("skipTrimmed"); - } - - static ArgumentParser parser() { - ArgumentParser parser = ArgumentParsers - .newFor("RecoverTool") - .build() - .defaultHelp(true) - .description("Recover records in a WAL file"); - parser.addArgument("-p", "--path") - .required(true) - .help("Path of the WAL file"); - parser.addArgument("--offset") - .type(Long.class) - .help("Offset to start recovering, default to the trimmed offset in the WAL header"); - parser.addArgument("--skip-trimmed") - .dest("skipTrimmed") - .type(Boolean.class) - .setDefault(true) - .help("Whether to skip the record at the trimmed offset"); - return parser; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoveryBench.java b/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoveryBench.java deleted file mode 100644 index f4d8f2345..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/RecoveryBench.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.benchmark; - -import com.automq.stream.s3.wal.BlockWALService; -import com.automq.stream.s3.wal.WriteAheadLog; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.io.IOException; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; -import net.sourceforge.argparse4j.ArgumentParsers; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.Namespace; -import org.apache.commons.lang3.time.StopWatch; - -import static com.automq.stream.s3.wal.benchmark.BenchTool.parseArgs; -import static com.automq.stream.s3.wal.benchmark.BenchTool.recoverAndReset; -import static com.automq.stream.s3.wal.benchmark.BenchTool.resetWALHeader; - -/** - * RecoveryBench is a tool to benchmark the recovery performance of {@link BlockWALService} - */ -public class RecoveryBench implements AutoCloseable { - - private final WriteAheadLog log; - private Random random = new Random(); - - public RecoveryBench(Config config) throws IOException { - this.log = BlockWALService.builder(config.path, config.capacity).build().start(); - recoverAndReset(log); - } - - public static void main(String[] args) throws Exception { - Namespace ns = parseArgs(Config.parser(), args); - Config config = new Config(ns); - - resetWALHeader(config.path); - try (RecoveryBench bench = new RecoveryBench(config)) { - bench.run(config); - } - } - - private void run(Config config) throws Exception { - writeRecords(config.numRecords, config.recordSizeBytes); - recoverRecords(config.path); - } - - private void writeRecords(int numRecords, int recordSizeBytes) throws WriteAheadLog.OverCapacityException { - System.out.println("Writing " + numRecords + " records of size " + recordSizeBytes + " bytes"); - byte[] bytes = new byte[recordSizeBytes]; - random.nextBytes(bytes); - ByteBuf payload = Unpooled.wrappedBuffer(bytes).retain(); - - AtomicInteger appended = new AtomicInteger(); - for (int i = 0; i < numRecords; i++) { - WriteAheadLog.AppendResult result = log.append(payload.retainedDuplicate()); - result.future().whenComplete((r, e) -> { - if (e != null) { - System.err.println("Failed to append record: " + e.getMessage()); - e.printStackTrace(); - } else { - appended.incrementAndGet(); - } - }); - } - System.out.println("Appended " + appended.get() + " records (may not be the final number)"); - } - - private void recoverRecords(String path) throws IOException { - System.out.println("Recovering records from " + path); - WriteAheadLog recoveryLog = BlockWALService.recoveryBuilder(path).build().start(); - StopWatch stopWatch = StopWatch.createStarted(); - int recovered = recoverAndReset(recoveryLog); - System.out.println("Recovered " + recovered + " records in " + stopWatch.getTime() + " ms"); - } - - @Override - public void close() { - log.shutdownGracefully(); - } - - static class Config { - // following fields are WAL configuration - final String path; - final Long capacity; - - // following fields are benchmark configuration - final Integer numRecords; - final Integer recordSizeBytes; - - Config(Namespace ns) { - this.path = ns.getString("path"); - this.capacity = ns.getLong("capacity"); - this.numRecords = ns.getInt("records"); - 
this.recordSizeBytes = ns.getInt("recordSize"); - } - - static ArgumentParser parser() { - ArgumentParser parser = ArgumentParsers - .newFor("RecoveryBench") - .build() - .defaultHelp(true) - .description("Benchmark the recovery performance of BlockWALService"); - parser.addArgument("-p", "--path") - .required(true) - .help("Path of the WAL file"); - parser.addArgument("-c", "--capacity") - .type(Long.class) - .setDefault((long) 3 << 30) - .help("Capacity of the WAL in bytes"); - parser.addArgument("--records") - .type(Integer.class) - .setDefault(1 << 20) - .help("number of records to write"); - parser.addArgument("--record-size") - .dest("recordSize") - .type(Integer.class) - .setDefault(1 << 10) - .help("size of each record in bytes"); - return parser; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/WriteBench.java b/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/WriteBench.java deleted file mode 100644 index 201eaf1ec..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/benchmark/WriteBench.java +++ /dev/null @@ -1,370 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.benchmark; - -import com.automq.stream.s3.wal.BlockWALService; -import com.automq.stream.s3.wal.WriteAheadLog; -import com.automq.stream.utils.ThreadUtils; -import com.automq.stream.utils.Threads; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.io.IOException; -import java.util.NavigableSet; -import java.util.Random; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.LockSupport; -import java.util.concurrent.locks.ReentrantLock; -import net.sourceforge.argparse4j.ArgumentParsers; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.Namespace; - -import static com.automq.stream.s3.wal.benchmark.BenchTool.parseArgs; -import static com.automq.stream.s3.wal.benchmark.BenchTool.recoverAndReset; -import static com.automq.stream.s3.wal.benchmark.BenchTool.resetWALHeader; - -/** - * WriteBench is a tool for benchmarking write performance of {@link BlockWALService} - */ -public class WriteBench implements AutoCloseable { - private static final int LOG_INTERVAL_SECONDS = 1; - private static final int TRIM_INTERVAL_MILLIS = 100; - - private final WriteAheadLog log; - private final TrimOffset trimOffset = new TrimOffset(); - - // Generate random payloads for this benchmark tool - private Random random = new Random(); - - public WriteBench(Config config) throws IOException { - BlockWALService.BlockWALServiceBuilder builder = BlockWALService.builder(config.path, config.capacity); - if (config.depth != null) { - builder.ioThreadNums(config.depth); - } - if (config.iops != null) { - builder.writeRateLimit(config.iops); - } - this.log = builder.build(); - this.log.start(); - recoverAndReset(this.log); - } - - public static void main(String[] args) throws IOException { - Namespace ns = parseArgs(Config.parser(), args); - Config 
config = new Config(ns); - - resetWALHeader(config.path); - try (WriteBench bench = new WriteBench(config)) { - bench.run(config); - } - } - - private static Runnable logIt(Config config, Stat stat) { - ScheduledExecutorService statExecutor = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("stat-thread-%d", true), null); - statExecutor.scheduleAtFixedRate(() -> { - Stat.Result result = stat.reset(); - if (0 != result.count()) { - System.out.printf("Append task | Append Rate %d msg/s %d KB/s | Avg Latency %.3f ms | Max Latency %.3f ms\n", - TimeUnit.SECONDS.toNanos(1) * result.count() / result.elapsedTimeNanos(), - TimeUnit.SECONDS.toNanos(1) * (result.count() * config.recordSizeBytes) / result.elapsedTimeNanos() / 1024, - (double) result.costNanos() / TimeUnit.MILLISECONDS.toNanos(1) / result.count(), - (double) result.maxCostNanos() / TimeUnit.MILLISECONDS.toNanos(1)); - } - }, LOG_INTERVAL_SECONDS, LOG_INTERVAL_SECONDS, TimeUnit.SECONDS); - return statExecutor::shutdownNow; - } - - private void run(Config config) { - System.out.println("Starting benchmark"); - - ExecutorService executor = Threads.newFixedThreadPool( - config.threads, ThreadUtils.createThreadFactory("append-thread-%d", false), null); - AppendTaskConfig appendTaskConfig = new AppendTaskConfig(config); - Stat stat = new Stat(); - Runnable stopTrim = runTrimTask(); - for (int i = 0; i < config.threads; i++) { - int index = i; - executor.submit(() -> { - try { - runAppendTask(index, appendTaskConfig, stat); - } catch (Exception e) { - System.err.printf("Append task %d failed, %s\n", index, e.getMessage()); - e.printStackTrace(); - } - }); - } - Runnable stopLog = logIt(config, stat); - - executor.shutdown(); - try { - if (!executor.awaitTermination(config.durationSeconds + 10, TimeUnit.SECONDS)) { - executor.shutdownNow(); - } - } catch (InterruptedException e) { - executor.shutdownNow(); - } - stopLog.run(); - stopTrim.run(); - - System.out.println("Benchmark finished"); - } - - private Runnable runTrimTask() { - ScheduledExecutorService trimExecutor = Threads.newSingleThreadScheduledExecutor( - ThreadUtils.createThreadFactory("trim-thread-%d", true), null); - trimExecutor.scheduleAtFixedRate(() -> { - try { - log.trim(trimOffset.get()); - } catch (Exception e) { - System.err.printf("Trim task failed, %s\n", e.getMessage()); - e.printStackTrace(); - } - }, TRIM_INTERVAL_MILLIS, TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS); - return trimExecutor::shutdownNow; - } - - private void runAppendTask(int index, AppendTaskConfig config, Stat stat) throws Exception { - System.out.printf("Append task %d started\n", index); - - byte[] bytes = new byte[config.recordSizeBytes]; - random.nextBytes(bytes); - ByteBuf payload = Unpooled.wrappedBuffer(bytes).retain(); - int intervalNanos = (int) TimeUnit.SECONDS.toNanos(1) / Math.max(1, config.throughputBytes / config.recordSizeBytes); - long lastAppendTimeNanos = System.nanoTime(); - long taskStartTimeMillis = System.currentTimeMillis(); - - while (true) { - while (true) { - long now = System.nanoTime(); - long elapsedNanos = now - lastAppendTimeNanos; - if (elapsedNanos >= intervalNanos) { - lastAppendTimeNanos += intervalNanos; - break; - } - LockSupport.parkNanos((intervalNanos - elapsedNanos) >> 2); - } - - long now = System.currentTimeMillis(); - if (now - taskStartTimeMillis > TimeUnit.SECONDS.toMillis(config.durationSeconds)) { - break; - } - - long appendStartTimeNanos = System.nanoTime(); - WriteAheadLog.AppendResult result; - try { - result = 
log.append(payload.retainedDuplicate()); - } catch (WriteAheadLog.OverCapacityException e) { - System.err.printf("Append task %d failed, retry it, %s\n", index, e.getMessage()); - continue; - } - trimOffset.appended(result.recordOffset()); - result.future().thenAccept(v -> { - long costNanosValue = System.nanoTime() - appendStartTimeNanos; - stat.update(costNanosValue); - trimOffset.flushed(v.flushedOffset()); - }).whenComplete((v, e) -> { - if (e != null) { - System.err.printf("Append task %d failed, %s\n", index, e.getMessage()); - e.printStackTrace(); - } - }); - } - - System.out.printf("Append task %d finished\n", index); - } - - @Override - public void close() { - log.shutdownGracefully(); - } - - static class Config { - // following fields are WAL configuration - final String path; - final Long capacity; - final Integer depth; - final Integer iops; - - // following fields are benchmark configuration - final Integer threads; - final Integer throughputBytes; - final Integer recordSizeBytes; - final Long durationSeconds; - - Config(Namespace ns) { - this.path = ns.getString("path"); - this.capacity = ns.getLong("capacity"); - this.depth = ns.getInt("depth"); - this.iops = ns.getInt("iops"); - this.threads = ns.getInt("threads"); - this.throughputBytes = ns.getInt("throughput"); - this.recordSizeBytes = ns.getInt("recordSize"); - this.durationSeconds = ns.getLong("duration"); - } - - static ArgumentParser parser() { - ArgumentParser parser = ArgumentParsers - .newFor("WriteBench") - .build() - .defaultHelp(true) - .description("Benchmark write performance of BlockWALService"); - parser.addArgument("-p", "--path") - .required(true) - .help("Path of the WAL file"); - parser.addArgument("-c", "--capacity") - .type(Long.class) - .setDefault((long) 1 << 30) - .help("Capacity of the WAL in bytes"); - parser.addArgument("-d", "--depth") - .type(Integer.class) - .help("IO depth of the WAL"); - parser.addArgument("--iops") - .type(Integer.class) - .help("IOPS of the WAL"); - parser.addArgument("--threads") - .type(Integer.class) - .setDefault(1) - .help("Number of threads to use to write"); - parser.addArgument("--throughput") - .type(Integer.class) - .setDefault(1 << 20) - .help("Expected throughput in total in bytes per second"); - parser.addArgument("--record-size") - .dest("recordSize") - .type(Integer.class) - .setDefault(1 << 10) - .help("Size of each record in bytes"); - parser.addArgument("--duration") - .type(Long.class) - .setDefault(60L) - .help("Duration of the benchmark in seconds"); - return parser; - } - } - - static class AppendTaskConfig { - final int throughputBytes; - final int recordSizeBytes; - final long durationSeconds; - - AppendTaskConfig(Config config) { - this.throughputBytes = config.throughputBytes / config.threads; - this.recordSizeBytes = config.recordSizeBytes; - this.durationSeconds = config.durationSeconds; - } - } - - static class Stat { - final AtomicLong count = new AtomicLong(); - final AtomicLong costNanos = new AtomicLong(); - final AtomicLong maxCostNanos = new AtomicLong(); - long lastResetTimeNanos = System.nanoTime(); - - public void update(long costNanosValue) { - count.incrementAndGet(); - costNanos.addAndGet(costNanosValue); - maxCostNanos.accumulateAndGet(costNanosValue, Math::max); - } - - /** - * NOT thread-safe - */ - public Result reset() { - long countValue = count.getAndSet(0); - long costNanosValue = costNanos.getAndSet(0); - long maxCostNanosValue = maxCostNanos.getAndSet(0); - - long now = System.nanoTime(); - long elapsedTimeNanos = now - 
lastResetTimeNanos; - lastResetTimeNanos = now; - - return new Result(countValue, costNanosValue, maxCostNanosValue, elapsedTimeNanos); - } - - public static final class Result { - private final long count; - private final long costNanos; - private final long maxCostNanos; - private final long elapsedTimeNanos; - - public Result(long count, long costNanos, long maxCostNanos, long elapsedTimeNanos) { - this.count = count; - this.costNanos = costNanos; - this.maxCostNanos = maxCostNanos; - this.elapsedTimeNanos = elapsedTimeNanos; - } - - public long count() { - return count; - } - - public long costNanos() { - return costNanos; - } - - public long maxCostNanos() { - return maxCostNanos; - } - - public long elapsedTimeNanos() { - return elapsedTimeNanos; - } - } - } - - public static class TrimOffset { - private final Lock lock = new ReentrantLock(); - // Offsets of all data appended but not yet flushed to disk - private final NavigableSet appendedOffsets = new ConcurrentSkipListSet<>(); - // Offset before which all data has been flushed to disk - private long flushedOffset = -1; - // Offset at which all data has been flushed to disk - private long committedOffset = -1; - - public void appended(long offset) { - appendedOffsets.add(offset); - } - - public void flushed(long offset) { - lock.lock(); - try { - if (offset > flushedOffset) { - flushedOffset = offset; - Long lower = appendedOffsets.lower(flushedOffset); - if (lower != null) { - appendedOffsets.headSet(lower).clear(); - committedOffset = lower; - } - } - } finally { - lock.unlock(); - } - } - - /** - * @return the offset at which all data has been flushed to disk, or -1 if no data has been flushed to disk - */ - public long get() { - lock.lock(); - try { - return committedOffset; - } finally { - lock.unlock(); - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannel.java b/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannel.java deleted file mode 100644 index 2fa9c48c6..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannel.java +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.util; - -import com.automq.stream.s3.wal.WALCapacityMismatchException; -import com.automq.stream.s3.wal.WALNotInitializedException; -import com.automq.stream.thirdparty.moe.cnkirito.kdio.DirectIOLib; -import com.automq.stream.thirdparty.moe.cnkirito.kdio.DirectIOUtils; -import com.automq.stream.thirdparty.moe.cnkirito.kdio.DirectRandomAccessFile; -import io.netty.buffer.ByteBuf; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.concurrent.ExecutionException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.Constants.CAPACITY_NOT_SET; -import static com.automq.stream.s3.wal.util.WALUtil.isBlockDevice; - -public class WALBlockDeviceChannel implements WALChannel { - private static final Logger LOGGER = LoggerFactory.getLogger(WALBlockDeviceChannel.class); - private static final String CHECK_DIRECT_IO_AVAILABLE_FORMAT = "%s.check_direct_io_available"; - final String path; - final long capacityWant; - final boolean recoveryMode; - final DirectIOLib directIOLib; - /** - * 0 means allocate on demand - */ - final int initTempBufferSize; - /** - * 0 means no limit - */ - final int maxTempBufferSize; - /** - * Flag indicating whether unaligned write is allowed. - * Currently, it is only allowed when testing. - */ - public boolean unalignedWrite = false; - - long capacityFact = 0; - DirectRandomAccessFile randomAccessFile; - - ThreadLocal threadLocalByteBuffer = new ThreadLocal<>() { - @Override - protected ByteBuffer initialValue() { - return DirectIOUtils.allocateForDirectIO(directIOLib, initTempBufferSize); - } - }; - - public WALBlockDeviceChannel(String path, long capacityWant) { - this(path, capacityWant, 0, 0, false); - } - - public WALBlockDeviceChannel(String path, long capacityWant, int initTempBufferSize, int maxTempBufferSize, - boolean recoveryMode) { - this.path = path; - this.recoveryMode = recoveryMode; - if (recoveryMode) { - this.capacityWant = CAPACITY_NOT_SET; - } else { - assert capacityWant > 0; - this.capacityWant = capacityWant; - if (!WALUtil.isAligned(capacityWant)) { - throw new RuntimeException("wal capacity must be aligned by block size when using block device"); - } - } - this.initTempBufferSize = initTempBufferSize; - this.maxTempBufferSize = maxTempBufferSize; - - DirectIOLib lib = DirectIOLib.getLibForPath(path); - if (null == lib) { - throw new RuntimeException("O_DIRECT not supported"); - } else { - this.directIOLib = lib; - } - } - - /** - * Check whether the {@link WALBlockDeviceChannel} is available for the given path. - * - * @return null if available, otherwise the reason why it's not available - */ - public static String checkAvailable(String path) { - if (!DirectIOLib.binit) { - return "O_DIRECT not supported"; - } - if (!DirectIOUtils.allocatorAvailable()) { - return "java.nio.DirectByteBuffer.(long, int) not available." 
+ - " Add --add-opens=java.base/java.nio=ALL-UNNAMED and -Dio.netty.tryReflectionSetAccessible=true to JVM options may fix this."; - } - if (!isBlockDevice(path)) { - String reason = tryOpenFileWithDirectIO(String.format(CHECK_DIRECT_IO_AVAILABLE_FORMAT, path)); - if (null != reason) { - return "O_DIRECT not supported by the file system, path: " + path + ", reason: " + reason; - } - } - return null; - } - - /** - * Try to open a file with O_DIRECT flag to check whether the file system supports O_DIRECT. - * The file will be deleted after the test. - * - * @return null if the file is opened successfully, otherwise the reason why it's not available - */ - private static String tryOpenFileWithDirectIO(String path) { - File file = new File(path); - try { - DirectRandomAccessFile randomAccessFile = new DirectRandomAccessFile(file, "rw"); - randomAccessFile.close(); - return null; - } catch (IOException e) { - return e.getMessage(); - } finally { - // the file may be created in {@link DirectRandomAccessFile(File, String)}, so delete it - file.delete(); - } - } - - @Override - public void open(CapacityReader reader) throws IOException { - if (!isBlockDevice(path)) { - openAndCheckFile(); - } else { - try { - long capacity = WALUtil.getBlockDeviceCapacity(path); - if (!recoveryMode && capacityWant > capacity) { - // the real capacity of the block device is smaller than requested - throw new WALCapacityMismatchException(path, capacityWant, capacity); - } - } catch (ExecutionException e) { - LOGGER.warn("failed to get the real capacity of the block device {}, just skip checking", path, e); - } - // We could not get the real capacity of the WAL in block device, so we just use the `capacityWant` as the capacity here - // It will be checked and updated in `checkCapacity` later - capacityFact = capacityWant; - } - - randomAccessFile = new DirectRandomAccessFile(new File(path), "rw"); - - checkCapacity(reader); - } - - /** - * Create the file and set length if not exists, and check the file size if exists. - */ - private void openAndCheckFile() throws IOException { - File file = new File(path); - if (file.exists()) { - if (!file.isFile()) { - throw new IOException(path + " is not a file"); - } - capacityFact = file.length(); - if (!recoveryMode && capacityFact != capacityWant) { - // the file exists but not the same size as requested - throw new WALCapacityMismatchException(path, capacityWant, capacityFact); - } - } else { - // the file does not exist - if (recoveryMode) { - throw new WALNotInitializedException("try to open an uninitialized WAL in recovery mode: file not exists. path: " + path); - } - WALUtil.createFile(path, capacityWant); - capacityFact = capacityWant; - } - } - - private void checkCapacity(CapacityReader reader) throws IOException { - if (null == reader) { - return; - } - Long capacity = reader.capacity(this); - if (null == capacity) { - if (recoveryMode) { - throw new WALNotInitializedException("try to open an uninitialized WAL in recovery mode: empty header. 
path: " + path); - } - } else if (capacityFact == CAPACITY_NOT_SET) { - // recovery mode on block device - capacityFact = capacity; - } else if (capacityFact != capacity) { - throw new WALCapacityMismatchException(path, capacityFact, capacity); - } - assert capacityFact != CAPACITY_NOT_SET; - } - - @Override - public void close() { - try { - if (randomAccessFile != null) { - randomAccessFile.close(); - } - } catch (IOException ignored) { - } - } - - @Override - public long capacity() { - return capacityFact; - } - - @Override - public String path() { - return path; - } - - private ByteBuffer getBuffer(int alignedSize) { - assert WALUtil.isAligned(alignedSize); - - ByteBuffer currentBuf = threadLocalByteBuffer.get(); - if (alignedSize <= currentBuf.capacity()) { - return currentBuf; - } - if (maxTempBufferSize > 0 && alignedSize > maxTempBufferSize) { - throw new RuntimeException("too large write size"); - } - - ByteBuffer newBuf = DirectIOUtils.allocateForDirectIO(directIOLib, alignedSize); - threadLocalByteBuffer.set(newBuf); - DirectIOUtils.releaseDirectBuffer(currentBuf); - return newBuf; - } - - @Override - public void write(ByteBuf src, long position) throws IOException { - if (unalignedWrite) { - // unaligned write, just used for testing - unalignedWrite(src, position); - return; - } - assert WALUtil.isAligned(position); - - int alignedSize = (int) WALUtil.alignLargeByBlockSize(src.readableBytes()); - assert position + alignedSize <= capacity(); - ByteBuffer tmpBuf = getBuffer(alignedSize); - tmpBuf.clear(); - - for (ByteBuffer buffer : src.nioBuffers()) { - tmpBuf.put(buffer); - } - tmpBuf.position(0).limit(alignedSize); - - write(tmpBuf, position); - } - - private void unalignedWrite(ByteBuf src, long position) throws IOException { - long start = position; - long end = position + src.readableBytes(); - long alignedStart = WALUtil.alignSmallByBlockSize(start); - long alignedEnd = WALUtil.alignLargeByBlockSize(end); - int alignedSize = (int) (alignedEnd - alignedStart); - - // read the data in the range [alignedStart, alignedEnd) to tmpBuf - ByteBuffer tmpBuf = getBuffer(alignedSize); - tmpBuf.position(0).limit(alignedSize); - read(tmpBuf, alignedStart); - - // overwrite the data in the range [start, end) in tmpBuf - for (ByteBuffer buffer : src.nioBuffers()) { - tmpBuf.position((int) (start - alignedStart)); - start += buffer.remaining(); - tmpBuf.put(buffer); - } - tmpBuf.position(0).limit(alignedSize); - - // write it - write(tmpBuf, alignedStart); - } - - private int write(ByteBuffer src, long position) throws IOException { - assert WALUtil.isAligned(src.remaining()); - - int bytesWritten = 0; - while (src.hasRemaining()) { - int written = randomAccessFile.write(src, position + bytesWritten); - // kdio will throw an exception rather than return -1, so we don't need to check for -1 - bytesWritten += written; - } - return bytesWritten; - } - - @Override - public void flush() { - } - - @Override - public int read(ByteBuf dst, long position, int length) throws IOException { - long start = position; - length = Math.min(length, dst.writableBytes()); - long end = position + length; - long alignedStart = WALUtil.alignSmallByBlockSize(start); - long alignedEnd = WALUtil.alignLargeByBlockSize(end); - int alignedSize = (int) (alignedEnd - alignedStart); - // capacity may be CAPACITY_NOT_SET only when we call {@link CapacityReader#capacity} in recovery mode - assert CAPACITY_NOT_SET == capacity() || alignedEnd <= capacity(); - - ByteBuffer tmpBuf = getBuffer(alignedSize); - 
tmpBuf.position(0).limit(alignedSize); - - read(tmpBuf, alignedStart); - tmpBuf.position((int) (start - alignedStart)).limit((int) (end - alignedStart)); - - dst.writeBytes(tmpBuf); - return (int) (end - start); - } - - private int read(ByteBuffer dst, long position) throws IOException { - int bytesRead = 0; - while (dst.hasRemaining()) { - int read = randomAccessFile.read(dst, position + bytesRead); - // kdio will throw an exception rather than return -1, so we don't need to check for -1 - bytesRead += read; - } - return bytesRead; - } - - @Override - public boolean useDirectIO() { - return true; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALCachedChannel.java b/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALCachedChannel.java deleted file mode 100644 index e9a396c39..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALCachedChannel.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.util; - -import com.automq.stream.s3.ByteBufAlloc; -import io.netty.buffer.ByteBuf; -import java.io.IOException; - -import static com.automq.stream.s3.Constants.CAPACITY_NOT_SET; - -/** - * A wrapper of {@link WALChannel} that caches for read to reduce I/O. - */ -public class WALCachedChannel implements WALChannel { - - private static final int DEFAULT_CACHE_SIZE = 1 << 20; - - private final WALChannel channel; - private final int cacheSize; - - private ByteBuf cache; - private long cachePosition = -1; - - private WALCachedChannel(WALChannel channel, int cacheSize) { - this.channel = channel; - this.cacheSize = cacheSize; - } - - public static WALCachedChannel of(WALChannel channel) { - return new WALCachedChannel(channel, DEFAULT_CACHE_SIZE); - } - - public static WALCachedChannel of(WALChannel channel, int cacheSize) { - return new WALCachedChannel(channel, cacheSize); - } - - /** - * As we use a common cache for all threads, we need to synchronize the read. - */ - @Override - public synchronized int read(ByteBuf dst, long position, int length) throws IOException { - if (CAPACITY_NOT_SET == channel.capacity()) { - // If we don't know the capacity now, we can't cache. - return channel.read(dst, position, length); - } - - long start = position; - length = Math.min(length, dst.writableBytes()); - long end = position + length; - - ByteBuf cache = getCache(); - if (length > cache.capacity()) { - // If the length is larger than the cache capacity, we can't cache. - return channel.read(dst, position, length); - } - - boolean fallWithinCache = cachePosition >= 0 && cachePosition <= start && end <= cachePosition + cache.readableBytes(); - if (!fallWithinCache) { - cache.clear(); - cachePosition = start; - // Make sure the cache is not larger than the channel capacity. - int cacheLength = (int) Math.min(cache.writableBytes(), channel.capacity() - cachePosition); - channel.read(cache, cachePosition, cacheLength); - } - - // Now the cache is ready. - int relativePosition = (int) (start - cachePosition); - dst.writeBytes(cache, relativePosition, length); - return length; - } - - @Override - public void close() { - releaseCache(); - this.channel.close(); - } - - /** - * Release the cache if it is not null. 
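The unaligned write shown above is a classic read-modify-write: the target range [start, end) is widened to block boundaries, the widened window is read, patched in place, and written back; the cached channel's read uses the same windowing idea. A minimal standalone sketch of just the window arithmetic, assuming a hypothetical fixed 4 KiB block size (the class and method names here are illustrative, not part of the real API):

    public final class AlignedWindow {
        static final long BLOCK = 4096; // assumed block size; the real one is configurable

        // Widen [start, end) to block boundaries, as the unaligned write path does.
        static long[] widen(long start, long end) {
            long alignedStart = start - start % BLOCK;           // round down
            long alignedEnd = (end + BLOCK - 1) / BLOCK * BLOCK; // round up
            return new long[] {alignedStart, alignedEnd};
        }

        public static void main(String[] args) {
            long[] w = widen(5_000, 6_500);
            System.out.println(w[0] + " " + w[1]); // prints "4096 8192"
        }
    }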
- * This method should be called when no more {@link #read}s will be called to release the allocated memory. - */ - public synchronized void releaseCache() { - if (this.cache != null) { - this.cache.release(); - this.cache = null; - } - this.cachePosition = -1; - } - - /** - * Get the cache. If the cache is not initialized, initialize it. - * Should be called under synchronized. - */ - private ByteBuf getCache() { - if (this.cache == null) { - this.cache = ByteBufAlloc.byteBuffer(cacheSize); - } - return this.cache; - } - - @Override - public void open(CapacityReader reader) throws IOException { - this.channel.open(reader); - } - - @Override - public long capacity() { - return this.channel.capacity(); - } - - @Override - public String path() { - return this.channel.path(); - } - - @Override - public void write(ByteBuf src, long position) throws IOException { - this.channel.write(src, position); - } - - @Override - public void flush() throws IOException { - this.channel.flush(); - } - - @Override - public boolean useDirectIO() { - return channel.useDirectIO(); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALChannel.java b/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALChannel.java deleted file mode 100644 index 45e0ceafc..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALChannel.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.util; - -import com.automq.stream.s3.wal.WALCapacityMismatchException; -import com.automq.stream.s3.wal.WALNotInitializedException; -import com.automq.stream.utils.Threads; -import io.netty.buffer.ByteBuf; -import java.io.File; -import java.io.IOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static com.automq.stream.s3.Constants.CAPACITY_NOT_SET; -import static com.automq.stream.s3.wal.util.WALUtil.isBlockDevice; - -/** - * There are two implementations of WALChannel: - * 1. WALFileChannel based on file system, which calls fsync after each write to ensure data is flushed to disk. - * 2. WALBlockDeviceChannel based on block device, which uses O_DIRECT to bypass page cache. - */ -public interface WALChannel { - - Logger LOGGER = LoggerFactory.getLogger(WALChannel.class); - - long DEFAULT_RETRY_INTERVAL = 100L; - - static WALChannelBuilder builder(String path) { - return new WALChannelBuilder(path); - } - - /** - * Open the channel for read and write. - * If {@code reader} is null, checks will be skipped. - * - * @param reader the reader to get the capacity of the channel - * @throws WALCapacityMismatchException if the capacity of the channel does not match the expected capacity - * @throws WALNotInitializedException if try to open an un-initialized channel in recovery mode - * @throws IOException if any I/O error happens - */ - void open(CapacityReader reader) throws IOException; - - default void open() throws IOException { - open(null); - } - - void close(); - - long capacity(); - - String path(); - - /** - * Write bytes from the given buffer to the given position of the channel from the current reader index - * to the end of the buffer. It only returns when all bytes are written successfully. 
- * {@link #flush()} should be called after this method to ensure data is flushed to disk. - * This method will change the reader index of the given buffer to the end of the written bytes. - * This method will not change the writer index of the given buffer. - */ - void write(ByteBuf src, long position) throws IOException; - - default void retryWrite(ByteBuf src, long position) { - retryWrite(src, position, DEFAULT_RETRY_INTERVAL); - } - - /** - * Retry {@link #write(ByteBuf, long)} with the given interval until success. - */ - default void retryWrite(ByteBuf src, long position, long retryIntervalMillis) { - while (true) { - try { - write(src, position); - break; - } catch (IOException e) { - LOGGER.error("Failed to write, retrying in {}ms", retryIntervalMillis, e); - Threads.sleep(retryIntervalMillis); - } - } - } - - /** - * Flush to disk. - */ - void flush() throws IOException; - - default void retryFlush() { - retryFlush(DEFAULT_RETRY_INTERVAL); - } - - /** - * Retry {@link #flush()} with the given interval until success. - */ - default void retryFlush(long retryIntervalMillis) { - while (true) { - try { - flush(); - break; - } catch (IOException e) { - LOGGER.error("Failed to flush, retrying in {}ms", retryIntervalMillis, e); - Threads.sleep(retryIntervalMillis); - } - } - } - - /** - * Call {@link #write(ByteBuf, long)} and {@link #flush()}. - */ - default void writeAndFlush(ByteBuf src, long position) throws IOException { - write(src, position); - flush(); - } - - default void retryWriteAndFlush(ByteBuf src, long position) { - retryWriteAndFlush(src, position, DEFAULT_RETRY_INTERVAL); - } - - /** - * Retry {@link #writeAndFlush(ByteBuf, long)} with the given interval until success. - */ - default void retryWriteAndFlush(ByteBuf src, long position, long retryIntervalMillis) { - while (true) { - try { - writeAndFlush(src, position); - break; - } catch (IOException e) { - LOGGER.error("Failed to write and flush, retrying in {}ms", retryIntervalMillis, e); - Threads.sleep(retryIntervalMillis); - } - } - } - - /** - * Read bytes from the given position of the channel to the given buffer from the current writer index - * until reaching the capacity of the buffer or the end of the channel. - * This method will change the writer index of the given buffer to the end of the read bytes. - * This method will not change the reader index of the given buffer. - */ - default int read(ByteBuf dst, long position) throws IOException { - return read(dst, position, dst.writableBytes()); - } - - default int retryRead(ByteBuf dst, long position) { - return retryRead(dst, position, DEFAULT_RETRY_INTERVAL); - } - - /** - * Retry {@link #read(ByteBuf, long)} with the given interval until success. - */ - default int retryRead(ByteBuf dst, long position, long retryIntervalMillis) { - while (true) { - try { - return read(dst, position); - } catch (IOException e) { - LOGGER.error("Failed to read, retrying in {}ms", retryIntervalMillis, e); - Threads.sleep(retryIntervalMillis); - } - } - } - - /** - * Read bytes from the given position of the channel to the given buffer from the current writer index - * until reaching the given length or the end of the channel. - * This method will change the writer index of the given buffer to the end of the read bytes. - * This method will not change the reader index of the given buffer. - * If the given length is larger than the writable bytes of the given buffer, only the first - * {@code dst.writableBytes()} bytes will be read. 
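All of the retry* defaults above share one shape: attempt the I/O, log, sleep for the interval, and try again until it succeeds. A generic standalone sketch of that loop (the functional interface and names are invented for illustration; the real code uses Threads.sleep and SLF4J):

    import java.io.IOException;

    public final class RetryLoop {
        @FunctionalInterface
        interface IoAction {
            void run() throws IOException;
        }

        // Retry with a fixed interval until success, like retryWrite/retryFlush/retryRead.
        static void retryForever(IoAction action, long intervalMillis) {
            while (true) {
                try {
                    action.run();
                    return;
                } catch (IOException e) {
                    System.err.println("I/O failed, retrying in " + intervalMillis + "ms: " + e);
                    try {
                        Thread.sleep(intervalMillis);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        return; // stop retrying if interrupted
                    }
                }
            }
        }
    }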
- */ - int read(ByteBuf dst, long position, int length) throws IOException; - - default int retryRead(ByteBuf dst, long position, int length) { - return retryRead(dst, position, length, DEFAULT_RETRY_INTERVAL); - } - - /** - * Retry {@link #read(ByteBuf, long, int)} with the given interval until success. - */ - default int retryRead(ByteBuf dst, long position, int length, long retryIntervalMillis) { - while (true) { - try { - return read(dst, position, length); - } catch (IOException e) { - LOGGER.error("Failed to read, retrying in {}ms", retryIntervalMillis, e); - Threads.sleep(retryIntervalMillis); - } - } - } - - boolean useDirectIO(); - - interface CapacityReader { - /** - * Get the capacity of the given channel. - * It returns null if the channel has not been initialized before. - */ - Long capacity(WALChannel channel); - } - - class WALChannelBuilder { - private static final Logger LOGGER = LoggerFactory.getLogger(WALChannelBuilder.class); - private final String path; - private Boolean direct; - private long capacity; - private int initBufferSize; - private int maxBufferSize; - private boolean recoveryMode; - - private WALChannelBuilder(String path) { - this.path = path; - } - - public WALChannelBuilder direct(boolean direct) { - this.direct = direct; - return this; - } - - public WALChannelBuilder capacity(long capacity) { - assert capacity == CAPACITY_NOT_SET || WALUtil.isAligned(capacity); - this.capacity = capacity; - return this; - } - - public WALChannelBuilder initBufferSize(int initBufferSize) { - this.initBufferSize = initBufferSize; - return this; - } - - public WALChannelBuilder maxBufferSize(int maxBufferSize) { - this.maxBufferSize = maxBufferSize; - return this; - } - - public WALChannelBuilder recoveryMode(boolean recoveryMode) { - this.recoveryMode = recoveryMode; - return this; - } - - public WALChannel build() { - String directNotAvailableMsg = WALBlockDeviceChannel.checkAvailable(path); - boolean isBlockDevice = isBlockDevice(path); - boolean useDirect = false; - if (direct != null) { - // Set by user. - useDirect = direct; - } else if (isBlockDevice) { - // We can only use direct IO for block devices. - useDirect = true; - } else if (directNotAvailableMsg == null) { - // If direct IO is available, we use it by default. - useDirect = true; - } - - if (useDirect && directNotAvailableMsg != null) { - throw new IllegalArgumentException(directNotAvailableMsg); - } - - if (useDirect) { - if (!isBlockDevice) { - LOGGER.warn("WAL in a file system, which may cause performance degradation. path: {}", new File(path).getAbsolutePath()); - } - return new WALBlockDeviceChannel(path, capacity, initBufferSize, maxBufferSize, recoveryMode); - } else { - LOGGER.warn("Direct IO not used for WAL, which may cause performance degradation. path: {}", new File(path).getAbsolutePath()); - return new WALFileChannel(path, capacity, recoveryMode); - } - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALFileChannel.java b/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALFileChannel.java deleted file mode 100644 index 66b7b765a..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALFileChannel.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
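For context, callers obtain a channel through the builder shown above, which prefers WALBlockDeviceChannel whenever direct I/O is usable and falls back to WALFileChannel otherwise. A hedged usage sketch against that API as defined in this patch (the device path and capacity are invented):

    import com.automq.stream.s3.wal.util.WALChannel;
    import java.io.IOException;

    public final class WalChannelUsage {
        public static void main(String[] args) throws IOException {
            WALChannel channel = WALChannel.builder("/dev/vdb") // hypothetical device
                .capacity(1L << 30)     // must be block-aligned
                .recoveryMode(false)
                .build();               // picks direct I/O automatically when available
            channel.open();             // null reader: capacity checks are skipped
            try {
                // ... retryWriteAndFlush(...) / retryRead(...) against the channel ...
            } finally {
                channel.close();
            }
        }
    }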
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.util; - -import com.automq.stream.s3.wal.WALCapacityMismatchException; -import com.automq.stream.s3.wal.WALNotInitializedException; -import io.netty.buffer.ByteBuf; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; - -import static com.automq.stream.s3.Constants.CAPACITY_NOT_SET; - -public class WALFileChannel implements WALChannel { - final String filePath; - final long fileCapacityWant; - /** - * When set to true, the file should exist and the file size does not need to be verified. - */ - final boolean recoveryMode; - long fileCapacityFact = 0; - RandomAccessFile randomAccessFile; - FileChannel fileChannel; - - public WALFileChannel(String filePath, long fileCapacityWant, boolean recoveryMode) { - this.filePath = filePath; - this.recoveryMode = recoveryMode; - if (recoveryMode) { - this.fileCapacityWant = CAPACITY_NOT_SET; - } else { - assert fileCapacityWant > 0; - this.fileCapacityWant = fileCapacityWant; - } - } - - @Override - public void open(CapacityReader reader) throws IOException { - File file = new File(filePath); - if (file.exists()) { - if (!file.isFile()) { - throw new IOException(filePath + " is not a file"); - } - randomAccessFile = new RandomAccessFile(file, "rw"); - fileCapacityFact = randomAccessFile.length(); - if (!recoveryMode && fileCapacityFact != fileCapacityWant) { - // the file exists but not the same size as requested - throw new WALCapacityMismatchException(filePath, fileCapacityWant, fileCapacityFact); - } - } else { - // the file does not exist - if (recoveryMode) { - throw new WALNotInitializedException("try to open an uninitialized WAL in recovery mode: file not exists: " + filePath); - } - WALUtil.createFile(filePath, fileCapacityWant); - randomAccessFile = new RandomAccessFile(filePath, "rw"); - fileCapacityFact = fileCapacityWant; - } - - fileChannel = randomAccessFile.getChannel(); - - checkCapacity(reader); - } - - private void checkCapacity(CapacityReader reader) throws IOException { - if (null == reader) { - return; - } - Long capacity = reader.capacity(this); - if (null == capacity) { - if (recoveryMode) { - throw new WALNotInitializedException("try to open an uninitialized WAL in recovery mode: empty header. 
path: " + filePath); - } - } else if (fileCapacityFact != capacity) { - throw new WALCapacityMismatchException(filePath, fileCapacityFact, capacity); - } - assert fileCapacityFact != CAPACITY_NOT_SET; - } - - @Override - public void close() { - try { - fileChannel.close(); - randomAccessFile.close(); - } catch (IOException ignored) { - } - } - - @Override - public long capacity() { - return fileCapacityFact; - } - - @Override - public String path() { - return filePath; - } - - @Override - public void write(ByteBuf src, long position) throws IOException { - assert src.readableBytes() + position <= capacity(); - ByteBuffer[] nioBuffers = src.nioBuffers(); - for (ByteBuffer nioBuffer : nioBuffers) { - int bytesWritten = write(nioBuffer, position); - position += bytesWritten; - } - } - - @Override - public void flush() throws IOException { - fileChannel.force(false); - } - - @Override - public int read(ByteBuf dst, long position, int length) throws IOException { - length = Math.min(length, dst.writableBytes()); - assert position + length <= capacity(); - int bytesRead = 0; - while (dst.isWritable()) { - int read = dst.writeBytes(fileChannel, position + bytesRead, length); - if (read == -1) { - // EOF - break; - } - bytesRead += read; - } - return bytesRead; - } - - private int write(ByteBuffer src, long position) throws IOException { - int bytesWritten = 0; - while (src.hasRemaining()) { - int written = fileChannel.write(src, position + bytesWritten); - if (written == -1) { - throw new IOException("write -1"); - } - bytesWritten += written; - } - return bytesWritten; - } - - @Override - public boolean useDirectIO() { - return false; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALUtil.java b/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALUtil.java deleted file mode 100644 index f5ceb8f7d..000000000 --- a/s3stream/src/main/java/com/automq/stream/s3/wal/util/WALUtil.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal.util; - -import com.automq.stream.utils.CommandResult; -import com.automq.stream.utils.CommandUtils; -import io.netty.buffer.ByteBuf; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.util.concurrent.ExecutionException; -import java.util.zip.CRC32; -import jnr.posix.POSIXFactory; - -public class WALUtil { - public static final int BLOCK_SIZE = Integer.parseInt(System.getProperty( - "automq.ebswal.blocksize", - "4096" - )); - - /** - * Get CRC32 of the given ByteBuf from current reader index to the end. - * This method will not change the reader index of the given ByteBuf. - */ - public static int crc32(ByteBuf buf) { - return crc32(buf, buf.readableBytes()); - } - - /** - * Get CRC32 of the given ByteBuf from current reader index to the given length. - * This method will not change the reader index of the given ByteBuf. 
- */ - public static int crc32(ByteBuf buf, int length) { - CRC32 crc32 = new CRC32(); - ByteBuf slice = buf.slice(buf.readerIndex(), length); - for (ByteBuffer buffer : slice.nioBuffers()) { - crc32.update(buffer); - } - return (int) (crc32.getValue() & 0x7FFFFFFF); - } - - public static long recordOffsetToPosition(long offset, long physicalCapacity, long headerSize) { - long capacity = physicalCapacity - headerSize; - return offset % capacity + headerSize; - } - - public static long alignLargeByBlockSize(long offset) { - return offset % BLOCK_SIZE == 0 ? offset : offset + BLOCK_SIZE - offset % BLOCK_SIZE; - } - - public static long alignNextBlock(long offset) { - return offset % BLOCK_SIZE == 0 ? offset + BLOCK_SIZE : offset + BLOCK_SIZE - offset % BLOCK_SIZE; - } - - public static long alignSmallByBlockSize(long offset) { - return offset % BLOCK_SIZE == 0 ? offset : offset - offset % BLOCK_SIZE; - } - - public static boolean isAligned(long offset) { - return offset % BLOCK_SIZE == 0; - } - - /** - * Create a file with the given path and length. - * Note {@code path} must NOT exist. - */ - public static void createFile(String path, long length) throws IOException { - File file = new File(path); - assert !file.exists(); - - File parent = file.getParentFile(); - if (null != parent && !parent.exists() && !parent.mkdirs()) { - throw new IOException("mkdirs " + parent + " fail"); - } - if (!file.createNewFile()) { - throw new IOException("create " + path + " fail"); - } - if (!file.setReadable(true)) { - throw new IOException("set " + path + " readable fail"); - } - if (!file.setWritable(true)) { - throw new IOException("set " + path + " writable fail"); - } - - // set length - try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) { - raf.setLength(length); - } - } - - /** - * Get the capacity of the given block device. - */ - public static long getBlockDeviceCapacity(String path) throws ExecutionException { - String[] cmd = new String[] { - "lsblk", - "--bytes", - "--nodeps", - "--output", "SIZE", - "--noheadings", - "--raw", - path - }; - CommandResult result = CommandUtils.run(cmd); - if (!result.success()) { - throw new ExecutionException("get block device capacity fail: " + result, null); - } - return Long.parseLong(result.stdout().trim()); - } - - /** - * Check if the given path is a block device. - * It returns false if the path does not exist. - */ - public static boolean isBlockDevice(String path) { - if (!new File(path).exists()) { - return false; - } - return POSIXFactory.getPOSIX() - .stat(path) - .isBlockDev(); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannel.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannel.java deleted file mode 100755 index 9bbdd3a8b..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannel.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
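One subtlety in WALUtil above is recordOffsetToPosition: it maps an ever-growing logical record offset onto the fixed-size device by treating the space after the header as a ring. A worked example with invented numbers, using the same formula:

    public final class RingMapping {
        // Same formula as WALUtil.recordOffsetToPosition.
        static long toPosition(long offset, long physicalCapacity, long headerSize) {
            long capacity = physicalCapacity - headerSize;
            return offset % capacity + headerSize;
        }

        public static void main(String[] args) {
            long header = 4096;
            long device = 1_048_576; // 1 MiB device, one header block
            // Logical offset 2_000_000 wraps around the 1_044_480-byte ring:
            System.out.println(toPosition(2_000_000, device, header)); // prints 959616
        }
    }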
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.Channel; - -public interface DirectChannel extends Channel { - /** - * Writes from the src buffer into this channel at position. - * - * @param src The {@link ByteBuffer} to write from - * @param position The position within the file at which to start writing - * @return How many bytes were written from src into the file - * @throws IOException - */ - int write(ByteBuffer src, long position) throws IOException; - - /** - * Reads from this channel into the dst buffer from position. - * - * @param dst The {@link ByteBuffer} to read into - * @param position The position within the file at which to start reading - * @return How many bytes were placed into dst - * @throws IOException - */ - int read(ByteBuffer dst, long position) throws IOException; - - /** - * @return The file size for this channel - */ - long size(); - - /** - * @return true if this channel is read only, false otherwise - */ - boolean isReadOnly(); - - /** - * Truncates this file's length to fileLength. - * - * @param fileLength The length to which to truncate - * @return This UnsafeByteAlignedChannel - * @throws IOException - */ - DirectChannel truncate(long fileLength) throws IOException; - - /** - * @return The file descriptor for this channel - */ - int getFD(); -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannelImpl.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannelImpl.java deleted file mode 100755 index 437d3feea..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectChannelImpl.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.NonWritableChannelException; - -public class DirectChannelImpl implements DirectChannel { - private final DirectIOLib lib; - private final int fd; - private final boolean isReadOnly; - private boolean isOpen; - private long fileLength; - - private DirectChannelImpl(DirectIOLib lib, int fd, long fileLength, boolean readOnly) { - this.lib = lib; - this.fd = fd; - this.isOpen = true; - this.isReadOnly = readOnly; - this.fileLength = fileLength; - } - - public static DirectChannel getChannel(File file, boolean readOnly) throws IOException { - DirectIOLib lib = DirectIOLib.getLibForPath(file.toString()); - if (null == lib) { - throw new IOException("No DirectIOLib found for path " + file); - } - return getChannel(lib, file, readOnly); - } - - public static DirectChannel getChannel(DirectIOLib lib, File file, boolean readOnly) throws IOException { - int fd = lib.oDirectOpen(file.toString(), readOnly); - long length = file.length(); - return new DirectChannelImpl(lib, fd, length, readOnly); - } - - private void ensureOpen() throws ClosedChannelException { - if (!isOpen()) { - throw new ClosedChannelException(); - } - } - - private void ensureWritable() { - if (isReadOnly()) { - throw new NonWritableChannelException(); - } - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - ensureOpen(); - return lib.pread(fd, dst, position); - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - ensureOpen(); - ensureWritable(); - assert src.position() == lib.blockStart(src.position()); - - int written = lib.pwrite(fd, src, position); - - // update file length if we wrote past it - fileLength = Math.max(position + written, fileLength); - return written; - } - - @Override - public DirectChannel truncate(final long length) throws IOException { - ensureOpen(); - ensureWritable(); - if (DirectIOLib.ftruncate(fd, length) < 0) { - throw new IOException("Error during truncate on descriptor " + fd + ": " + - DirectIOLib.getLastError()); - } - fileLength = length; - return this; - } - - @Override - public long size() { - return fileLength; - } - - @Override - public int getFD() { - return fd; - } - - @Override - public boolean isOpen() { - return isOpen; - } - - @Override - public boolean isReadOnly() { - return isReadOnly; - } - - @Override - public void close() throws IOException { - if (!isOpen()) { - return; - } - isOpen = false; - if (lib.close(fd) < 0) { - throw new IOException("Error closing file with descriptor " + fd + ": " + - DirectIOLib.getLastError()); - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOLib.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOLib.java deleted file mode 100755 index 1e0e113e0..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOLib.java +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -import com.sun.jna.Native; -import com.sun.jna.NativeLong; -import com.sun.jna.Platform; -import com.sun.jna.Pointer; -import com.sun.jna.ptr.PointerByReference; -import io.netty.util.internal.PlatformDependent; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class containing native hooks and utility methods for performing direct I/O, using - * the Linux O_DIRECT flag. - *
<p>
- * This class is initialized at class load time, by registering JNA hooks into native methods. - * It also calculates Linux kernel version-dependent alignment amount (in bytes) for use with the O_DIRECT flag, - * when given a string for a file or directory. - */ -public class DirectIOLib { - static final int PC_REC_XFER_ALIGN = 0x11; - private static final Logger logger = LoggerFactory.getLogger(DirectIOLib.class); - public static boolean binit; - - static { - binit = false; - try { - if (!Platform.isLinux()) { - logger.warn("Not running Linux, jaydio support disabled"); - } else { // now check to see if we have O_DIRECT... - - final int linuxVersion = 0; - final int majorRev = 1; - final int minorRev = 2; - - List versionNumbers = new ArrayList(); - for (String v : System.getProperty("os.version").split("[.\\-]")) { - if (v.matches("\\d")) { - versionNumbers.add(Integer.parseInt(v)); - } - } - - /* From "man 2 open": - * - * O_DIRECT support was added under Linux in kernel version 2.4.10. Older Linux kernels simply ignore this flag. Some file systems may not implement - * the flag and open() will fail with EINVAL if it is used. - */ - - // test to see whether kernel version >= 2.4.10 - if (versionNumbers.get(linuxVersion) > 2) { - binit = true; - } else if (versionNumbers.get(linuxVersion) == 2) { - if (versionNumbers.get(majorRev) > 4) { - binit = true; - } else if (versionNumbers.get(majorRev) == 4 && versionNumbers.get(minorRev) >= 10) { - binit = true; - } - } - - if (binit) { - // get access to open(), pread(), etc - Native.register(Platform.C_LIBRARY_NAME); - } else { - logger.warn(String.format("O_DIRECT not supported on your version of Linux: %d.%d.%d", linuxVersion, majorRev, minorRev)); - } - } - } catch (Throwable e) { - logger.warn("Unable to register libc at class load time: " + e.getMessage(), e); - } - } - - private final int fsBlockSize; - private final long fsBlockNotMask; - - public DirectIOLib(int fsBlockSize) { - this.fsBlockSize = fsBlockSize; - this.fsBlockNotMask = -((long) fsBlockSize); - } - - /** - * Static method to register JNA hooks for doing direct I/O - * - * @param workingDir A directory within the mounted file system on which we'll be working - * Should preferably BE the directory in which we'll be working. - */ - public static DirectIOLib getLibForPath(String workingDir) { - int fsBlockSize = initilizeSoftBlockSize(workingDir); - if (fsBlockSize == -1) { - logger.warn("O_DIRECT support non available on your version of Linux (" + System.getProperty("os.version") + "), " + - "please upgrade your kernel in order to use jaydio."); - return null; - } - return new DirectIOLib(fsBlockSize); - } - - /** - * Finds a block size for use with O_DIRECT. Choose it in the most paranoid - * way possible to maximize probability that things work. - * - * @param fileOrDir A file or directory within which O_DIRECT access will be performed. - */ - private static int initilizeSoftBlockSize(String fileOrDir) { - - int fsBlockSize = -1; - - if (binit) { - // get file system block size for use with workingDir - // see "man 3 posix_memalign" for why we do this - fsBlockSize = pathconf(fileOrDir, PC_REC_XFER_ALIGN); - /* conservative for version >= 2.6 - * "man 2 open": - * - * Under Linux 2.6, alignment - * to 512-byte boundaries suffices. - */ - - // Since O_DIRECT requires pages to be memory aligned with the file system block size, - // we will do this too in case the page size and the block size are different for - // whatever reason. 
By taking the least common multiple, everything should be happy: - int pageSize = getpagesize(); - fsBlockSize = lcm(fsBlockSize, pageSize); - - // just being completely paranoid: - // (512 is the rule for 2.6+ kernels as mentioned before) - fsBlockSize = lcm(fsBlockSize, 512); - - // lastly, a sanity check - if (fsBlockSize <= 0 || ((fsBlockSize & (fsBlockSize - 1)) != 0)) { - logger.warn("file system block size should be a power of two, was found to be " + fsBlockSize); - logger.warn("Disabling O_DIRECT support"); - return -1; - } - } - - return fsBlockSize; - } - - // -- Java interfaces to native methods - - /** - * Hooks into errno using Native.getLastError(), and parses it with native strerror function. - * - * @return An error message corresponding to the last errno - */ - public static String getLastError() { - return strerror(Native.getLastError()); - } - - /** - * Static variant of {@link #blockEnd(int)}. - * - * @param blockSize - * @param position - * @return The smallest number greater than or equal to position - * which is a multiple of the blockSize - */ - public static long blockEnd(int blockSize, long position) { - long ceil = (position + blockSize - 1) / blockSize; - return ceil * blockSize; - } - - /** - * Euclid's algo for gcd is more general than we need - * since we only have powers of 2, but w/e - * - * @param x - * @param y - * @return The least common multiple of x and y - */ - public static int lcm(long x, long y) { - // will hold gcd - long g = x; - long yc = y; - - // get the gcd first - while (yc != 0) { - long t = g; - g = yc; - yc = t % yc; - } - - return (int) (x * y / g); - } - - /** - * Given a pointer-to-pointer memptr, sets the dereferenced value to point to the start - * of an allocated block of size bytes, where the starting address is a multiple of - * alignment. It is guaranteed that the block may be freed by calling @{link {@link #free(Pointer)} - * on the starting address. See "man 3 posix_memalign". - * - * @param memptr The pointer-to-pointer which will point to the address of the allocated aligned block - * @param alignment The alignment multiple of the starting address of the allocated block - * @param size The number of bytes to allocate - * @return 0 on success, one of the C error codes on failure. - */ - public static native int posix_memalign(PointerByReference memptr, NativeLong alignment, NativeLong size); - - // -- alignment logic utility methods - - /** - * See "man 3 free". - * - * @param ptr The pointer to the hunk of memory which needs freeing - */ - public static native void free(Pointer ptr); - - public static native int ftruncate(int fd, long length); - - private static native NativeLong pwrite(int fd, Pointer buf, NativeLong count, NativeLong offset); - - private static native NativeLong pread(int fd, Pointer buf, NativeLong count, NativeLong offset); - - private static native int open(String pathname, int flags); - - private static native int open(String pathname, int flags, int mode); - - private static native int getpagesize(); - - private static native int pathconf(String path, int name); - - private static native String strerror(int errnum); - - /** - * Interface into native pread function. 
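The mask arithmetic this class relies on is worth a standalone check: for a power-of-two block size B, value & -B rounds down to a block boundary and (value + B - 1) & -B rounds up, which is exactly what blockStart and blockEnd compute. A sketch with an assumed 4 KiB block:

    public final class BlockMask {
        public static void main(String[] args) {
            int blockSize = 4096;               // assumed; the real value comes from pathconf
            long notMask = -((long) blockSize); // binary ...1111000000000000

            long v = 10_000;
            long start = v & notMask;                 // 8192: round down
            long end = (v + blockSize - 1) & notMask; // 12288: round up
            System.out.println(start + " " + end);
        }
    }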
- * - * @param fd A file discriptor to pass to native pread - * @param buf The direct buffer into which to record the file read - * @param offset The file offset at which to read - * @return The number of bytes successfully read from the file - * @throws IOException - */ - public int pread(int fd, ByteBuffer buf, long offset) throws IOException { - final int start = buf.position(); - assert start == blockStart(start); - final int toRead = buf.remaining(); - assert toRead == blockStart(toRead); - assert offset == blockStart(offset); - - final long address = PlatformDependent.directBufferAddress(buf); - Pointer pointer = new Pointer(address); - int n = pread(fd, pointer.share(start), new NativeLong(toRead), new NativeLong(offset)).intValue(); - if (n < 0) { - throw new IOException("error reading file at offset " + offset + ": " + getLastError()); - } - buf.position(n); - return n; - } - - /** - * Interface into native pwrite function. Writes bytes corresponding to the nearest file - * system block boundaries between buf.position() and buf.limit(). - * - * @param fd A file descriptor to pass to native pwrite - * @param buf The direct buffer from which to write - * @param offset The file offset at which to write - * @return The number of bytes successfully written to the file - * @throws IOException - */ - public int pwrite(int fd, ByteBuffer buf, long offset) throws IOException { - - // must always write to end of current block - // To handle writes past the logical file size, - // we will later truncate. - final int start = buf.position(); - assert start == blockStart(start); - final int toWrite = buf.remaining(); - assert toWrite == blockStart(toWrite); - assert offset == blockStart(offset); - - final long address = PlatformDependent.directBufferAddress(buf); - Pointer pointer = new Pointer(address); - - int n = pwrite(fd, pointer.share(start), new NativeLong(toWrite), new NativeLong(offset)).intValue(); - if (n < 0) { - throw new IOException("error writing file at offset " + offset + ": " + getLastError()); - } - buf.position(start + n); - return n; - } - - // -- more native function hooks -- - - /** - * Use the open Linux system call and pass in the O_DIRECT flag. - * Currently the only other flags passed in are O_RDONLY if readOnly - * is true, and (if not) O_RDWR and O_CREAT. - * - * @param pathname The path to the file to open. If file does not exist and we are opening - * with readOnly, this will throw an error. Otherwise, if it does - * not exist but we have readOnly set to false, create the file. - * @param readOnly Whether to pass in O_RDONLY - * @return An integer file descriptor for the opened file - */ - public int oDirectOpen(String pathname, boolean readOnly) throws IOException { - int flags = OpenFlags.INSTANCE.oDIRECT(); - if (readOnly) { - flags |= OpenFlags.INSTANCE.oRDONLY(); - } else { - flags |= OpenFlags.INSTANCE.oRDWR() | OpenFlags.INSTANCE.oCREAT(); - } - int fd = open(pathname, flags, 00644); - if (fd < 0) { - throw new IOException("Error opening " + pathname + ", got " + getLastError()); - } - return fd; - } - - /** - * @return The soft block size for use with transfer multiples - * and memory alignment multiples - */ - public int blockSize() { - return fsBlockSize; - } - - /** - * Returns the default buffer size for file channels doing O_DIRECT - * I/O. By default this is equal to the block size. 
- * - * @return The default buffer size - */ - public int defaultBufferSize() { - return fsBlockSize; - } - - /** - * Given value, find the largest number less than or equal - * to value which is a multiple of the fs block size. - * - * @param value - * @return The largest number less than or equal to value - * which is a multiple of the soft block size - */ - public long blockStart(long value) { - return value & fsBlockNotMask; - } - - /** - * @see #blockStart(long) - */ - public int blockStart(int value) { - return (int) (value & fsBlockNotMask); - } - - /** - * Given value, find the smallest number greater than or equal - * to value which is a multiple of the fs block size. - * - * @param value - * @return The smallest number greater than or equal to value - * which is a multiple of the soft block size - */ - public long blockEnd(long value) { - return (value + fsBlockSize - 1) & fsBlockNotMask; - } - - /** - * @see #blockEnd(long) - */ - public int blockEnd(int value) { - return (int) ((value + fsBlockSize - 1) & fsBlockNotMask); - } - - /** - * See "man 2 close" - * - * @param fd The file descriptor of the file to close - * @return 0 on success, -1 on error - */ - public native int close(int fd); // mustn't forget to do this - -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOUtils.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOUtils.java deleted file mode 100755 index ee631ba86..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectIOUtils.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -import com.sun.jna.NativeLong; -import com.sun.jna.Pointer; -import com.sun.jna.ptr.PointerByReference; -import io.netty.util.internal.PlatformDependent; -import java.nio.ByteBuffer; - -public class DirectIOUtils { - - /** - * Allocate capacity bytes of native memory for use as a buffer, and - * return a {@link ByteBuffer} which gives an interface to this memory. The - * memory is allocated with - * {@link DirectIOLib#posix_memalign(PointerByReference, NativeLong, NativeLong) DirectIOLib#posix_memalign()} - * to ensure that the buffer can be used with O_DIRECT. - * * - * - * @param capacity The requested number of bytes to allocate - * @return A new JnaMemAlignedBuffer of capacity bytes aligned in native memory. - */ - public static ByteBuffer allocateForDirectIO(DirectIOLib lib, int capacity) { - if (capacity % lib.blockSize() > 0) { - throw new IllegalArgumentException("Capacity (" + capacity + ") must be a multiple" - + "of the block size (" + lib.blockSize() + ")"); - } - NativeLong blockSize = new NativeLong(lib.blockSize()); - PointerByReference pointerToPointer = new PointerByReference(); - - // align memory for use with O_DIRECT - DirectIOLib.posix_memalign(pointerToPointer, blockSize, new NativeLong(capacity)); - return wrapPointer(Pointer.nativeValue(pointerToPointer.getValue()), capacity); - } - - /** - * @param ptr Pointer to wrap. - * @param len Memory location length. - * @return Byte buffer wrapping the given memory. 
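Putting the two helpers together, a hedged sketch of the intended lifecycle for an O_DIRECT buffer: allocate aligned native memory, keep every position, length, and file offset block-aligned as pread/pwrite assert, then free the memory explicitly since no cleaner is attached (the path is invented):

    import com.automq.stream.thirdparty.moe.cnkirito.kdio.DirectIOLib;
    import com.automq.stream.thirdparty.moe.cnkirito.kdio.DirectIOUtils;
    import java.nio.ByteBuffer;

    public final class DirectBufferLifecycle {
        public static void main(String[] args) {
            DirectIOLib lib = DirectIOLib.getLibForPath("/data/wal"); // hypothetical path
            if (lib == null) {
                return; // O_DIRECT is unsupported on this platform
            }
            int size = lib.blockSize() * 4; // a multiple of the block size
            ByteBuffer buf = DirectIOUtils.allocateForDirectIO(lib, size);
            try {
                buf.clear(); // position 0 and limit == capacity are both aligned
                // ... fill buf, then hand it to pwrite at a block-aligned offset ...
            } finally {
                DirectIOUtils.releaseDirectBuffer(buf); // no cleaner: free manually
            }
        }
    }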
- */ - public static ByteBuffer wrapPointer(long ptr, int len) { - ByteBuffer buf = PlatformDependent.directBuffer(ptr, len); - - assert buf.isDirect(); - return buf; - } - - public static boolean allocatorAvailable() { - return PlatformDependent.hasDirectBufferNoCleanerConstructor(); - } - - /** - * Release the memory of the buffer. - */ - public static void releaseDirectBuffer(ByteBuffer buffer) { - assert buffer.isDirect(); - PlatformDependent.freeDirectBuffer(buffer); - } -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectRandomAccessFile.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectRandomAccessFile.java deleted file mode 100755 index b7268b7f1..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/DirectRandomAccessFile.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -import java.io.Closeable; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; - -/** - * Class to emulate the behavior of {@link RandomAccessFile}, but using direct I/O. - */ -public class DirectRandomAccessFile implements Closeable { - - private final DirectChannel channel; - - /** - * @param file The file to open - * @param mode Either "rw" or "r", depending on whether this file is read only - * @throws IOException - */ - public DirectRandomAccessFile(File file, String mode) - throws IOException { - - boolean readOnly = false; - if ("r".equals(mode)) { - readOnly = true; - } else if (!"rw".equals(mode)) { - throw new IllegalArgumentException("only r and rw modes supported"); - } - - if (readOnly && !file.isFile()) { - throw new FileNotFoundException("couldn't find file " + file); - } - - this.channel = DirectChannelImpl.getChannel(file, readOnly); - } - - @Override - public void close() throws IOException { - channel.close(); - } - - public int write(ByteBuffer src, long position) throws IOException { - return channel.write(src, position); - } - - public int read(ByteBuffer dst, long position) throws IOException { - return channel.read(dst, position); - } - - /** - * @return The current position in the file - */ - public long getFilePointer() { - return channel.getFD(); - } - - /** - * @return The current length of the file - */ - public long length() { - return channel.size(); - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/OpenFlags.java b/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/OpenFlags.java deleted file mode 100755 index 50612dea3..000000000 --- a/s3stream/src/main/java/com/automq/stream/thirdparty/moe/cnkirito/kdio/OpenFlags.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.thirdparty.moe.cnkirito.kdio; - -/** - * Constants for {@link DirectIOLib#oDirectOpen(String, boolean)}. - */ -public interface OpenFlags { - OpenFlags INSTANCE = instance(); - - private static OpenFlags instance() { - String arch = System.getProperty("os.arch"); - switch (arch) { - case "aarch64": - return new Aarch64OpenFlags(); - default: - return new DefaultOpenFlags(); - } - } - - default int oRDONLY() { - return 00; - } - default int oWRONLY() { - return 01; - } - default int oRDWR() { - return 02; - } - default int oCREAT() { - return 0100; - } - default int oTRUNC() { - return 01000; - } - default int oDIRECT() { - return 040000; - } - default int oSYNC() { - return 04010000; - } - - class DefaultOpenFlags implements OpenFlags { - } - - class Aarch64OpenFlags implements OpenFlags { - @Override - public int oDIRECT() { - return 0200000; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/Arguments.java b/s3stream/src/main/java/com/automq/stream/utils/Arguments.java deleted file mode 100644 index c2623a02a..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/Arguments.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -public class Arguments { - - public static void check(boolean checkResult, String errorMessage) { - if (!checkResult) { - throw new IllegalArgumentException(errorMessage); - } - } - - public static void isNotNull(Object obj, String errorMessage) { - if (obj == null) { - throw new IllegalArgumentException(errorMessage); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/AsyncRateLimiter.java b/s3stream/src/main/java/com/automq/stream/utils/AsyncRateLimiter.java deleted file mode 100644 index f31de7a17..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/AsyncRateLimiter.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
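Because the flag values above are plain octal constants, composing an open mode is ordinary bit arithmetic; the only per-architecture difference handled here is O_DIRECT on aarch64. A tiny illustration:

    import com.automq.stream.thirdparty.moe.cnkirito.kdio.OpenFlags;

    public final class FlagDemo {
        public static void main(String[] args) {
            OpenFlags flags = OpenFlags.INSTANCE; // Aarch64OpenFlags on aarch64
            int rdwrCreateDirect = flags.oRDWR() | flags.oCREAT() | flags.oDIRECT();
            // 02 | 0100 | 040000 = 040102 on x86-64; aarch64 swaps in 0200000
            System.out.println(Integer.toOctalString(rdwrCreateDirect));
        }
    }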
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import com.google.common.util.concurrent.RateLimiter; -import java.util.Objects; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings("UnstableApiUsage") -public class AsyncRateLimiter { - private static final Logger LOGGER = LoggerFactory.getLogger(AsyncRateLimiter.class); - private static final ScheduledExecutorService SCHEDULER = Threads.newSingleThreadScheduledExecutor("async-rate-limiter", true, LOGGER); - private final Queue acquireQueue = new ConcurrentLinkedQueue<>(); - private final RateLimiter rateLimiter; - private final ScheduledFuture tickTask; - - public AsyncRateLimiter(double bytesPerSec) { - rateLimiter = RateLimiter.create(bytesPerSec, 100, TimeUnit.MILLISECONDS); - tickTask = SCHEDULER.scheduleAtFixedRate(this::tick, 1, 1, TimeUnit.MILLISECONDS); - } - - public synchronized CompletableFuture acquire(int size) { - if (acquireQueue.isEmpty() && rateLimiter.tryAcquire(size)) { - return CompletableFuture.completedFuture(null); - } else { - CompletableFuture cf = new CompletableFuture<>(); - acquireQueue.add(new Acquire(cf, size)); - return cf; - } - } - - public void close() { - tickTask.cancel(false); - } - - private synchronized void tick() { - for (; ; ) { - Acquire acquire = acquireQueue.peek(); - if (acquire == null) { - break; - } - if (rateLimiter.tryAcquire(acquire.size)) { - acquireQueue.poll(); - acquire.cf.complete(null); - } else { - break; - } - } - } - - static final class Acquire { - private final CompletableFuture cf; - private final int size; - - Acquire(CompletableFuture cf, int size) { - this.cf = cf; - this.size = size; - } - - public CompletableFuture cf() { - return cf; - } - - public int size() { - return size; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj == null || obj.getClass() != this.getClass()) - return false; - var that = (Acquire) obj; - return Objects.equals(this.cf, that.cf) && - this.size == that.size; - } - - @Override - public int hashCode() { - return Objects.hash(cf, size); - } - - @Override - public String toString() { - return "Acquire[" + - "cf=" + cf + ", " + - "size=" + size + ']'; - } - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/ByteBufInputStream.java b/s3stream/src/main/java/com/automq/stream/utils/ByteBufInputStream.java deleted file mode 100644 index 121a5f022..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/ByteBufInputStream.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
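The limiter above grants queued requests in FIFO order from a 1 ms scheduler tick, so callers simply chain work after acquire(). A hedged usage sketch (the rate and payload size are invented):

    import com.automq.stream.utils.AsyncRateLimiter;

    public final class RateLimitedUpload {
        public static void main(String[] args) {
            AsyncRateLimiter limiter = new AsyncRateLimiter(10 << 20); // 10 MiB/s
            byte[] part = new byte[1 << 20];
            // acquire() completes immediately if tokens are available,
            // otherwise once the background tick grants them.
            limiter.acquire(part.length).thenRun(() -> upload(part)).join();
            limiter.close();
        }

        private static void upload(byte[] part) {
            // placeholder for the real network call
        }
    }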
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.utils; - -import io.netty.buffer.ByteBuf; -import java.io.InputStream; - -/** - * A byte buffer backed input inputStream - */ -public final class ByteBufInputStream extends InputStream { - private final ByteBuf buffer; - - public ByteBufInputStream(ByteBuf buffer) { - this.buffer = buffer; - } - - public int read() { - if (buffer.readableBytes() == 0) { - return -1; - } - return buffer.readByte() & 0xFF; - } - - public int read(byte[] bytes, int off, int len) { - if (len == 0) { - return 0; - } - if (buffer.readableBytes() == 0) { - return -1; - } - - len = Math.min(len, buffer.readableBytes()); - buffer.readBytes(bytes, off, len); - return len; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/CloseableIterator.java b/s3stream/src/main/java/com/automq/stream/utils/CloseableIterator.java deleted file mode 100644 index 09855c5a3..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/CloseableIterator.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.utils; - -import java.io.Closeable; -import java.util.Iterator; - -/** - * Iterators that need to be closed in order to release resources should implement this interface. - *

- * Warning: before implementing this interface, consider if there are better options. The chance of misuse is - * a bit high since people are used to iterating without closing. - */ -public interface CloseableIterator extends Iterator, Closeable { - static CloseableIterator wrap(Iterator inner) { - return new CloseableIterator() { - @Override - public void close() { - } - - @Override - public boolean hasNext() { - return inner.hasNext(); - } - - @Override - public R next() { - return inner.next(); - } - - @Override - public void remove() { - inner.remove(); - } - }; - } - - void close(); -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/CommandResult.java b/s3stream/src/main/java/com/automq/stream/utils/CommandResult.java deleted file mode 100644 index 48a17ae0f..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/CommandResult.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -public class CommandResult { - private final int code; - private final String stdout; - private final String stderr; - - public CommandResult(int code, String stdout, String stderr) { - this.code = code; - this.stdout = stdout; - this.stderr = stderr; - } - - /** - * Returns true if the command exited with a zero exit code. - */ - public boolean success() { - return code == 0; - } - - public int code() { - return code; - } - - public String stdout() { - return stdout; - } - - public String stderr() { - return stderr; - } - - @Override - public String toString() { - return "CommandResult{" + - "code=" + code + - ", stdout='" + stdout + '\'' + - ", stderr='" + stderr + '\'' + - '}'; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/CommandUtils.java b/s3stream/src/main/java/com/automq/stream/utils/CommandUtils.java deleted file mode 100644 index bcca1945f..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/CommandUtils.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.stream.Collectors; - -public class CommandUtils { - public static CommandResult run(String... 
cmd) { - try { - Process p = Runtime.getRuntime().exec(cmd); - try (BufferedReader inputReader = new BufferedReader(new InputStreamReader(p.getInputStream())); - BufferedReader errorReader = new BufferedReader(new InputStreamReader(p.getErrorStream()))) { - String stdout = inputReader.lines().collect(Collectors.joining("\n")); - String stderr = errorReader.lines().collect(Collectors.joining("\n")); - int code = p.waitFor(); - return new CommandResult(code, stdout, stderr); - } - } catch (IOException | InterruptedException e) { - return new CommandResult(-1, "", e.getMessage()); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/FutureTicker.java b/s3stream/src/main/java/com/automq/stream/utils/FutureTicker.java deleted file mode 100644 index c377e1b94..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/FutureTicker.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; - -/** - * A ticker base on {@link CompletableFuture}. It is used to batch operations. - *
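A usage sketch for the `CommandUtils`/`CommandResult` pair above (the command itself is illustrative):

```java
// Sketch: run an external command and branch on the captured exit status.
public class CommandUtilsExample {
    public static void main(String[] args) {
        CommandResult result = CommandUtils.run("uname", "-a");
        if (result.success()) { // exit code == 0
            System.out.println(result.stdout());
        } else {
            // On IOException/InterruptedException the helper returns code -1
            // and carries the exception message in stderr.
            System.err.println("failed (" + result.code() + "): " + result.stderr());
        }
    }
}
```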

- * For example, if we want to batch operations every 100ms, we can use the following code: - *

- * {@code
- * FutureTicker ticker = new FutureTicker(100, TimeUnit.MILLISECONDS, executor);
- * while (true) {
- *     ticker.tick().thenAccept(v -> operation());
- *     Thread.sleep(1);
- * }
- * }
- * 
- * Operations will be batched every 100ms. - */ -public class FutureTicker { - - private final Executor delayedExecutor; - - private CompletableFuture currentTick = CompletableFuture.completedFuture(null); - - /** - * Create a ticker with a delay and a executor - * - * @param delay the delay - * @param unit the time unit of the delay - * @param executor the executor, the {@link CompletableFuture} returned by {@link #tick()} will be completed by this executor - */ - public FutureTicker(long delay, TimeUnit unit, Executor executor) { - this.delayedExecutor = CompletableFuture.delayedExecutor(delay, unit, executor); - } - - /** - * Tick the ticker. It returns a future which will complete after the delay. - * If the ticker is already ticking, the same future will be returned. - * It is thread safe to call this method. - */ - public CompletableFuture tick() { - return maybeNextTick(); - } - - /** - * Generate a new tick if the current tick is done - */ - private synchronized CompletableFuture maybeNextTick() { - if (currentTick.isDone()) { - // a future which will complete after delay - currentTick = CompletableFuture.runAsync(() -> { - }, delayedExecutor); - } - return currentTick; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/FutureUtil.java b/s3stream/src/main/java/com/automq/stream/utils/FutureUtil.java deleted file mode 100644 index c2782d138..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/FutureUtil.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import java.util.Iterator; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ExecutionException; -import java.util.function.Supplier; -import org.slf4j.Logger; - -public class FutureUtil { - public static CompletableFuture failedFuture(Throwable ex) { - CompletableFuture cf = new CompletableFuture<>(); - cf.completeExceptionally(ex); - return cf; - } - - public static void suppress(ThrowableRunnable run, Logger logger) { - try { - run.run(); - } catch (Throwable t) { - logger.error("Suppress error", t); - } - } - - /** - * Propagate CompleteFuture result / error from source to dest. - */ - public static void propagate(CompletableFuture source, CompletableFuture dest) { - source.whenComplete((rst, ex) -> { - if (ex != null) { - dest.completeExceptionally(ex); - } else { - dest.complete(rst); - } - }); - } - - /** - * Catch exceptions as a last resort to avoid unresponsiveness. - */ - public static CompletableFuture exec(Supplier> run, Logger logger, String name) { - try { - return run.get(); - } catch (Throwable ex) { - logger.error("{} run with unexpected exception", name, ex); - return failedFuture(ex); - } - } - - /** - * Catch exceptions as a last resort to avoid unresponsiveness. 
- */ - public static void exec(Runnable run, CompletableFuture cf, Logger logger, String name) { - try { - run.run(); - } catch (Throwable ex) { - logger.error("{} run with unexpected exception", name, ex); - cf.completeExceptionally(ex); - } - } - - public static Throwable cause(Throwable ex) { - if (ex instanceof ExecutionException) { - if (ex.getCause() != null) { - return cause(ex.getCause()); - } else { - return ex; - } - } else if (ex instanceof CompletionException) { - if (ex.getCause() != null) { - return cause(ex.getCause()); - } else { - return ex; - } - } - return ex; - } - - public static void completeExceptionally(Iterator> futures, Throwable ex) { - while (futures.hasNext()) { - CompletableFuture future = futures.next(); - future.completeExceptionally(ex); - } - } - - public static void complete(Iterator> futures, T value) { - while (futures.hasNext()) { - CompletableFuture future = futures.next(); - future.complete(value); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/GlobalSwitch.java b/s3stream/src/main/java/com/automq/stream/utils/GlobalSwitch.java deleted file mode 100644 index e56686e41..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/GlobalSwitch.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -public class GlobalSwitch { - - public static final boolean STRICT = getBoolean("AUTOMQ_S3STREAM_STRICT", false); - - private static boolean getBoolean(String name, boolean defaultValue) { - String value = System.getenv(name); - if (value == null) { - return defaultValue; - } else { - return Boolean.parseBoolean(value); - } - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/LogContext.java b/s3stream/src/main/java/com/automq/stream/utils/LogContext.java deleted file mode 100644 index a88e090c6..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/LogContext.java +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream.utils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.Marker; -import org.slf4j.helpers.FormattingTuple; -import org.slf4j.helpers.MessageFormatter; -import org.slf4j.spi.LocationAwareLogger; - -/** - * This class provides a way to instrument loggers with a common context which can be used to - * automatically enrich log messages. - */ -public class LogContext { - - private final String logPrefix; - - public LogContext(String logPrefix) { - this.logPrefix = logPrefix == null ? 
"" : logPrefix; - } - - public LogContext() { - this(""); - } - - public Logger logger(Class clazz) { - Logger logger = LoggerFactory.getLogger(clazz); - if (logger instanceof LocationAwareLogger) { - return new LocationAwareKafkaLogger(logPrefix, (LocationAwareLogger) logger); - } else { - return new LocationIgnorantKafkaLogger(logPrefix, logger); - } - } - - public Logger logger(String clazz) { - Logger logger = LoggerFactory.getLogger(clazz); - if (logger instanceof LocationAwareLogger) { - return new LocationAwareKafkaLogger(logPrefix, (LocationAwareLogger) logger); - } else { - return new LocationIgnorantKafkaLogger(logPrefix, logger); - } - } - - public String logPrefix() { - return logPrefix; - } - - private static abstract class AbstractKafkaLogger implements Logger { - private final String prefix; - - protected AbstractKafkaLogger(final String prefix) { - this.prefix = prefix; - } - - protected String addPrefix(final String message) { - return prefix + message; - } - } - - private static class LocationAwareKafkaLogger extends AbstractKafkaLogger { - private final LocationAwareLogger logger; - private final String fqcn; - - LocationAwareKafkaLogger(String logPrefix, LocationAwareLogger logger) { - super(logPrefix); - this.logger = logger; - this.fqcn = LocationAwareKafkaLogger.class.getName(); - } - - @Override - public String getName() { - return logger.getName(); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public boolean isTraceEnabled(Marker marker) { - return logger.isTraceEnabled(marker); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public boolean isDebugEnabled(Marker marker) { - return logger.isDebugEnabled(marker); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public boolean isInfoEnabled(Marker marker) { - return logger.isInfoEnabled(marker); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public boolean isWarnEnabled(Marker marker) { - return logger.isWarnEnabled(marker); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public boolean isErrorEnabled(Marker marker) { - return logger.isErrorEnabled(marker); - } - - @Override - public void trace(String message) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, message, null, null); - } - } - - @Override - public void trace(String format, Object arg) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[] {arg}, null); - } - } - - @Override - public void trace(String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, new Object[] {arg1, arg2}, null); - } - } - - @Override - public void trace(String format, Object... 
args) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, format, args, null); - } - } - - @Override - public void trace(String msg, Throwable t) { - if (logger.isTraceEnabled()) { - writeLog(null, LocationAwareLogger.TRACE_INT, msg, null, t); - } - } - - @Override - public void trace(Marker marker, String msg) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, null); - } - } - - @Override - public void trace(Marker marker, String format, Object arg) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[] {arg}, null); - } - } - - @Override - public void trace(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, new Object[] {arg1, arg2}, null); - } - } - - @Override - public void trace(Marker marker, String format, Object... argArray) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, format, argArray, null); - } - } - - @Override - public void trace(Marker marker, String msg, Throwable t) { - if (logger.isTraceEnabled()) { - writeLog(marker, LocationAwareLogger.TRACE_INT, msg, null, t); - } - } - - @Override - public void debug(String message) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, message, null, null); - } - } - - @Override - public void debug(String format, Object arg) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[] {arg}, null); - } - } - - @Override - public void debug(String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, new Object[] {arg1, arg2}, null); - } - } - - @Override - public void debug(String format, Object... args) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, format, args, null); - } - } - - @Override - public void debug(String msg, Throwable t) { - if (logger.isDebugEnabled()) { - writeLog(null, LocationAwareLogger.DEBUG_INT, msg, null, t); - } - } - - @Override - public void debug(Marker marker, String msg) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, null); - } - } - - @Override - public void debug(Marker marker, String format, Object arg) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[] {arg}, null); - } - } - - @Override - public void debug(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, new Object[] {arg1, arg2}, null); - } - } - - @Override - public void debug(Marker marker, String format, Object... 
arguments) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, format, arguments, null); - } - } - - @Override - public void debug(Marker marker, String msg, Throwable t) { - if (logger.isDebugEnabled()) { - writeLog(marker, LocationAwareLogger.DEBUG_INT, msg, null, t); - } - } - - @Override - public void warn(String message) { - writeLog(null, LocationAwareLogger.WARN_INT, message, null, null); - } - - @Override - public void warn(String format, Object arg) { - writeLog(null, LocationAwareLogger.WARN_INT, format, new Object[] {arg}, null); - } - - @Override - public void warn(String message, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.WARN_INT, message, new Object[] {arg1, arg2}, null); - } - - @Override - public void warn(String format, Object... args) { - writeLog(null, LocationAwareLogger.WARN_INT, format, args, null); - } - - @Override - public void warn(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.WARN_INT, msg, null, t); - } - - @Override - public void warn(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, null); - } - - @Override - public void warn(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[] {arg}, null); - } - - @Override - public void warn(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, new Object[] {arg1, arg2}, null); - } - - @Override - public void warn(Marker marker, String format, Object... arguments) { - writeLog(marker, LocationAwareLogger.WARN_INT, format, arguments, null); - } - - @Override - public void warn(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.WARN_INT, msg, null, t); - } - - @Override - public void error(String message) { - writeLog(null, LocationAwareLogger.ERROR_INT, message, null, null); - } - - @Override - public void error(String format, Object arg) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[] {arg}, null); - } - - @Override - public void error(String format, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, new Object[] {arg1, arg2}, null); - } - - @Override - public void error(String format, Object... args) { - writeLog(null, LocationAwareLogger.ERROR_INT, format, args, null); - } - - @Override - public void error(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.ERROR_INT, msg, null, t); - } - - @Override - public void error(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, null); - } - - @Override - public void error(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[] {arg}, null); - } - - @Override - public void error(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, new Object[] {arg1, arg2}, null); - } - - @Override - public void error(Marker marker, String format, Object... 
arguments) { - writeLog(marker, LocationAwareLogger.ERROR_INT, format, arguments, null); - } - - @Override - public void error(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.ERROR_INT, msg, null, t); - } - - @Override - public void info(String msg) { - writeLog(null, LocationAwareLogger.INFO_INT, msg, null, null); - } - - @Override - public void info(String format, Object arg) { - writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[] {arg}, null); - } - - @Override - public void info(String format, Object arg1, Object arg2) { - writeLog(null, LocationAwareLogger.INFO_INT, format, new Object[] {arg1, arg2}, null); - } - - @Override - public void info(String format, Object... args) { - writeLog(null, LocationAwareLogger.INFO_INT, format, args, null); - } - - @Override - public void info(String msg, Throwable t) { - writeLog(null, LocationAwareLogger.INFO_INT, msg, null, t); - } - - @Override - public void info(Marker marker, String msg) { - writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, null); - } - - @Override - public void info(Marker marker, String format, Object arg) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[] {arg}, null); - } - - @Override - public void info(Marker marker, String format, Object arg1, Object arg2) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, new Object[] {arg1, arg2}, null); - } - - @Override - public void info(Marker marker, String format, Object... arguments) { - writeLog(marker, LocationAwareLogger.INFO_INT, format, arguments, null); - } - - @Override - public void info(Marker marker, String msg, Throwable t) { - writeLog(marker, LocationAwareLogger.INFO_INT, msg, null, t); - } - - private void writeLog(Marker marker, int level, String format, Object[] args, Throwable exception) { - String message = format; - if (args != null && args.length > 0) { - FormattingTuple formatted = MessageFormatter.arrayFormat(format, args); - if (exception == null && formatted.getThrowable() != null) { - exception = formatted.getThrowable(); - } - message = formatted.getMessage(); - } - logger.log(marker, fqcn, level, addPrefix(message), null, exception); - } - } - - private static class LocationIgnorantKafkaLogger extends AbstractKafkaLogger { - private final Logger logger; - - LocationIgnorantKafkaLogger(String logPrefix, Logger logger) { - super(logPrefix); - this.logger = logger; - } - - @Override - public String getName() { - return logger.getName(); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public boolean isTraceEnabled(Marker marker) { - return logger.isTraceEnabled(marker); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public boolean isDebugEnabled(Marker marker) { - return logger.isDebugEnabled(marker); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public boolean isInfoEnabled(Marker marker) { - return logger.isInfoEnabled(marker); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public boolean isWarnEnabled(Marker marker) { - return logger.isWarnEnabled(marker); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public boolean isErrorEnabled(Marker marker) { - return logger.isErrorEnabled(marker); - } - - @Override - public void trace(String message) { - if (logger.isTraceEnabled()) { 
- logger.trace(addPrefix(message)); - } - } - - @Override - public void trace(String message, Object arg) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), arg); - } - } - - @Override - public void trace(String message, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), arg1, arg2); - } - } - - @Override - public void trace(String message, Object... args) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(message), args); - } - } - - @Override - public void trace(String msg, Throwable t) { - if (logger.isTraceEnabled()) { - logger.trace(addPrefix(msg), t); - } - } - - @Override - public void trace(Marker marker, String msg) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(msg)); - } - } - - @Override - public void trace(Marker marker, String format, Object arg) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), arg); - } - } - - @Override - public void trace(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), arg1, arg2); - } - } - - @Override - public void trace(Marker marker, String format, Object... argArray) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(format), argArray); - } - } - - @Override - public void trace(Marker marker, String msg, Throwable t) { - if (logger.isTraceEnabled()) { - logger.trace(marker, addPrefix(msg), t); - } - } - - @Override - public void debug(String message) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message)); - } - } - - @Override - public void debug(String message, Object arg) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), arg); - } - } - - @Override - public void debug(String message, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), arg1, arg2); - } - } - - @Override - public void debug(String message, Object... args) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(message), args); - } - } - - @Override - public void debug(String msg, Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug(addPrefix(msg), t); - } - } - - @Override - public void debug(Marker marker, String msg) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(msg)); - } - } - - @Override - public void debug(Marker marker, String format, Object arg) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arg); - } - } - - @Override - public void debug(Marker marker, String format, Object arg1, Object arg2) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arg1, arg2); - } - } - - @Override - public void debug(Marker marker, String format, Object... arguments) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(format), arguments); - } - } - - @Override - public void debug(Marker marker, String msg, Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug(marker, addPrefix(msg), t); - } - } - - @Override - public void warn(String message) { - logger.warn(addPrefix(message)); - } - - @Override - public void warn(String message, Object arg) { - logger.warn(addPrefix(message), arg); - } - - @Override - public void warn(String message, Object arg1, Object arg2) { - logger.warn(addPrefix(message), arg1, arg2); - } - - @Override - public void warn(String message, Object... 
args) { - logger.warn(addPrefix(message), args); - } - - @Override - public void warn(String msg, Throwable t) { - logger.warn(addPrefix(msg), t); - } - - @Override - public void warn(Marker marker, String msg) { - logger.warn(marker, addPrefix(msg)); - } - - @Override - public void warn(Marker marker, String format, Object arg) { - logger.warn(marker, addPrefix(format), arg); - } - - @Override - public void warn(Marker marker, String format, Object arg1, Object arg2) { - logger.warn(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void warn(Marker marker, String format, Object... arguments) { - logger.warn(marker, addPrefix(format), arguments); - } - - @Override - public void warn(Marker marker, String msg, Throwable t) { - logger.warn(marker, addPrefix(msg), t); - } - - @Override - public void error(String message) { - logger.error(addPrefix(message)); - } - - @Override - public void error(String message, Object arg) { - logger.error(addPrefix(message), arg); - } - - @Override - public void error(String message, Object arg1, Object arg2) { - logger.error(addPrefix(message), arg1, arg2); - } - - @Override - public void error(String message, Object... args) { - logger.error(addPrefix(message), args); - } - - @Override - public void error(String msg, Throwable t) { - logger.error(addPrefix(msg), t); - } - - @Override - public void error(Marker marker, String msg) { - logger.error(marker, addPrefix(msg)); - } - - @Override - public void error(Marker marker, String format, Object arg) { - logger.error(marker, addPrefix(format), arg); - } - - @Override - public void error(Marker marker, String format, Object arg1, Object arg2) { - logger.error(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void error(Marker marker, String format, Object... arguments) { - logger.error(marker, addPrefix(format), arguments); - } - - @Override - public void error(Marker marker, String msg, Throwable t) { - logger.error(marker, addPrefix(msg), t); - } - - @Override - public void info(String message) { - logger.info(addPrefix(message)); - } - - @Override - public void info(String message, Object arg) { - logger.info(addPrefix(message), arg); - } - - @Override - public void info(String message, Object arg1, Object arg2) { - logger.info(addPrefix(message), arg1, arg2); - } - - @Override - public void info(String message, Object... args) { - logger.info(addPrefix(message), args); - } - - @Override - public void info(String msg, Throwable t) { - logger.info(addPrefix(msg), t); - } - - @Override - public void info(Marker marker, String msg) { - logger.info(marker, addPrefix(msg)); - } - - @Override - public void info(Marker marker, String format, Object arg) { - logger.info(marker, addPrefix(format), arg); - } - - @Override - public void info(Marker marker, String format, Object arg1, Object arg2) { - logger.info(marker, addPrefix(format), arg1, arg2); - } - - @Override - public void info(Marker marker, String format, Object... arguments) { - logger.info(marker, addPrefix(format), arguments); - } - - @Override - public void info(Marker marker, String msg, Throwable t) { - logger.info(marker, addPrefix(msg), t); - } - - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/S3Utils.java b/s3stream/src/main/java/com/automq/stream/utils/S3Utils.java deleted file mode 100644 index 1a644aede..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/S3Utils.java +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
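A usage sketch for the `LogContext` wrapper above (the prefix is illustrative):

```java
import org.slf4j.Logger;

// Sketch: give every log line from a component a fixed, searchable prefix.
public class LogContextExample {
    private static final Logger LOGGER =
            new LogContext("[StreamId=42] ").logger(LogContextExample.class);

    public static void main(String[] args) {
        // Emitted as "[StreamId=42] opened stream at offset 100". The wrapper picks a
        // location-aware or plain delegate depending on the underlying SLF4J binding.
        LOGGER.info("opened stream at offset {}", 100);
    }
}
```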
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import com.automq.stream.s3.ByteBufAlloc; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import java.net.URI; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Objects; -import java.util.Random; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.exception.ExceptionUtils; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.async.AsyncResponseTransformer; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; -import software.amazon.awssdk.services.s3.model.CompletedPart; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; -import software.amazon.awssdk.services.s3.model.GetObjectRequest; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.S3Exception; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.services.s3.model.UploadPartResponse; - -public class S3Utils { - - /** - * Check s3 access with context. - * This method is generally used to help users figure out problems in using S3. - * - * @param context s3 context. 
- */ - public static void checkS3Access(S3Context context) { - try (ObjectOperationTask task = new ObjectOperationTask(context)) { - task.run(); - } catch (Throwable e) { - System.out.println("ERROR: " + ExceptionUtils.getRootCause(e)); - System.exit(1); - } - - try (S3MultipartUploadTestTask task = new S3MultipartUploadTestTask(context)) { - task.run(); - } catch (Throwable e) { - System.out.println("ERROR: " + ExceptionUtils.getRootCause(e)); - System.exit(1); - } - } - - private static String range(long start, long end) { - if (end == -1L) { - return "bytes=" + start + "-"; - } - return "bytes=" + start + "-" + end; - } - - private static S3AsyncClient newS3AsyncClient(String endpoint, String region, boolean forcePathStyle, - List credentialsProviders) { - S3AsyncClientBuilder builder = S3AsyncClient.builder().region(Region.of(region)); - if (StringUtils.isNotBlank(endpoint)) { - builder.endpointOverride(URI.create(endpoint)); - } - builder.serviceConfiguration(c -> c.pathStyleAccessEnabled(forcePathStyle)); - builder.credentialsProvider(AwsCredentialsProviderChain.builder().credentialsProviders(credentialsProviders).build()); - builder.overrideConfiguration(b -> b.apiCallTimeout(Duration.ofMinutes(1)) - .apiCallAttemptTimeout(Duration.ofSeconds(30))); - return builder.build(); - } - - private static abstract class S3CheckTask implements AutoCloseable { - protected final S3AsyncClient client; - protected final String bucketName; - private final String taskName; - - public S3CheckTask(S3Context context, String taskName) { - this.client = newS3AsyncClient(context.endpoint, context.region, context.forcePathStyle, context.credentialsProviders); - this.bucketName = context.bucketName; - this.taskName = taskName; - } - - protected static void showErrorInfo(Exception e) { - if (e.getCause() instanceof S3Exception) { - S3Exception se = (S3Exception) e.getCause(); - // Do not use system.err because automq admin tool suppress system.err - System.out.println("get S3 exception: "); - se.printStackTrace(System.out); - } else { - System.out.println("get other exception: "); - e.printStackTrace(System.out); - } - } - - protected void run() { - } - - public String getTaskName() { - return taskName; - } - - @Override - public void close() { - if (this.client != null) { - client.close(); - } - } - } - - // This task is used to test s3 multipart upload - private static class S3MultipartUploadTestTask extends ObjectOperationTask { - private Random random = new Random(); - public S3MultipartUploadTestTask(S3Context context) { - super(context, S3MultipartUploadTestTask.class.getSimpleName()); - } - - @Override - public void run() { - ByteBuf byteBuf = null; - try { - // Simple write/read/delete - String uploadId = createMultipartUpload(client, bucketName, path).get(); - List parts = new ArrayList<>(); - int data1Size = 1024 * 1024 * 5; - int data2Size = 1024; - int totalSize = data1Size + data2Size; - - byte[] randomBytes = new byte[data1Size]; - random.nextBytes(randomBytes); - ByteBuf data1 = Unpooled.wrappedBuffer(randomBytes); - writePart(uploadId, path, bucketName, data1, 1).thenAccept(parts::add).get(); - - byte[] randomBytes2 = new byte[data2Size]; - random.nextBytes(randomBytes2); - ByteBuf data2 = Unpooled.wrappedBuffer(randomBytes2); - writePart(uploadId, path, bucketName, data2, 2).thenAccept(parts::add).get(); - - System.out.println("[ OK ] Write S3 object"); - - completeMultipartUpload(client, path, bucketName, uploadId, parts).get(); - System.out.println("[ OK ] Upload s3 multipart object"); - 
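-                // Read the whole object back (range 0..-1 means "to end of object") and
-                // check its size equals both uploaded parts before declaring success.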
- CompletableFuture readCf = new CompletableFuture<>(); - readRange(client, path, readCf, bucketName, 0, -1); - byteBuf = readCf.get(); - if (byteBuf == null) { - System.out.println("[ FAILED ] Read s3 multipart object"); - throw new RuntimeException("read multipart object " + path + " fail. got null"); - } else if (byteBuf.readableBytes() != totalSize) { - System.out.println("[ FAILED ] Read s3 multipart object"); - throw new RuntimeException("read multipart object " + path + " fail. expected size " + totalSize + ", actual size " + byteBuf.readableBytes()); - } - System.out.println("[ OK ] Read s3 multipart object"); - } catch (ExecutionException | InterruptedException e) { - showErrorInfo(e); - throw new RuntimeException(e); - } finally { - if (byteBuf != null) { - byteBuf.release(); - } - } - } - - private CompletableFuture createMultipartUpload(S3AsyncClient writeS3Client, String bucketName, - String path) { - CompletableFuture cf = new CompletableFuture<>(); - CreateMultipartUploadRequest request = CreateMultipartUploadRequest.builder().bucket(bucketName).key(path).build(); - writeS3Client.createMultipartUpload(request).thenAccept(createMultipartUploadResponse -> { - cf.complete(createMultipartUploadResponse.uploadId()); - }).exceptionally(ex -> { - System.out.println("[ FAILED ] Upload s3 multipart object"); - cf.completeExceptionally(ex); - return null; - }); - return cf; - } - - public CompletableFuture completeMultipartUpload(S3AsyncClient writeS3Client, String path, String bucket, - String uploadId, List parts) { - CompletableFuture cf = new CompletableFuture<>(); - CompletedMultipartUpload multipartUpload = CompletedMultipartUpload.builder().parts(parts).build(); - CompleteMultipartUploadRequest request = CompleteMultipartUploadRequest.builder().bucket(bucket).key(path).uploadId(uploadId).multipartUpload(multipartUpload).build(); - - writeS3Client.completeMultipartUpload(request).thenAccept(completeMultipartUploadResponse -> { - cf.complete(null); - }).exceptionally(ex -> { - System.out.println("[ FAILED ] Upload s3 multipart object, upload id is " + uploadId); - cf.completeExceptionally(ex); - return null; - }); - return cf; - } - - private CompletableFuture writePart(String uploadId, String path, String bucket, ByteBuf data, - int partNum) { - CompletableFuture cf = new CompletableFuture<>(); - uploadPart(client, cf, path, uploadId, partNum, bucket, data); - return cf; - } - - private void uploadPart(S3AsyncClient writeS3Client, CompletableFuture cf, String path, - String uploadId, int partNumber, String bucket, ByteBuf part) { - AsyncRequestBody body = AsyncRequestBody.fromByteBuffersUnsafe(part.nioBuffers()); - UploadPartRequest request = UploadPartRequest.builder().bucket(bucket).key(path).uploadId(uploadId) - .partNumber(partNumber).build(); - CompletableFuture uploadPartCf = writeS3Client.uploadPart(request, body); - uploadPartCf.thenAccept(uploadPartResponse -> { - CompletedPart completedPart = CompletedPart.builder().partNumber(partNumber).eTag(uploadPartResponse.eTag()).build(); - cf.complete(completedPart); - }).exceptionally(ex -> { - cf.completeExceptionally(ex); - return null; - }); - cf.whenComplete((rst, ex) -> part.release()); - } - } - - private static class ObjectOperationTask extends S3CheckTask { - protected final String path; - - public ObjectOperationTask(S3Context context) { - this(context, ObjectOperationTask.class.getSimpleName()); - } - - protected ObjectOperationTask(S3Context context, String taskName) { - super(context, taskName); - this.path = 
String.format("%d/%s", System.nanoTime(), getTaskName()); - } - - @Override - public void run() { - byte[] content = new Date().toString().getBytes(StandardCharsets.UTF_8); - ByteBuf byteBuf = null; - try { - // Simple write/read/delete - CompletableFuture writeCf = new CompletableFuture<>(); - writeObject(client, path, ByteBuffer.wrap(content), writeCf, bucketName); - writeCf.get(); - System.out.println("[ OK ] Write s3 object"); - - CompletableFuture readCf = new CompletableFuture<>(); - readRange(client, path, readCf, bucketName, 0, -1); - byteBuf = readCf.get(); - if (byteBuf == null) { - System.out.println("[ Failed ] Read s3 object"); - throw new RuntimeException("read object " + path + " fail. got null"); - } else if (byteBuf.readableBytes() != content.length) { - System.out.println("[ Failed ] Read s3 object"); - throw new RuntimeException("read object " + path + " fail. expected size " + content.length + ", actual size " + byteBuf.readableBytes()); - } - byte[] readContent = new byte[byteBuf.readableBytes()]; - byteBuf.readBytes(readContent); - if (!StringUtils.equals(new String(readContent, StandardCharsets.UTF_8), new String(content, StandardCharsets.UTF_8))) { - System.out.println("[ Failed ] Read s3 object"); - throw new RuntimeException("read object " + path + " fail. expected content " + new String(content, StandardCharsets.UTF_8) + ", actual content " + new String(readContent, StandardCharsets.UTF_8)); - } - System.out.println("[ OK ] Read s3 object"); - } catch (ExecutionException | InterruptedException e) { - showErrorInfo(e); - throw new RuntimeException(e); - } finally { - if (byteBuf != null) { - byteBuf.release(); - } - } - } - - private void writeObject(S3AsyncClient writeS3Client, String path, ByteBuffer data, CompletableFuture cf, - String bucket) { - PutObjectRequest request = PutObjectRequest.builder().bucket(bucket).key(path).build(); - AsyncRequestBody body = AsyncRequestBody.fromByteBuffersUnsafe(data); - writeS3Client.putObject(request, body).thenAccept(putObjectResponse -> { - cf.complete(null); - }).exceptionally(ex -> { - System.out.printf("[ Failed ] Write s3 object. PutObject for object %s fail with msg %s %n", path, ex.getMessage()); - cf.completeExceptionally(ex); - return null; - }); - } - - protected void readRange(S3AsyncClient readS3Client, String path, CompletableFuture cf, String bucket, - long start, long end) { - GetObjectRequest request = GetObjectRequest.builder().bucket(bucket).key(path).range(range(start, end)).build(); - readS3Client.getObject(request, AsyncResponseTransformer.toPublisher()) - .thenAccept(responsePublisher -> { - CompositeByteBuf buf = ByteBufAlloc.compositeByteBuffer(); - responsePublisher.subscribe((bytes) -> { - // the aws client will copy DefaultHttpContent to heap ByteBuffer - buf.addComponent(true, Unpooled.wrappedBuffer(bytes)); - }).thenAccept(v -> { - cf.complete(buf); - }); - }).exceptionally(ex -> { - cf.completeExceptionally(ex); - return null; - }); - } - - protected void deleteObject(S3AsyncClient deleteS3Client, String path, CompletableFuture cf, - String bucket) { - DeleteObjectRequest request = DeleteObjectRequest.builder().bucket(bucket).key(path).build(); - deleteS3Client.deleteObject(request).thenAccept(deleteObjectResponse -> { - cf.complete(null); - }).exceptionally(ex -> { - System.out.printf("[ FAILED ] Delete s3 object. 
Delete object %s fail with msg %s %n", path, ex.getMessage()); - cf.completeExceptionally(ex); - return null; - }); - } - - @Override - public void close() { - try { - CompletableFuture deleteCf = new CompletableFuture<>(); - deleteObject(client, path, deleteCf, bucketName); - deleteCf.get(); - } catch (InterruptedException | ExecutionException e) { - - System.out.println("[ FAILED ] Delete s3 object. NOTICE: please delete object " + path + " manually!!!"); - showErrorInfo(e); - throw new RuntimeException(e); - } finally { - super.close(); - } - System.out.println("[ OK ] Delete s3 object"); - - } - } - - public static class S3Context { - private final String endpoint; - private final List credentialsProviders; - private final String bucketName; - private final String region; - private final boolean forcePathStyle; - - public S3Context(String endpoint, List credentialsProviders, String bucketName, - String region, - boolean forcePathStyle) { - this.endpoint = endpoint; - this.credentialsProviders = credentialsProviders; - this.bucketName = bucketName; - this.region = region; - this.forcePathStyle = forcePathStyle; - } - - public static Builder builder() { - return new Builder(); - } - - public List advices() { - List advises = new ArrayList<>(); - if (StringUtils.isBlank(bucketName)) { - advises.add("bucketName is blank. Please supply a valid bucketName."); - } - if (StringUtils.isBlank(endpoint)) { - advises.add("endpoint is blank. Please supply a valid endpoint."); - } else { - if (endpoint.startsWith("https")) { - advises.add("You are using https endpoint. Please make sure your object storage service supports https."); - } - String[] splits = endpoint.split("//"); - if (splits.length < 2) { - advises.add("endpoint is invalid. Please supply a valid endpoint."); - } else { - String[] dotSplits = splits[1].split("\\."); - if (dotSplits.length == 0 || StringUtils.isBlank(dotSplits[0])) { - advises.add("endpoint is invalid. Please supply a valid endpoint."); - } else if (!StringUtils.isBlank(bucketName) && Objects.equals(bucketName.toLowerCase(), dotSplits[0].toLowerCase())) { - advises.add("bucket name should not be included in endpoint."); - } - } - } - if (credentialsProviders == null || credentialsProviders.isEmpty()) { - advises.add("no credentials provider is supplied. Please supply a credentials provider."); - } - try (AwsCredentialsProviderChain chain = AwsCredentialsProviderChain.builder().credentialsProviders(credentialsProviders).build()) { - chain.resolveCredentials(); - } catch (SdkClientException e) { - advises.add("all provided credentials providers are invalid. Please supply a valid credentials provider. Error msg: " + e.getMessage()); - } - if (StringUtils.isBlank(region)) { - advises.add("region is blank. Please supply a valid region."); - } - if (!forcePathStyle) { - advises.add("forcePathStyle is set as false. 
Please set it as true if you are using minio."); - } - return advises; - } - - @Override - public String toString() { - return "S3CheckContext{" + - "endpoint='" + endpoint + '\'' + - ", credentialsProviders=" + credentialsProviders + - ", bucketName='" + bucketName + '\'' + - ", region='" + region + '\'' + - ", forcePathStyle=" + forcePathStyle + - '}'; - } - - public static class Builder { - private String endpoint; - private List credentialsProviders; - private String bucketName; - private String region; - private boolean forcePathStyle; - - public Builder setEndpoint(String endpoint) { - this.endpoint = endpoint; - return this; - } - - public Builder setCredentialsProviders(List credentialsProviders) { - this.credentialsProviders = credentialsProviders; - return this; - } - - public Builder setBucketName(String bucketName) { - this.bucketName = bucketName; - return this; - } - - public Builder setRegion(String region) { - this.region = region; - return this; - } - - public Builder setForcePathStyle(boolean forcePathStyle) { - this.forcePathStyle = forcePathStyle; - return this; - } - - public S3Context build() { - return new S3Context(endpoint, credentialsProviders, bucketName, region, forcePathStyle); - } - } - - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/ThreadUtils.java b/s3stream/src/main/java/com/automq/stream/utils/ThreadUtils.java deleted file mode 100644 index 6b84a2c44..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/ThreadUtils.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicLong; -import org.slf4j.Logger; - -/** - * Utilities for working with threads. - */ -public class ThreadUtils { - /** - * Create a new ThreadFactory. - * - * @param pattern The pattern to use. If this contains %d, it will be - * replaced with a thread number. It should not contain more - * than one %d. - * @param daemon True if we want daemon threads. - * @return The new ThreadFactory. - */ - public static ThreadFactory createThreadFactory(final String pattern, - final boolean daemon) { - return new ThreadFactory() { - private final AtomicLong threadEpoch = new AtomicLong(0); - - @Override - public Thread newThread(Runnable r) { - String threadName; - if (pattern.contains("%d")) { - threadName = String.format(pattern, threadEpoch.addAndGet(1)); - } else { - threadName = pattern; - } - Thread thread = new Thread(r, threadName); - thread.setDaemon(daemon); - return thread; - } - }; - } - - public static Runnable wrapRunnable(Runnable runnable, Logger logger) { - return () -> { - try { - runnable.run(); - } catch (Throwable throwable) { - logger.error("[FATAL] Uncaught exception in executor thread {}", Thread.currentThread().getName(), throwable); - } - }; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/Threads.java b/s3stream/src/main/java/com/automq/stream/utils/Threads.java deleted file mode 100644 index 25df67a9d..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/Threads.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
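To show how the `S3Utils` checker above was typically invoked: a sketch with placeholder endpoint, bucket, and credentials (none of these values are real; `setForcePathStyle(true)` mirrors the MinIO advice in `advices()`):

```java
import java.util.List;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;

// Sketch: probe object-storage connectivity before starting the broker.
public class S3AccessCheckExample {
    public static void main(String[] args) {
        S3Utils.S3Context context = S3Utils.S3Context.builder()
                .setEndpoint("http://127.0.0.1:9000")   // e.g. a local MinIO
                .setRegion("us-east-1")
                .setBucketName("test-bucket")
                .setForcePathStyle(true)
                .setCredentialsProviders(List.of(
                        StaticCredentialsProvider.create(
                                AwsBasicCredentials.create("accessKey", "secretKey"))))
                .build();
        context.advices().forEach(advice -> System.out.println("NOTICE: " + advice));
        // Runs write/read/delete plus a multipart round-trip; exits the JVM on failure.
        S3Utils.checkS3Access(context);
    }
}
```

`advices()` performs static sanity checks (blank bucket or region, bucket name embedded in the endpoint, unresolvable credentials) before any network round-trip.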
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -import com.automq.stream.utils.threads.S3StreamThreadPoolMonitor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import org.slf4j.Logger; - -public class Threads { - - public static ExecutorService newFixedThreadPool(int nThreads, ThreadFactory threadFactory, Logger logger) { - return new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), threadFactory) { - @Override - protected void afterExecute(Runnable r, Throwable t) { - super.afterExecute(r, t); - if (t != null) { - logger.error("[FATAL] Uncaught exception in executor thread {}", Thread.currentThread().getName(), t); - } - } - }; - } - - public static ExecutorService newFixedThreadPoolWithMonitor(int nThreads, String namePrefix, boolean isDaemen, - Logger logger) { - return S3StreamThreadPoolMonitor.createAndMonitor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, namePrefix, isDaemen, Integer.MAX_VALUE, throwable -> { - if (throwable != null) { - logger.error("[FATAL] Uncaught exception in executor thread {}", Thread.currentThread().getName(), throwable); - } - return null; - }); - } - - public static ScheduledExecutorService newSingleThreadScheduledExecutor(String name, boolean daemon, - Logger logger) { - return newSingleThreadScheduledExecutor(ThreadUtils.createThreadFactory(name, true), logger, false, true); - } - - public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory, - Logger logger) { - return newSingleThreadScheduledExecutor(threadFactory, logger, false, true); - } - - public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory, - Logger logger, boolean removeOnCancelPolicy) { - return newSingleThreadScheduledExecutor(threadFactory, logger, removeOnCancelPolicy, true); - } - - public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory, - Logger logger, boolean removeOnCancelPolicy, boolean executeExistingDelayedTasksAfterShutdownPolicy) { - ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, threadFactory) { - @Override - public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { - command = ThreadUtils.wrapRunnable(command, logger); - return super.schedule(command, delay, unit); - } - - @Override - public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDelay, long period, - TimeUnit unit) { - command = ThreadUtils.wrapRunnable(command, logger); - return super.scheduleAtFixedRate(command, initialDelay, period, unit); - } - - @Override - public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, - TimeUnit unit) { - command = ThreadUtils.wrapRunnable(command, logger); - return super.scheduleWithFixedDelay(command, initialDelay, delay, unit); - } - }; - executor.setRemoveOnCancelPolicy(removeOnCancelPolicy); 
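-        // Both policies are caller-controlled: whether cancelled tasks leave the queue
-        // immediately, and whether already-queued delayed tasks still run after shutdown().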
- executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(executeExistingDelayedTasksAfterShutdownPolicy); - return executor; - } - - public static boolean sleep(long millis) { - try { - Thread.sleep(millis); - } catch (InterruptedException e) { - // ignore - return true; - } - return false; - } - -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/ThrowableRunnable.java b/s3stream/src/main/java/com/automq/stream/utils/ThrowableRunnable.java deleted file mode 100644 index 16f0926b4..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/ThrowableRunnable.java +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -public interface ThrowableRunnable { - void run() throws Throwable; -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/Utils.java b/s3stream/src/main/java/com/automq/stream/utils/Utils.java deleted file mode 100644 index 5436e0ec6..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/Utils.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils; - -public class Utils { - public static final String MAX_MERGE_READ_SPARSITY_RATE_NAME = "MERGE_READ_SPARSITY_RATE"; - - public static float getMaxMergeReadSparsityRate() { - float rate; - try { - rate = Float.parseFloat(System.getenv(MAX_MERGE_READ_SPARSITY_RATE_NAME)); - } catch (Exception e) { - rate = 0.5f; - } - return rate; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/AbstractOrderedCollection.java b/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/AbstractOrderedCollection.java deleted file mode 100644 index 2cb0f0157..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/AbstractOrderedCollection.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
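A sketch of why `Threads.newSingleThreadScheduledExecutor` above wraps every command via `ThreadUtils.wrapRunnable` (the task and period are illustrative):

```java
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch: with a plain ScheduledThreadPoolExecutor, the first throw below would
// silently cancel the periodic task; the wrapped variant logs it and keeps going.
public class ThreadsExample {
    private static final Logger LOGGER = LoggerFactory.getLogger(ThreadsExample.class);

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler =
                Threads.newSingleThreadScheduledExecutor("example-scheduler", true, LOGGER);
        scheduler.scheduleAtFixedRate(() -> {
            throw new IllegalStateException("boom");
        }, 0, 100, TimeUnit.MILLISECONDS);
        Thread.sleep(350); // observe several "[FATAL] Uncaught exception" log lines
        scheduler.shutdownNow();
    }
}
```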
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils.biniarysearch; - -public abstract class AbstractOrderedCollection { - - protected abstract int size(); - - protected abstract ComparableItem get(int index); - - public int search(T target) { - int low = 0; - int high = size() - 1; - while (low <= high) { - int mid = low + ((high - low) >>> 1); - ComparableItem midVal = get(mid); - if (midVal.isLessThan(target)) { - low = mid + 1; - } else if (midVal.isGreaterThan(target)) { - high = mid - 1; - } else { - low = mid; - break; - } - } - if (low > high) { - return -1; - } - return low; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/ComparableItem.java b/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/ComparableItem.java deleted file mode 100644 index 054cfa6ba..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/ComparableItem.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils.biniarysearch; - -public interface ComparableItem { - boolean isLessThan(T value); - - boolean isGreaterThan(T value); -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java b/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java deleted file mode 100644 index c64d0a24f..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
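A concrete `AbstractOrderedCollection` showing the binary-search contract above (`SortedLongs` is an illustrative wrapper, not part of the removed code; generic parameters appear stripped in this rendering of the deleted sources):

```java
import java.util.List;

// Sketch: binary search over a sorted list of Longs via the removed helpers.
public class SortedLongs extends AbstractOrderedCollection<Long> {
    private final List<Long> values;

    public SortedLongs(List<Long> values) {
        this.values = values; // must be sorted ascending
    }

    @Override
    protected int size() {
        return values.size();
    }

    @Override
    protected ComparableItem<Long> get(int index) {
        long v = values.get(index);
        return new ComparableItem<Long>() {
            @Override
            public boolean isLessThan(Long target) {
                return v < target;
            }

            @Override
            public boolean isGreaterThan(Long target) {
                return v > target;
            }
        };
    }

    public static void main(String[] args) {
        SortedLongs longs = new SortedLongs(List.of(10L, 20L, 30L));
        System.out.println(longs.search(20L)); // 1  (index of the match)
        System.out.println(longs.search(25L)); // -1 (no element satisfies both bounds)
    }
}
```

`search` returns the index of a matching element, or -1 when every element is strictly less than or strictly greater than the target.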
diff --git a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java b/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java
deleted file mode 100644
index c64d0a24f..000000000
--- a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/IndexBlockOrderedBytes.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.utils.biniarysearch;
-
-import com.automq.stream.s3.DataBlockIndex;
-import com.automq.stream.s3.ObjectReader;
-import java.util.Objects;
-
-public class IndexBlockOrderedBytes extends AbstractOrderedCollection<IndexBlockOrderedBytes.TargetStreamOffset> {
-    private final ObjectReader.IndexBlock indexBlock;
-
-    public IndexBlockOrderedBytes(ObjectReader.IndexBlock indexBlock) {
-        this.indexBlock = indexBlock;
-    }
-
-    @Override
-    protected int size() {
-        return this.indexBlock.count();
-    }
-
-    @Override
-    protected ComparableItem<TargetStreamOffset> get(int index) {
-        return new ComparableStreamRange(indexBlock.get(index));
-    }
-
-    public static final class TargetStreamOffset {
-        private final long streamId;
-        private final long offset;
-
-        public TargetStreamOffset(long streamId, long offset) {
-            this.streamId = streamId;
-            this.offset = offset;
-        }
-
-        public long streamId() {
-            return streamId;
-        }
-
-        public long offset() {
-            return offset;
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            if (obj == this)
-                return true;
-            if (obj == null || obj.getClass() != this.getClass())
-                return false;
-            var that = (TargetStreamOffset) obj;
-            return this.streamId == that.streamId &&
-                this.offset == that.offset;
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(streamId, offset);
-        }
-
-        @Override
-        public String toString() {
-            return "TargetStreamOffset[" +
-                "streamId=" + streamId + ", " +
-                "offset=" + offset + ']';
-        }
-
-    }
-
-    private static final class ComparableStreamRange
-        implements ComparableItem<TargetStreamOffset> {
-        private final DataBlockIndex index;
-
-        private ComparableStreamRange(DataBlockIndex index) {
-            this.index = index;
-        }
-
-        public long endOffset() {
-            return index.endOffset();
-        }
-
-        @Override
-        public boolean isLessThan(TargetStreamOffset value) {
-            if (this.index().streamId() < value.streamId) {
-                return true;
-            } else if (this.index().streamId() > value.streamId) {
-                return false;
-            } else {
-                return this.endOffset() <= value.offset;
-            }
-        }
-
-        @Override
-        public boolean isGreaterThan(TargetStreamOffset value) {
-            if (this.index().streamId() > value.streamId) {
-                return true;
-            } else if (this.index().streamId() < value.streamId) {
-                return false;
-            } else {
-                return this.index().startOffset() > value.offset;
-            }
-        }
-
-        public DataBlockIndex index() {
-            return index;
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            if (obj == this)
-                return true;
-            if (obj == null || obj.getClass() != this.getClass())
-                return false;
-            var that = (ComparableStreamRange) obj;
-            return Objects.equals(this.index, that.index);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(index);
-        }
-
-        @Override
-        public String toString() {
-            return "ComparableStreamRange[" +
-                "index=" + index + ']';
-        }
-
-    }
-}
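The two predicates above give index entries a total order by (streamId, offset range): an entry is "less than" the target once its end offset is at or before the target offset, and "greater than" once its start offset is past it, so the inherited search lands on the block whose range contains the target. A simplified stand-in illustrating just the offset half of that logic (the Range record is hypothetical):

    // Hypothetical stand-in mirroring ComparableStreamRange's predicates for one stream.
    record Range(long startOffset, long endOffset) {
        boolean isLessThan(long target) {
            return endOffset <= target;   // block ends at or before the target offset
        }

        boolean isGreaterThan(long target) {
            return startOffset > target;  // block starts past the target offset
        }
    }
    // For blocks [10, 20) and [20, 30), target offset 20 skips the first block
    // (20 <= 20, "less than") and matches the second (20 <= 20 < 30).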
diff --git a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/StreamRecordBatchList.java b/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/StreamRecordBatchList.java
deleted file mode 100644
index d791daf9f..000000000
--- a/s3stream/src/main/java/com/automq/stream/utils/biniarysearch/StreamRecordBatchList.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.utils.biniarysearch;
-
-import com.automq.stream.s3.model.StreamRecordBatch;
-
-import java.util.List;
-
-public class StreamRecordBatchList extends AbstractOrderedCollection<Long> {
-
-    private final List<StreamRecordBatch> records;
-    private final int size;
-
-    public StreamRecordBatchList(List<StreamRecordBatch> records) {
-        this.records = records;
-        this.size = records.size();
-    }
-
-    @Override
-    public int size() {
-        return size;
-    }
-
-    @Override
-    protected ComparableItem<Long> get(int index) {
-        return records.get(index);
-    }
-
-}
diff --git a/s3stream/src/main/java/com/automq/stream/utils/threads/S3StreamThreadPoolMonitor.java b/s3stream/src/main/java/com/automq/stream/utils/threads/S3StreamThreadPoolMonitor.java
deleted file mode 100644
index d3ca6a730..000000000
--- a/s3stream/src/main/java/com/automq/stream/utils/threads/S3StreamThreadPoolMonitor.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.utils.threads;
-
-import com.automq.stream.utils.ThreadUtils;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class S3StreamThreadPoolMonitor {
-    private static final List<ThreadPoolWrapper> MONITOR_EXECUTOR = new CopyOnWriteArrayList<>();
-    private static final ScheduledExecutorService MONITOR_SCHEDULED = Executors.newSingleThreadScheduledExecutor(
-        ThreadUtils.createThreadFactory("ThreadPoolMonitor-%d", true));
-    private static Logger waterMarkLogger = LoggerFactory.getLogger(S3StreamThreadPoolMonitor.class);
-    private static volatile long threadPoolStatusPeriodTime = TimeUnit.SECONDS.toMillis(3);
-
-    public static void config(Logger waterMarkLoggerConfig, long threadPoolStatusPeriodTimeConfig) {
-        waterMarkLogger = waterMarkLoggerConfig;
-        threadPoolStatusPeriodTime = threadPoolStatusPeriodTimeConfig;
-    }
-
-    public static ThreadPoolExecutor createAndMonitor(int corePoolSize,
-        int maximumPoolSize,
-        long keepAliveTime,
-        TimeUnit unit,
-        String name,
-        boolean isDaemon,
-        int queueCapacity) {
-        return createAndMonitor(corePoolSize, maximumPoolSize, keepAliveTime, unit, name, isDaemon, queueCapacity, throwable -> null);
-    }
-
-    public static ThreadPoolExecutor createAndMonitor(int corePoolSize,
-        int maximumPoolSize,
-        long keepAliveTime,
-        TimeUnit unit,
-        String name,
-        boolean isDaemon,
-        int queueCapacity,
-        Function<Throwable, Void> afterExecutionHook) {
-        return createAndMonitor(corePoolSize, maximumPoolSize, keepAliveTime, unit, name, isDaemon, queueCapacity, afterExecutionHook, Collections.emptyList());
-    }
-
-    public static ThreadPoolExecutor createAndMonitor(int corePoolSize,
-        int
 maximumPoolSize,
-        long keepAliveTime,
-        TimeUnit unit,
-        String name,
-        boolean isDaemon,
-        int queueCapacity,
-        Function<Throwable, Void> afterExecutionHook,
-        ThreadPoolStatusMonitor... threadPoolStatusMonitors) {
-        return createAndMonitor(corePoolSize, maximumPoolSize, keepAliveTime, unit, name, isDaemon, queueCapacity, afterExecutionHook,
-            List.of(threadPoolStatusMonitors));
-    }
-
-    public static ThreadPoolExecutor createAndMonitor(int corePoolSize,
-        int maximumPoolSize,
-        long keepAliveTime,
-        TimeUnit unit,
-        String name,
-        boolean isDaemon,
-        int queueCapacity,
-        Function<Throwable, Void> afterExecutionHook,
-        List<ThreadPoolStatusMonitor> threadPoolStatusMonitors) {
-        ThreadPoolExecutor executor = new ThreadPoolExecutor(
-            corePoolSize,
-            maximumPoolSize,
-            keepAliveTime,
-            unit,
-            new LinkedBlockingQueue<>(queueCapacity),
-            ThreadUtils.createThreadFactory(name + "-%d", isDaemon),
-            new ThreadPoolExecutor.DiscardOldestPolicy()) {
-            @Override
-            protected void afterExecute(Runnable r, Throwable t) {
-                super.afterExecute(r, t);
-                afterExecutionHook.apply(t);
-            }
-        };
-        List<ThreadPoolStatusMonitor> printers = new ArrayList<>();
-        printers.add(new ThreadPoolQueueSizeMonitor(queueCapacity));
-        printers.addAll(threadPoolStatusMonitors);
-
-        MONITOR_EXECUTOR.add(ThreadPoolWrapper.builder()
-            .name(name)
-            .threadPoolExecutor(executor)
-            .statusPrinters(printers)
-            .build());
-        return executor;
-    }
-
-    public static void logThreadPoolStatus() {
-        for (ThreadPoolWrapper threadPoolWrapper : MONITOR_EXECUTOR) {
-            List<ThreadPoolStatusMonitor> monitors = threadPoolWrapper.getStatusPrinters();
-            for (ThreadPoolStatusMonitor monitor : monitors) {
-                double value = monitor.value(threadPoolWrapper.getThreadPoolExecutor());
-                waterMarkLogger.info("\t{}\t{}\t{}", threadPoolWrapper.getName(),
-                    monitor.describe(),
-                    value);
-            }
-        }
-    }
-
-    public static void init() {
-        MONITOR_SCHEDULED.scheduleAtFixedRate(S3StreamThreadPoolMonitor::logThreadPoolStatus, 20,
-            threadPoolStatusPeriodTime, TimeUnit.MILLISECONDS);
-    }
-
-    public static void shutdown() {
-        MONITOR_SCHEDULED.shutdown();
-    }
-}
\ No newline at end of file
diff --git a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolQueueSizeMonitor.java b/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolQueueSizeMonitor.java
deleted file mode 100644
index de312cc33..000000000
--- a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolQueueSizeMonitor.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
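For context, a minimal usage sketch of the monitor above; the pool sizing values here are arbitrary:

    import com.automq.stream.utils.threads.S3StreamThreadPoolMonitor;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class MonitorExample {
        public static void main(String[] args) {
            // Create a pool whose queue depth is periodically logged by the monitor.
            ThreadPoolExecutor pool = S3StreamThreadPoolMonitor.createAndMonitor(
                2, 4, 60, TimeUnit.SECONDS, "example-pool", true, 1024);
            S3StreamThreadPoolMonitor.init(); // start the periodic status logging
            pool.execute(() -> System.out.println("task"));
            pool.shutdown();
            S3StreamThreadPoolMonitor.shutdown();
        }
    }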
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils.threads; - -import java.util.concurrent.ThreadPoolExecutor; - -public class ThreadPoolQueueSizeMonitor implements ThreadPoolStatusMonitor { - - private final int maxQueueCapacity; - - public ThreadPoolQueueSizeMonitor(int maxQueueCapacity) { - this.maxQueueCapacity = maxQueueCapacity; - } - - @Override - public String describe() { - return "queueSize"; - } - - @Override - public double value(ThreadPoolExecutor executor) { - return executor.getQueue().size(); - } - - @Override - public boolean needPrintJstack(ThreadPoolExecutor executor, double value) { - return value > maxQueueCapacity * 0.85; - } -} diff --git a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolStatusMonitor.java b/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolStatusMonitor.java deleted file mode 100644 index fa2c6eb2e..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolStatusMonitor.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.utils.threads; - -import java.util.concurrent.ThreadPoolExecutor; - -public interface ThreadPoolStatusMonitor { - - String describe(); - - double value(ThreadPoolExecutor executor); - - boolean needPrintJstack(ThreadPoolExecutor executor, double value); -} \ No newline at end of file diff --git a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolWrapper.java b/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolWrapper.java deleted file mode 100644 index d424e3fcd..000000000 --- a/s3stream/src/main/java/com/automq/stream/utils/threads/ThreadPoolWrapper.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
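A custom ThreadPoolStatusMonitor is only a few lines; a hypothetical active-thread monitor in the same style as ThreadPoolQueueSizeMonitor above:

    import java.util.concurrent.ThreadPoolExecutor;
    import com.automq.stream.utils.threads.ThreadPoolStatusMonitor;

    // Hypothetical: reports active thread count, requests a jstack dump when saturated.
    public class ActiveCountMonitor implements ThreadPoolStatusMonitor {
        @Override
        public String describe() {
            return "activeCount";
        }

        @Override
        public double value(ThreadPoolExecutor executor) {
            return executor.getActiveCount();
        }

        @Override
        public boolean needPrintJstack(ThreadPoolExecutor executor, double value) {
            return value >= executor.getMaximumPoolSize();
        }
    }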
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.utils.threads;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Objects;
-import java.util.List;
-import java.util.concurrent.ThreadPoolExecutor;
-
-public class ThreadPoolWrapper {
-    private String name;
-    private ThreadPoolExecutor threadPoolExecutor;
-    private List<ThreadPoolStatusMonitor> statusPrinters;
-
-    ThreadPoolWrapper(final String name, final ThreadPoolExecutor threadPoolExecutor,
-        final List<ThreadPoolStatusMonitor> statusPrinters) {
-        this.name = name;
-        this.threadPoolExecutor = threadPoolExecutor;
-        this.statusPrinters = statusPrinters;
-    }
-
-    public static ThreadPoolWrapperBuilder builder() {
-        return new ThreadPoolWrapperBuilder();
-    }
-
-    public String getName() {
-        return this.name;
-    }
-
-    public void setName(final String name) {
-        this.name = name;
-    }
-
-    public ThreadPoolExecutor getThreadPoolExecutor() {
-        return this.threadPoolExecutor;
-    }
-
-    public void setThreadPoolExecutor(final ThreadPoolExecutor threadPoolExecutor) {
-        this.threadPoolExecutor = threadPoolExecutor;
-    }
-
-    public List<ThreadPoolStatusMonitor> getStatusPrinters() {
-        return this.statusPrinters;
-    }
-
-    public void setStatusPrinters(final List<ThreadPoolStatusMonitor> statusPrinters) {
-        this.statusPrinters = statusPrinters;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o)
-            return true;
-        if (o == null || getClass() != o.getClass())
-            return false;
-        ThreadPoolWrapper wrapper = (ThreadPoolWrapper) o;
-        return Objects.equal(name, wrapper.name) && Objects.equal(threadPoolExecutor, wrapper.threadPoolExecutor) && Objects.equal(statusPrinters, wrapper.statusPrinters);
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(name, threadPoolExecutor, statusPrinters);
-    }
-
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-            .add("name", name)
-            .add("threadPoolExecutor", threadPoolExecutor)
-            .add("statusPrinters", statusPrinters)
-            .toString();
-    }
-
-    public static class ThreadPoolWrapperBuilder {
-        private String name;
-        private ThreadPoolExecutor threadPoolExecutor;
-        private List<ThreadPoolStatusMonitor> statusPrinters;
-
-        ThreadPoolWrapperBuilder() {
-        }
-
-        public ThreadPoolWrapperBuilder name(final String name) {
-            this.name = name;
-            return this;
-        }
-
-        public ThreadPoolWrapperBuilder threadPoolExecutor(
-            final ThreadPoolExecutor threadPoolExecutor) {
-            this.threadPoolExecutor = threadPoolExecutor;
-            return this;
-        }
-
-        public ThreadPoolWrapperBuilder statusPrinters(
-            final List<ThreadPoolStatusMonitor> statusPrinters) {
-            this.statusPrinters = statusPrinters;
-            return this;
-        }
-
-        public ThreadPoolWrapper build() {
-            return new ThreadPoolWrapper(this.name, this.threadPoolExecutor, this.statusPrinters);
-        }
-
-        @Override
-        public String toString() {
-            return "ThreadPoolWrapper.ThreadPoolWrapperBuilder(name=" + this.name + ", threadPoolExecutor=" + this.threadPoolExecutor + ", statusPrinters=" + this.statusPrinters + ")";
-        }
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/ByteBufSeqAllocTest.java b/s3stream/src/test/java/com/automq/stream/ByteBufSeqAllocTest.java
deleted file mode 100644
index 1a398ab4e..000000000
--- a/s3stream/src/test/java/com/automq/stream/ByteBufSeqAllocTest.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ -package com.automq.stream; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class ByteBufSeqAllocTest { - - @Test - public void testAlloc() { - ByteBufSeqAlloc alloc = new ByteBufSeqAlloc(0, 1); - - AtomicReference bufRef = alloc.hugeBufArray[Math.abs(Thread.currentThread().hashCode() % alloc.hugeBufArray.length)]; - - ByteBuf buf1 = alloc.byteBuffer(12); - buf1.writeLong(1); - buf1.writeInt(2); - - ByteBuf buf2 = alloc.byteBuffer(20); - buf2.writeLong(3); - buf2.writeInt(4); - buf2.writeLong(5); - - ByteBuf buf3 = alloc.byteBuffer(ByteBufSeqAlloc.HUGE_BUF_SIZE - 12 - 20 - 4); - - ByteBuf oldHugeBuf = bufRef.get().buf; - - ByteBuf buf4 = alloc.byteBuffer(16); - buf4.writeLong(6); - buf4.writeLong(7); - - assertTrue(oldHugeBuf != bufRef.get().buf); - - assertEquals(1, buf1.readLong()); - assertEquals(2, buf1.readInt()); - assertEquals(3, buf2.readLong()); - assertEquals(4, buf2.readInt()); - assertEquals(5, buf2.readLong()); - assertInstanceOf(CompositeByteBuf.class, buf4); - assertEquals(6, buf4.readLong()); - assertEquals(7, buf4.readLong()); - - buf1.release(); - buf2.release(); - buf3.release(); - buf4.release(); - assertEquals(0, oldHugeBuf.refCnt()); - assertEquals(1, bufRef.get().buf.refCnt()); - - ByteBuf oldHugeBuf2 = bufRef.get().buf; - - alloc.byteBuffer(ByteBufSeqAlloc.HUGE_BUF_SIZE - 12).release(); - alloc.byteBuffer(12).release(); - assertEquals(0, oldHugeBuf2.refCnt()); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/AsyncNetworkBandwidthLimiterTest.java b/s3stream/src/test/java/com/automq/stream/s3/AsyncNetworkBandwidthLimiterTest.java deleted file mode 100644 index 4bb289e61..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/AsyncNetworkBandwidthLimiterTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter; -import com.automq.stream.s3.network.ThrottleStrategy; -import java.util.concurrent.CompletableFuture; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class AsyncNetworkBandwidthLimiterTest { - - @Test - public void testByPassConsume() { - AsyncNetworkBandwidthLimiter bucket = new AsyncNetworkBandwidthLimiter(AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 5000, 100); - CompletableFuture cf = bucket.consume(ThrottleStrategy.BYPASS, 1); - Assertions.assertEquals(9, bucket.getAvailableTokens()); - Assertions.assertTrue(cf.isDone()); - } - - @Test - public void testByPassConsume2() { - AsyncNetworkBandwidthLimiter bucket = new AsyncNetworkBandwidthLimiter(AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 1000, 100); - CompletableFuture cf = bucket.consume(ThrottleStrategy.BYPASS, 20); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - cf.whenComplete((v, e) -> { - Assertions.assertNull(e); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - }); - cf.join(); - } - - @Test - public void testThrottleConsume() { - AsyncNetworkBandwidthLimiter bucket = new AsyncNetworkBandwidthLimiter(AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 1000, 100); - CompletableFuture cf = bucket.consume(ThrottleStrategy.THROTTLE_1, 1); - Assertions.assertEquals(9, bucket.getAvailableTokens()); - Assertions.assertTrue(cf.isDone()); - } - - @Test - public void testThrottleConsume2() { - AsyncNetworkBandwidthLimiter bucket = new AsyncNetworkBandwidthLimiter(AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 1000, 100); - CompletableFuture cf = bucket.consume(ThrottleStrategy.THROTTLE_1, 20); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - cf.whenComplete((v, e) -> { - Assertions.assertNull(e); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - }); - cf.join(); - } - - @Test - public void testThrottleConsume3() { - AsyncNetworkBandwidthLimiter bucket = new AsyncNetworkBandwidthLimiter(AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 1000, 100); - CompletableFuture cf = bucket.consume(ThrottleStrategy.BYPASS, 20); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - Assertions.assertTrue(cf.isDone()); - cf = bucket.consume(ThrottleStrategy.THROTTLE_1, 10); - Assertions.assertEquals(-10, bucket.getAvailableTokens()); - cf.whenComplete((v, e) -> { - Assertions.assertNull(e); - Assertions.assertEquals(0, bucket.getAvailableTokens()); - }); - cf.join(); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatch.java b/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatch.java deleted file mode 100644 index 85d5622f6..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatch.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
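For context, the consumption pattern the limiter tests above exercise, as a minimal sketch (constructor arguments mirror the tests; consume is assumed to return CompletableFuture<Void>, as the raw types in the extracted test code suggest):

    import com.automq.stream.s3.network.AsyncNetworkBandwidthLimiter;
    import com.automq.stream.s3.network.ThrottleStrategy;
    import java.util.concurrent.CompletableFuture;

    public class LimiterExample {
        public static void main(String[] args) {
            AsyncNetworkBandwidthLimiter limiter = new AsyncNetworkBandwidthLimiter(
                AsyncNetworkBandwidthLimiter.Type.INBOUND, 10, 1000, 100);
            // BYPASS completes immediately even when the token count goes negative;
            // THROTTLE_1 may complete later, once tokens have been refilled.
            CompletableFuture<Void> cf = limiter.consume(ThrottleStrategy.THROTTLE_1, 20);
            cf.join();
        }
    }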
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.RecordBatch; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.Map; -import java.util.Random; - -public class DefaultRecordBatch implements RecordBatch { - int count; - ByteBuffer payload; - - public static RecordBatch of(int count, int size) { - DefaultRecordBatch record = new DefaultRecordBatch(); - record.count = count; - byte[] bytes = new byte[size]; - new Random().nextBytes(bytes); - record.payload = ByteBuffer.wrap(bytes); - return record; - } - - @Override - public int count() { - return count; - } - - @Override - public long baseTimestamp() { - return 0; - } - - @Override - public Map properties() { - return Collections.emptyMap(); - } - - @Override - public ByteBuffer rawPayload() { - return payload.duplicate(); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatchWithContext.java b/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatchWithContext.java deleted file mode 100644 index 739ed6f56..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/DefaultRecordBatchWithContext.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.RecordBatch; -import com.automq.stream.api.RecordBatchWithContext; -import java.nio.ByteBuffer; -import java.util.Map; - -public class DefaultRecordBatchWithContext implements RecordBatchWithContext { - private final RecordBatch recordBatch; - private final long baseOffset; - - public DefaultRecordBatchWithContext(RecordBatch recordBatch, long baseOffset) { - this.recordBatch = recordBatch; - this.baseOffset = baseOffset; - } - - @Override - public long baseOffset() { - return baseOffset; - } - - @Override - public long lastOffset() { - return baseOffset + recordBatch.count(); - } - - @Override - public int count() { - return recordBatch.count(); - } - - @Override - public long baseTimestamp() { - return recordBatch.baseTimestamp(); - } - - @Override - public Map properties() { - return recordBatch.properties(); - } - - @Override - public ByteBuffer rawPayload() { - return recordBatch.rawPayload(); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/DefaultS3BlockCacheTest.java b/s3stream/src/test/java/com/automq/stream/s3/DefaultS3BlockCacheTest.java deleted file mode 100644 index e63b7237d..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/DefaultS3BlockCacheTest.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.cache.CacheAccessType; -import com.automq.stream.s3.cache.DefaultS3BlockCache; -import com.automq.stream.s3.cache.ReadDataBlock; -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.utils.Threads; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@Tag("S3Unit") -public class DefaultS3BlockCacheTest { - ObjectManager objectManager; - S3Operator s3Operator; - DefaultS3BlockCache s3BlockCache; - Config config; - - @BeforeEach - public void setup() { - config = new Config(); - config.blockCacheSize(0); - objectManager = Mockito.mock(ObjectManager.class); - s3Operator = new MemoryS3Operator(); - s3BlockCache = new DefaultS3BlockCache(config, objectManager, s3Operator); - } - - @Test - public void testRead() throws Exception { - ObjectWriter objectWriter = ObjectWriter.writer(0, s3Operator, 1024, 1024); - objectWriter.write(233, List.of( - newRecord(233, 10, 5, 512), - newRecord(233, 15, 10, 512), - newRecord(233, 25, 5, 512) - )); - objectWriter.write(234, List.of(newRecord(234, 0, 5, 512))); - objectWriter.close(); - S3ObjectMetadata metadata1 = new S3ObjectMetadata(0, objectWriter.size(), S3ObjectType.STREAM_SET); - - objectWriter = ObjectWriter.writer(1, s3Operator, 1024, 1024); - objectWriter.write(233, List.of(newRecord(233, 30, 10, 512))); - objectWriter.close(); - S3ObjectMetadata metadata2 = new S3ObjectMetadata(1, objectWriter.size(), S3ObjectType.STREAM_SET); - - objectWriter = ObjectWriter.writer(2, s3Operator, 1024, 1024); - objectWriter.write(233, List.of(newRecord(233, 40, 20, 512))); - objectWriter.close(); - S3ObjectMetadata metadata3 = new S3ObjectMetadata(2, objectWriter.size(), S3ObjectType.STREAM_SET); - - when(objectManager.getObjects(eq(233L), eq(11L), eq(60L), eq(2))).thenReturn(CompletableFuture.completedFuture(List.of( - metadata1, metadata2 - ))); - when(objectManager.getObjects(eq(233L), eq(40L), eq(60L), eq(2))).thenReturn(CompletableFuture.completedFuture(List.of( - metadata3 - ))); - - ReadDataBlock rst = s3BlockCache.read(233L, 11L, 60L, 10000).get(3000, TimeUnit.SECONDS); - assertEquals(5, rst.getRecords().size()); - assertEquals(10, rst.getRecords().get(0).getBaseOffset()); - assertEquals(60, rst.getRecords().get(4).getLastOffset()); - } - - @Test - public void testRead_readAhead() throws ExecutionException, 
InterruptedException { - objectManager = Mockito.mock(ObjectManager.class); - s3Operator = Mockito.spy(new MemoryS3Operator()); - config.blockCacheSize(1024 * 1024); - s3BlockCache = new DefaultS3BlockCache(config, objectManager, s3Operator); - - ObjectWriter objectWriter = ObjectWriter.writer(0, s3Operator, 1024, 1024); - objectWriter.write(233, List.of( - newRecord(233, 10, 5, 512), - newRecord(233, 15, 5, 4096) - )); - objectWriter.close(); - S3ObjectMetadata metadata1 = new S3ObjectMetadata(0, objectWriter.size(), S3ObjectType.STREAM_SET); - - objectWriter = ObjectWriter.writer(1, s3Operator, 1024, 1024); - objectWriter.write(233, List.of(newRecord(233, 20, 10, 512))); - objectWriter.close(); - S3ObjectMetadata metadata2 = new S3ObjectMetadata(1, objectWriter.size(), S3ObjectType.STREAM_SET); - - when(objectManager.getObjects(eq(233L), eq(10L), eq(11L), eq(2))).thenReturn(CompletableFuture.completedFuture(List.of(metadata1))); - - s3BlockCache.read(233L, 10L, 11L, 10000).get(); - // range read index and range read data - verify(s3Operator, Mockito.times(2)).rangeRead(eq(ObjectUtils.genKey(0, 0)), ArgumentMatchers.anyLong(), ArgumentMatchers.anyLong(), ArgumentMatchers.any()); - verify(s3Operator, Mockito.times(0)).rangeRead(eq(ObjectUtils.genKey(0, 1)), ArgumentMatchers.anyLong(), ArgumentMatchers.anyLong(), ArgumentMatchers.any()); - // trigger read ahead - when(objectManager.getObjects(eq(233L), eq(20L), eq(-1L), eq(2))).thenReturn(CompletableFuture.completedFuture(List.of(metadata2))); - when(objectManager.getObjects(eq(233L), eq(30L), eq(-1L), eq(2))).thenReturn(CompletableFuture.completedFuture(Collections.emptyList())); - s3BlockCache.read(233L, 15L, 16L, 10000).get(); - verify(s3Operator, timeout(1000).times(2)).rangeRead(eq(ObjectUtils.genKey(0, 1)), ArgumentMatchers.anyLong(), ArgumentMatchers.anyLong(), ArgumentMatchers.any()); - verify(objectManager, timeout(1000).times(1)).getObjects(eq(233L), eq(30L), eq(-1L), eq(2)); - - Threads.sleep(1000); - - // expect read ahead already cached the records - ReadDataBlock ret = s3BlockCache.read(233L, 20L, 30L, 10000).get(); - assertEquals(CacheAccessType.BLOCK_CACHE_HIT, ret.getCacheAccessType()); - List records = ret.getRecords(); - assertEquals(1, records.size()); - assertEquals(20L, records.get(0).getBaseOffset()); - } - - StreamRecordBatch newRecord(long streamId, long offset, int count, int payloadSize) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(payloadSize)); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/DeltaWALUploadTaskTest.java b/s3stream/src/test/java/com/automq/stream/s3/DeltaWALUploadTaskTest.java deleted file mode 100644 index 8925e296d..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/DeltaWALUploadTaskTest.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.CommitStreamSetObjectRequest; -import com.automq.stream.s3.objects.CommitStreamSetObjectResponse; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.utils.CloseableIterator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ForkJoinPool; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; - -import static com.automq.stream.s3.TestUtils.random; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@Tag("S3Unit") -public class DeltaWALUploadTaskTest { - ObjectManager objectManager; - S3Operator s3Operator; - DeltaWALUploadTask deltaWALUploadTask; - - @BeforeEach - public void setup() { - objectManager = mock(ObjectManager.class); - s3Operator = new MemoryS3Operator(); - } - - @Test - public void testUpload() throws Exception { - AtomicLong objectIdAlloc = new AtomicLong(10); - doAnswer(invocation -> CompletableFuture.completedFuture(objectIdAlloc.getAndIncrement())).when(objectManager).prepareObject(anyInt(), anyLong()); - when(objectManager.commitStreamSetObject(any())).thenReturn(CompletableFuture.completedFuture(new CommitStreamSetObjectResponse())); - - Map> map = new HashMap<>(); - map.put(233L, List.of( - new StreamRecordBatch(233, 0, 10, 2, random(512)), - new StreamRecordBatch(233, 0, 12, 2, random(128)), - new StreamRecordBatch(233, 0, 14, 2, random(512)) - )); - map.put(234L, List.of( - new StreamRecordBatch(234, 0, 20, 2, random(128)), - new StreamRecordBatch(234, 0, 22, 2, random(128)) - )); - - Config config = new Config() - .objectBlockSize(16 * 1024 * 1024) - .objectPartSize(16 * 1024 * 1024) - .streamSplitSize(1000); - deltaWALUploadTask = DeltaWALUploadTask.builder().config(config).streamRecordsMap(map).objectManager(objectManager) - .s3Operator(s3Operator).executor(ForkJoinPool.commonPool()).build(); - - deltaWALUploadTask.prepare().get(); - deltaWALUploadTask.upload().get(); - deltaWALUploadTask.commit().get(); - - // Release all the buffers - map.values().forEach(batches -> batches.forEach(StreamRecordBatch::release)); - - ArgumentCaptor reqArg = ArgumentCaptor.forClass(CommitStreamSetObjectRequest.class); - 
verify(objectManager, times(1)).commitStreamSetObject(reqArg.capture()); - // expect - // - stream233 split - // - stream234 write to one stream range - CommitStreamSetObjectRequest request = reqArg.getValue(); - assertEquals(10, request.getObjectId()); - assertEquals(1, request.getStreamRanges().size()); - assertEquals(234, request.getStreamRanges().get(0).getStreamId()); - assertEquals(20, request.getStreamRanges().get(0).getStartOffset()); - assertEquals(24, request.getStreamRanges().get(0).getEndOffset()); - - assertEquals(1, request.getStreamObjects().size()); - StreamObject streamObject = request.getStreamObjects().get(0); - assertEquals(233, streamObject.getStreamId()); - assertEquals(11, streamObject.getObjectId()); - assertEquals(10, streamObject.getStartOffset()); - assertEquals(16, streamObject.getEndOffset()); - - { - S3ObjectMetadata s3ObjectMetadata = new S3ObjectMetadata(request.getObjectId(), request.getObjectSize(), S3ObjectType.STREAM_SET); - ObjectReader objectReader = new ObjectReader(s3ObjectMetadata, s3Operator); - DataBlockIndex blockIndex = objectReader.find(234, 20, 24).get() - .streamDataBlocks().get(0).dataBlockIndex(); - ObjectReader.DataBlockGroup dataBlockGroup = objectReader.read(blockIndex).get(); - try (CloseableIterator it = dataBlockGroup.iterator()) { - StreamRecordBatch record = it.next(); - assertEquals(20, record.getBaseOffset()); - record = it.next(); - assertEquals(24, record.getLastOffset()); - record.release(); - } - } - - { - S3ObjectMetadata streamObjectMetadata = new S3ObjectMetadata(11, request.getStreamObjects().get(0).getObjectSize(), S3ObjectType.STREAM); - ObjectReader objectReader = new ObjectReader(streamObjectMetadata, s3Operator); - DataBlockIndex blockIndex = objectReader.find(233, 10, 16).get() - .streamDataBlocks().get(0).dataBlockIndex(); - ObjectReader.DataBlockGroup dataBlockGroup = objectReader.read(blockIndex).get(); - try (CloseableIterator it = dataBlockGroup.iterator()) { - StreamRecordBatch r1 = it.next(); - assertEquals(10, r1.getBaseOffset()); - r1.release(); - StreamRecordBatch r2 = it.next(); - assertEquals(12, r2.getBaseOffset()); - r2.release(); - StreamRecordBatch r3 = it.next(); - assertEquals(14, r3.getBaseOffset()); - r3.release(); - } - } - } - - @Test - public void testUpload_oneStream() throws Exception { - AtomicLong objectIdAlloc = new AtomicLong(10); - doAnswer(invocation -> CompletableFuture.completedFuture(objectIdAlloc.getAndIncrement())).when(objectManager).prepareObject(anyInt(), anyLong()); - when(objectManager.commitStreamSetObject(any())).thenReturn(CompletableFuture.completedFuture(new CommitStreamSetObjectResponse())); - - Map> map = new HashMap<>(); - map.put(233L, List.of( - new StreamRecordBatch(233, 0, 10, 2, random(512)), - new StreamRecordBatch(233, 0, 12, 2, random(128)), - new StreamRecordBatch(233, 0, 14, 2, random(512)) - )); - Config config = new Config() - .objectBlockSize(16 * 1024 * 1024) - .objectPartSize(16 * 1024 * 1024) - .streamSplitSize(16 * 1024 * 1024); - deltaWALUploadTask = DeltaWALUploadTask.builder().config(config).streamRecordsMap(map).objectManager(objectManager) - .s3Operator(s3Operator).executor(ForkJoinPool.commonPool()).build(); - - deltaWALUploadTask.prepare().get(); - deltaWALUploadTask.upload().get(); - deltaWALUploadTask.commit().get(); - - // Release all the buffers - map.values().forEach(batches -> batches.forEach(StreamRecordBatch::release)); - - ArgumentCaptor reqArg = ArgumentCaptor.forClass(CommitStreamSetObjectRequest.class); - verify(objectManager, 
times(1)).commitStreamSetObject(reqArg.capture()); - CommitStreamSetObjectRequest request = reqArg.getValue(); - assertEquals(0, request.getObjectSize()); - assertEquals(0, request.getStreamRanges().size()); - assertEquals(1, request.getStreamObjects().size()); - } - - @Test - public void test_emptyWALData() throws ExecutionException, InterruptedException, TimeoutException { - AtomicLong objectIdAlloc = new AtomicLong(10); - doAnswer(invocation -> CompletableFuture.completedFuture(objectIdAlloc.getAndIncrement())).when(objectManager).prepareObject(anyInt(), anyLong()); - when(objectManager.commitStreamSetObject(any())).thenReturn(CompletableFuture.completedFuture(new CommitStreamSetObjectResponse())); - - Map> map = new HashMap<>(); - map.put(233L, List.of( - new StreamRecordBatch(233, 0, 10, 2, random(512)) - )); - map.put(234L, List.of( - new StreamRecordBatch(234, 0, 20, 2, random(128)) - )); - - Config config = new Config() - .objectBlockSize(16 * 1024 * 1024) - .objectPartSize(16 * 1024 * 1024) - .streamSplitSize(64); - deltaWALUploadTask = DeltaWALUploadTask.builder().config(config).streamRecordsMap(map).objectManager(objectManager) - .s3Operator(s3Operator).executor(ForkJoinPool.commonPool()).build(); - assertTrue(deltaWALUploadTask.forceSplit); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/ObjectReaderTest.java b/s3stream/src/test/java/com/automq/stream/s3/ObjectReaderTest.java deleted file mode 100644 index cc4b06be1..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/ObjectReaderTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ExecutionException; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@Tag("S3Unit") -public class ObjectReaderTest { - - private int recordCntToBlockSize(int recordCnt, int bodySize) { - return (bodySize + StreamRecordBatchCodec.HEADER_SIZE) * recordCnt + ObjectWriter.DataBlock.BLOCK_HEADER_SIZE; - } - - @Test - public void testIndexBlock() { - // block0: s1 [0, 100) - // block1: s1 [100, 150) - // block2: s1 [150, 200) - // block3: s2 [110, 200) - int bodySize = 10; - int recordCnt1 = 100; - int blockSize1 = recordCntToBlockSize(recordCnt1, bodySize); - int recordCnt2 = 50; - int blockSize2 = recordCntToBlockSize(recordCnt2, bodySize); - int recordCnt3 = 90; - int blockSize3 = recordCntToBlockSize(recordCnt3, bodySize); - long streamId1 = 1; - long streamId2 = 2; - ByteBuf indexBuf = Unpooled.buffer(3 * DataBlockIndex.BLOCK_INDEX_SIZE); - new DataBlockIndex(streamId1, 0, recordCnt1, 
recordCnt1, 0, blockSize1).encode(indexBuf); - new DataBlockIndex(streamId1, recordCnt1, recordCnt2, recordCnt2, blockSize1, blockSize2).encode(indexBuf); - new DataBlockIndex(streamId1, recordCnt1 + recordCnt2, recordCnt3, recordCnt3, blockSize1 + blockSize2, blockSize3).encode(indexBuf); - new DataBlockIndex(streamId2, 110, recordCnt3, recordCnt3, blockSize1 + blockSize2 + blockSize3, blockSize3).encode(indexBuf); - - ObjectReader.IndexBlock indexBlock = new ObjectReader.IndexBlock(Mockito.mock(S3ObjectMetadata.class), indexBuf); - - ObjectReader.FindIndexResult rst = indexBlock.find(1, 10, 150, 100000); - assertTrue(rst.isFulfilled()); - List streamDataBlocks = rst.streamDataBlocks(); - assertEquals(2, streamDataBlocks.size()); - assertEquals(0, streamDataBlocks.get(0).getBlockStartPosition()); - assertEquals(blockSize1, streamDataBlocks.get(0).getBlockEndPosition()); - assertEquals(blockSize1, streamDataBlocks.get(1).getBlockStartPosition()); - assertEquals((long) blockSize1 + blockSize2, streamDataBlocks.get(1).getBlockEndPosition()); - - rst = indexBlock.find(1, 10, 200); - assertTrue(rst.isFulfilled()); - assertEquals(3, rst.streamDataBlocks().size()); - - rst = indexBlock.find(1L, 10, 10000, 80 * bodySize); - assertTrue(rst.isFulfilled()); - assertEquals(3, rst.streamDataBlocks().size()); - - rst = indexBlock.find(1L, 10, 10000, 160 * bodySize); - assertFalse(rst.isFulfilled()); - assertEquals(3, rst.streamDataBlocks().size()); - - rst = indexBlock.find(1, 10, 800); - assertFalse(rst.isFulfilled()); - assertEquals(3, rst.streamDataBlocks().size()); - } - - @Test - public void testGetBasicObjectInfo() throws ExecutionException, InterruptedException { - S3Operator s3Operator = new MemoryS3Operator(); - ObjectWriter objectWriter = ObjectWriter.writer(233L, s3Operator, 1024, 1024); - // make index block bigger than 1M - int streamCount = 2 * 1024 * 1024 / 40; - for (int i = 0; i < streamCount; i++) { - StreamRecordBatch r = new StreamRecordBatch(i, 0, i, 1, TestUtils.random(1)); - objectWriter.write(i, List.of(r)); - } - objectWriter.close().get(); - S3ObjectMetadata metadata = new S3ObjectMetadata(233L, objectWriter.size(), S3ObjectType.STREAM_SET); - try (ObjectReader objectReader = new ObjectReader(metadata, s3Operator)) { - ObjectReader.BasicObjectInfo info = objectReader.basicObjectInfo().get(); - assertEquals(streamCount, info.indexBlock().count()); - } - } - - @Test - public void testReadBlockGroup() throws ExecutionException, InterruptedException { - S3Operator s3Operator = new MemoryS3Operator(); - ByteBuf buf = ByteBufAlloc.byteBuffer(0); - buf.writeBytes(new ObjectWriter.DataBlock(233L, List.of( - new StreamRecordBatch(233L, 0, 10, 1, TestUtils.random(100)), - new StreamRecordBatch(233L, 0, 11, 2, TestUtils.random(100)) - )).buffer()); - buf.writeBytes(new ObjectWriter.DataBlock(233L, List.of( - new StreamRecordBatch(233L, 0, 13, 1, TestUtils.random(100)) - )).buffer()); - int indexPosition = buf.readableBytes(); - new DataBlockIndex(233L, 10, 4, 3, 0, buf.readableBytes()).encode(buf); - int indexSize = buf.readableBytes() - indexPosition; - buf.writeBytes(new ObjectWriter.Footer(indexPosition, indexSize).buffer()); - int objectSize = buf.readableBytes(); - s3Operator.write(ObjectUtils.genKey(0, 1L), buf); - buf.release(); - try (ObjectReader reader = new ObjectReader(new S3ObjectMetadata(1L, objectSize, S3ObjectType.STREAM), s3Operator)) { - ObjectReader.FindIndexResult rst = reader.find(233L, 10L, 14L, 1024).get(); - assertEquals(1, rst.streamDataBlocks().size()); - try 
(ObjectReader.DataBlockGroup dataBlockGroup = reader.read(rst.streamDataBlocks().get(0).dataBlockIndex()).get()) { - assertEquals(3, dataBlockGroup.recordCount()); - Iterator it = dataBlockGroup.iterator(); - assertEquals(10, it.next().getBaseOffset()); - assertEquals(11, it.next().getBaseOffset()); - assertEquals(13, it.next().getBaseOffset()); - } - } - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/ObjectWriterTest.java b/s3stream/src/test/java/com/automq/stream/s3/ObjectWriterTest.java deleted file mode 100644 index 5a959ecdf..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/ObjectWriterTest.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ExecutionException; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; - -@Tag("S3Unit") -public class ObjectWriterTest { - - @Test - public void testWrite() throws ExecutionException, InterruptedException { - S3ObjectMetadata metadata = new S3ObjectMetadata(1, 0, S3ObjectType.STREAM_SET); - - S3Operator s3Operator = new MemoryS3Operator(); - ObjectWriter objectWriter = ObjectWriter.writer(1, s3Operator, 1024, 1024); - StreamRecordBatch r1 = newRecord(233, 10, 5, 512); - StreamRecordBatch r2 = newRecord(233, 15, 10, 512); - StreamRecordBatch r3 = newRecord(233, 25, 5, 512); - objectWriter.write(233, List.of(r1, r2, r3)); - - StreamRecordBatch r4 = newRecord(234, 0, 5, 512); - objectWriter.write(234, List.of(r4)); - objectWriter.close().get(); - - List streamRanges = objectWriter.getStreamRanges(); - assertEquals(2, streamRanges.size()); - assertEquals(233, streamRanges.get(0).getStreamId()); - assertEquals(10, streamRanges.get(0).getStartOffset()); - assertEquals(30, streamRanges.get(0).getEndOffset()); - assertEquals(234, streamRanges.get(1).getStreamId()); - assertEquals(0, streamRanges.get(1).getStartOffset()); - assertEquals(5, streamRanges.get(1).getEndOffset()); - - int objectSize = s3Operator.rangeRead(metadata.key(), 0L, objectWriter.size()).get().readableBytes(); - assertEquals(objectSize, objectWriter.size()); - - metadata = new S3ObjectMetadata(1, objectSize, S3ObjectType.STREAM_SET); - ObjectReader objectReader = new ObjectReader(metadata, s3Operator); - List streamDataBlocks = objectReader.find(233, 10, 30).get().streamDataBlocks(); - assertEquals(2, streamDataBlocks.size()); - { - Iterator it = objectReader.read(streamDataBlocks.get(0).dataBlockIndex()).get().iterator(); - StreamRecordBatch r = it.next(); - assertEquals(233L, r.getStreamId()); - assertEquals(10L, r.getBaseOffset()); - assertEquals(5L, r.getCount()); - assertEquals(r1.getPayload(), r.getPayload()); - r.release(); - r = it.next(); - assertEquals(233L, 
r.getStreamId()); - assertEquals(15L, r.getBaseOffset()); - assertEquals(10L, r.getCount()); - assertEquals(r2.getPayload(), r.getPayload()); - assertFalse(it.hasNext()); - r.release(); - } - - { - Iterator it = objectReader.read(streamDataBlocks.get(1).dataBlockIndex()).get().iterator(); - StreamRecordBatch r = it.next(); - assertEquals(233L, r.getStreamId()); - assertEquals(25L, r.getBaseOffset()); - assertEquals(5L, r.getCount()); - assertEquals(r3.getPayload(), r.getPayload()); - r.release(); - } - - streamDataBlocks = objectReader.find(234, 1, 2).get().streamDataBlocks(); - assertEquals(1, streamDataBlocks.size()); - assertEquals(0, streamDataBlocks.get(0).getStartOffset()); - { - Iterator it = objectReader.read(streamDataBlocks.get(0).dataBlockIndex()).get().iterator(); - StreamRecordBatch r = it.next(); - assertEquals(234L, r.getStreamId()); - assertEquals(0L, r.getBaseOffset()); - assertEquals(5L, r.getCount()); - assertEquals(r4.getPayload(), r.getPayload()); - assertFalse(it.hasNext()); - r.release(); - } - } - - StreamRecordBatch newRecord(long streamId, long offset, int count, int payloadSize) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(payloadSize)); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java b/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java deleted file mode 100644 index f7b8f4a8f..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/S3StorageTest.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.cache.DefaultS3BlockCache; -import com.automq.stream.s3.cache.LogCache; -import com.automq.stream.s3.cache.ReadDataBlock; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamState; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.CommitStreamSetObjectRequest; -import com.automq.stream.s3.objects.CommitStreamSetObjectResponse; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.ObjectStreamRange; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.streams.StreamManager; -import com.automq.stream.s3.wal.MemoryWriteAheadLog; -import com.automq.stream.s3.wal.WriteAheadLog; -import io.netty.buffer.ByteBuf; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; - -import static com.automq.stream.s3.TestUtils.random; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; 
-import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -@Tag("S3Unit") -public class S3StorageTest { - StreamManager streamManager; - ObjectManager objectManager; - WriteAheadLog wal; - S3Operator s3Operator; - S3Storage storage; - Config config; - - private static StreamRecordBatch newRecord(long streamId, long offset) { - return new StreamRecordBatch(streamId, 0, offset, 1, random(1)); - } - - @BeforeEach - public void setup() { - config = new Config(); - config.blockCacheSize(0); - objectManager = mock(ObjectManager.class); - streamManager = mock(StreamManager.class); - wal = spy(new MemoryWriteAheadLog()); - s3Operator = new MemoryS3Operator(); - storage = new S3Storage(config, wal, - streamManager, objectManager, new DefaultS3BlockCache(config, objectManager, s3Operator), s3Operator); - }
- - @Test - public void testAppend() throws Exception { - Mockito.when(objectManager.prepareObject(eq(1), anyLong())).thenReturn(CompletableFuture.completedFuture(16L)); - CommitStreamSetObjectResponse resp = new CommitStreamSetObjectResponse(); - Mockito.when(objectManager.commitStreamSetObject(any())).thenReturn(CompletableFuture.completedFuture(resp)); - - CompletableFuture<Void> cf1 = storage.append( - new StreamRecordBatch(233, 1, 10, 1, random(100)) - ); - CompletableFuture<Void> cf2 = storage.append( - new StreamRecordBatch(233, 1, 11, 2, random(100)) - ); - CompletableFuture<Void> cf3 = storage.append( - new StreamRecordBatch(234, 3, 100, 1, random(100)) - ); - - cf1.get(3, TimeUnit.SECONDS); - cf2.get(3, TimeUnit.SECONDS); - cf3.get(3, TimeUnit.SECONDS); - - // maxBytes = 90 is smaller than a single 100-byte record, yet the first record is still returned - ReadDataBlock readRst = storage.read(233, 10, 13, 90).get(); - assertEquals(1, readRst.getRecords().size()); - readRst = storage.read(233, 10, 13, 200).get(); - assertEquals(2, readRst.getRecords().size()); - - storage.forceUpload(233L).get(); - ArgumentCaptor<CommitStreamSetObjectRequest> commitArg = ArgumentCaptor.forClass(CommitStreamSetObjectRequest.class); - verify(objectManager).commitStreamSetObject(commitArg.capture()); - CommitStreamSetObjectRequest commitReq = commitArg.getValue(); - assertEquals(16L, commitReq.getObjectId()); - List<ObjectStreamRange> streamRanges = commitReq.getStreamRanges(); - assertEquals(2, streamRanges.size()); - assertEquals(233, streamRanges.get(0).getStreamId()); - assertEquals(10, streamRanges.get(0).getStartOffset()); - assertEquals(13, streamRanges.get(0).getEndOffset()); - assertEquals(234, streamRanges.get(1).getStreamId()); - assertEquals(100, streamRanges.get(1).getStartOffset()); - assertEquals(101, streamRanges.get(1).getEndOffset()); - }
- - @Test - public void testWALConfirmOffsetCalculator() { - S3Storage.WALConfirmOffsetCalculator calc = new S3Storage.WALConfirmOffsetCalculator(); - WalWriteRequest r0 = new WalWriteRequest(null, 0L, null); - WalWriteRequest r1 = new WalWriteRequest(null, 1L, null); - WalWriteRequest r2 = new WalWriteRequest(null, 2L, null); - WalWriteRequest r3 = new WalWriteRequest(null, 3L, null); - - calc.add(r3); - calc.add(r1); - calc.add(r2); - calc.add(r0); - - calc.update(); - assertEquals(-1L, calc.get()); - - // the confirm offset only advances over a continuous prefix of confirmed requests - r0.confirmed = true; - calc.update(); - assertEquals(0L, calc.get()); - - r3.confirmed = true; - calc.update(); - assertEquals(0L, calc.get()); - - r1.confirmed = true; - calc.update(); - assertEquals(1L, calc.get()); - - r2.confirmed = true; - calc.update(); - assertEquals(3L, calc.get()); - }
- - @Test - public void testWALCallbackSequencer() { - S3Storage.WALCallbackSequencer seq = new S3Storage.WALCallbackSequencer(); - WalWriteRequest r0 = new WalWriteRequest(newRecord(233L, 10L), 100L, new CompletableFuture<>()); - WalWriteRequest r1 = new WalWriteRequest(newRecord(233L, 11L), 101L, new CompletableFuture<>()); - WalWriteRequest r2 = new WalWriteRequest(newRecord(234L, 20L), 102L, new CompletableFuture<>()); - WalWriteRequest r3 = new WalWriteRequest(newRecord(234L, 21L), 103L, new CompletableFuture<>()); - - seq.before(r0); - seq.before(r1); - seq.before(r2); - seq.before(r3); - - assertEquals(Collections.emptyList(), seq.after(r3)); - assertEquals(List.of(r2, r3), seq.after(r2)); - assertEquals(List.of(r0), seq.after(r0)); - assertEquals(List.of(r1), seq.after(r1)); - }
- - @Test - public void testUploadWALObject_sequence() throws ExecutionException, InterruptedException, TimeoutException { - List<CompletableFuture<Long>> objectIdCfList = List.of(new CompletableFuture<>(), new CompletableFuture<>()); - AtomicInteger objectCfIndex = new AtomicInteger(); - Mockito.doAnswer(invocation -> objectIdCfList.get(objectCfIndex.getAndIncrement())).when(objectManager).prepareObject(ArgumentMatchers.anyInt(), anyLong()); - - List<CompletableFuture<CommitStreamSetObjectResponse>> commitCfList = List.of(new CompletableFuture<>(), new CompletableFuture<>()); - AtomicInteger commitCfIndex = new AtomicInteger(); - Mockito.doAnswer(invocation -> commitCfList.get(commitCfIndex.getAndIncrement())).when(objectManager).commitStreamSetObject(any()); - - LogCache.LogCacheBlock logCacheBlock1 = new LogCache.LogCacheBlock(1024); - logCacheBlock1.put(newRecord(233L, 10L)); - logCacheBlock1.put(newRecord(234L, 10L)); - logCacheBlock1.confirmOffset(10L); - CompletableFuture<Void> cf1 = storage.uploadDeltaWAL(logCacheBlock1); - - LogCache.LogCacheBlock logCacheBlock2 = new LogCache.LogCacheBlock(1024); - logCacheBlock2.put(newRecord(233L, 20L)); - logCacheBlock2.put(newRecord(234L, 20L)); - logCacheBlock2.confirmOffset(20L); - CompletableFuture<Void> cf2 = storage.uploadDeltaWAL(logCacheBlock2); - - // objectIds are acquired sequentially: the second upload has not prepared its objectId yet - verify(objectManager, Mockito.timeout(1000).times(1)).prepareObject(ArgumentMatchers.anyInt(), anyLong()); - - objectIdCfList.get(0).complete(1L); - // completing the first objectId lets the next upload prepare its objectId - verify(objectManager, Mockito.timeout(1000).times(2)).prepareObject(ArgumentMatchers.anyInt(), anyLong()); - verify(objectManager, Mockito.timeout(1000).times(1)).commitStreamSetObject(any()); - - objectIdCfList.get(1).complete(2L); - Thread.sleep(10); - // the second commit must wait until the first commit completes - verify(objectManager, Mockito.times(1)).commitStreamSetObject(any()); - - commitCfList.get(0).complete(new CommitStreamSetObjectResponse()); - verify(objectManager, Mockito.timeout(1000).times(2)).commitStreamSetObject(any()); - commitCfList.get(1).complete(new CommitStreamSetObjectResponse()); - cf1.get(1, TimeUnit.SECONDS); - cf2.get(1, TimeUnit.SECONDS); - }
- - @Test - public void testRecoverContinuousRecords() { - List<WriteAheadLog.RecoverResult> recoverResults = List.of( - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(233L, 10L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(233L, 11L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(233L, 12L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(233L, 15L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(234L, 20L))) - ); - - List<StreamMetadata> openingStreams = List.of(new StreamMetadata(233L, 0L, 0L, 11L, StreamState.OPENED)); - LogCache.LogCacheBlock cacheBlock = S3Storage.recoverContinuousRecords(recoverResults.iterator(), openingStreams); - // ignore closed streams and non-continuous records. - assertEquals(1, cacheBlock.records().size()); - List<StreamRecordBatch> streamRecords = cacheBlock.records().get(233L); - assertEquals(2, streamRecords.size()); - assertEquals(11L, streamRecords.get(0).getBaseOffset()); - assertEquals(12L, streamRecords.get(1).getBaseOffset()); - - // simulate data loss: the stream expects offset 5 next, but the earliest WAL record is at offset 10 - openingStreams = List.of( - new StreamMetadata(233L, 0L, 0L, 5L, StreamState.OPENED)); - boolean exception = false; - try { - S3Storage.recoverContinuousRecords(recoverResults.iterator(), openingStreams); - } catch (IllegalStateException e) { - exception = true; - } - Assertions.assertTrue(exception); - }
- - @Test - public void testRecoverOutOfOrderRecords() { - List<WriteAheadLog.RecoverResult> recoverResults = List.of( - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 9L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 10L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 13L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 11L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 12L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 14L))), - new TestRecoverResult(StreamRecordBatchCodec.encode(newRecord(42L, 20L))) - ); - - List<StreamMetadata> openingStreams = List.of(new StreamMetadata(42L, 0L, 0L, 10L, StreamState.OPENED)); - LogCache.LogCacheBlock cacheBlock = S3Storage.recoverContinuousRecords(recoverResults.iterator(), openingStreams); - // ignore closed streams and non-continuous records. - assertEquals(1, cacheBlock.records().size()); - List<StreamRecordBatch> streamRecords = cacheBlock.records().get(42L); - assertEquals(5, streamRecords.size()); - assertEquals(10L, streamRecords.get(0).getBaseOffset()); - assertEquals(11L, streamRecords.get(1).getBaseOffset()); - assertEquals(12L, streamRecords.get(2).getBaseOffset()); - assertEquals(13L, streamRecords.get(3).getBaseOffset()); - assertEquals(14L, streamRecords.get(4).getBaseOffset()); - }
- - @Test - public void testWALOverCapacity() throws WriteAheadLog.OverCapacityException { - storage.append(newRecord(233L, 10L)); - storage.append(newRecord(233L, 11L)); - doThrow(new WriteAheadLog.OverCapacityException("test")).when(wal).append(any(), any()); - - Mockito.when(objectManager.prepareObject(eq(1), anyLong())).thenReturn(CompletableFuture.completedFuture(16L)); - CommitStreamSetObjectResponse resp = new CommitStreamSetObjectResponse(); - Mockito.when(objectManager.commitStreamSetObject(any())).thenReturn(CompletableFuture.completedFuture(resp)); - - storage.append(newRecord(233L, 12L)); - - // the over-capacity exception forces the cached records to be uploaded as a stream object [10, 12) - ArgumentCaptor<CommitStreamSetObjectRequest> commitArg = ArgumentCaptor.forClass(CommitStreamSetObjectRequest.class); - verify(objectManager, timeout(1000L).times(1)).commitStreamSetObject(commitArg.capture()); - CommitStreamSetObjectRequest commitRequest = commitArg.getValue(); - assertEquals(1, commitRequest.getStreamObjects().size()); - assertEquals(0, commitRequest.getStreamRanges().size()); - StreamObject range = commitRequest.getStreamObjects().get(0); - assertEquals(233L, range.getStreamId()); - assertEquals(10L, range.getStartOffset()); - assertEquals(12L, range.getEndOffset()); - }
- - static class TestRecoverResult implements WriteAheadLog.RecoverResult { - private final ByteBuf record; - - public TestRecoverResult(ByteBuf record) { - this.record = record; - } - - @Override - public ByteBuf record() { - return record; - } - - @Override - public long recordOffset() { - return 0; - } - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java 
b/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java deleted file mode 100644 index b5928b2dd..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/S3StreamTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.api.FetchResult; -import com.automq.stream.api.exceptions.StreamClientException; -import com.automq.stream.s3.cache.CacheAccessType; -import com.automq.stream.s3.cache.ReadDataBlock; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.streams.StreamManager; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; - -@Tag("S3Unit") -public class S3StreamTest { - Storage storage; - StreamManager streamManager; - S3Stream stream; - - @BeforeEach - public void setup() { - storage = mock(Storage.class); - streamManager = mock(StreamManager.class); - stream = new S3Stream(233, 1, 100, 233, storage, streamManager); - } - - @Test - public void testFetch() throws Throwable { - stream.confirmOffset.set(120L); - Mockito.when(storage.read(any(), eq(233L), eq(110L), eq(120L), eq(100))) - .thenReturn(CompletableFuture.completedFuture(newReadDataBlock(110, 115, 110))); - FetchResult rst = stream.fetch(110, 120, 100).get(1, TimeUnit.SECONDS); - assertEquals(1, rst.recordBatchList().size()); - assertEquals(110, rst.recordBatchList().get(0).baseOffset()); - assertEquals(115, rst.recordBatchList().get(0).lastOffset()); - assertEquals(CacheAccessType.DELTA_WAL_CACHE_HIT, rst.getCacheAccessType()); - - // TODO: add fetch from WAL cache - - boolean isException = false; - try { - stream.fetch(120, 140, 100).get(); - } catch (ExecutionException e) { - if (e.getCause() instanceof StreamClientException) { - isException = true; - } - } - Assertions.assertTrue(isException); - } - - ReadDataBlock newReadDataBlock(long start, long end, int size) { - StreamRecordBatch record = new StreamRecordBatch(0, 0, start, (int) (end - start), TestUtils.random(size)); - return new ReadDataBlock(List.of(record), CacheAccessType.DELTA_WAL_CACHE_HIT); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/StreamObjectCompactorTest.java b/s3stream/src/test/java/com/automq/stream/s3/StreamObjectCompactorTest.java deleted file mode 100644 index 90b40f06b..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/StreamObjectCompactorTest.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.metadata.StreamOffsetRange; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.CompactStreamObjectRequest; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -class StreamObjectCompactorTest { - - private ObjectManager objectManager; - private S3Operator s3Operator; - private S3Stream stream; - private final long streamId = 233L; - - @BeforeEach - void setUp() { - objectManager = Mockito.mock(ObjectManager.class); - s3Operator = new MemoryS3Operator(); - stream = Mockito.mock(S3Stream.class); - } - - List prepareData() throws ExecutionException, InterruptedException { - // prepare object - List objects = new LinkedList<>(); - { - // object-1: offset 10~15 - ObjectWriter writer = ObjectWriter.writer(1, s3Operator, Integer.MAX_VALUE, Integer.MAX_VALUE); - writer.write(233L, List.of( - newRecord(10L, 1, 1024), - newRecord(11L, 1, 1024), - newRecord(12L, 1, 1024) - )); - writer.write(233L, List.of( - newRecord(13L, 1, 1024), - newRecord(14L, 1, 1024), - newRecord(15L, 1, 1024) - )); - writer.close().get(); - objects.add(new S3ObjectMetadata(1, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 10, 16)), - System.currentTimeMillis(), System.currentTimeMillis(), writer.size(), 1)); - } - { - // object-2: offset 16~17 - ObjectWriter writer = ObjectWriter.writer(2, s3Operator, Integer.MAX_VALUE, Integer.MAX_VALUE); - writer.write(233L, List.of( - newRecord(16L, 1, 1024) - )); - writer.write(233L, List.of( - newRecord(17L, 1, 1024) - )); - writer.close().get(); - objects.add(new S3ObjectMetadata(2, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 16, 18)), - System.currentTimeMillis(), System.currentTimeMillis(), writer.size(), 2)); - } - { - // object-3: offset 30 - ObjectWriter writer = ObjectWriter.writer(3, s3Operator, Integer.MAX_VALUE, Integer.MAX_VALUE); - writer.write(233L, List.of( - newRecord(30L, 1, 1024) - )); - writer.close().get(); - objects.add(new S3ObjectMetadata(3, S3ObjectType.STREAM, List.of(new 
StreamOffsetRange(streamId, 30, 31)), - System.currentTimeMillis(), System.currentTimeMillis(), writer.size(), 3)); - } - { - // object-4: offset 31-32 - ObjectWriter writer = ObjectWriter.writer(4, s3Operator, Integer.MAX_VALUE, Integer.MAX_VALUE); - writer.write(233L, List.of( - newRecord(31L, 1, 1024) - )); - writer.write(233L, List.of( - newRecord(32L, 1, 1024) - )); - writer.close().get(); - objects.add(new S3ObjectMetadata(4, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 31, 33)), - System.currentTimeMillis(), System.currentTimeMillis(), writer.size(), 4)); - } - return objects; - } - - @Test - public void testCompact() throws ExecutionException, InterruptedException { - List objects = prepareData(); - when(objectManager.getStreamObjects(eq(streamId), eq(0L), eq(32L), eq(Integer.MAX_VALUE))) - .thenReturn(CompletableFuture.completedFuture(objects)); - AtomicLong nextObjectId = new AtomicLong(5); - doAnswer(invocationOnMock -> CompletableFuture.completedFuture(nextObjectId.getAndIncrement())).when(objectManager).prepareObject(anyInt(), anyLong()); - when(objectManager.compactStreamObject(any())).thenReturn(CompletableFuture.completedFuture(null)); - when(stream.streamId()).thenReturn(streamId); - when(stream.startOffset()).thenReturn(14L); - when(stream.confirmOffset()).thenReturn(32L); - - StreamObjectCompactor task = StreamObjectCompactor.builder().objectManager(objectManager).s3Operator(s3Operator) - .maxStreamObjectSize(1024 * 1024 * 1024).stream(stream).dataBlockGroupSizeThreshold(1).build(); - task.compact(); - - ArgumentCaptor ac = ArgumentCaptor.forClass(CompactStreamObjectRequest.class); - verify(objectManager, times(2)).compactStreamObject(ac.capture()); - - // verify compact request - List requests = ac.getAllValues(); - CompactStreamObjectRequest req1 = requests.get(0); - assertEquals(5, req1.getObjectId()); - assertEquals(233L, req1.getStreamId()); - assertEquals(13L, req1.getStartOffset()); - assertEquals(18L, req1.getEndOffset()); - assertEquals(List.of(1L, 2L), req1.getSourceObjectIds()); - - CompactStreamObjectRequest req2 = requests.get(1); - assertEquals(6, req2.getObjectId()); - assertEquals(233L, req2.getStreamId()); - assertEquals(30L, req2.getStartOffset()); - assertEquals(33L, req2.getEndOffset()); - assertEquals(List.of(3L, 4L), req2.getSourceObjectIds()); - - // verify compacted object record - { - ObjectReader objectReader = new ObjectReader(new S3ObjectMetadata(5, req1.getObjectSize(), S3ObjectType.STREAM), s3Operator); - assertEquals(3, objectReader.basicObjectInfo().get().indexBlock().count()); - ObjectReader.FindIndexResult rst = objectReader.find(streamId, 13L, 18L).get(); - assertEquals(3, rst.streamDataBlocks().size()); - ObjectReader.DataBlockGroup dataBlockGroup1 = objectReader.read(rst.streamDataBlocks().get(0).dataBlockIndex()).get(); - try (dataBlockGroup1) { - assertEquals(3, dataBlockGroup1.recordCount()); - Iterator it = dataBlockGroup1.iterator(); - assertEquals(13L, it.next().getBaseOffset()); - assertEquals(14L, it.next().getBaseOffset()); - assertEquals(15L, it.next().getBaseOffset()); - assertFalse(it.hasNext()); - } - ObjectReader.DataBlockGroup dataBlockGroup2 = objectReader.read(rst.streamDataBlocks().get(1).dataBlockIndex()).get(); - try (dataBlockGroup2) { - assertEquals(1, dataBlockGroup2.recordCount()); - Iterator it = dataBlockGroup2.iterator(); - assertEquals(16L, it.next().getBaseOffset()); - } - ObjectReader.DataBlockGroup dataBlockGroup3 = objectReader.read(rst.streamDataBlocks().get(2).dataBlockIndex()).get(); 
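- // the third group holds only record 17: dataBlockGroupSizeThreshold(1) is below every block's size,
- // so each original data block stays a separate group (contrast with testCompact_groupBlocks below,
- // where a larger threshold merges adjacent blocks into one group)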
- try (dataBlockGroup3) { - assertEquals(1, dataBlockGroup3.recordCount()); - Iterator it = dataBlockGroup3.iterator(); - assertEquals(17L, it.next().getBaseOffset()); - } - objectReader.close(); - } - { - ObjectReader objectReader = new ObjectReader(new S3ObjectMetadata(6, req2.getObjectSize(), S3ObjectType.STREAM), s3Operator); - assertEquals(3, objectReader.basicObjectInfo().get().indexBlock().count()); - ObjectReader.FindIndexResult rst = objectReader.find(streamId, 30L, 33L).get(); - assertEquals(3, rst.streamDataBlocks().size()); - ObjectReader.DataBlockGroup dataBlockGroup1 = objectReader.read(rst.streamDataBlocks().get(0).dataBlockIndex()).get(); - try (dataBlockGroup1) { - assertEquals(1, dataBlockGroup1.recordCount()); - Iterator it = dataBlockGroup1.iterator(); - assertEquals(30L, it.next().getBaseOffset()); - assertFalse(it.hasNext()); - } - ObjectReader.DataBlockGroup dataBlockGroup2 = objectReader.read(rst.streamDataBlocks().get(1).dataBlockIndex()).get(); - try (dataBlockGroup2) { - assertEquals(1, dataBlockGroup2.recordCount()); - Iterator it = dataBlockGroup2.iterator(); - assertEquals(31L, it.next().getBaseOffset()); - } - ObjectReader.DataBlockGroup dataBlockGroup3 = objectReader.read(rst.streamDataBlocks().get(2).dataBlockIndex()).get(); - try (dataBlockGroup3) { - assertEquals(1, dataBlockGroup3.recordCount()); - Iterator it = dataBlockGroup3.iterator(); - assertEquals(32L, it.next().getBaseOffset()); - } - objectReader.close(); - } - } - - @Test - public void testCleanup() throws ExecutionException, InterruptedException { - List objects = prepareData(); - when(objectManager.getStreamObjects(eq(streamId), eq(0L), eq(32L), eq(Integer.MAX_VALUE))) - .thenReturn(CompletableFuture.completedFuture(objects)); - AtomicLong nextObjectId = new AtomicLong(5); - doAnswer(invocationOnMock -> CompletableFuture.completedFuture(nextObjectId.getAndIncrement())).when(objectManager).prepareObject(anyInt(), anyLong()); - when(objectManager.compactStreamObject(any())).thenReturn(CompletableFuture.completedFuture(null)); - when(stream.streamId()).thenReturn(streamId); - when(stream.startOffset()).thenReturn(17L); - when(stream.confirmOffset()).thenReturn(32L); - - StreamObjectCompactor task = StreamObjectCompactor.builder().objectManager(objectManager).s3Operator(s3Operator) - .maxStreamObjectSize(1024 * 1024 * 1024).stream(stream).dataBlockGroupSizeThreshold(1).build(); - task.compact(); - - ArgumentCaptor ac = ArgumentCaptor.forClass(CompactStreamObjectRequest.class); - verify(objectManager, times(3)).compactStreamObject(ac.capture()); - CompactStreamObjectRequest clean = ac.getAllValues().get(0); - assertEquals(ObjectUtils.NOOP_OBJECT_ID, clean.getObjectId()); - assertEquals(List.of(1L), clean.getSourceObjectIds()); - - CompactStreamObjectRequest compact0 = ac.getAllValues().get(1); - assertEquals(5, compact0.getObjectId()); - CompactStreamObjectRequest compact1 = ac.getAllValues().get(2); - assertEquals(6, compact1.getObjectId()); - } - - @Test - public void testCompact_groupBlocks() throws ExecutionException, InterruptedException { - List objects = prepareData(); - - CompactStreamObjectRequest req = new StreamObjectCompactor.StreamObjectGroupCompactor(streamId, 0L, 14L, - objects.subList(0, 2), 5, 5000, s3Operator).compact().get(); - // verify compact request - assertEquals(5, req.getObjectId()); - assertEquals(233L, req.getStreamId()); - assertEquals(13L, req.getStartOffset()); - assertEquals(18L, req.getEndOffset()); - assertEquals(List.of(1L, 2L), req.getSourceObjectIds()); - - // 
verify compacted object record, expect [13,16) + [16, 17) compact to one data block group. - { - ObjectReader objectReader = new ObjectReader(new S3ObjectMetadata(5, req.getObjectSize(), S3ObjectType.STREAM), s3Operator); - assertEquals(2, objectReader.basicObjectInfo().get().indexBlock().count()); - ObjectReader.FindIndexResult rst = objectReader.find(streamId, 13L, 18L).get(); - assertEquals(2, rst.streamDataBlocks().size()); - ObjectReader.DataBlockGroup dataBlockGroup1 = objectReader.read(rst.streamDataBlocks().get(0).dataBlockIndex()).get(); - try (dataBlockGroup1) { - assertEquals(4, dataBlockGroup1.recordCount()); - Iterator it = dataBlockGroup1.iterator(); - assertEquals(13L, it.next().getBaseOffset()); - assertEquals(14L, it.next().getBaseOffset()); - assertEquals(15L, it.next().getBaseOffset()); - assertEquals(16L, it.next().getBaseOffset()); - assertFalse(it.hasNext()); - } - ObjectReader.DataBlockGroup dataBlockGroup2 = objectReader.read(rst.streamDataBlocks().get(1).dataBlockIndex()).get(); - try (dataBlockGroup2) { - assertEquals(1, dataBlockGroup2.recordCount()); - Iterator it = dataBlockGroup2.iterator(); - StreamRecordBatch record = it.next(); - assertEquals(17L, record.getBaseOffset()); - assertEquals(18L, record.getLastOffset()); - } - objectReader.close(); - } - } - - @Test - public void testGroup() { - List objects = List.of( - new S3ObjectMetadata(2, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 16, 18)), - System.currentTimeMillis(), System.currentTimeMillis(), 1024, 2), - - new S3ObjectMetadata(3, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 18, 19)), - System.currentTimeMillis(), System.currentTimeMillis(), 1, 3), - new S3ObjectMetadata(4, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 19, 20)), - System.currentTimeMillis(), System.currentTimeMillis(), 1, 4), - - new S3ObjectMetadata(5, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 30, 31)), - System.currentTimeMillis(), System.currentTimeMillis(), 1, 5), - new S3ObjectMetadata(6, S3ObjectType.STREAM, List.of(new StreamOffsetRange(streamId, 31, 32)), - System.currentTimeMillis(), System.currentTimeMillis(), 1, 6) - ); - List> groups = StreamObjectCompactor.group0(objects, 512); - assertEquals(3, groups.size()); - assertEquals(List.of(2L), groups.get(0).stream().map(S3ObjectMetadata::objectId).collect(Collectors.toList())); - assertEquals(List.of(3L, 4L), groups.get(1).stream().map(S3ObjectMetadata::objectId).collect(Collectors.toList())); - assertEquals(List.of(5L, 6L), groups.get(2).stream().map(S3ObjectMetadata::objectId).collect(Collectors.toList())); - } - - StreamRecordBatch newRecord(long offset, int count, int payloadSize) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(payloadSize)); - } -} \ No newline at end of file diff --git a/s3stream/src/test/java/com/automq/stream/s3/TestUtils.java b/s3stream/src/test/java/com/automq/stream/s3/TestUtils.java deleted file mode 100644 index ee43f7b57..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/TestUtils.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.PooledByteBufAllocator; -import io.netty.buffer.Unpooled; -import java.util.Random; -import java.util.UUID; - -public class TestUtils { - - public static ByteBuf random(int size) { - byte[] bytes = new byte[size]; - new Random().nextBytes(bytes); - // In most test cases, the generated ByteBuf will be released after it is written to S3. - // To give unit tests a chance to make assertions on the ByteBuf, we retain it here. - // Since the retained ByteBuf is unpooled, it will be reclaimed by the GC. - return Unpooled.wrappedBuffer(bytes).retain(); - } - - public static ByteBuf randomPooled(int size) { - byte[] bytes = new byte[size]; - new Random().nextBytes(bytes); - ByteBuf buf = PooledByteBufAllocator.DEFAULT.heapBuffer(size); - buf.writeBytes(bytes); - return buf; - } - - public static String tempFilePath() { - return System.getProperty("java.io.tmpdir") + "/kos-" + UUID.randomUUID(); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/BlockCacheTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/BlockCacheTest.java deleted file mode 100644 index fd3fcc49f..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/BlockCacheTest.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.model.StreamRecordBatch; -import java.util.List; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@Tag("S3Unit") -public class BlockCacheTest { - - private static StreamRecordBatch newRecord(long streamId, long offset, int count, int size) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(size)); - } - - private BlockCache createBlockCache() { - BlockCache blockCache = new BlockCache(1024 * 1024 * 1024); - - blockCache.put(233L, List.of( - newRecord(233L, 10L, 2, 1), - newRecord(233L, 12L, 2, 1) - )); - - blockCache.put(233L, List.of( - newRecord(233L, 16L, 4, 1), - newRecord(233L, 20L, 2, 1) - )); - - // overlap - blockCache.put(233L, List.of( - newRecord(233L, 12L, 2, 1), - newRecord(233L, 14L, 1, 1), - newRecord(233L, 15L, 1, BlockCache.BLOCK_SIZE), - newRecord(233L, 16L, 4, 1), - newRecord(233L, 20L, 2, 1), - newRecord(233L, 22L, 1, 1), - newRecord(233L, 23L, 1, 1) - )); - return blockCache; - }
- - @Test - public void testPutGet() { - BlockCache blockCache = createBlockCache(); - - BlockCache.GetCacheResult rst = blockCache.get(233L, 10L, 24L, BlockCache.BLOCK_SIZE * 2); - List<StreamRecordBatch> records = rst.getRecords(); - assertEquals(8, records.size()); - assertEquals(10L, records.get(0).getBaseOffset()); - assertEquals(12L, records.get(1).getBaseOffset()); - assertEquals(14L, records.get(2).getBaseOffset()); - assertEquals(15L, records.get(3).getBaseOffset()); - assertEquals(16L, records.get(4).getBaseOffset()); - assertEquals(20L, records.get(5).getBaseOffset()); - assertEquals(22L, records.get(6).getBaseOffset()); - assertEquals(23L, records.get(7).getBaseOffset()); - }
- - @Test - public void testPutGet2() { - BlockCache blockCache = createBlockCache(); - - BlockCache.GetCacheResult rst = blockCache.get(233L, 18L, 22L, BlockCache.BLOCK_SIZE * 2); - List<StreamRecordBatch> records = rst.getRecords(); - assertEquals(2, records.size()); - assertEquals(16L, records.get(0).getBaseOffset()); - assertEquals(20L, records.get(1).getBaseOffset()); - }
- - @Test - public void testPutGet3() { - BlockCache blockCache = createBlockCache(); - blockCache.put(233L, 26L, 40L, List.of( - newRecord(233L, 26L, 4, 1), - newRecord(233L, 30L, 10, 4) - )); - - BlockCache.GetCacheResult rst = blockCache.get(233L, 27L, 35L, BlockCache.BLOCK_SIZE * 2); - List<StreamRecordBatch> records = rst.getRecords(); - assertEquals(2, records.size()); - assertEquals(26L, records.get(0).getBaseOffset()); - assertEquals(30L, records.get(1).getBaseOffset()); - assertEquals(1, rst.getReadAheadRecords().size()); - assertEquals(new DefaultS3BlockCache.ReadAheadRecord(40L), rst.getReadAheadRecords().get(0)); - }
- - @Test - public void testRangeCheck() { - BlockCache blockCache = createBlockCache(); - blockCache.put(233L, List.of( - newRecord(233L, 26L, 4, 1), - newRecord(233L, 30L, 10, 4) - )); - - assertTrue(blockCache.checkRange(233, 10, 2)); - assertTrue(blockCache.checkRange(233, 11, BlockCache.BLOCK_SIZE)); - assertTrue(blockCache.checkRange(233, 20, 3)); - assertTrue(blockCache.checkRange(233, 26, 4)); - assertFalse(blockCache.checkRange(233, 20, 6)); - }
- - @Test - public void testEvict() { - BlockCache blockCache = new BlockCache(4); - blockCache.put(233L, List.of( - newRecord(233L, 10L, 2, 2), - newRecord(233L, 12L, 2, 1) - )); - - assertEquals(2, blockCache.get(233L, 10L, 20L, 1000).getRecords().size()); - - blockCache.put(233L, List.of( - newRecord(233L, 16L, 4, 1), - newRecord(233L, 20L, 2, 1) - )); - assertEquals(0, blockCache.get(233L, 10L, 20L, 1000).getRecords().size()); - assertEquals(2, blockCache.get(233L, 16, 21L, 1000).getRecords().size()); - }
- - @Test - public void testLRU() { - LRUCache<Long, Boolean> lru = new LRUCache<>(); - lru.put(1L, true); - lru.put(2L, true); - lru.put(3L, true); - // touch moves 2 to the most-recently-used position, so it pops last - lru.touch(2L); - assertEquals(1, lru.pop().getKey()); - assertEquals(3, lru.pop().getKey()); - assertEquals(2, lru.pop().getKey()); - assertNull(lru.pop()); - }
- - @Test - public void testReadAhead() { - BlockCache blockCache = new BlockCache(16 * 1024 * 1024); - blockCache.put(233L, 10L, 12L, List.of( - newRecord(233L, 10, 1, 1024 * 1024), - newRecord(233L, 11, 1, 1024) - )); - - // first read of the block - BlockCache.GetCacheResult rst = blockCache.get(233L, 10, 11, Integer.MAX_VALUE); - assertEquals(1, rst.getRecords().size()); - assertEquals(10L, rst.getRecords().get(0).getBaseOffset()); - assertEquals(12, rst.getReadAheadRecords().get(0).nextRAOffset()); - - // read the block again: the read-ahead mark has been cleared.
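- // (put(233L, 10L, 12L, ...) attached a read-ahead marker pointing at offset 12; the first
- // get() above returned and consumed it, so this second get() should see none)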
- rst = blockCache.get(233L, 10, 11, Integer.MAX_VALUE); - assertTrue(rst.getReadAheadRecords().isEmpty()); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/DataBlockReadAccumulatorTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/DataBlockReadAccumulatorTest.java deleted file mode 100644 index 8922b79e7..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/DataBlockReadAccumulatorTest.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.utils.CloseableIterator; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class DataBlockReadAccumulatorTest { - - private static StreamRecordBatch newRecord(long streamId, long offset, int count, int size) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(size)); - } - - @Test - public void test() throws ExecutionException, InterruptedException, TimeoutException { - DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator(); - - ObjectReader reader = mock(ObjectReader.class); - DataBlockIndex dataBlockIndex = new DataBlockIndex(10, 0, 12, 2, 10, 100); - StreamDataBlock streamDataBlock = new StreamDataBlock(1, dataBlockIndex); - CompletableFuture readerCf = new CompletableFuture<>(); - when(reader.read(eq(dataBlockIndex))).thenReturn(readerCf); - - List reserveResults = accumulator.reserveDataBlock(List.of(new ImmutablePair<>(reader, streamDataBlock))); - Assertions.assertEquals(1, reserveResults.size()); - Assertions.assertEquals(100, reserveResults.get(0).reserveSize()); - - List reserveResults2 = accumulator.reserveDataBlock(List.of(new ImmutablePair<>(reader, streamDataBlock))); - Assertions.assertEquals(1, reserveResults2.size()); - Assertions.assertEquals(0, reserveResults2.get(0).reserveSize()); - - accumulator.readDataBlock(reader, dataBlockIndex); - - ObjectReader.DataBlockGroup dataBlockGroup = mock(ObjectReader.DataBlockGroup.class); - List records = List.of( - newRecord(10, 10, 2, 1), - newRecord(10, 12, 2, 1) - ); - when(dataBlockGroup.recordCount()).thenReturn(2); - when(dataBlockGroup.iterator()).thenAnswer(args -> { - Iterator it = records.iterator(); - return new CloseableIterator() { - - @Override - public boolean hasNext() { - return it.hasNext(); - } - - @Override - public StreamRecordBatch next() { - return 
it.next(); - } - - @Override - public void close() { - - } - }; - }); - when(dataBlockGroup.recordCount()).thenReturn(2); - readerCf.complete(dataBlockGroup); - - verify(reader, times(1)).read(any()); - - CompletableFuture dataBlockCf1 = reserveResults.get(0).cf(); - CompletableFuture dataBlockCf2 = reserveResults2.get(0).cf(); - assertEquals(2, dataBlockCf1.get(1, TimeUnit.SECONDS).records().size()); - assertEquals(12, dataBlockCf1.get(1, TimeUnit.SECONDS).records().get(1).getBaseOffset()); - dataBlockCf1.get().release(); - assertEquals(2, dataBlockCf2.get(1, TimeUnit.SECONDS).records().size()); - dataBlockCf2.get().release(); - - // next round read, expected new read - List reserveResults3 = accumulator.reserveDataBlock(List.of(new ImmutablePair<>(reader, streamDataBlock))); - Assertions.assertEquals(1, reserveResults3.size()); - Assertions.assertEquals(100, reserveResults3.get(0).reserveSize()); - accumulator.readDataBlock(reader, dataBlockIndex); - verify(reader, times(2)).read(any()); - reserveResults3.get(0).cf().get().release(); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/InflightReadThrottleTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/InflightReadThrottleTest.java deleted file mode 100644 index 56d791c18..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/InflightReadThrottleTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.utils.Threads; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class InflightReadThrottleTest { - @Test - public void testThrottle() { - InflightReadThrottle throttle = new InflightReadThrottle(1024); - UUID uuid = UUID.randomUUID(); - CompletableFuture cf = throttle.acquire(uuid, 512); - Assertions.assertEquals(512, throttle.getRemainingInflightReadBytes()); - Assertions.assertTrue(cf.isDone()); - UUID uuid2 = UUID.randomUUID(); - CompletableFuture cf2 = throttle.acquire(uuid2, 600); - Assertions.assertEquals(512, throttle.getRemainingInflightReadBytes()); - Assertions.assertEquals(1, throttle.getInflightQueueSize()); - Assertions.assertFalse(cf2.isDone()); - throttle.release(uuid); - Threads.sleep(1000); - Assertions.assertEquals(424, throttle.getRemainingInflightReadBytes()); - Assertions.assertTrue(cf2.isDone()); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/LogCacheTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/LogCacheTest.java deleted file mode 100644 index 716c84d00..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/LogCacheTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.model.StreamRecordBatch; -import java.util.List; -import java.util.Map; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -public class LogCacheTest { - - @Test - public void testPutGet() { - LogCache logCache = new LogCache(1024 * 1024, 1024 * 1024); - - logCache.put(new StreamRecordBatch(233L, 0L, 10L, 1, TestUtils.random(20))); - logCache.put(new StreamRecordBatch(233L, 0L, 11L, 2, TestUtils.random(20))); - - logCache.archiveCurrentBlock(); - logCache.put(new StreamRecordBatch(233L, 0L, 13L, 2, TestUtils.random(20))); - - logCache.archiveCurrentBlock(); - logCache.put(new StreamRecordBatch(233L, 0L, 20L, 1, TestUtils.random(20))); - logCache.put(new StreamRecordBatch(233L, 0L, 21L, 1, TestUtils.random(20))); - - List records = logCache.get(233L, 10L, 21L, 1000); - assertEquals(1, records.size()); - assertEquals(20L, records.get(0).getBaseOffset()); - - records = logCache.get(233L, 10L, 15L, 1000); - assertEquals(3, records.size()); - assertEquals(10L, records.get(0).getBaseOffset()); - - records = logCache.get(233L, 0L, 9L, 1000); - assertEquals(0, records.size()); - - records = logCache.get(233L, 10L, 16L, 1000); - assertEquals(0, records.size()); - - records = logCache.get(233L, 12L, 16L, 1000); - assertEquals(0, records.size()); - } - - @Test - public void testOffsetIndex() { - LogCache cache = new LogCache(Integer.MAX_VALUE, Integer.MAX_VALUE); - - for (int i = 0; i < 100000; i++) { - cache.put(new StreamRecordBatch(233L, 0L, i, 1, TestUtils.random(1))); - } - - long start = System.nanoTime(); - for (int i = 0; i < 100000; i++) { - cache.get(233L, i, i + 1, 1000); - } - System.out.println("cost: " + (System.nanoTime() - start) / 1000 + "us"); - Map offsetIndexMap = cache.blocks.get(0).map.get(233L).offsetIndexMap; - assertEquals(1, offsetIndexMap.size()); - assertEquals(100000, offsetIndexMap.get(100000L).index); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/ObjectReaderLRUCacheTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/ObjectReaderLRUCacheTest.java deleted file mode 100644 index f2046a98f..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/ObjectReaderLRUCacheTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.ObjectWriter; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class ObjectReaderLRUCacheTest { - - private void writeStream(int streamCount, ObjectWriter objectWriter) { - for (int i = 0; i < streamCount; i++) { - StreamRecordBatch r = new StreamRecordBatch(i, 0, i, 1, TestUtils.random(1)); - objectWriter.write(i, List.of(r)); - } - } - - @Test - public void testGetPut() throws ExecutionException, InterruptedException { - S3Operator s3Operator = new MemoryS3Operator(); - ObjectWriter objectWriter = ObjectWriter.writer(233L, s3Operator, 1024, 1024); - writeStream(1000, objectWriter); - objectWriter.close().get(); - - ObjectWriter objectWriter2 = ObjectWriter.writer(234L, s3Operator, 1024, 1024); - writeStream(2000, objectWriter2); - objectWriter2.close().get(); - - ObjectWriter objectWriter3 = ObjectWriter.writer(235L, s3Operator, 1024, 1024); - writeStream(3000, objectWriter3); - objectWriter3.close().get(); - - ObjectReader objectReader = new ObjectReader(new S3ObjectMetadata(233L, objectWriter.size(), S3ObjectType.STREAM_SET), s3Operator); - ObjectReader objectReader2 = new ObjectReader(new S3ObjectMetadata(234L, objectWriter2.size(), S3ObjectType.STREAM_SET), s3Operator); - ObjectReader objectReader3 = new ObjectReader(new S3ObjectMetadata(235L, objectWriter3.size(), S3ObjectType.STREAM_SET), s3Operator); - Assertions.assertEquals(36000, objectReader.basicObjectInfo().get().size()); - Assertions.assertEquals(72000, objectReader2.basicObjectInfo().get().size()); - Assertions.assertEquals(108000, objectReader3.basicObjectInfo().get().size()); - - ObjectReaderLRUCache cache = new ObjectReaderLRUCache(100000); - cache.put(235L, objectReader3); - cache.put(234L, objectReader2); - cache.put(233L, objectReader); - Map.Entry entry = cache.pop(); - Assertions.assertNotNull(entry); - Assertions.assertEquals(objectReader2, entry.getValue()); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/ReadAheadManagerTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/ReadAheadManagerTest.java deleted file mode 100644 index 9986a90dd..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/ReadAheadManagerTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.utils.Threads; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - -public class ReadAheadManagerTest { - - @Test - public void testUpdateAgent() { - BlockCache blockCache = Mockito.mock(BlockCache.class); - ReadAheadManager manager = new ReadAheadManager(10, blockCache); - manager.updateReadProgress(233L, 0); - Assertions.assertNull(manager.getReadAheadAgent(233L, 0)); - manager.getOrCreateReadAheadAgent(233L, 0); - ReadAheadAgent agent = manager.getReadAheadAgent(233L, 0); - Assertions.assertNotNull(agent); - agent.updateReadAheadResult(1024, 1224); - manager.updateReadResult(233L, 0, 512, 612); - Assertions.assertEquals(1024, agent.getReadAheadOffset()); - Assertions.assertEquals(1224, agent.getLastReadAheadSize()); - Assertions.assertEquals(512, agent.getLastReadOffset()); - Assertions.assertEquals(612, agent.getLastReadSize()); - Assertions.assertEquals(0, agent.getBytePerSecond()); - - Threads.sleep(1000); - manager.updateReadProgress(233L, 512); - Assertions.assertEquals(306, agent.getBytePerSecond(), 10); - Assertions.assertEquals(122, agent.getNextReadAheadSize(), 5); - manager.updateReadResult(233L, 512, 1024, 612); - agent.updateReadAheadResult(2048, 1224); - - manager.onCacheEvict(233L, 0, 256, 257); - manager.onCacheEvict(233L, 768, 1345, 712); - manager.onCacheEvict(233L, 1678, 1789, 299); - - Assertions.assertEquals(70, agent.getNextReadAheadSize(), 1); - } - - @Test - public void testReadAheadAgents() { - BlockCache blockCache = Mockito.mock(BlockCache.class); - ReadAheadManager manager = new ReadAheadManager(10, blockCache); - manager.updateReadProgress(233L, 0); - manager.getOrCreateReadAheadAgent(233L, 0); - manager.updateReadResult(233L, 0, 10, 10); - - manager.updateReadProgress(233L, 10); - manager.getOrCreateReadAheadAgent(233L, 10); - manager.updateReadResult(233L, 10, 20, 10); - - manager.updateReadProgress(233L, 20); - manager.getOrCreateReadAheadAgent(233L, 20); - manager.updateReadResult(233L, 20, 50, 30); - - Assertions.assertEquals(1, manager.getReadAheadAgents().size()); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamCacheTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/StreamCacheTest.java deleted file mode 100644 index 515d41857..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamCacheTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.model.StreamRecordBatch; -import java.util.List; -import java.util.NavigableMap; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class StreamCacheTest { - - @Test - public void testTailMap() { - StreamCache streamCache = new StreamCache(); - BlockCache.CacheBlock block1 = new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 0, 10, TestUtils.random(8)), - new StreamRecordBatch(0, 0, 10, 20, TestUtils.random(8))), null); - streamCache.put(block1); - - BlockCache.CacheBlock block2 = new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 50, 20, TestUtils.random(8)), - new StreamRecordBatch(0, 0, 70, 30, TestUtils.random(8))), null); - streamCache.put(block2); - - BlockCache.CacheBlock block3 = new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 30, 20, TestUtils.random(8))), null); - streamCache.put(block3); - - NavigableMap tailBlocks = streamCache.tailBlocks(5); - Assertions.assertEquals(streamCache.blocks(), tailBlocks); - - tailBlocks = streamCache.tailBlocks(80); - Assertions.assertEquals(1, tailBlocks.size()); - Assertions.assertEquals(50, tailBlocks.firstKey()); - Assertions.assertEquals(block2, tailBlocks.firstEntry().getValue()); - } - - @Test - public void testRemove() { - StreamCache streamCache = new StreamCache(); - streamCache.put(new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 0, 10, TestUtils.random(8)), - new StreamRecordBatch(0, 0, 10, 20, TestUtils.random(8))), null)); - - streamCache.put(new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 50, 20, TestUtils.random(8)), - new StreamRecordBatch(0, 0, 70, 30, TestUtils.random(8))), null)); - - streamCache.put(new BlockCache.CacheBlock(List.of( - new StreamRecordBatch(0, 0, 30, 20, TestUtils.random(8))), null)); - - streamCache.remove(30); - - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java b/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java deleted file mode 100644 index 1952a3b9a..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/cache/StreamReaderTest.java +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.cache; - -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.ObjectReader; -import com.automq.stream.s3.ObjectWriter; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.cache.DefaultS3BlockCache.ReadAheadTaskKey; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.metrics.TimerUtil; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import com.automq.stream.s3.trace.context.TraceContext; -import com.automq.stream.utils.CloseableIterator; -import com.automq.stream.utils.Threads; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; - -public class StreamReaderTest { - - @Test - public void testGetDataBlockIndices() { - S3Operator s3Operator = new MemoryS3Operator(); - ObjectManager objectManager = Mockito.mock(ObjectManager.class); - ObjectWriter objectWriter = ObjectWriter.writer(0, s3Operator, 1024, 1024); - objectWriter.write(233, List.of( - newRecord(233, 10, 5, 512), - newRecord(233, 15, 10, 512) - )); - objectWriter.close(); - ObjectWriter objectWriter2 = ObjectWriter.writer(1, s3Operator, 1024, 1024); - objectWriter2.write(233, List.of( - newRecord(233, 25, 5, 512), - newRecord(233, 30, 10, 512) - )); - objectWriter2.close(); - - S3ObjectMetadata metadata1 = new S3ObjectMetadata(0, objectWriter.size(), S3ObjectType.STREAM); - S3ObjectMetadata metadata2 = new S3ObjectMetadata(1, objectWriter2.size(), S3ObjectType.STREAM); - - doAnswer(invocation -> CompletableFuture.completedFuture(List.of(metadata1, metadata2))) - .when(objectManager).getObjects(eq(233L), eq(15L), eq(1024L), eq(2)); - - StreamReader streamReader = new StreamReader(s3Operator, objectManager, Mockito.mock(BlockCache.class), new HashMap<>(), new InflightReadThrottle()); - StreamReader.ReadContext context = new StreamReader.ReadContext(15L, 1024); - streamReader.getDataBlockIndices(TraceContext.DEFAULT, 233L, 1024L, context).thenAccept(v -> { - Assertions.assertEquals(40L, context.nextStartOffset); - Assertions.assertEquals(0, context.nextMaxBytes); - Assertions.assertEquals(2, context.streamDataBlocksPair.size()); - }).join(); - - } - - private StreamRecordBatch newRecord(long streamId, long offset, int count, int payloadSize) { - return new StreamRecordBatch(streamId, 0, offset, count, TestUtils.random(payloadSize)); - } - - @Test - public void testSyncReadAheadInflight() { - 
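- // reader.read(index1) is stubbed below with a future that never completes, so the read-ahead
- // stays inflight; we expect task entries parked in WAIT_FETCH_DATA both for the requested
- // offset 70 and for offset 64, where the data block containing it starts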
DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator(); - ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class); - S3Operator s3Operator = Mockito.mock(S3Operator.class); - ObjectManager objectManager = Mockito.mock(ObjectManager.class); - BlockCache blockCache = Mockito.mock(BlockCache.class); - Map inflightReadAheadTasks = new HashMap<>(); - StreamReader streamReader = Mockito.spy(new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, inflightReadAheadTasks, new InflightReadThrottle())); - - long streamId = 233L; - long startOffset = 70; - long endOffset = 1024; - int maxBytes = 64; - long objectId = 1; - S3ObjectMetadata metadata = new S3ObjectMetadata(objectId, -1, S3ObjectType.STREAM); - doAnswer(invocation -> CompletableFuture.completedFuture(List.of(metadata))) - .when(objectManager).getObjects(eq(streamId), eq(startOffset), anyLong(), anyInt()); - - ObjectReader reader = Mockito.mock(ObjectReader.class); - DataBlockIndex index1 = new DataBlockIndex(0, 64, 128, 128, 0, 256); - doReturn(reader).when(streamReader).getObjectReader(metadata); - doAnswer(invocation -> CompletableFuture.completedFuture(new ObjectReader.FindIndexResult(true, -1, -1, - List.of(new StreamDataBlock(objectId, index1))))).when(reader).find(eq(streamId), eq(startOffset), anyLong(), eq(maxBytes)); - doReturn(new CompletableFuture<>()).when(reader).read(index1); - - streamReader.syncReadAhead(TraceContext.DEFAULT, streamId, startOffset, endOffset, maxBytes, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID()); - Threads.sleep(1000); - Assertions.assertEquals(2, inflightReadAheadTasks.size()); - ReadAheadTaskKey key1 = new ReadAheadTaskKey(233L, startOffset); - ReadAheadTaskKey key2 = new ReadAheadTaskKey(233L, 64); - Assertions.assertTrue(inflightReadAheadTasks.containsKey(key1)); - Assertions.assertTrue(inflightReadAheadTasks.containsKey(key2)); - Assertions.assertEquals(DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA, inflightReadAheadTasks.get(key1).status); - Assertions.assertEquals(DefaultS3BlockCache.ReadBlockCacheStatus.WAIT_FETCH_DATA, inflightReadAheadTasks.get(key2).status); - } - - @Test - public void testSyncReadAhead() { - DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator(); - ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class); - S3Operator s3Operator = Mockito.mock(S3Operator.class); - ObjectManager objectManager = Mockito.mock(ObjectManager.class); - BlockCache blockCache = Mockito.mock(BlockCache.class); - StreamReader streamReader = new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, new HashMap<>(), new InflightReadThrottle()); - - StreamReader.ReadContext context = new StreamReader.ReadContext(0, 256); - DataBlockIndex index1 = new DataBlockIndex(0, 0, 128, 128, 0, 256); - context.streamDataBlocksPair = List.of( - new ImmutablePair<>(1L, List.of( - new StreamDataBlock(1, index1)))); - - ObjectReader reader = Mockito.mock(ObjectReader.class); - ObjectReader.DataBlockGroup dataBlockGroup1 = Mockito.mock(ObjectReader.DataBlockGroup.class); - StreamRecordBatch record1 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128)); - record1.release(); - StreamRecordBatch record2 = new StreamRecordBatch(233L, 0, 64, 64, TestUtils.random(128)); - record2.release(); - List records = List.of(record1, record2); - AtomicInteger remaining = new AtomicInteger(0); - Assertions.assertEquals(1, record1.getPayload().refCnt()); - Assertions.assertEquals(1, record2.getPayload().refCnt()); - 
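- // both records start at refCnt 1; the read-ahead should retain only the record it actually
- // returns (record1, enough to satisfy the 64-byte limit), leaving record2's refCnt untouched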
Mockito.when(dataBlockGroup1.iterator()).thenReturn(new CloseableIterator<>() { - @Override - public void close() { - } - - @Override - public boolean hasNext() { - return remaining.get() < records.size(); - } - - @Override - public StreamRecordBatch next() { - if (!hasNext()) { - throw new IllegalStateException("no more elements"); - } - return records.get(remaining.getAndIncrement()); - } - }); - Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlockGroup1)); - context.objectReaderMap = new HashMap<>(Map.of(1L, reader)); - CompletableFuture> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, 0, - 999, 64, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context); - - cf.whenComplete((rst, ex) -> { - Assertions.assertNull(ex); - Assertions.assertEquals(1, rst.size()); - Assertions.assertEquals(record1, rst.get(0)); - Assertions.assertEquals(2, record1.getPayload().refCnt()); - Assertions.assertEquals(1, record2.getPayload().refCnt()); - }).join(); - } - - @Test - public void testSyncReadAheadNotAlign() { - DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator(); - ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class); - S3Operator s3Operator = Mockito.mock(S3Operator.class); - ObjectManager objectManager = Mockito.mock(ObjectManager.class); - BlockCache blockCache = Mockito.mock(BlockCache.class); - Map inflightReadAheadTasks = new HashMap<>(); - StreamReader streamReader = new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, inflightReadAheadTasks, new InflightReadThrottle()); - - long startOffset = 32; - StreamReader.ReadContext context = new StreamReader.ReadContext(startOffset, 256); - DataBlockIndex index1 = new DataBlockIndex(0, 0, 128, 128, 0, 256); - context.streamDataBlocksPair = List.of( - new ImmutablePair<>(1L, List.of( - new StreamDataBlock(1, index1)))); - - ObjectReader reader = Mockito.mock(ObjectReader.class); - ObjectReader.DataBlockGroup dataBlockGroup1 = Mockito.mock(ObjectReader.DataBlockGroup.class); - StreamRecordBatch record1 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128)); - record1.release(); - StreamRecordBatch record2 = new StreamRecordBatch(233L, 0, 64, 64, TestUtils.random(128)); - record2.release(); - List records = List.of(record1, record2); - AtomicInteger remaining = new AtomicInteger(0); - Assertions.assertEquals(1, record1.getPayload().refCnt()); - Assertions.assertEquals(1, record2.getPayload().refCnt()); - Mockito.when(dataBlockGroup1.iterator()).thenReturn(new CloseableIterator<>() { - @Override - public void close() { - } - - @Override - public boolean hasNext() { - return remaining.get() < records.size(); - } - - @Override - public StreamRecordBatch next() { - if (!hasNext()) { - throw new IllegalStateException("no more elements"); - } - return records.get(remaining.getAndIncrement()); - } - }); - Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlockGroup1)); - context.objectReaderMap = new HashMap<>(Map.of(1L, reader)); - ReadAheadTaskKey key = new ReadAheadTaskKey(233L, startOffset); - context.taskKeySet.add(key); - inflightReadAheadTasks.put(key, new DefaultS3BlockCache.ReadAheadTaskContext(new CompletableFuture<>(), DefaultS3BlockCache.ReadBlockCacheStatus.INIT)); - CompletableFuture> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, startOffset, - 999, 64, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context); - - 
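- // the request starts at offset 32, inside record1's range [0, 64): the completed read should
- // still return record1 and clear the inflight task registered for offset 32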
-        cf.whenComplete((rst, ex) -> {
-            Assertions.assertNull(ex);
-            Assertions.assertTrue(inflightReadAheadTasks.isEmpty());
-            Assertions.assertEquals(1, rst.size());
-            Assertions.assertEquals(record1, rst.get(0));
-            Assertions.assertEquals(2, record1.getPayload().refCnt());
-            Assertions.assertEquals(1, record2.getPayload().refCnt());
-        }).join();
-    }
-
-    @Test
-    public void testSyncReadAheadException() {
-        DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator();
-        ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class);
-        S3Operator s3Operator = Mockito.mock(S3Operator.class);
-        ObjectManager objectManager = Mockito.mock(ObjectManager.class);
-        BlockCache blockCache = Mockito.mock(BlockCache.class);
-        StreamReader streamReader = new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, new HashMap<>(), new InflightReadThrottle());
-
-        StreamReader.ReadContext context = new StreamReader.ReadContext(0, 512);
-        DataBlockIndex index1 = new DataBlockIndex(0, 0, 128, 128, 0, 256);
-        DataBlockIndex index2 = new DataBlockIndex(1, 128, 236, 128, 256, 256);
-        context.streamDataBlocksPair = List.of(
-            new ImmutablePair<>(1L, List.of(
-                new StreamDataBlock(1, index1),
-                new StreamDataBlock(1, index2))));
-
-        ObjectReader reader = Mockito.mock(ObjectReader.class);
-        ObjectReader.DataBlockGroup dataBlockGroup1 = Mockito.mock(ObjectReader.DataBlockGroup.class);
-        StreamRecordBatch record1 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128));
-        record1.release();
-        StreamRecordBatch record2 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128));
-        record2.release();
-        List<StreamRecordBatch> records = List.of(record1, record2);
-        AtomicInteger remaining = new AtomicInteger(records.size());
-        Assertions.assertEquals(1, record1.getPayload().refCnt());
-        Assertions.assertEquals(1, record2.getPayload().refCnt());
-        Mockito.when(dataBlockGroup1.iterator()).thenReturn(new CloseableIterator<>() {
-            @Override
-            public void close() {
-            }
-
-            @Override
-            public boolean hasNext() {
-                return remaining.get() > 0;
-            }
-
-            @Override
-            public StreamRecordBatch next() {
-                if (remaining.decrementAndGet() < 0) {
-                    throw new IllegalStateException("no more elements");
-                }
-                return records.get(remaining.get());
-            }
-        });
-        Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlockGroup1));
-        Mockito.when(reader.read(index2)).thenReturn(CompletableFuture.failedFuture(new RuntimeException("exception")));
-        context.objectReaderMap = new HashMap<>(Map.of(1L, reader));
-        CompletableFuture<List<StreamRecordBatch>> cf = streamReader.handleSyncReadAhead(TraceContext.DEFAULT, 233L, 0,
-            512, 1024, Mockito.mock(ReadAheadAgent.class), UUID.randomUUID(), new TimerUtil(), context);
-
-        Threads.sleep(1000);
-
-        try {
-            cf.whenComplete((rst, ex) -> {
-                Assertions.assertThrowsExactly(CompletionException.class, () -> {
-                    throw ex;
-                });
-                Assertions.assertNull(rst);
-                Assertions.assertEquals(1, record1.getPayload().refCnt());
-                Assertions.assertEquals(1, record2.getPayload().refCnt());
-            }).join();
-        } catch (CompletionException e) {
-            Assertions.assertEquals("exception", e.getCause().getMessage());
-        }
-    }
-
-    @Test
-    public void testAsyncReadAhead() {
-        DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator();
-        ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class);
-        S3Operator s3Operator = Mockito.mock(S3Operator.class);
-        ObjectManager objectManager = Mockito.mock(ObjectManager.class);
-        BlockCache blockCache = Mockito.mock(BlockCache.class);
-        StreamReader streamReader = new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, new HashMap<>(), new InflightReadThrottle());
-
-        StreamReader.ReadContext context = new StreamReader.ReadContext(0, 256);
-        DataBlockIndex index1 = new DataBlockIndex(0, 0, 128, 128, 0, 256);
-        context.streamDataBlocksPair = List.of(
-            new ImmutablePair<>(1L, List.of(
-                new StreamDataBlock(1, index1))));
-
-        ObjectReader reader = Mockito.mock(ObjectReader.class);
-        ObjectReader.DataBlockGroup dataBlockGroup1 = Mockito.mock(ObjectReader.DataBlockGroup.class);
-        StreamRecordBatch record1 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128));
-        record1.release();
-        StreamRecordBatch record2 = new StreamRecordBatch(233L, 0, 64, 64, TestUtils.random(128));
-        record2.release();
-        List<StreamRecordBatch> records = List.of(record1, record2);
-        AtomicInteger remaining = new AtomicInteger(0);
-        Assertions.assertEquals(1, record1.getPayload().refCnt());
-        Assertions.assertEquals(1, record2.getPayload().refCnt());
-        Mockito.when(dataBlockGroup1.iterator()).thenReturn(new CloseableIterator<>() {
-            @Override
-            public void close() {
-            }
-
-            @Override
-            public boolean hasNext() {
-                return remaining.get() < records.size();
-            }
-
-            @Override
-            public StreamRecordBatch next() {
-                if (!hasNext()) {
-                    throw new IllegalStateException("no more elements");
-                }
-                return records.get(remaining.getAndIncrement());
-            }
-        });
-        Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlockGroup1));
-        context.objectReaderMap = new HashMap<>(Map.of(1L, reader));
-
-        CompletableFuture<Void> cf = streamReader.handleAsyncReadAhead(233L, 0, 999, 1024, Mockito.mock(ReadAheadAgent.class), new TimerUtil(), context);
-
-        cf.whenComplete((rst, ex) -> {
-            Assertions.assertNull(ex);
-            Assertions.assertEquals(1, record1.getPayload().refCnt());
-            Assertions.assertEquals(1, record2.getPayload().refCnt());
-        }).join();
-    }
-
-    @Test
-    public void testAsyncReadAheadException() {
-        DataBlockReadAccumulator accumulator = new DataBlockReadAccumulator();
-        ObjectReaderLRUCache cache = Mockito.mock(ObjectReaderLRUCache.class);
-        S3Operator s3Operator = Mockito.mock(S3Operator.class);
-        ObjectManager objectManager = Mockito.mock(ObjectManager.class);
-        BlockCache blockCache = Mockito.mock(BlockCache.class);
-        StreamReader streamReader = new StreamReader(s3Operator, objectManager, blockCache, cache, accumulator, new HashMap<>(), new InflightReadThrottle());
-
-        StreamReader.ReadContext context = new StreamReader.ReadContext(0, 256);
-        DataBlockIndex index1 = new DataBlockIndex(0, 0, 128, 128, 0, 256);
-        DataBlockIndex index2 = new DataBlockIndex(1, 128, 256, 128, 256, 256);
-        context.streamDataBlocksPair = List.of(
-            new ImmutablePair<>(1L, List.of(
-                new StreamDataBlock(1, index1),
-                new StreamDataBlock(1, index2))));
-
-        ObjectReader reader = Mockito.mock(ObjectReader.class);
-        ObjectReader.DataBlockGroup dataBlockGroup1 = Mockito.mock(ObjectReader.DataBlockGroup.class);
-        StreamRecordBatch record1 = new StreamRecordBatch(233L, 0, 0, 64, TestUtils.random(128));
-        record1.release();
-        StreamRecordBatch record2 = new StreamRecordBatch(233L, 0, 64, 64, TestUtils.random(128));
-        record2.release();
-        List<StreamRecordBatch> records = List.of(record1, record2);
-        AtomicInteger remaining = new AtomicInteger(0);
-        Assertions.assertEquals(1, record1.getPayload().refCnt());
-        Assertions.assertEquals(1, record2.getPayload().refCnt());
-        Mockito.when(dataBlockGroup1.iterator()).thenReturn(new CloseableIterator<>() {
-            @Override
-            public void close() {
-            }
-
-            @Override
-            public boolean hasNext() {
-                return remaining.get() < records.size();
-            }
-
-            @Override
-            public StreamRecordBatch next() {
-                if (!hasNext()) {
-                    throw new IllegalStateException("no more elements");
-                }
-                return records.get(remaining.getAndIncrement());
-            }
-        });
-        Mockito.when(reader.read(index1)).thenReturn(CompletableFuture.completedFuture(dataBlockGroup1));
-        Mockito.when(reader.read(index2)).thenReturn(CompletableFuture.failedFuture(new RuntimeException("exception")));
-        context.objectReaderMap = new HashMap<>(Map.of(1L, reader));
-
-        CompletableFuture<Void> cf = streamReader.handleAsyncReadAhead(233L, 0, 999, 1024, Mockito.mock(ReadAheadAgent.class), new TimerUtil(), context);
-
-        try {
-            cf.whenComplete((rst, ex) -> {
-                Assertions.assertThrowsExactly(CompletionException.class, () -> {
-                    throw ex;
-                });
-                Assertions.assertEquals(1, record1.getPayload().refCnt());
-                Assertions.assertEquals(1, record2.getPayload().refCnt());
-            }).join();
-        } catch (CompletionException e) {
-            Assertions.assertEquals("exception", e.getCause().getMessage());
-        }
-
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionAnalyzerTest.java b/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionAnalyzerTest.java
deleted file mode 100644
index db969b844..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionAnalyzerTest.java
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.compact;
-
-import com.automq.stream.s3.StreamDataBlock;
-import com.automq.stream.s3.compact.objects.CompactedObject;
-import com.automq.stream.s3.compact.objects.CompactedObjectBuilder;
-import com.automq.stream.s3.compact.objects.CompactionType;
-import com.automq.stream.s3.compact.utils.CompactionUtils;
-import com.automq.stream.s3.metadata.S3ObjectMetadata;
-import com.automq.stream.s3.metadata.S3ObjectType;
-import com.automq.stream.s3.metadata.StreamMetadata;
-import com.automq.stream.s3.metadata.StreamOffsetRange;
-import com.automq.stream.s3.metadata.StreamState;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.Mockito.when;
-
-@Timeout(30)
-@Tag("S3Unit")
-public class CompactionAnalyzerTest extends CompactionTestBase {
-
-    private static Map<Long, List<StreamDataBlock>> generateStreamDataBlocks() {
-        return Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 20, OBJECT_0, -1, 20, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, 30, 1),
-                new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, 30, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 20, 25, OBJECT_1, -1, 5, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, 60, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, 100, 1),
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, 40, 1))
-        );
-    }
-
-    @BeforeEach
-    public void setUp() throws Exception {
-        super.setUp();
-    }
-
-    @AfterEach
-    public void tearDown() {
-        super.tearDown();
-    }
-
-    @Test
-    public void testReadObjectIndices() {
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator, null);
-        Map<Long, List<StreamDataBlock>> expectedBlocksMap = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        assertTrue(compare(streamDataBlocksMap, expectedBlocksMap));
-    }
-
-    @Test
-    public void testReadObjectIndicesWithTrimmedData() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 15, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED))));
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        Map<Long, List<StreamDataBlock>> expectedBlocksMap = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        assertTrue(compare(streamDataBlocksMap, expectedBlocksMap));
-    }
-
-    @Test
-    public void testFilterBlocksToCompact() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, STREAM_SPLIT_SIZE, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        Map<Long, List<StreamDataBlock>> filteredMap = compactionAnalyzer.filterBlocksToCompact(streamDataBlocksMap);
-        assertTrue(compare(filteredMap, streamDataBlocksMap));
-    }
-
-    @Test
-    public void testFilterBlocksToCompact2() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, STREAM_SPLIT_SIZE, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 20, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 20, 25, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)),
-            OBJECT_3, List.of(
-                new StreamDataBlock(STREAM_3, 0, 50, OBJECT_3, -1, -1, 1)));
-        Map<Long, List<StreamDataBlock>> result = compactionAnalyzer.filterBlocksToCompact(streamDataBlocksMap);
-        Map<Long, List<StreamDataBlock>> expectedBlocksMap = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 20, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 20, 25, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)));
-        assertTrue(compare(result, expectedBlocksMap));
-    }
-
-    @Test
-    public void testSortStreamRangePositions() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, STREAM_SPLIT_SIZE, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        List<StreamDataBlock> sortedStreamDataBlocks = CompactionUtils.sortStreamRangePositions(streamDataBlocksMap);
-        List<StreamDataBlock> expectedBlocks = List.of(
-            new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1),
-            new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1),
-            new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1),
-            new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1),
-            new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1),
-            new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1),
-            new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1),
-            new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1));
-        for (int i = 0; i < sortedStreamDataBlocks.size(); i++) {
-            assertTrue(compare(sortedStreamDataBlocks.get(i), expectedBlocks.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 100, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(streamDataBlocksMap, objectsToRemove);
-        Assertions.assertTrue(objectsToRemove.isEmpty());
-        List<CompactedObjectBuilder> expectedCompactedObject = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        for (int i = 0; i < compactedObjectBuilders.size(); i++) {
-            assertTrue(compare(compactedObjectBuilders.get(i), expectedCompactedObject.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit2() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 30, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(streamDataBlocksMap, objectsToRemove);
-        Assertions.assertTrue(objectsToRemove.isEmpty());
-        List<CompactedObjectBuilder> expectedCompactedObject = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        for (int i = 0; i < compactedObjectBuilders.size(); i++) {
-            assertTrue(compare(compactedObjectBuilders.get(i), expectedCompactedObject.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit3() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 70, MAX_STREAM_NUM_IN_WAL, 2);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(streamDataBlocksMap, objectsToRemove);
-        Assertions.assertEquals(Set.of(OBJECT_2), objectsToRemove);
-        List<CompactedObjectBuilder> expectedCompactedObject = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)));
-        for (int i = 0; i < compactedObjectBuilders.size(); i++) {
-            assertTrue(compare(compactedObjectBuilders.get(i), expectedCompactedObject.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit4() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 50, MAX_STREAM_NUM_IN_WAL, 1);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(generateStreamDataBlocks(), objectsToRemove);
-        Assertions.assertEquals(Set.of(OBJECT_2), objectsToRemove);
-        List<CompactedObjectBuilder> expectedCompactedObject = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 20, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 20, 25, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)));
-        for (int i = 0; i < compactedObjectBuilders.size(); i++) {
-            assertTrue(compare(compactedObjectBuilders.get(i), expectedCompactedObject.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit5() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 50, 1, MAX_STREAM_OBJECT_NUM);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(generateStreamDataBlocks(), objectsToRemove);
-        Assertions.assertEquals(Set.of(OBJECT_0, OBJECT_2), objectsToRemove);
-        Assertions.assertTrue(compactedObjectBuilders.isEmpty());
-    }
-
-    @Test
-    public void testGroupObjectWithLimit6() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 29, MAX_STREAM_NUM_IN_WAL, 2);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(generateStreamDataBlocks(), objectsToRemove);
-        Assertions.assertEquals(Set.of(OBJECT_2), objectsToRemove);
-        List<CompactedObjectBuilder> expectedCompactedObject = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 20, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 20, 25, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)));
-        for (int i = 0; i < compactedObjectBuilders.size(); i++) {
-            assertTrue(compare(compactedObjectBuilders.get(i), expectedCompactedObject.get(i)));
-        }
-    }
-
-    @Test
-    public void testGroupObjectWithLimit7() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 29, MAX_STREAM_NUM_IN_WAL, 1);
-        Set<Long> objectsToRemove = new HashSet<>();
-        List<CompactedObjectBuilder> compactedObjectBuilders = compactionAnalyzer.groupObjectWithLimits(generateStreamDataBlocks(), objectsToRemove);
-        Assertions.assertEquals(Set.of(OBJECT_0, OBJECT_2), objectsToRemove);
-        Assertions.assertTrue(compactedObjectBuilders.isEmpty());
-    }
-
-    @Test
-    public void testCompactionPlans1() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(CACHE_SIZE, 100, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        List<CompactionPlan> compactionPlans = compactionAnalyzer.analyze(streamDataBlocksMap, new HashSet<>());
-        Assertions.assertEquals(1, compactionPlans.size());
-        List<CompactedObject> expectCompactedObjects = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1))
-                .build());
-        Map<Long, List<StreamDataBlock>> expectObjectStreamDataBlocks = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        CompactionPlan compactionPlan = compactionPlans.get(0);
-        for (int i = 0; i < compactionPlan.compactedObjects().size(); i++) {
-            assertTrue(compare(compactionPlan.compactedObjects().get(i), expectCompactedObjects.get(i)));
-        }
-        for (Long objectId : compactionPlan.streamDataBlocksMap().keySet()) {
-            assertTrue(compare(compactionPlan.streamDataBlocksMap().get(objectId), expectObjectStreamDataBlocks.get(objectId)));
-        }
-    }
-
-    private void checkCompactionPlan2(List<CompactionPlan> compactionPlans) {
-        Assertions.assertEquals(2, compactionPlans.size());
-
-        // first iteration
-        List<CompactedObject> expectCompactedObjects = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1))
-                .addStreamDataBlock(new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1))
-                .build());
-        Map<Long, List<StreamDataBlock>> expectObjectStreamDataBlocks = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_0, 0, 15, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 25, 30, OBJECT_0, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_1, List.of(
-                new StreamDataBlock(STREAM_0, 15, 20, OBJECT_1, -1, -1, 1),
-                new StreamDataBlock(STREAM_1, 60, 120, OBJECT_1, -1, -1, 1)));
-        CompactionPlan compactionPlan = compactionPlans.get(0);
-        for (int i = 0; i < compactionPlan.compactedObjects().size(); i++) {
-            assertTrue(compare(compactionPlan.compactedObjects().get(i), expectCompactedObjects.get(i)));
-        }
-        for (Long objectId : compactionPlan.streamDataBlocksMap().keySet()) {
-            assertTrue(compare(compactionPlan.streamDataBlocksMap().get(objectId), expectObjectStreamDataBlocks.get(objectId)));
-        }
-
-        // second iteration
-        expectCompactedObjects = List.of(
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.SPLIT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1))
-                .build(),
-            new CompactedObjectBuilder()
-                .setType(CompactionType.COMPACT)
-                .addStreamDataBlock(new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1))
-                .build());
-        expectObjectStreamDataBlocks = Map.of(
-            OBJECT_0, List.of(
-                new StreamDataBlock(STREAM_2, 30, 60, OBJECT_0, -1, -1, 1)),
-            OBJECT_2, List.of(
-                new StreamDataBlock(STREAM_1, 400, 500, OBJECT_2, -1, -1, 1),
-                new StreamDataBlock(STREAM_2, 230, 270, OBJECT_2, -1, -1, 1)));
-        compactionPlan = compactionPlans.get(1);
-        for (int i = 0; i < compactionPlan.compactedObjects().size(); i++) {
-            assertTrue(compare(compactionPlan.compactedObjects().get(i), expectCompactedObjects.get(i)));
-        }
-        for (Long objectId : compactionPlan.streamDataBlocksMap().keySet()) {
-            assertTrue(compare(compactionPlan.streamDataBlocksMap().get(objectId), expectObjectStreamDataBlocks.get(objectId)));
-        }
-    }
-
-    @Test
-    public void testCompactionPlans2() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(300, 100, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, s3Operator);
-        List<CompactionPlan> compactionPlans = compactionAnalyzer.analyze(streamDataBlocksMap, new HashSet<>());
-        checkCompactionPlan2(compactionPlans);
-    }
-
-    @Test
-    public void testCompactionPlansWithInvalidObject() {
-        CompactionAnalyzer compactionAnalyzer = new CompactionAnalyzer(300, 100, MAX_STREAM_NUM_IN_WAL, MAX_STREAM_OBJECT_NUM);
-        List<S3ObjectMetadata> s3ObjectMetadata = new ArrayList<>(S3_WAL_OBJECT_METADATA_LIST);
-        s3ObjectMetadata.add(
-            new S3ObjectMetadata(100, S3ObjectType.STREAM_SET,
-                List.of(new StreamOffsetRange(STREAM_2, 1000, 1200)), System.currentTimeMillis(),
-                System.currentTimeMillis(), 512, 100));
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        Map<Long, List<StreamDataBlock>> streamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, s3ObjectMetadata, s3Operator);
-        List<CompactionPlan> compactionPlans = compactionAnalyzer.analyze(streamDataBlocksMap, new HashSet<>());
-        checkCompactionPlan2(compactionPlans);
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionManagerTest.java b/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionManagerTest.java
deleted file mode 100644
index 5879567c6..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionManagerTest.java
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.compact;
-
-import com.automq.stream.s3.Config;
-import com.automq.stream.s3.DataBlockIndex;
-import com.automq.stream.s3.ObjectWriter;
-import com.automq.stream.s3.StreamDataBlock;
-import com.automq.stream.s3.TestUtils;
-import com.automq.stream.s3.compact.operator.DataBlockReader;
-import com.automq.stream.s3.compact.utils.CompactionUtils;
-import com.automq.stream.s3.memory.MemoryMetadataManager;
-import com.automq.stream.s3.metadata.S3ObjectMetadata;
-import com.automq.stream.s3.metadata.S3ObjectType;
-import com.automq.stream.s3.metadata.S3StreamConstant;
-import com.automq.stream.s3.metadata.StreamMetadata;
-import com.automq.stream.s3.metadata.StreamOffsetRange;
-import com.automq.stream.s3.metadata.StreamState;
-import com.automq.stream.s3.model.StreamRecordBatch;
-import com.automq.stream.s3.network.ThrottleStrategy;
-import com.automq.stream.s3.objects.CommitStreamSetObjectRequest;
-import com.automq.stream.s3.objects.ObjectStreamRange;
-import com.automq.stream.s3.objects.StreamObject;
-import com.automq.stream.s3.operator.DefaultS3Operator;
-import com.automq.stream.s3.operator.MemoryS3Operator;
-import io.netty.buffer.ByteBuf;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.commons.lang3.tuple.Pair;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import software.amazon.awssdk.core.async.AsyncRequestBody;
-import software.amazon.awssdk.services.s3.S3AsyncClient;
-import software.amazon.awssdk.services.s3.model.PutObjectRequest;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.when;
-
-@Timeout(60)
-@Tag("S3Unit")
-public class CompactionManagerTest extends CompactionTestBase {
-    private static final int BROKER0 = 0;
-    private CompactionAnalyzer compactionAnalyzer;
-    private CompactionManager compactionManager;
-    private Config config;
-
-    private static Map<Long, List<StreamDataBlock>> getStreamDataBlockMap() {
-        StreamDataBlock block1 = new StreamDataBlock(OBJECT_0, new DataBlockIndex(0, 0, 15, 15, 0, 15));
-        StreamDataBlock block2 = new StreamDataBlock(OBJECT_0, new DataBlockIndex(1, 0, 20, 20, 15, 50));
-
-        StreamDataBlock block3 = new StreamDataBlock(OBJECT_1, new DataBlockIndex(0, 15, 12, 12, 0, 20));
-        StreamDataBlock block4 = new StreamDataBlock(OBJECT_1, new DataBlockIndex(1, 20, 25, 25, 20, 60));
-
-        StreamDataBlock block5 = new StreamDataBlock(OBJECT_2, new DataBlockIndex(0, 27, 13, 20, 0, 20));
-        StreamDataBlock block6 = new StreamDataBlock(OBJECT_2, new DataBlockIndex(3, 0, 30, 30, 20, 30));
-        return Map.of(
-            OBJECT_0, List.of(
-                block1,
-                block2
-            ),
-            OBJECT_1, List.of(
-                block3,
-                block4
-            ),
-            OBJECT_2, List.of(
-                block5,
-                block6
-            )
-        );
-    }
-
-    @BeforeEach
-    public void setUp() throws Exception {
-        super.setUp();
-        config = Mockito.mock(Config.class);
-        when(config.nodeId()).thenReturn(BROKER0);
-        when(config.streamSetObjectCompactionUploadConcurrency()).thenReturn(3);
-        when(config.objectPartSize()).thenReturn(100);
-        when(config.streamSetObjectCompactionCacheSize()).thenReturn(300L);
-        when(config.streamSetObjectCompactionStreamSplitSize()).thenReturn(100L);
-        when(config.streamSetObjectCompactionForceSplitPeriod()).thenReturn(120);
-        when(config.streamSetObjectCompactionMaxObjectNum()).thenReturn(100);
-        when(config.maxStreamNumPerStreamSetObject()).thenReturn(100);
-        when(config.maxStreamObjectNumPerCommit()).thenReturn(100);
-//        when(config.networkInboundBaselineBandwidth()).thenReturn(1000L);
-        compactionAnalyzer = new CompactionAnalyzer(config.streamSetObjectCompactionCacheSize(), config.streamSetObjectCompactionStreamSplitSize(),
-            config.maxStreamNumPerStreamSetObject(), config.maxStreamObjectNumPerCommit());
-    }
-
-    @AfterEach
-    public void tearDown() {
-        super.tearDown();
-        if (compactionManager != null) {
-            compactionManager.shutdown();
-        }
-    }
-
-    @Test
-    public void testForceSplit() {
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        when(config.streamSetObjectCompactionForceSplitPeriod()).thenReturn(0);
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-
-        CommitStreamSetObjectRequest request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(0));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_0), request.getCompactedObjectIds());
-        Assertions.assertEquals(3, request.getStreamObjects().size());
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, Collections.singletonList(s3ObjectMetadata.get(0)), request));
-
-        request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(1));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_1), request.getCompactedObjectIds());
-        Assertions.assertEquals(2, request.getStreamObjects().size());
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, Collections.singletonList(s3ObjectMetadata.get(1)), request));
-
-        request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(2));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_2), request.getCompactedObjectIds());
-        Assertions.assertEquals(2, request.getStreamObjects().size());
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, Collections.singletonList(s3ObjectMetadata.get(2)), request));
-    }
-
-    @Test
-    public void testForceSplitWithOutDatedObject() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 999, 9999, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 999, 9999, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 999, 9999, StreamState.OPENED))));
-
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        when(config.streamSetObjectCompactionForceSplitPeriod()).thenReturn(0);
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-
-        CommitStreamSetObjectRequest request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(0));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_0), request.getCompactedObjectIds());
-        Assertions.assertTrue(request.getStreamObjects().isEmpty());
-        Assertions.assertTrue(request.getStreamRanges().isEmpty());
-
-        request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(1));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_1), request.getCompactedObjectIds());
-        Assertions.assertTrue(request.getStreamObjects().isEmpty());
-        Assertions.assertTrue(request.getStreamRanges().isEmpty());
-
-        request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(2));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_2), request.getCompactedObjectIds());
-        Assertions.assertTrue(request.getStreamObjects().isEmpty());
-        Assertions.assertTrue(request.getStreamRanges().isEmpty());
-    }
-
-    @Test
-    public void testForceSplitWithException() {
-        S3AsyncClient s3AsyncClient = Mockito.mock(S3AsyncClient.class);
-        doAnswer(invocation -> CompletableFuture.completedFuture(null)).when(s3AsyncClient).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class));
-
-        Map<Long, List<StreamDataBlock>> streamDataBlockMap = getStreamDataBlockMap();
-        S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_0, 0, S3ObjectType.STREAM_SET);
-        DefaultS3Operator s3Operator = Mockito.spy(new DefaultS3Operator(s3AsyncClient, ""));
-        doReturn(CompletableFuture.failedFuture(new IllegalArgumentException("exception"))).when(s3Operator).rangeRead(eq(objectMetadata.key()), anyLong(), anyLong(), any());
-
-        CompactionManager compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        Assertions.assertThrowsExactly(CompletionException.class, () -> compactionManager.groupAndSplitStreamDataBlocks(objectMetadata, streamDataBlockMap.get(OBJECT_0)));
-    }
-
-    @Test
-    public void testForceSplitWithLimit() {
-        when(config.streamSetObjectCompactionCacheSize()).thenReturn(5L);
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        when(config.streamSetObjectCompactionForceSplitPeriod()).thenReturn(0);
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(0));
-        Assertions.assertNull(request);
-    }
-
-    @Test
-    public void testForceSplitWithLimit2() {
-        when(config.streamSetObjectCompactionCacheSize()).thenReturn(150L);
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        when(config.streamSetObjectCompactionForceSplitPeriod()).thenReturn(0);
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildSplitRequest(streamMetadataList, s3ObjectMetadata.get(0));
-        Assertions.assertEquals(-1, request.getObjectId());
-        Assertions.assertEquals(List.of(OBJECT_0), request.getCompactedObjectIds());
-        Assertions.assertEquals(3, request.getStreamObjects().size());
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, Collections.singletonList(s3ObjectMetadata.get(0)), request));
-    }
-
-    @Test
-    public void testCompact() {
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, s3ObjectMetadata);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(3, request.getStreamObjects().size());
-        assertEquals(2, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, s3ObjectMetadata, request));
-    }
-
-    @Test
-    public void testCompactSingleObject() {
-        List<S3ObjectMetadata> s3ObjectMetadataList = new ArrayList<>();
-        objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> {
-            assertEquals(OBJECT_3, objectId);
-            ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_3, s3Operator, 1024, 1024);
-            StreamRecordBatch r1 = new StreamRecordBatch(STREAM_1, 0, 500, 20, TestUtils.random(20));
-            StreamRecordBatch r2 = new StreamRecordBatch(STREAM_3, 0, 0, 10, TestUtils.random(1024));
-            StreamRecordBatch r3 = new StreamRecordBatch(STREAM_3, 0, 10, 10, TestUtils.random(1024));
-            objectWriter.write(STREAM_1, List.of(r1));
-            objectWriter.write(STREAM_3, List.of(r2, r3));
-            objectWriter.close().join();
-            List<StreamOffsetRange> streamsIndices = List.of(
-                new StreamOffsetRange(STREAM_1, 500, 520),
-                new StreamOffsetRange(STREAM_3, 0, 20)
-            );
-            S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_3, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(),
-                System.currentTimeMillis(), objectWriter.size(), OBJECT_3);
-            s3ObjectMetadataList.add(objectMetadata);
-            List.of(r1, r2, r3).forEach(StreamRecordBatch::release);
-        }).join();
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 1024, 2048, StreamState.OPENED),
-                new StreamMetadata(STREAM_3, 0, 1024, 2048, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, s3ObjectMetadataList);
-        assertEquals(-1L, request.getObjectId());
-        assertEquals(List.of(OBJECT_3), request.getCompactedObjectIds());
-        assertTrue(request.getStreamObjects().isEmpty());
-        assertTrue(request.getStreamRanges().isEmpty());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, s3ObjectMetadataList, request));
-    }
-
-    @Test
-    public void testCompactWithDataTrimmed() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 5, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(3, request.getStreamObjects().size());
-        assertEquals(2, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, request));
-    }
-
-    @Test
-    public void testCompactWithDataTrimmed2() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 15, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(3, request.getStreamObjects().size());
-        assertEquals(2, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, request));
-    }
-
-    @Test
-    public void testCompactionWithDataTrimmed3() {
-        objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> {
-            assertEquals(OBJECT_3, objectId);
-            ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_3, s3Operator, 1024, 1024);
-            StreamRecordBatch r1 = new StreamRecordBatch(STREAM_1, 0, 500, 20, TestUtils.random(20));
-            StreamRecordBatch r2 = new StreamRecordBatch(STREAM_3, 0, 0, 10, TestUtils.random(1024));
-            StreamRecordBatch r3 = new StreamRecordBatch(STREAM_3, 0, 10, 10, TestUtils.random(1024));
-            objectWriter.write(STREAM_1, List.of(r1));
-            objectWriter.write(STREAM_3, List.of(r2, r3));
-            objectWriter.close().join();
-            List<StreamOffsetRange> streamsIndices = List.of(
-                new StreamOffsetRange(STREAM_1, 500, 520),
-                new StreamOffsetRange(STREAM_3, 0, 20)
-            );
-            S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_3, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(),
-                System.currentTimeMillis(), objectWriter.size(), OBJECT_3);
-            S3_WAL_OBJECT_METADATA_LIST.add(objectMetadata);
-            List.of(r1, r2, r3).forEach(StreamRecordBatch::release);
-        }).join();
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 0, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED),
-                new StreamMetadata(STREAM_3, 0, 10, 20, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-        assertNull(request);
-    }
-
-    @Test
-    public void testCompactionWithDataTrimmed4() {
-        objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> {
-            assertEquals(OBJECT_3, objectId);
-            ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_3, s3Operator, 200, 1024);
-            StreamRecordBatch r1 = new StreamRecordBatch(STREAM_1, 0, 500, 20, TestUtils.random(20));
-            StreamRecordBatch r2 = new StreamRecordBatch(STREAM_3, 0, 0, 10, TestUtils.random(200));
-            StreamRecordBatch r3 = new StreamRecordBatch(STREAM_3, 0, 10, 10, TestUtils.random(200));
-            objectWriter.write(STREAM_1, List.of(r1));
-            objectWriter.write(STREAM_3, List.of(r2, r3));
-            objectWriter.close().join();
-            List<StreamOffsetRange> streamsIndices = List.of(
-                new StreamOffsetRange(STREAM_1, 500, 520),
-                new StreamOffsetRange(STREAM_3, 0, 20)
-            );
-            S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_3, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(),
-                System.currentTimeMillis(), objectWriter.size(), OBJECT_3);
-            S3_WAL_OBJECT_METADATA_LIST.add(objectMetadata);
-            List.of(r1, r2, r3).forEach(StreamRecordBatch::release);
-        }).join();
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 0, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED),
-                new StreamMetadata(STREAM_3, 0, 10, 20, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2, OBJECT_3), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_3);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_3));
-        assertEquals(4, request.getStreamObjects().size());
-        assertEquals(2, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, request));
-    }
-
-    @Test
-    public void testCompactWithOutdatedObject() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 15, 20, StreamState.OPENED),
-                new StreamMetadata(STREAM_1, 0, 60, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 60, 270, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(2, request.getStreamObjects().size());
-        assertEquals(2, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, request));
-    }
-
-    @Test
-    public void testCompactWithNonExistStream() {
-        when(streamManager.getStreams(Collections.emptyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_1, 0, 25, 500, StreamState.OPENED),
-                new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED))));
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST);
-
-        Set<Long> streamIds = request.getStreamObjects().stream().map(StreamObject::getStreamId).collect(Collectors.toSet());
-        streamIds.addAll(request.getStreamRanges().stream().map(ObjectStreamRange::getStreamId).collect(Collectors.toSet()));
-        assertEquals(Set.of(STREAM_1, STREAM_2), streamIds);
-        assertEquals(List.of(OBJECT_0, OBJECT_1, OBJECT_2), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(3, request.getStreamObjects().size());
-        assertEquals(1, request.getStreamRanges().size());
-
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, S3_WAL_OBJECT_METADATA_LIST, request));
-    }
-
-    @Test
-    public void testCompactNoneExistObjects() {
-        when(config.streamSetObjectCompactionStreamSplitSize()).thenReturn(100L);
-        when(config.streamSetObjectCompactionCacheSize()).thenReturn(9999L);
-        Map<Long, List<StreamDataBlock>> streamDataBlockMap = getStreamDataBlockMap();
-        S3ObjectMetadata objectMetadata0 = new S3ObjectMetadata(OBJECT_0, 0, S3ObjectType.STREAM_SET);
-        S3ObjectMetadata objectMetadata1 = new S3ObjectMetadata(OBJECT_1, 0, S3ObjectType.STREAM_SET);
-        S3ObjectMetadata objectMetadata2 = new S3ObjectMetadata(OBJECT_2, 0, S3ObjectType.STREAM_SET);
-        List<S3ObjectMetadata> s3ObjectMetadata = List.of(objectMetadata0, objectMetadata1, objectMetadata2);
-        this.compactionAnalyzer = new CompactionAnalyzer(config.streamSetObjectCompactionCacheSize(), config.streamSetObjectCompactionStreamSplitSize(),
-            config.maxStreamNumPerStreamSetObject(), config.maxStreamObjectNumPerCommit());
-        List<CompactionPlan> compactionPlans = this.compactionAnalyzer.analyze(streamDataBlockMap, new HashSet<>());
-        CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest();
-
-        S3AsyncClient s3AsyncClient = Mockito.mock(S3AsyncClient.class);
-        doAnswer(invocation -> CompletableFuture.completedFuture(null)).when(s3AsyncClient).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class));
-
-        DefaultS3Operator s3Operator = Mockito.spy(new DefaultS3Operator(s3AsyncClient, ""));
-        doAnswer(invocation -> CompletableFuture.completedFuture(TestUtils.randomPooled(65))).when(s3Operator).rangeRead(eq(objectMetadata0.key()), anyLong(), anyLong(), any());
-        doAnswer(invocation -> CompletableFuture.completedFuture(TestUtils.randomPooled(80))).when(s3Operator).rangeRead(eq(objectMetadata1.key()), anyLong(), anyLong(), any());
-        doAnswer(invocation -> CompletableFuture.failedFuture(new IllegalArgumentException("exception"))).when(s3Operator).rangeRead(eq(objectMetadata2.key()), anyLong(), anyLong(), any());
-
-        CompactionManager compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        Assertions.assertThrowsExactly(CompletionException.class,
-            () -> compactionManager.executeCompactionPlans(request, compactionPlans, s3ObjectMetadata));
-        for (CompactionPlan plan : compactionPlans) {
-            plan.streamDataBlocksMap().forEach((streamId, blocks) -> blocks.forEach(block -> {
-                if (block.getObjectId() != OBJECT_2) {
-                    block.getDataCf().thenAccept(data -> {
-                        Assertions.assertEquals(0, data.refCnt());
-                    }).join();
-                }
-            }));
-        }
-    }
-
-    @Test
-    public void testCompactNoneExistObjects2() {
-        when(config.streamSetObjectCompactionStreamSplitSize()).thenReturn(100L);
-        when(config.streamSetObjectCompactionCacheSize()).thenReturn(9999L);
-        Map<Long, List<StreamDataBlock>> streamDataBlockMap = getStreamDataBlockMap();
-        S3ObjectMetadata objectMetadata0 = new S3ObjectMetadata(OBJECT_0, 0, S3ObjectType.STREAM_SET);
-        S3ObjectMetadata objectMetadata1 = new S3ObjectMetadata(OBJECT_1, 0, S3ObjectType.STREAM_SET);
-        S3ObjectMetadata objectMetadata2 = new S3ObjectMetadata(OBJECT_2, 0, S3ObjectType.STREAM_SET);
-        List<S3ObjectMetadata> s3ObjectMetadata = List.of(objectMetadata0, objectMetadata1, objectMetadata2);
-        this.compactionAnalyzer = new CompactionAnalyzer(config.streamSetObjectCompactionCacheSize(), config.streamSetObjectCompactionStreamSplitSize(),
-            config.maxStreamNumPerStreamSetObject(), config.maxStreamObjectNumPerCommit());
-        List<CompactionPlan> compactionPlans = this.compactionAnalyzer.analyze(streamDataBlockMap, new HashSet<>());
-        CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest();
-
-        S3AsyncClient s3AsyncClient = Mockito.mock(S3AsyncClient.class);
-        doAnswer(invocation -> CompletableFuture.completedFuture(null)).when(s3AsyncClient).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class));
-
-        DefaultS3Operator s3Operator = Mockito.spy(new DefaultS3Operator(s3AsyncClient, ""));
-        doAnswer(invocation -> CompletableFuture.completedFuture(TestUtils.randomPooled(65))).when(s3Operator).rangeRead(eq(objectMetadata0.key()), anyLong(), anyLong(), any());
-        doAnswer(invocation -> CompletableFuture.failedFuture(new IllegalArgumentException("exception"))).when(s3Operator).rangeRead(eq(objectMetadata1.key()), anyLong(), anyLong(), any());
-        doAnswer(invocation -> CompletableFuture.completedFuture(TestUtils.randomPooled(50))).when(s3Operator).rangeRead(eq(objectMetadata2.key()), anyLong(), anyLong(), any());
-
-        CompactionManager compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        Assertions.assertThrowsExactly(CompletionException.class,
-            () -> compactionManager.executeCompactionPlans(request, compactionPlans, s3ObjectMetadata));
-        for (CompactionPlan plan : compactionPlans) {
-            plan.streamDataBlocksMap().forEach((streamId, blocks) -> blocks.forEach(block -> {
-                if (block.getObjectId() != OBJECT_1) {
-                    block.getDataCf().thenAccept(data -> {
-                        Assertions.assertEquals(0, data.refCnt());
-                    }).join();
-                }
-            }));
-        }
-    }
-
-    @Test
-    public void testCompactWithLimit() {
-        when(config.streamSetObjectCompactionStreamSplitSize()).thenReturn(70L);
-        when(config.maxStreamNumPerStreamSetObject()).thenReturn(MAX_STREAM_NUM_IN_WAL);
-        when(config.maxStreamObjectNumPerCommit()).thenReturn(4);
-        List<S3ObjectMetadata> s3ObjectMetadata = this.objectManager.getServerObjects().join();
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-        List<StreamMetadata> streamMetadataList = this.streamManager.getStreams(Collections.emptyList()).join();
-        CommitStreamSetObjectRequest request = compactionManager.buildCompactRequest(streamMetadataList, s3ObjectMetadata);
-
-        assertEquals(List.of(OBJECT_0, OBJECT_1), request.getCompactedObjectIds());
-        assertEquals(OBJECT_0, request.getOrderId());
-        assertTrue(request.getObjectId() > OBJECT_2);
-        request.getStreamObjects().forEach(s -> assertTrue(s.getObjectId() > OBJECT_2));
-        assertEquals(2, request.getStreamObjects().size());
-        assertEquals(1, request.getStreamRanges().size());
-
-        Set<Long> compactedObjectIds = new HashSet<>(request.getCompactedObjectIds());
-        s3ObjectMetadata = s3ObjectMetadata.stream().filter(s -> compactedObjectIds.contains(s.objectId())).collect(Collectors.toList());
-        Assertions.assertTrue(checkDataIntegrity(streamMetadataList, s3ObjectMetadata, request));
-    }
-
-    @Test
-    public void testCompactionShutdown() throws Throwable {
-        streamManager = Mockito.mock(MemoryMetadataManager.class);
-        when(streamManager.getStreams(Mockito.anyList())).thenReturn(CompletableFuture.completedFuture(
-            List.of(new StreamMetadata(STREAM_0, 0, 0, 200, StreamState.OPENED))));
-
-        objectManager = Mockito.spy(MemoryMetadataManager.class);
-        s3Operator = Mockito.spy(MemoryS3Operator.class);
-        List<Pair<InvocationOnMock, CompletableFuture<ByteBuf>>> invocations = new ArrayList<>();
-        when(s3Operator.rangeRead(Mockito.anyString(), Mockito.anyLong(), Mockito.anyLong(), Mockito.eq(ThrottleStrategy.THROTTLE_2)))
-            .thenAnswer(invocation -> {
-                CompletableFuture<ByteBuf> cf = new CompletableFuture<>();
-                invocations.add(Pair.of(invocation, cf));
-                return cf;
-            });
-
-        List<S3ObjectMetadata> s3ObjectMetadataList = new ArrayList<>();
-        // stream data for object 0
-        objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> {
-            assertEquals(OBJECT_0, objectId);
-            ObjectWriter objectWriter = ObjectWriter.writer(objectId, s3Operator, 1024, 1024);
-            StreamRecordBatch r1 = new StreamRecordBatch(STREAM_0, 0, 0, 80, TestUtils.random(80));
-            objectWriter.write(STREAM_0, List.of(r1));
-            objectWriter.close().join();
-            List<StreamOffsetRange> streamsIndices = List.of(
-                new StreamOffsetRange(STREAM_0, 0, 80)
-            );
-            S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_0, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(),
-                System.currentTimeMillis(), objectWriter.size(), OBJECT_0);
-            s3ObjectMetadataList.add(objectMetadata);
-            r1.release();
-        }).join();
-
-        // stream data for object 1
-        objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> {
-            assertEquals(OBJECT_1, objectId);
-            ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_1, s3Operator, 1024, 1024);
-            StreamRecordBatch r2 = new StreamRecordBatch(STREAM_0, 0, 80, 120, TestUtils.random(120));
-            objectWriter.write(STREAM_0, List.of(r2));
-            objectWriter.close().join();
-            List<StreamOffsetRange> streamsIndices = List.of(
-                new StreamOffsetRange(STREAM_0, 80, 120)
-            );
-            S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_1, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(),
-                System.currentTimeMillis(), objectWriter.size(), OBJECT_1);
-            s3ObjectMetadataList.add(objectMetadata);
-            r2.release();
-        }).join();
-
-        doReturn(CompletableFuture.completedFuture(s3ObjectMetadataList)).when(objectManager).getServerObjects();
-
-        compactionManager = new CompactionManager(config, objectManager, streamManager, s3Operator);
-
-        CompletableFuture<Void> cf = compactionManager.compact();
-        Thread.sleep(2000);
-        compactionManager.shutdown();
-        for (Pair<InvocationOnMock, CompletableFuture<ByteBuf>> pair : invocations) {
CompletableFuture realCf = (CompletableFuture) pair.getLeft().callRealMethod(); - pair.getRight().complete(realCf.get()); - } - try { - cf.join(); - } catch (Exception e) { - fail("Should not throw exception"); - } - } - - private boolean checkDataIntegrity(List streamMetadataList, List s3ObjectMetadata, - CommitStreamSetObjectRequest request) { - Map s3WALObjectMetadataMap = s3ObjectMetadata.stream() - .collect(Collectors.toMap(S3ObjectMetadata::objectId, e -> e)); - Map> streamDataBlocks = CompactionUtils.blockWaitObjectIndices(streamMetadataList, s3ObjectMetadata, s3Operator); - for (Map.Entry> entry : streamDataBlocks.entrySet()) { - long objectId = entry.getKey(); - DataBlockReader reader = new DataBlockReader(new S3ObjectMetadata(objectId, - s3WALObjectMetadataMap.get(objectId).objectSize(), S3ObjectType.STREAM_SET), s3Operator); - reader.readBlocks(entry.getValue()); - } - - Map compactedObjectMap = new HashMap<>(); - for (StreamObject streamObject : request.getStreamObjects()) { - S3ObjectMetadata objectMetadata = new S3ObjectMetadata(streamObject.getObjectId(), S3ObjectType.STREAM, - List.of(new StreamOffsetRange(streamObject.getStreamId(), streamObject.getStartOffset(), streamObject.getEndOffset())), - System.currentTimeMillis(), System.currentTimeMillis(), streamObject.getObjectSize(), S3StreamConstant.INVALID_ORDER_ID); - compactedObjectMap.put(streamObject.getObjectId(), objectMetadata); - } - List streamOffsetRanges = new ArrayList<>(); - for (ObjectStreamRange objectStreamRange : request.getStreamRanges()) { - streamOffsetRanges.add(new StreamOffsetRange(objectStreamRange.getStreamId(), - objectStreamRange.getStartOffset(), objectStreamRange.getEndOffset())); - } - if (request.getObjectId() != -1) { - S3ObjectMetadata metadata = new S3ObjectMetadata(request.getObjectId(), S3ObjectType.STREAM_SET, - streamOffsetRanges, System.currentTimeMillis(), System.currentTimeMillis(), request.getObjectSize(), request.getOrderId()); - compactedObjectMap.put(request.getObjectId(), metadata); - } - - Map> compactedStreamDataBlocksMap = CompactionUtils.blockWaitObjectIndices(streamMetadataList, new ArrayList<>(compactedObjectMap.values()), s3Operator); - for (Map.Entry> entry : compactedStreamDataBlocksMap.entrySet()) { - long objectId = entry.getKey(); - DataBlockReader reader = new DataBlockReader(new S3ObjectMetadata(objectId, - compactedObjectMap.get(objectId).objectSize(), S3ObjectType.STREAM_SET), s3Operator); - reader.readBlocks(entry.getValue()); - } - List expectedStreamDataBlocks = CompactionUtils.sortStreamRangePositions(streamDataBlocks); - List compactedStreamDataBlocks = CompactionUtils.sortStreamRangePositions(compactedStreamDataBlocksMap); - - int i = 0; - for (StreamDataBlock compactedStreamDataBlock : compactedStreamDataBlocks) { - long currStreamId = compactedStreamDataBlock.getStreamId(); - long startOffset = compactedStreamDataBlock.getStartOffset(); - if (i == expectedStreamDataBlocks.size()) { - return false; - } - List groupedStreamDataBlocks = new ArrayList<>(); - for (; i < expectedStreamDataBlocks.size(); i++) { - StreamDataBlock expectedBlock = expectedStreamDataBlocks.get(i); - - if (startOffset == compactedStreamDataBlock.getEndOffset()) { - break; - } - if (currStreamId != expectedBlock.getStreamId()) { - return false; - } - if (startOffset != expectedBlock.getStartOffset()) { - return false; - } - if (expectedBlock.getEndOffset() > compactedStreamDataBlock.getEndOffset()) { - return false; - } - startOffset = expectedBlock.getEndOffset(); - 
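                // Advance the cursor past this expected block and remember it; the group
                // is complete once the accumulated end offset reaches the compacted
                // block's end offset.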
groupedStreamDataBlocks.add(expectedBlock); - } - List compactedGroupedStreamDataBlocks = mergeStreamDataBlocksForGroup(List.of(groupedStreamDataBlocks)); - if (!compare(compactedStreamDataBlock, compactedGroupedStreamDataBlocks.get(0))) { - return false; - } - } - return true; - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionTestBase.java b/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionTestBase.java deleted file mode 100644 index 0bbea6925..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionTestBase.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.ObjectWriter; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactedObjectBuilder; -import com.automq.stream.s3.memory.MemoryMetadataManager; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamOffsetRange; -import com.automq.stream.s3.metadata.StreamState; -import com.automq.stream.s3.model.StreamRecordBatch; -import com.automq.stream.s3.operator.MemoryS3Operator; -import com.automq.stream.s3.operator.S3Operator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import org.mockito.Mockito; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.when; - -public class CompactionTestBase { - protected static final int BROKER_0 = 0; - protected static final long STREAM_0 = 0; - protected static final long STREAM_1 = 1; - protected static final long STREAM_2 = 2; - protected static final long STREAM_3 = 3; - protected static final long OBJECT_0 = 0; - protected static final long OBJECT_1 = 1; - protected static final long OBJECT_2 = 2; - protected static final long OBJECT_3 = 3; - protected static final long CACHE_SIZE = 1024; - protected static final double EXECUTION_SCORE_THRESHOLD = 0.5; - protected static final long STREAM_SPLIT_SIZE = 30; - protected static final int MAX_STREAM_NUM_IN_WAL = 100; - protected static final int MAX_STREAM_OBJECT_NUM = 100; - protected static final List S3_WAL_OBJECT_METADATA_LIST = new ArrayList<>(); - protected MemoryMetadataManager streamManager; - protected MemoryMetadataManager objectManager; - protected S3Operator s3Operator; - - public void setUp() throws Exception { - streamManager = Mockito.mock(MemoryMetadataManager.class); - when(streamManager.getStreams(Mockito.anyList())).thenReturn(CompletableFuture.completedFuture( - List.of(new StreamMetadata(STREAM_0, 0, 0, 20, StreamState.OPENED), - new StreamMetadata(STREAM_1, 0, 25, 500, 
StreamState.OPENED), - new StreamMetadata(STREAM_2, 0, 30, 270, StreamState.OPENED)))); - - objectManager = Mockito.spy(MemoryMetadataManager.class); - s3Operator = new MemoryS3Operator(); - // stream data for object 0 - objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> { - assertEquals(OBJECT_0, objectId); - ObjectWriter objectWriter = ObjectWriter.writer(objectId, s3Operator, 1024, 1024); - StreamRecordBatch r1 = new StreamRecordBatch(STREAM_0, 0, 0, 15, TestUtils.random(2)); - StreamRecordBatch r2 = new StreamRecordBatch(STREAM_1, 0, 25, 5, TestUtils.random(2)); - StreamRecordBatch r3 = new StreamRecordBatch(STREAM_1, 0, 30, 30, TestUtils.random(22)); - StreamRecordBatch r4 = new StreamRecordBatch(STREAM_2, 0, 30, 30, TestUtils.random(22)); - objectWriter.write(STREAM_0, List.of(r1)); - objectWriter.write(STREAM_1, List.of(r2)); - objectWriter.write(STREAM_1, List.of(r3)); - objectWriter.write(STREAM_2, List.of(r4)); - objectWriter.close().join(); - List streamsIndices = List.of( - new StreamOffsetRange(STREAM_0, 0, 15), - new StreamOffsetRange(STREAM_1, 25, 30), - new StreamOffsetRange(STREAM_1, 30, 60), - new StreamOffsetRange(STREAM_2, 30, 60) - ); - S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_0, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(), - System.currentTimeMillis(), objectWriter.size(), OBJECT_0); - S3_WAL_OBJECT_METADATA_LIST.add(objectMetadata); - List.of(r1, r2, r3, r4).forEach(StreamRecordBatch::release); - }).join(); - - // stream data for object 1 - objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> { - assertEquals(OBJECT_1, objectId); - ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_1, s3Operator, 1024, 1024); - StreamRecordBatch r5 = new StreamRecordBatch(STREAM_0, 0, 15, 5, TestUtils.random(1)); - StreamRecordBatch r6 = new StreamRecordBatch(STREAM_1, 0, 60, 60, TestUtils.random(52)); - objectWriter.write(STREAM_0, List.of(r5)); - objectWriter.write(STREAM_1, List.of(r6)); - objectWriter.close().join(); - List streamsIndices = List.of( - new StreamOffsetRange(STREAM_0, 15, 20), - new StreamOffsetRange(STREAM_1, 60, 120) - ); - S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_1, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(), - System.currentTimeMillis(), objectWriter.size(), OBJECT_1); - S3_WAL_OBJECT_METADATA_LIST.add(objectMetadata); - List.of(r5, r6).forEach(StreamRecordBatch::release); - }).join(); - - // stream data for object 2 - objectManager.prepareObject(1, TimeUnit.MINUTES.toMillis(30)).thenAccept(objectId -> { - assertEquals(OBJECT_2, objectId); - ObjectWriter objectWriter = ObjectWriter.writer(OBJECT_2, s3Operator, 1024, 1024); - StreamRecordBatch r8 = new StreamRecordBatch(STREAM_1, 0, 400, 100, TestUtils.random(92)); - StreamRecordBatch r9 = new StreamRecordBatch(STREAM_2, 0, 230, 40, TestUtils.random(32)); - objectWriter.write(STREAM_1, List.of(r8)); - objectWriter.write(STREAM_2, List.of(r9)); - objectWriter.close().join(); - List streamsIndices = List.of( - new StreamOffsetRange(STREAM_1, 400, 500), - new StreamOffsetRange(STREAM_2, 230, 270) - ); - S3ObjectMetadata objectMetadata = new S3ObjectMetadata(OBJECT_2, S3ObjectType.STREAM_SET, streamsIndices, System.currentTimeMillis(), - System.currentTimeMillis(), objectWriter.size(), OBJECT_2); - S3_WAL_OBJECT_METADATA_LIST.add(objectMetadata); - List.of(r8, r9).forEach(StreamRecordBatch::release); - }).join(); - 
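        // The fixture now holds three stream set objects:
        //   object 0: stream 0 [0, 15), stream 1 [25, 60), stream 2 [30, 60)
        //   object 1: stream 0 [15, 20), stream 1 [60, 120)
        //   object 2: stream 1 [400, 500), stream 2 [230, 270)
        // Serve them from the spied object manager as this node's server objects.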
doReturn(CompletableFuture.completedFuture(S3_WAL_OBJECT_METADATA_LIST)).when(objectManager).getServerObjects(); - } - - public void tearDown() { - S3_WAL_OBJECT_METADATA_LIST.clear(); - } - - protected boolean compare(StreamDataBlock block1, StreamDataBlock block2) { - boolean attr = block1.getStreamId() == block2.getStreamId() && - block1.getStartOffset() == block2.getStartOffset() && - block1.getEndOffset() == block2.getEndOffset() && - block1.dataBlockIndex().recordCount() == block2.dataBlockIndex().recordCount(); - if (!attr) { - return false; - } - if (!block1.getDataCf().isDone()) { - return !block2.getDataCf().isDone(); - } else { - if (!block2.getDataCf().isDone()) { - return false; - } else { - return block1.getDataCf().join().compareTo(block2.getDataCf().join()) == 0; - } - } - } - - protected boolean compare(List streamDataBlocks1, List streamDataBlocks2) { - if (streamDataBlocks1.size() != streamDataBlocks2.size()) { - return false; - } - for (int i = 0; i < streamDataBlocks1.size(); i++) { - if (!compare(streamDataBlocks1.get(i), streamDataBlocks2.get(i))) { - return false; - } - } - return true; - } - - protected boolean compare(Map> streamDataBlockMap1, - Map> streamDataBlockMap2) { - if (streamDataBlockMap1.size() != streamDataBlockMap2.size()) { - return false; - } - for (Map.Entry> entry : streamDataBlockMap1.entrySet()) { - long objectId = entry.getKey(); - List streamDataBlocks = entry.getValue(); - assertTrue(streamDataBlockMap2.containsKey(objectId)); - if (!compare(streamDataBlocks, streamDataBlockMap2.get(objectId))) { - return false; - } - } - return true; - } - - protected boolean compare(CompactedObjectBuilder builder1, CompactedObjectBuilder builder2) { - if (builder1.type() != builder2.type()) { - return false; - } - return compare(builder1.streamDataBlocks(), builder2.streamDataBlocks()); - } - - protected boolean compare(CompactedObject compactedObject1, CompactedObject compactedObject2) { - if (compactedObject1.type() != compactedObject2.type()) { - return false; - } - return compare(compactedObject1.streamDataBlocks(), compactedObject2.streamDataBlocks()); - } - - protected long calculateObjectSize(List streamDataBlocksGroups) { - long bodySize = streamDataBlocksGroups.stream().mapToLong(StreamDataBlock::getBlockSize).sum(); - int indexBlockSize = DataBlockIndex.BLOCK_INDEX_SIZE * streamDataBlocksGroups.size(); - long tailSize = ObjectWriter.Footer.FOOTER_SIZE; - return bodySize + indexBlockSize + tailSize; - } - - protected List mergeStreamDataBlocksForGroup(List> streamDataBlockGroups) { - List mergedStreamDataBlocks = new ArrayList<>(); - for (List streamDataBlocks : streamDataBlockGroups) { - StreamDataBlock mergedBlock = new StreamDataBlock( - streamDataBlocks.get(0).getStreamId(), - streamDataBlocks.get(0).getStartOffset(), - streamDataBlocks.get(streamDataBlocks.size() - 1).getEndOffset(), - streamDataBlocks.get(0).getObjectId(), - streamDataBlocks.get(0).getBlockStartPosition(), - streamDataBlocks.stream().mapToInt(StreamDataBlock::getBlockSize).sum(), - streamDataBlocks.stream().map(StreamDataBlock::dataBlockIndex).mapToInt(DataBlockIndex::recordCount).sum()); - mergedBlock.getDataCf().complete(mergeStreamDataBlocksData(streamDataBlocks)); - mergedStreamDataBlocks.add(mergedBlock); - } - return mergedStreamDataBlocks; - } - - private ByteBuf mergeStreamDataBlocksData(List streamDataBlocks) { - CompositeByteBuf buf = ByteBufAlloc.compositeByteBuffer(); - for (StreamDataBlock block : streamDataBlocks) { - buf.addComponent(true, 
block.getDataCf().join()); - } - return buf; - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUploaderTest.java b/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUploaderTest.java deleted file mode 100644 index c4e68ac1e..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUploaderTest.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.Config; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactionType; -import com.automq.stream.s3.compact.operator.DataBlockReader; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.s3.compact.utils.GroupByOffsetPredicate; -import com.automq.stream.s3.memory.MemoryMetadataManager; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.S3ObjectType; -import com.automq.stream.s3.objects.StreamObject; -import com.automq.stream.s3.operator.MemoryS3Operator; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@Timeout(30) -@Tag("S3Unit") -public class CompactionUploaderTest extends CompactionTestBase { - - private MemoryMetadataManager objectManager; - private Config config; - - @BeforeEach - public void setUp() throws Exception { - s3Operator = new MemoryS3Operator(); - objectManager = new MemoryMetadataManager(); - config = mock(Config.class); - when(config.networkBaselineBandwidth()).thenReturn(500L); - when(config.streamSetObjectCompactionUploadConcurrency()).thenReturn(3); - when(config.objectPartSize()).thenReturn(100); - } - - @Test - public void testWriteWALObject() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 20, 1, 30, 20, 1), - new StreamDataBlock(STREAM_0, 20, 25, 0, 10, 5, 1), - new StreamDataBlock(STREAM_2, 40, 120, 2, 100, 80, 1), - new StreamDataBlock(STREAM_2, 120, 150, 3, 0, 30, 1)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks); - CompactionUploader uploader = new CompactionUploader(objectManager, s3Operator, config); - CompletableFuture cf = uploader.chainWriteStreamSetObject(null, compactedObject); - for (StreamDataBlock streamDataBlock : streamDataBlocks) { - streamDataBlock.getDataCf().complete(TestUtils.random(streamDataBlock.getBlockSize())); - } - cf.thenAccept(v -> uploader.forceUploadStreamSetObject()).join(); - uploader.forceUploadStreamSetObject().join(); - long walObjectSize = uploader.complete(); - System.out.printf("write size: %d%n", walObjectSize); - - List group = mergeStreamDataBlocksForGroup(CompactionUtils.groupStreamDataBlocks(streamDataBlocks, new GroupByOffsetPredicate())); - assertEquals(walObjectSize, 
calculateObjectSize(group)); - - //check s3 object - DataBlockReader reader = new DataBlockReader(new S3ObjectMetadata(OBJECT_0, walObjectSize, S3ObjectType.STREAM_SET), s3Operator); - reader.parseDataBlockIndex(); - List streamDataBlocksFromS3 = reader.getDataBlockIndex().join(); - assertEquals(streamDataBlocksFromS3.size(), group.size()); - reader.readBlocks(streamDataBlocksFromS3); - long expectedBlockPosition = 0; - for (int i = 0; i < group.size(); i++) { - assertEquals(expectedBlockPosition, streamDataBlocksFromS3.get(i).getBlockStartPosition()); - expectedBlockPosition += streamDataBlocksFromS3.get(i).getBlockSize(); - compare(streamDataBlocksFromS3.get(i), group.get(i)); - } - } - - @Test - public void testWriteWALObject2() { - List streamDataBlocks1 = List.of( - new StreamDataBlock(STREAM_0, 0, 20, 1, 30, 20, 1), - new StreamDataBlock(STREAM_0, 20, 25, 0, 10, 5, 1), - new StreamDataBlock(STREAM_2, 40, 120, 2, 100, 80, 1), - new StreamDataBlock(STREAM_2, 120, 150, 3, 0, 30, 1)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks1); - - List streamDataBlocks2 = List.of( - new StreamDataBlock(STREAM_3, 0, 15, 4, 0, 15, 1), - new StreamDataBlock(STREAM_3, 15, 20, 5, 20, 5, 1)); - CompactedObject compactedObject2 = new CompactedObject(CompactionType.COMPACT, streamDataBlocks2); - - CompactionUploader uploader = new CompactionUploader(objectManager, s3Operator, config); - CompletableFuture cf = uploader.chainWriteStreamSetObject(null, compactedObject); - cf = uploader.chainWriteStreamSetObject(cf, compactedObject2); - - for (StreamDataBlock streamDataBlock : streamDataBlocks2) { - streamDataBlock.getDataCf().complete(TestUtils.random(streamDataBlock.getBlockSize())); - } - - for (StreamDataBlock streamDataBlock : streamDataBlocks1) { - streamDataBlock.getDataCf().complete(TestUtils.random(streamDataBlock.getBlockSize())); - } - - cf.thenAccept(v -> uploader.forceUploadStreamSetObject()).join(); - uploader.forceUploadStreamSetObject().join(); - long walObjectSize = uploader.complete(); - - List expectedDataBlocks = new ArrayList<>(streamDataBlocks1); - expectedDataBlocks.addAll(streamDataBlocks2); - List group = mergeStreamDataBlocksForGroup(CompactionUtils.groupStreamDataBlocks(expectedDataBlocks, new GroupByOffsetPredicate())); - assertEquals(walObjectSize, calculateObjectSize(group)); - - //check s3 object - DataBlockReader reader = new DataBlockReader(new S3ObjectMetadata(OBJECT_0, walObjectSize, S3ObjectType.STREAM_SET), s3Operator); - reader.parseDataBlockIndex(); - List streamDataBlocksFromS3 = reader.getDataBlockIndex().join(); - assertEquals(streamDataBlocksFromS3.size(), group.size()); - reader.readBlocks(streamDataBlocksFromS3); - long expectedBlockPosition = 0; - for (int i = 0; i < group.size(); i++) { - assertEquals(expectedBlockPosition, streamDataBlocksFromS3.get(i).getBlockStartPosition()); - expectedBlockPosition += streamDataBlocksFromS3.get(i).getBlockSize(); - compare(streamDataBlocksFromS3.get(i), group.get(i)); - } - } - - @Test - public void testWriteStreamObject() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 60, 0, 23, 60, 1), - new StreamDataBlock(STREAM_0, 60, 120, 1, 45, 60, 1)); - CompactedObject compactedObject = new CompactedObject(CompactionType.SPLIT, streamDataBlocks); - - CompactionUploader uploader = new CompactionUploader(objectManager, s3Operator, config); - CompletableFuture cf = uploader.writeStreamObject(compactedObject); - for (StreamDataBlock streamDataBlock : 
streamDataBlocks) { - streamDataBlock.getDataCf().complete(TestUtils.random((int) streamDataBlock.getStreamRangeSize())); - } - StreamObject streamObject = cf.join(); - List group = mergeStreamDataBlocksForGroup(CompactionUtils.groupStreamDataBlocks(streamDataBlocks, new GroupByOffsetPredicate())); - assertEquals(streamObject.getObjectSize(), calculateObjectSize(group)); - - //check s3 object - DataBlockReader reader = new DataBlockReader(new S3ObjectMetadata(OBJECT_0, streamObject.getObjectSize(), S3ObjectType.STREAM), s3Operator); - reader.parseDataBlockIndex(); - List streamDataBlocksFromS3 = reader.getDataBlockIndex().join(); - assertEquals(streamDataBlocksFromS3.size(), group.size()); - reader.readBlocks(streamDataBlocksFromS3); - long expectedBlockPosition = 0; - for (int i = 0; i < group.size(); i++) { - assertEquals(expectedBlockPosition, streamDataBlocksFromS3.get(i).getBlockStartPosition()); - expectedBlockPosition += streamDataBlocksFromS3.get(i).getBlockSize(); - compare(streamDataBlocksFromS3.get(i), group.get(i)); - } - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUtilTest.java b/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUtilTest.java deleted file mode 100644 index abd9396d8..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/compact/CompactionUtilTest.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.compact; - -import com.automq.stream.s3.DataBlockIndex; -import com.automq.stream.s3.StreamDataBlock; -import com.automq.stream.s3.compact.objects.CompactedObject; -import com.automq.stream.s3.compact.objects.CompactionType; -import com.automq.stream.s3.compact.utils.CompactionUtils; -import com.automq.stream.s3.compact.utils.GroupByLimitPredicate; -import com.automq.stream.s3.compact.utils.GroupByOffsetPredicate; -import com.automq.stream.s3.objects.ObjectStreamRange; -import java.util.List; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -@Timeout(30) -@Tag("S3Unit") -public class CompactionUtilTest extends CompactionTestBase { - - @Test - public void testMergeStreamDataBlocks() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 15, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 15, 30, 1, 20, 5, 1), - new StreamDataBlock(STREAM_0, 30, 100, 1, 25, 80, 1), - new StreamDataBlock(STREAM_2, 40, 100, 1, 105, 80, 1), - new StreamDataBlock(STREAM_2, 120, 150, 1, 185, 30, 1)); - List> result = CompactionUtils.groupStreamDataBlocks(streamDataBlocks, new GroupByOffsetPredicate()); - assertEquals(3, result.size()); - Assertions.assertEquals(List.of(streamDataBlocks.get(0), streamDataBlocks.get(1), streamDataBlocks.get(2)), result.get(0)); - Assertions.assertEquals(List.of(streamDataBlocks.get(3)), result.get(1)); - Assertions.assertEquals(List.of(streamDataBlocks.get(4)), result.get(2)); - } - - @Test - public void testMergeStreamDataBlocks2() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 15, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 15, 30, 1, 20, 5, 1), - new 
StreamDataBlock(STREAM_0, 30, 100, 1, 25, 80, 1), - new StreamDataBlock(STREAM_2, 40, 100, 1, 105, 80, 1), - new StreamDataBlock(STREAM_2, 120, 150, 1, 185, 30, 1)); - - List> result = CompactionUtils.groupStreamDataBlocks(streamDataBlocks, new GroupByLimitPredicate(30)); - assertEquals(4, result.size()); - Assertions.assertEquals(List.of(streamDataBlocks.get(0), streamDataBlocks.get(1)), result.get(0)); - Assertions.assertEquals(List.of(streamDataBlocks.get(2)), result.get(1)); - Assertions.assertEquals(List.of(streamDataBlocks.get(3)), result.get(2)); - Assertions.assertEquals(List.of(streamDataBlocks.get(4)), result.get(3)); - } - - @Test - public void testBuildObjectStreamRanges() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 20, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 20, 25, 0, 20, 5, 1), - new StreamDataBlock(STREAM_2, 40, 120, 2, 25, 80, 1), - new StreamDataBlock(STREAM_2, 120, 150, 3, 105, 30, 1)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks); - List result = CompactionUtils.buildObjectStreamRangeFromGroup( - CompactionUtils.groupStreamDataBlocks(compactedObject.streamDataBlocks(), new GroupByOffsetPredicate())); - assertEquals(2, result.size()); - assertEquals(STREAM_0, result.get(0).getStreamId()); - assertEquals(0, result.get(0).getStartOffset()); - assertEquals(25, result.get(0).getEndOffset()); - assertEquals(STREAM_2, result.get(1).getStreamId()); - assertEquals(40, result.get(1).getStartOffset()); - assertEquals(150, result.get(1).getEndOffset()); - } - - @Test - public void testBuildDataIndices() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 15, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 15, 30, 1, 20, 5, 2), - new StreamDataBlock(STREAM_0, 30, 100, 1, 25, 80, 3), - new StreamDataBlock(STREAM_2, 40, 100, 1, 105, 80, 4), - new StreamDataBlock(STREAM_2, 120, 150, 1, 185, 30, 5)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks); - List result = CompactionUtils.buildDataBlockIndicesFromGroup( - CompactionUtils.groupStreamDataBlocks(compactedObject.streamDataBlocks(), new GroupByLimitPredicate(30))); - - assertEquals(4, result.size()); - assertEquals(new DataBlockIndex(STREAM_0, 0, 30, 3, 0, 25), result.get(0)); - assertEquals(new DataBlockIndex(STREAM_0, 30, 70, 3, 25, 80), result.get(1)); - assertEquals(new DataBlockIndex(STREAM_2, 40, 60, 4, 105, 80), result.get(2)); - assertEquals(new DataBlockIndex(STREAM_2, 120, 30, 5, 185, 30), result.get(3)); - } - - @Test - public void testBuildDataIndices2() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 15, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 15, 30, 1, 20, 5, 2), - new StreamDataBlock(STREAM_0, 30, (long) (Integer.MAX_VALUE) + 30, 1, 25, 80, 3), - new StreamDataBlock(STREAM_2, 40, 100, 1, 105, 80, 4), - new StreamDataBlock(STREAM_2, 120, 150, 1, 185, 30, 5)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks); - List result = CompactionUtils.buildDataBlockIndicesFromGroup( - CompactionUtils.groupStreamDataBlocks(compactedObject.streamDataBlocks(), new GroupByLimitPredicate(999))); - - assertEquals(4, result.size()); - assertEquals(new DataBlockIndex(STREAM_0, 0, 30, 3, 0, 25), result.get(0)); - assertEquals(new DataBlockIndex(STREAM_0, 30, Integer.MAX_VALUE, 3, 25, 80), result.get(1)); - assertEquals(new DataBlockIndex(STREAM_2, 40, 60, 4, 105, 80), result.get(2)); - assertEquals(new 
DataBlockIndex(STREAM_2, 120, 30, 5, 185, 30), result.get(3)); - } - - @Test - public void testBuildDataIndices3() { - List streamDataBlocks = List.of( - new StreamDataBlock(STREAM_0, 0, 15, 1, 0, 20, 1), - new StreamDataBlock(STREAM_0, 15, 30, 1, 20, 5, 2), - new StreamDataBlock(STREAM_0, 30, 100, 1, 25, 80, Integer.MAX_VALUE), - new StreamDataBlock(STREAM_2, 40, 100, 1, 105, 80, 4), - new StreamDataBlock(STREAM_2, 120, 150, 1, 185, 30, 5)); - CompactedObject compactedObject = new CompactedObject(CompactionType.COMPACT, streamDataBlocks); - List result = CompactionUtils.buildDataBlockIndicesFromGroup( - CompactionUtils.groupStreamDataBlocks(compactedObject.streamDataBlocks(), new GroupByLimitPredicate(999))); - - assertEquals(4, result.size()); - assertEquals(new DataBlockIndex(STREAM_0, 0, 30, 3, 0, 25), result.get(0)); - assertEquals(new DataBlockIndex(STREAM_0, 30, 70, Integer.MAX_VALUE, 25, 80), result.get(1)); - assertEquals(new DataBlockIndex(STREAM_2, 40, 60, 4, 105, 80), result.get(2)); - assertEquals(new DataBlockIndex(STREAM_2, 120, 30, 5, 185, 30), result.get(3)); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/failover/FailoverTest.java b/s3stream/src/test/java/com/automq/stream/s3/failover/FailoverTest.java deleted file mode 100644 index 567c9c61b..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/failover/FailoverTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.failover; - -import com.automq.stream.s3.wal.BlockWALService; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; - -public class FailoverTest { - String path; - FailoverFactory failoverFactory; - WALRecover walRecover; - Failover failover; - - @BeforeEach - public void setup() { - path = "/tmp/" + System.currentTimeMillis() + "/failover_test_wal"; - failoverFactory = mock(FailoverFactory.class); - walRecover = mock(WALRecover.class); - failover = spy(new Failover(failoverFactory, walRecover)); - } - - @AfterEach - public void cleanup() throws IOException { - Files.delete(Path.of(path)); - } - - @Test - public void test() throws IOException, ExecutionException, InterruptedException, TimeoutException { - BlockWALService wal = BlockWALService.builder(path, 1024 * 1024).nodeId(233).epoch(100).build(); - wal.start(); - wal.shutdownGracefully(); - - FailoverRequest request = new FailoverRequest(); - - // node mismatch - request.setNodeId(234); - request.setDevice(path); - request.setVolumeId("test_volume_id"); - - boolean exceptionThrown = false; - try { - failover.failover(request).get(100, TimeUnit.SECONDS); - } catch (ExecutionException e) { - if (e.getCause() instanceof IllegalArgumentException) { - exceptionThrown = true; - } - } - Assertions.assertTrue(exceptionThrown); - - // 
node match - request.setNodeId(233); - FailoverResponse resp = failover.failover(request).get(1, TimeUnit.SECONDS); - assertEquals(233, resp.getNodeId()); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/metrics/AttributesUtilTest.java b/s3stream/src/test/java/com/automq/stream/s3/metrics/AttributesUtilTest.java deleted file mode 100644 index 94a036ca1..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/metrics/AttributesUtilTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.metrics; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class AttributesUtilTest { - - @Test - public void testGetObjectSizeBucket() { - Assertions.assertEquals("16KB", AttributesUtils.getObjectBucketLabel(8 * 1024)); - Assertions.assertEquals("16KB", AttributesUtils.getObjectBucketLabel(16 * 1024)); - Assertions.assertEquals("32KB", AttributesUtils.getObjectBucketLabel(17 * 1024)); - Assertions.assertEquals("32KB", AttributesUtils.getObjectBucketLabel(32 * 1024)); - Assertions.assertEquals("64KB", AttributesUtils.getObjectBucketLabel(33 * 1024)); - Assertions.assertEquals("64KB", AttributesUtils.getObjectBucketLabel(64 * 1024)); - Assertions.assertEquals("128KB", AttributesUtils.getObjectBucketLabel(65 * 1024)); - Assertions.assertEquals("128KB", AttributesUtils.getObjectBucketLabel(128 * 1024)); - Assertions.assertEquals("256KB", AttributesUtils.getObjectBucketLabel(129 * 1024)); - Assertions.assertEquals("256KB", AttributesUtils.getObjectBucketLabel(256 * 1024)); - Assertions.assertEquals("512KB", AttributesUtils.getObjectBucketLabel(257 * 1024)); - Assertions.assertEquals("512KB", AttributesUtils.getObjectBucketLabel(512 * 1024)); - Assertions.assertEquals("1MB", AttributesUtils.getObjectBucketLabel(513 * 1024)); - Assertions.assertEquals("1MB", AttributesUtils.getObjectBucketLabel(1024 * 1024)); - Assertions.assertEquals("2MB", AttributesUtils.getObjectBucketLabel(1025 * 1024)); - Assertions.assertEquals("2MB", AttributesUtils.getObjectBucketLabel(2 * 1024 * 1024)); - Assertions.assertEquals("4MB", AttributesUtils.getObjectBucketLabel(2 * 1024 * 1024 + 1)); - Assertions.assertEquals("4MB", AttributesUtils.getObjectBucketLabel(4 * 1024 * 1024)); - Assertions.assertEquals("8MB", AttributesUtils.getObjectBucketLabel(4 * 1024 * 1024 + 1)); - Assertions.assertEquals("8MB", AttributesUtils.getObjectBucketLabel(8 * 1024 * 1024)); - Assertions.assertEquals("16MB", AttributesUtils.getObjectBucketLabel(8 * 1024 * 1024 + 1)); - Assertions.assertEquals("16MB", AttributesUtils.getObjectBucketLabel(16 * 1024 * 1024)); - Assertions.assertEquals("32MB", AttributesUtils.getObjectBucketLabel(16 * 1024 * 1024 + 1)); - Assertions.assertEquals("32MB", AttributesUtils.getObjectBucketLabel(32 * 1024 * 1024)); - Assertions.assertEquals("64MB", AttributesUtils.getObjectBucketLabel(32 * 1024 * 1024 + 1)); - Assertions.assertEquals("64MB", AttributesUtils.getObjectBucketLabel(64 * 1024 * 1024)); - Assertions.assertEquals("128MB", AttributesUtils.getObjectBucketLabel(64 * 1024 * 1024 + 1)); - Assertions.assertEquals("128MB", AttributesUtils.getObjectBucketLabel(128 * 1024 * 1024)); - Assertions.assertEquals("inf", 
AttributesUtils.getObjectBucketLabel(128 * 1024 * 1024 + 1)); - Assertions.assertEquals("inf", AttributesUtils.getObjectBucketLabel(1024 * 1024 * 1024)); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/metrics/MetricsLevelTest.java b/s3stream/src/test/java/com/automq/stream/s3/metrics/MetricsLevelTest.java deleted file mode 100644 index f2ef07f59..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/metrics/MetricsLevelTest.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.automq.stream.s3.metrics; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class MetricsLevelTest { - - @Test - public void testIsWithin() { - Assertions.assertTrue(MetricsLevel.INFO.isWithin(MetricsLevel.INFO)); - Assertions.assertFalse(MetricsLevel.DEBUG.isWithin(MetricsLevel.INFO)); - - Assertions.assertTrue(MetricsLevel.INFO.isWithin(MetricsLevel.DEBUG)); - Assertions.assertTrue(MetricsLevel.DEBUG.isWithin(MetricsLevel.DEBUG)); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/metrics/wrapper/MetricsWrapperTest.java b/s3stream/src/test/java/com/automq/stream/s3/metrics/wrapper/MetricsWrapperTest.java deleted file mode 100644 index fbea91eaf..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/metrics/wrapper/MetricsWrapperTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.automq.stream.s3.metrics.wrapper; - -import com.automq.stream.s3.metrics.MetricsConfig; -import com.automq.stream.s3.metrics.MetricsLevel; -import com.yammer.metrics.core.MetricName; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.LongCounter; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - -public class MetricsWrapperTest { - - @Test - public void testConfigurableMetrics() { - CounterMetric metric = new CounterMetric(new MetricsConfig(), Attributes.builder().put("extra", "v").build(), - Mockito.mock(LongCounter.class)); - Assertions.assertEquals(MetricsLevel.INFO, metric.metricsLevel); - - metric.onConfigChange(new MetricsConfig(MetricsLevel.DEBUG, Attributes.builder().put("base", "v2").build())); - Assertions.assertEquals(MetricsLevel.DEBUG, metric.metricsLevel); - Assertions.assertEquals(Attributes.builder().put("extra", "v").put("base", "v2").build(), metric.attributes); - - YammerHistogramMetric yammerHistogramMetric = new YammerHistogramMetric(Mockito.mock(MetricName.class), MetricsLevel.INFO, new MetricsConfig(), - Attributes.builder().put("extra", "v").build()); - Assertions.assertEquals(MetricsLevel.INFO, yammerHistogramMetric.metricsLevel); - - yammerHistogramMetric.onConfigChange(new MetricsConfig(MetricsLevel.DEBUG, Attributes.builder().put("base", "v2").build())); - Assertions.assertEquals(MetricsLevel.DEBUG, yammerHistogramMetric.metricsLevel); - Assertions.assertEquals(Attributes.builder().put("extra", "v").put("base", "v2").build(), yammerHistogramMetric.attributes); - } - - @Test - public void testMetricsLevel() { - CounterMetric metric = new CounterMetric(new MetricsConfig(MetricsLevel.INFO, null), Mockito.mock(LongCounter.class)); - Assertions.assertTrue(metric.add(MetricsLevel.INFO, 1)); - Assertions.assertFalse(metric.add(MetricsLevel.DEBUG, 1)); - metric.onConfigChange(new MetricsConfig(MetricsLevel.DEBUG, null)); - Assertions.assertTrue(metric.add(MetricsLevel.INFO, 1)); - Assertions.assertTrue(metric.add(MetricsLevel.DEBUG, 1)); - - YammerHistogramMetric yammerHistogramMetric = new YammerHistogramMetric(Mockito.mock(MetricName.class), MetricsLevel.INFO, new MetricsConfig(), - Attributes.builder().put("extra", "v").build()); - Assertions.assertTrue(yammerHistogramMetric.shouldRecord()); - yammerHistogramMetric.onConfigChange(new MetricsConfig(MetricsLevel.DEBUG, null)); - Assertions.assertTrue(yammerHistogramMetric.shouldRecord()); - yammerHistogramMetric = new YammerHistogramMetric(Mockito.mock(MetricName.class), MetricsLevel.DEBUG, new MetricsConfig(), - Attributes.builder().put("extra", "v").build()); - Assertions.assertFalse(yammerHistogramMetric.shouldRecord()); - yammerHistogramMetric.onConfigChange(new MetricsConfig(MetricsLevel.DEBUG, null)); - Assertions.assertTrue(yammerHistogramMetric.shouldRecord()); - } -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/objects/ObjectManagerTest.java b/s3stream/src/test/java/com/automq/stream/s3/objects/ObjectManagerTest.java deleted file mode 100644 index 2638602d7..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/objects/ObjectManagerTest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.objects; - -import com.automq.stream.s3.memory.MemoryMetadataManager; -import com.automq.stream.s3.metadata.ObjectUtils; -import com.automq.stream.s3.metadata.S3ObjectMetadata; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamOffsetRange; -import com.automq.stream.s3.streams.StreamManager; -import java.util.ArrayList; -import java.util.List; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -class ObjectManagerTest { - private StreamManager streamManager; - private ObjectManager objectManager; - - @BeforeEach - void setUp() { - MemoryMetadataManager metadataManager = new MemoryMetadataManager(); - streamManager = metadataManager; - objectManager = metadataManager; - } - - @Test - void prepareObject() { - assertEquals(0, objectManager.prepareObject(1, 1000).join()); - assertEquals(1, objectManager.prepareObject(1, 1000).join()); - assertEquals(2, objectManager.prepareObject(8, 1000).join()); - assertEquals(10, objectManager.prepareObject(1, 1000).join()); - } - - @Test - void testCommitAndCompact() { - Long streamId = streamManager.createStream().join(); - streamManager.openStream(streamId, 0).join(); - streamId = streamManager.createStream().join(); - streamManager.openStream(streamId, 0).join(); - streamId = streamManager.createStream().join(); - streamManager.openStream(streamId, 0).join(); - - CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest(); - // Commit stream set object with stream 0 offset [0, 3) and stream 1 offset [0, 5). - ArrayList streamRangeList = new ArrayList<>(); - streamRangeList.add(new ObjectStreamRange(0, 0, 0, 3, 300)); - streamRangeList.add(new ObjectStreamRange(1, 0, 0, 5, 500)); - request.setStreamRanges(streamRangeList); - request.setOrderId(0); - - // Commit stream object with stream 2 offset [0, 10). 
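        // ... and a second stream object with stream 2 offset [10, 20).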
- List streamObjectList = new ArrayList<>(); - StreamObject streamObject = new StreamObject(); - streamObject.setObjectId(1); - streamObject.setStreamId(2); - streamObject.setStartOffset(0); - streamObject.setEndOffset(10); - streamObjectList.add(streamObject); - - streamObject = new StreamObject(); - streamObject.setObjectId(4); - streamObject.setStreamId(2); - streamObject.setStartOffset(10); - streamObject.setEndOffset(20); - streamObjectList.add(streamObject); - - request.setStreamObjects(streamObjectList); - objectManager.commitStreamSetObject(request).join(); - - List streamMetadataList = streamManager.getStreams(List.of(0L, 1L, 2L)).join(); - assertEquals(3, streamMetadataList.size()); - assertEquals(3, streamMetadataList.get(0).endOffset()); - assertEquals(5, streamMetadataList.get(1).endOffset()); - assertEquals(20, streamMetadataList.get(2).endOffset()); - - List streamSetObjectMetadataList = objectManager.getServerObjects().join(); - assertEquals(1, streamSetObjectMetadataList.size()); - S3ObjectMetadata streamSetMetadata = streamSetObjectMetadataList.get(0); - List ranges = streamSetMetadata.getOffsetRanges(); - assertEquals(2, ranges.size()); - - assertEquals(0, ranges.get(0).streamId()); - assertEquals(0, ranges.get(0).startOffset()); - assertEquals(3, ranges.get(0).endOffset()); - - assertEquals(1, ranges.get(1).streamId()); - assertEquals(0, ranges.get(1).startOffset()); - assertEquals(5, ranges.get(1).endOffset()); - - List streamObjectMetadataList = objectManager.getStreamObjects(2, 0, 10, 100).join(); - assertEquals(1, streamObjectMetadataList.size()); - ranges = streamObjectMetadataList.get(0).getOffsetRanges(); - assertEquals(1, ranges.size()); - assertEquals(2, ranges.get(0).streamId()); - assertEquals(0, ranges.get(0).startOffset()); - assertEquals(10, ranges.get(0).endOffset()); - - streamObjectMetadataList = objectManager.getStreamObjects(2, 0, 20, 100).join(); - assertEquals(2, streamObjectMetadataList.size()); - - // Compact stream set object and commit stream object. - request = new CommitStreamSetObjectRequest(); - request.setObjectId(ObjectUtils.NOOP_OBJECT_ID); - request.setCompactedObjectIds(List.of(0L)); - - streamObjectList = new ArrayList<>(); - streamObject = new StreamObject(); - streamObject.setObjectId(2); - streamObject.setStreamId(0); - streamObject.setStartOffset(0); - streamObject.setEndOffset(3); - streamObjectList.add(streamObject); - - streamObject = new StreamObject(); - streamObject.setObjectId(3); - streamObject.setStreamId(1); - streamObject.setStartOffset(0); - streamObject.setEndOffset(5); - streamObjectList.add(streamObject); - - request.setStreamObjects(streamObjectList); - objectManager.commitStreamSetObject(request).join(); - - streamSetObjectMetadataList = objectManager.getServerObjects().join(); - assertEquals(0, streamSetObjectMetadataList.size()); - - streamObjectMetadataList = objectManager.getStreamObjects(0, 0, 10, 100).join(); - assertEquals(1, streamObjectMetadataList.size()); - streamObjectMetadataList = objectManager.getStreamObjects(1, 0, 10, 100).join(); - assertEquals(1, streamObjectMetadataList.size()); - - // Compact stream object. 
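        // Merge stream objects 1 and 4 (stream 2, [0, 10) and [10, 20)) into a new
        // stream object (id 5) covering [0, 20); the assertions below verify the
        // merged range.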
-        objectManager.compactStreamObject(new CompactStreamObjectRequest(5, 2000, 2, 0L, 0, 20, List.of(1L, 4L))).join();
-        streamObjectMetadataList = objectManager.getStreamObjects(2, 0, 10, 100).join();
-        assertEquals(1, streamObjectMetadataList.size());
-        ranges = streamObjectMetadataList.get(0).getOffsetRanges();
-        assertEquals(1, ranges.size());
-        assertEquals(2, ranges.get(0).streamId());
-        assertEquals(0, ranges.get(0).startOffset());
-        assertEquals(20, ranges.get(0).endOffset());
-    }
-
-    @Test
-    void testGetObject() {
-        // Create and open stream 0 and 1.
-        Long streamId = streamManager.createStream().join();
-        streamManager.openStream(streamId, 0).join();
-        streamId = streamManager.createStream().join();
-        streamManager.openStream(streamId, 0).join();
-
-        CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest();
-        // Commit stream set object with stream 0 offset [0, 3) and stream 1 offset [0, 5).
-        ArrayList<ObjectStreamRange> streamRangeList = new ArrayList<>();
-        streamRangeList.add(new ObjectStreamRange(0, 0, 0, 3, 300));
-        streamRangeList.add(new ObjectStreamRange(1, 0, 0, 5, 500));
-        request.setStreamRanges(streamRangeList);
-        request.setOrderId(0);
-
-        List<StreamObject> streamObjectList = new ArrayList<>();
-        StreamObject streamObject = new StreamObject();
-        streamObject.setObjectId(1);
-        streamObject.setStreamId(0);
-        streamObject.setStartOffset(3);
-        streamObject.setEndOffset(5);
-        streamObjectList.add(streamObject);
-
-        streamObject = new StreamObject();
-        streamObject.setObjectId(2);
-        streamObject.setStreamId(0);
-        streamObject.setStartOffset(5);
-        streamObject.setEndOffset(10);
-        streamObjectList.add(streamObject);
-
-        request.setStreamObjects(streamObjectList);
-        objectManager.commitStreamSetObject(request).join();
-
-        // Get object with stream 0 offset [0, 10).
-        List<S3ObjectMetadata> streamObjectMetadataList = objectManager.getObjects(0, 0, 10, 100).join();
-        assertEquals(3, streamObjectMetadataList.size());
-        // Get object with stream 0 offset [1, 9).
-        streamObjectMetadataList = objectManager.getObjects(0, 1, 9, 100).join();
-        assertEquals(3, streamObjectMetadataList.size());
-        // Get object with stream 0 offset [0, 1).
-        streamObjectMetadataList = objectManager.getObjects(0, 0, 1, 100).join();
-        assertEquals(1, streamObjectMetadataList.size());
-        assertEquals(0, streamObjectMetadataList.get(0).objectId());
-        // Get object with limit 1.
-        streamObjectMetadataList = objectManager.getObjects(0, 0, 10, 1).join();
-        assertEquals(1, streamObjectMetadataList.size());
-        assertEquals(0, streamObjectMetadataList.get(0).objectId());
-
-        // Get stream object with stream 0 offset [3, 10).
-        streamObjectMetadataList = objectManager.getStreamObjects(0, 3, 10, 100).join();
-        assertEquals(2, streamObjectMetadataList.size());
-        // Get stream object with stream 0 offset [5, 10).
-        streamObjectMetadataList = objectManager.getStreamObjects(0, 5, 10, 100).join();
-        assertEquals(1, streamObjectMetadataList.size());
-        assertEquals(2, streamObjectMetadataList.get(0).objectId());
-        // Get stream object with limit 1.
-        streamObjectMetadataList = objectManager.getStreamObjects(0, 0, 10, 1).join();
-        assertEquals(1, streamObjectMetadataList.size());
-        assertEquals(1, streamObjectMetadataList.get(0).objectId());
-
-        // Get all stream set objects belonging to the current node.
-        streamObjectMetadataList = objectManager.getServerObjects().join();
-        assertEquals(1, streamObjectMetadataList.size());
-        // Change node id.
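        // After the node id advances, getServerObjects() filters by the new owner, so
        // the previously committed stream set object is no longer visible to this node.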
- MemoryMetadataManager.advanceNodeId(); - streamObjectMetadataList = objectManager.getServerObjects().join(); - assertEquals(0, streamObjectMetadataList.size()); - } -} \ No newline at end of file diff --git a/s3stream/src/test/java/com/automq/stream/s3/operator/DefaultS3OperatorTest.java b/s3stream/src/test/java/com/automq/stream/s3/operator/DefaultS3OperatorTest.java deleted file mode 100644 index d65894f85..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/operator/DefaultS3OperatorTest.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.TestUtils; -import io.netty.buffer.ByteBuf; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; -import software.amazon.awssdk.services.s3.model.DeletedObject; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@Tag("S3Unit") -class DefaultS3OperatorTest { - - private S3AsyncClient s3; - private DefaultS3Operator operator; - - @BeforeEach - void setUp() { - s3 = mock(S3AsyncClient.class); - operator = new DefaultS3Operator(s3, "test-bucket"); - } - - @AfterEach - void tearDown() { - operator.close(); - } - - @Test - void testDeleteObjectsSuccess() { - when(s3.deleteObjects(any(DeleteObjectsRequest.class))) - .thenAnswer(invocation -> { - DeleteObjectsRequest request = invocation.getArgument(0); - DeleteObjectsResponse response = DeleteObjectsResponse.builder() - .deleted(request.delete().objects().stream() - .map(o -> DeletedObject.builder() - .key(o.key()) - .build()) - .collect(Collectors.toList())) - .build(); - return CompletableFuture.completedFuture(response); - }); - List keys = List.of("test1", "test2"); - List deleted = operator.delete(keys).join(); - assertEquals(keys, deleted); - } - - @Test - void testDeleteObjectsFail() { - when(s3.deleteObjects(any(DeleteObjectsRequest.class))) - .thenReturn(CompletableFuture.failedFuture(new RuntimeException("test"))); - List keys = List.of("test1", "test2"); - List deleted = operator.delete(keys).join(); - assertEquals(Collections.emptyList(), deleted); - } - - @Test - public void testMergeTask() { - DefaultS3Operator.MergedReadTask mergedReadTask = new DefaultS3Operator.MergedReadTask( - new DefaultS3Operator.ReadTask("obj0", 0, 1024, new CompletableFuture<>()), 0); - boolean ret = 
mergedReadTask.tryMerge(new DefaultS3Operator.ReadTask("obj0", 1024, 2048, new CompletableFuture<>()));
-        assertTrue(ret);
-        assertEquals(0, mergedReadTask.dataSparsityRate);
-        assertEquals(0, mergedReadTask.start);
-        assertEquals(2048, mergedReadTask.end);
-        ret = mergedReadTask.tryMerge(new DefaultS3Operator.ReadTask("obj0", 2049, 3000, new CompletableFuture<>()));
-        assertFalse(ret);
-        assertEquals(0, mergedReadTask.dataSparsityRate);
-        assertEquals(0, mergedReadTask.start);
-        assertEquals(2048, mergedReadTask.end);
-    }
-
-    @Test
-    public void testMergeTask2() {
-        DefaultS3Operator.MergedReadTask mergedReadTask = new DefaultS3Operator.MergedReadTask(
-            new DefaultS3Operator.ReadTask("obj0", 0, 1024, new CompletableFuture<>()), 0.5f);
-        boolean ret = mergedReadTask.tryMerge(new DefaultS3Operator.ReadTask("obj0", 2048, 4096, new CompletableFuture<>()));
-        assertTrue(ret);
-        assertEquals(0.25, mergedReadTask.dataSparsityRate, 0.01);
-        assertEquals(0, mergedReadTask.start);
-        assertEquals(4096, mergedReadTask.end);
-        ret = mergedReadTask.tryMerge(new DefaultS3Operator.ReadTask("obj0", 1024, 1536, new CompletableFuture<>()));
-        assertTrue(ret);
-        assertEquals(0.125, mergedReadTask.dataSparsityRate, 0.01);
-        assertEquals(0, mergedReadTask.start);
-        assertEquals(4096, mergedReadTask.end);
-    }
-
-    @Test
-    void testMergeRead() throws ExecutionException, InterruptedException {
-        operator = new DefaultS3Operator(s3, "test-bucket", true) {
-            @Override
-            CompletableFuture<ByteBuf> mergedRangeRead(String path, long start, long end) {
-                return CompletableFuture.completedFuture(TestUtils.random((int) (end - start + 1)));
-            }
-        };
-        operator = spy(operator);
-
-        // Expected merges: obj0 [0, 1024) and obj0 [2048, 4096) coalesce into a single
-        // ranged read; obj1 [1024, 3072), obj0 [31457280, 31461376) and
-        // obj0 [33554432, 33554944) are each read on their own.
-        CompletableFuture<ByteBuf> cf1 = operator.rangeRead("obj0", 0, 1024);
-        CompletableFuture<ByteBuf> cf2 = operator.rangeRead("obj1", 1024, 3072);
-        CompletableFuture<ByteBuf> cf3 = operator.rangeRead("obj0", 31457280, 31461376);
-        CompletableFuture<ByteBuf> cf4 = operator.rangeRead("obj0", 2048, 4096);
-        CompletableFuture<ByteBuf> cf5 = operator.rangeRead("obj0", 33554432, 33554944);
-
-        operator.tryMergeRead();
-
-        verify(operator, timeout(1000L).times(1)).mergedRangeRead(eq("obj0"), eq(0L), eq(4096L));
-        verify(operator, timeout(1000L).times(1)).mergedRangeRead(eq("obj1"), eq(1024L), eq(3072L));
-        verify(operator, timeout(1000L).times(1)).mergedRangeRead(eq("obj0"), eq(31457280L), eq(31461376L));
-        verify(operator, timeout(1000L).times(1)).mergedRangeRead(eq("obj0"), eq(33554432L), eq(33554944L));
-
-        ByteBuf buf = cf1.get();
-        assertEquals(1024, buf.readableBytes());
-        buf.release();
-        buf = cf2.get();
-        assertEquals(2048, buf.readableBytes());
-        buf.release();
-        buf = cf3.get();
-        assertEquals(4096, buf.readableBytes());
-        buf.release();
-        buf = cf4.get();
-        assertEquals(2048, buf.readableBytes());
-        buf.release();
-        buf = cf5.get();
-        assertEquals(512, buf.readableBytes());
-        buf.release();
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/s3/operator/MultiPartWriterTest.java b/s3stream/src/test/java/com/automq/stream/s3/operator/MultiPartWriterTest.java
deleted file mode 100644
index 370699f54..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/operator/MultiPartWriterTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.TestUtils; -import io.netty.buffer.ByteBuf; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.async.AsyncResponseTransformer; -import software.amazon.awssdk.core.async.ResponsePublisher; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CopyPartResult; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.GetObjectRequest; -import software.amazon.awssdk.services.s3.model.GetObjectResponse; -import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; -import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.services.s3.model.UploadPartResponse; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@Tag("S3Unit") -class MultiPartWriterTest { - private S3AsyncClient s3; - private DefaultS3Operator operator; - private MultiPartWriter writer; - - @BeforeEach - void setUp() { - s3 = mock(S3AsyncClient.class); - operator = new DefaultS3Operator(s3, "unit-test-bucket"); - CreateMultipartUploadResponse.Builder builder = CreateMultipartUploadResponse.builder(); - when(s3.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(CompletableFuture.completedFuture(builder.build())); - } - - @Test - void testWrite() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, ExecutionException, InterruptedException { - writer = new MultiPartWriter(Writer.Context.DEFAULT, operator, "test-path", 100, null); - - List<UploadPartRequest> requests = new ArrayList<>(); - List<Long> contentLengths = new ArrayList<>(); - - UploadPartResponse.Builder builder = UploadPartResponse.builder(); - Method method = builder.getClass().getDeclaredMethod("setETag", String.class); - method.setAccessible(true); - method.invoke(builder, "unit-test-etag"); - - when(s3.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))).thenAnswer(invocation -> { - UploadPartRequest request = invocation.getArgument(0); - requests.add(request); - AsyncRequestBody body = invocation.getArgument(1); - contentLengths.add(body.contentLength().orElse(0L)); - return CompletableFuture.completedFuture(builder.build()); - }); - when(s3.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(CompletableFuture.completedFuture(null)); - writer.uploadIdCf.get(); -
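- // With the 100-byte minimum part size used here, payloads are batched until a part reaches 100 bytes: 120 (already oversized), 20 + 40 + 60, and 80 + 200 flush as parts, and the trailing 10 bytes become the final part, matching the [120, 120, 280, 10] content lengths asserted below.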
- List<ByteBuf> payloads = List.of( - // case 2 - TestUtils.random(120), - // case 1 - TestUtils.random(20), - // case 3 - TestUtils.random(40), - // case 4 - TestUtils.random(60), - // case 1 - TestUtils.random(80), - // case 5 - TestUtils.random(200), - // last part - TestUtils.random(10) - ); - - payloads.forEach(writer::write); - writer.close().get(); - assertEquals(4, requests.size()); - assertEquals("unit-test-bucket", requests.get(0).bucket()); - assertEquals("test-path", requests.get(0).key()); - assertEquals(List.of(1, 2, 3, 4), requests.stream() - .map(UploadPartRequest::partNumber) - .collect(Collectors.toList())); - assertEquals(List.of(120L, 120L, 280L, 10L), contentLengths); - } - - @Test - @SuppressWarnings("unchecked") - void testCopyWrite() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, ExecutionException, InterruptedException { - writer = new MultiPartWriter(Writer.Context.DEFAULT, operator, "test-path-2", 100, null); - List<UploadPartRequest> uploadPartRequests = new ArrayList<>(); - List<UploadPartCopyRequest> uploadPartCopyRequests = new ArrayList<>(); - List<Long> writeContentLengths = new ArrayList<>(); - - UploadPartResponse.Builder builder = UploadPartResponse.builder(); - Method method = builder.getClass().getDeclaredMethod("setETag", String.class); - method.setAccessible(true); - method.invoke(builder, "unit-test-etag"); - - CopyPartResult.Builder copyResultBuilder = CopyPartResult.builder(); - method = copyResultBuilder.getClass().getDeclaredMethod("setETag", String.class); - method.setAccessible(true); - method.invoke(copyResultBuilder, "unit-test-copy-etag"); - - UploadPartCopyResponse.Builder copyBuilder = UploadPartCopyResponse.builder(); - method = copyBuilder.getClass().getDeclaredMethod("setCopyPartResult", copyResultBuilder.getClass()); - method.setAccessible(true); - method.invoke(copyBuilder, copyResultBuilder); - - when(s3.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))).thenAnswer(invocation -> { - UploadPartRequest request = invocation.getArgument(0); - uploadPartRequests.add(request); - AsyncRequestBody body = invocation.getArgument(1); - writeContentLengths.add(body.contentLength().orElse(0L)); - return CompletableFuture.completedFuture(builder.build()); - }); - - when(s3.uploadPartCopy(any(UploadPartCopyRequest.class))).thenAnswer(invocation -> { - UploadPartCopyRequest request = invocation.getArgument(0); - uploadPartCopyRequests.add(request); - return CompletableFuture.completedFuture(copyBuilder.build()); - }); - when(s3.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(CompletableFuture.completedFuture(null)); - - when(s3.getObject(any(GetObjectRequest.class), any(AsyncResponseTransformer.class))).thenAnswer(invocation -> { - GetObjectRequest request = invocation.getArgument(0); - String[] startEnd = request.range().split("=")[1].split("-"); - long start = Long.parseLong(startEnd[0]); - long end = Long.parseLong(startEnd[1]); - - GetObjectResponse.Builder responseBuilder = GetObjectResponse.builder(); - software.amazon.awssdk.core.async.ResponsePublisher<GetObjectResponse> responsePublisher - = new ResponsePublisher<>(responseBuilder.build(), AsyncRequestBody.fromByteBuffer(TestUtils.random((int) (end - start + 1)).nioBuffer())); - return CompletableFuture.completedFuture(responsePublisher); - }); - - // case 2 - writer.copyWrite("path-1", 0, 120); - // case 1 - writer.copyWrite("path-2", 20, 40); - // case 3 - writer.copyWrite("path-3", 60, 100); - // case 4 - writer.copyWrite("path-4", 140, 200); - // case 1 - writer.copyWrite("path-5",
200, 280); - // case 5 - writer.copyWrite("path-6", 400, 600); - // last part - writer.copyWrite("path-7", 10, 20); - - writer.close().get(); - assertEquals(3, uploadPartRequests.size()); - assertEquals("unit-test-bucket", uploadPartRequests.get(0).bucket()); - assertEquals("test-path-2", uploadPartRequests.get(0).key()); - for (int i = 0; i < 3; i++) { - int partNum = uploadPartRequests.get(i).partNumber(); - switch (partNum) { - case 2: - assertEquals(120L, writeContentLengths.get(i)); - break; - case 3: - assertEquals(280L, writeContentLengths.get(i)); - break; - case 4: - assertEquals(10L, writeContentLengths.get(i)); - break; - default: - throw new IllegalStateException(); - } - } - - assertEquals(1, uploadPartCopyRequests.size()); - assertEquals("unit-test-bucket", uploadPartCopyRequests.get(0).sourceBucket()); - assertEquals("unit-test-bucket", uploadPartCopyRequests.get(0).destinationBucket()); - assertEquals(List.of("path-1"), uploadPartCopyRequests.stream() - .map(UploadPartCopyRequest::sourceKey) - .collect(Collectors.toList())); - assertEquals("test-path-2", uploadPartCopyRequests.get(0).destinationKey()); - assertEquals(List.of(1), uploadPartCopyRequests.stream() - .map(UploadPartCopyRequest::partNumber) - .collect(Collectors.toList())); - assertEquals(List.of("bytes=0-119"), uploadPartCopyRequests.stream() - .map(UploadPartCopyRequest::copySourceRange) - .collect(Collectors.toList())); - } - -} \ No newline at end of file diff --git a/s3stream/src/test/java/com/automq/stream/s3/operator/ProxyWriterTest.java b/s3stream/src/test/java/com/automq/stream/s3/operator/ProxyWriterTest.java deleted file mode 100644 index dcf4acc3f..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/operator/ProxyWriterTest.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. 
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.operator; - -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.network.ThrottleStrategy; -import io.netty.buffer.ByteBuf; -import java.util.concurrent.CompletableFuture; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import software.amazon.awssdk.services.s3.model.CompletedPart; - -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class ProxyWriterTest { - - S3Operator operator; - ProxyWriter writer; - - @BeforeEach - public void setup() { - operator = mock(S3Operator.class); - writer = new ProxyWriter(Writer.Context.DEFAULT, operator, "testpath", null); - } - - @Test - public void testWrite_onePart() { - writer.write(TestUtils.random(15 * 1024 * 1024)); - writer.write(TestUtils.random(1024 * 1024)); - when(operator.write(eq("testpath"), any(), any())).thenReturn(CompletableFuture.completedFuture(null)); - assertTrue(writer.hasBatchingPart()); - assertTrue(writer.close().isDone()); - ArgumentCaptor<ByteBuf> captor = ArgumentCaptor.forClass(ByteBuf.class); - ArgumentCaptor<ThrottleStrategy> captor2 = ArgumentCaptor.forClass(ThrottleStrategy.class); - verify(operator, times(1)).write(eq("testpath"), captor.capture(), captor2.capture()); - Assertions.assertEquals(16 * 1024 * 1024, captor.getValue().readableBytes()); - } - - @Test - public void testWrite_dataLargerThanMaxUploadSize() { - when(operator.createMultipartUpload(eq("testpath"))).thenReturn(CompletableFuture.completedFuture("test_upload_id")); - when(operator.uploadPart(eq("testpath"), eq("test_upload_id"), eq(1), any(), any())).thenReturn(CompletableFuture.completedFuture(CompletedPart.builder().partNumber(1).eTag("etag1").build())); - when(operator.uploadPart(eq("testpath"), eq("test_upload_id"), eq(2), any(), any())).thenReturn(CompletableFuture.completedFuture(CompletedPart.builder().partNumber(1).eTag("etag2").build())); - when(operator.completeMultipartUpload(eq("testpath"), eq("test_upload_id"), any())).thenReturn(CompletableFuture.completedFuture(null)); - writer.write(TestUtils.random(17 * 1024 * 1024)); - assertTrue(writer.hasBatchingPart()); - assertNull(writer.multiPartWriter); - writer.write(TestUtils.random(17 * 1024 * 1024)); - assertNotNull(writer.multiPartWriter); - assertFalse(writer.hasBatchingPart()); - writer.write(TestUtils.random(17 * 1024 * 1024)); - assertNotNull(writer.multiPartWriter); - assertFalse(writer.hasBatchingPart()); - writer.close(); - verify(operator, times(2)).uploadPart(any(), any(), anyInt(), any(), any()); - }
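- - // copyWrite delegates to a server-side ranged copy (uploadPartCopy), so the source data never flows through the client.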
- @Test - public void testWrite_copyWrite() { - when(operator.createMultipartUpload(eq("testpath"))).thenReturn(CompletableFuture.completedFuture("test_upload_id")); - when(operator.uploadPartCopy(eq("test_src_path"), eq("testpath"), eq(0L), eq(15L * 1024 * 1024), eq("test_upload_id"), eq(1))) - .thenReturn(CompletableFuture.completedFuture(CompletedPart.builder().partNumber(1).eTag("etag1").build())); - when(operator.completeMultipartUpload(eq("testpath"), eq("test_upload_id"), any())).thenReturn(CompletableFuture.completedFuture(null)); - - writer.copyWrite("test_src_path", 0, 15 * 1024 * 1024); - Assertions.assertTrue(writer.close().isDone()); - - verify(operator, times(1)).uploadPartCopy(any(), any(), anyLong(), anyLong(), any(), anyInt()); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/streams/StreamManagerTest.java b/s3stream/src/test/java/com/automq/stream/s3/streams/StreamManagerTest.java deleted file mode 100644 index a74aeb9fa..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/streams/StreamManagerTest.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.streams; - -import com.automq.stream.s3.memory.MemoryMetadataManager; -import com.automq.stream.s3.metadata.StreamMetadata; -import com.automq.stream.s3.metadata.StreamState; -import com.automq.stream.s3.objects.CommitStreamSetObjectRequest; -import com.automq.stream.s3.objects.ObjectManager; -import com.automq.stream.s3.objects.StreamObject; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -class StreamManagerTest { - StreamManager streamManager; - - @BeforeEach - public void setUp() throws Exception { - streamManager = new MemoryMetadataManager(); - } - - @Test - public void testCreateAndOpenStream() { - // Create and open stream with epoch 0. - Long streamId = streamManager.createStream().join(); - StreamMetadata streamMetadata = streamManager.openStream(streamId, 0).join(); - assertEquals(streamId, streamMetadata.streamId()); - assertEquals(0, streamMetadata.epoch()); - assertEquals(0, streamMetadata.startOffset()); - assertEquals(0, streamMetadata.endOffset()); - assertEquals(StreamState.OPENED, streamMetadata.state()); - } - - @Test - public void testOpenAndCloseStream() { - // Create and open stream with epoch 0. - Long streamId = streamManager.createStream().join(); - StreamMetadata streamMetadata = streamManager.openStream(streamId, 0).join(); - - // Close stream with epoch 1. - CompletableFuture<Void> future = streamManager.closeStream(streamId, 1); - assertEquals(StreamState.OPENED, streamMetadata.state()); - assertTrue(future.isCompletedExceptionally()); - - // Close stream with epoch 0. - streamManager.closeStream(streamId, 0).join(); - assertEquals(StreamState.CLOSED, streamMetadata.state()); - - // Open stream with epoch 0. - CompletableFuture<StreamMetadata> future1 = streamManager.openStream(streamId, 0); - assertTrue(future1.isCompletedExceptionally()); - - // Open stream with epoch 1.
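- // Once the stream has been closed, only an open with a newer epoch is accepted.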
- streamMetadata = streamManager.openStream(streamId, 1).join(); - assertEquals(streamId, streamMetadata.streamId()); - assertEquals(1, streamMetadata.epoch()); - assertEquals(0, streamMetadata.startOffset()); - assertEquals(0, streamMetadata.endOffset()); - assertEquals(StreamState.OPENED, streamMetadata.state()); - - // Close stream with epoch 1. - streamManager.closeStream(streamId, 1).join(); - assertEquals(StreamState.CLOSED, streamMetadata.state()); - streamManager.deleteStream(streamId, 1).join(); - List<StreamMetadata> streamMetadataList = streamManager.getOpeningStreams().join(); - assertEquals(0, streamMetadataList.size()); - } - - @Test - public void testTrimStream() { - // Create and open stream with epoch 0. - Long streamId = streamManager.createStream().join(); - StreamMetadata streamMetadata = streamManager.openStream(streamId, 0).join(); - - // Trim stream with epoch 1. - CompletableFuture<Void> future = streamManager.trimStream(streamId, 1, 1); - assertTrue(future.isCompletedExceptionally()); - - // Trim stream to invalid offset. - CompletableFuture<Void> future1 = streamManager.trimStream(streamId, 0, -1); - assertTrue(future1.isCompletedExceptionally()); - future1 = streamManager.trimStream(streamId, 0, 1); - assertTrue(future1.isCompletedExceptionally()); - - // Advance offset and trim stream. - CommitStreamSetObjectRequest request = new CommitStreamSetObjectRequest(); - ArrayList<StreamObject> streamObjectList = new ArrayList<>(); - StreamObject streamObject = new StreamObject(); - streamObject.setStreamId(streamId); - streamObject.setStartOffset(0); - streamObject.setEndOffset(10); - streamObjectList.add(streamObject); - request.setStreamObjects(streamObjectList); - ((ObjectManager) streamManager).commitStreamSetObject(request).join(); - assertEquals(10, streamMetadata.endOffset()); - - streamManager.trimStream(streamId, 0, 5).join(); - assertEquals(5, streamMetadata.startOffset()); - } - - @Test - public void testGetStreams() { - ArrayList<Long> streamIds = new ArrayList<>(); - // Create and open stream with epoch 0. - Long streamId = streamManager.createStream().join(); - streamManager.openStream(streamId, 0).join(); - streamIds.add(streamId); - - streamId = streamManager.createStream().join(); - streamManager.openStream(streamId, 0).join(); - streamIds.add(streamId); - - // Get streams. - List<StreamMetadata> streamMetadataList = streamManager.getStreams(streamIds).join(); - assertEquals(2, streamMetadataList.size()); - assertEquals(streamId, streamMetadataList.get(1).streamId()); - assertEquals(0, streamMetadataList.get(1).epoch()); - assertEquals(0, streamMetadataList.get(1).startOffset()); - assertEquals(0, streamMetadataList.get(1).endOffset()); - assertEquals(StreamState.OPENED, streamMetadataList.get(1).state()); - - streamIds.add(Long.MAX_VALUE); - streamMetadataList = streamManager.getStreams(streamIds).join(); - assertEquals(2, streamMetadataList.size()); - } -} \ No newline at end of file diff --git a/s3stream/src/test/java/com/automq/stream/s3/utils/AsyncRateLimiterTest.java b/s3stream/src/test/java/com/automq/stream/s3/utils/AsyncRateLimiterTest.java deleted file mode 100644 index ba0bba188..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/utils/AsyncRateLimiterTest.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD.
- * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.utils; - -import com.automq.stream.utils.AsyncRateLimiter; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -public class AsyncRateLimiterTest { - - @Test - public void test() throws ExecutionException, InterruptedException { - AsyncRateLimiter asyncRateLimiter = new AsyncRateLimiter(1); - long start = System.nanoTime(); - CompletableFuture<Void> cf1 = asyncRateLimiter.acquire(2); - CompletableFuture<Void> cf2 = asyncRateLimiter.acquire(1).thenAccept(nil -> { - long elapsed = System.nanoTime() - start; - Assertions.assertTrue(elapsed > TimeUnit.SECONDS.toNanos(2) && elapsed < TimeUnit.SECONDS.toNanos(4)); - }); - CompletableFuture<Void> cf3 = asyncRateLimiter.acquire(1).thenAccept(nil -> { - long elapsed = System.nanoTime() - start; - Assertions.assertTrue(elapsed > TimeUnit.SECONDS.toNanos(3) && elapsed < TimeUnit.SECONDS.toNanos(5)); - }); - cf1.get(); - cf2.get(); - cf3.get(); - asyncRateLimiter.close(); - } - -} diff --git a/s3stream/src/test/java/com/automq/stream/s3/wal/BlockWALServiceTest.java b/s3stream/src/test/java/com/automq/stream/s3/wal/BlockWALServiceTest.java deleted file mode 100644 index fbb4a2894..000000000 --- a/s3stream/src/test/java/com/automq/stream/s3/wal/BlockWALServiceTest.java +++ /dev/null @@ -1,1307 +0,0 @@ -/* - * Copyright 2024, AutoMQ CO.,LTD. - * - * Use of this software is governed by the Business Source License - * included in the file BSL.md - * - * As of the Change Date specified in that file, in accordance with - * the Business Source License, use of this software will be governed - * by the Apache License, Version 2.0 - */ - -package com.automq.stream.s3.wal; - -import com.automq.stream.s3.ByteBufAlloc; -import com.automq.stream.s3.TestUtils; -import com.automq.stream.s3.wal.benchmark.WriteBench; -import com.automq.stream.s3.wal.util.WALBlockDeviceChannel; -import com.automq.stream.s3.wal.util.WALChannel; -import com.automq.stream.s3.wal.util.WALUtil; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Tag; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.condition.EnabledOnOs; -import org.junit.jupiter.api.condition.OS; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.CsvSource; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; - -import static com.automq.stream.s3.wal.BlockWALService.RECORD_HEADER_SIZE; -import static
com.automq.stream.s3.wal.BlockWALService.WAL_HEADER_TOTAL_CAPACITY; -import static com.automq.stream.s3.wal.WriteAheadLog.AppendResult; -import static com.automq.stream.s3.wal.WriteAheadLog.OverCapacityException; -import static com.automq.stream.s3.wal.WriteAheadLog.RecoverResult; -import static com.automq.stream.s3.wal.util.WALChannelTest.TEST_BLOCK_DEVICE_KEY; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -@Tag("S3Unit") -class BlockWALServiceTest { - - static final String TEST_BLOCK_DEVICE = System.getenv(TEST_BLOCK_DEVICE_KEY); - - private static void testSingleThreadAppendBasic0(boolean mergeWrite, - boolean directIO) throws IOException, OverCapacityException { - final int recordSize = 4096 + 1; - final int recordCount = 100; - final long blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount + WAL_HEADER_TOTAL_CAPACITY; - - String path = TestUtils.tempFilePath(); - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - resetBlockDevice(path, blockDeviceCapacity); - } - - BlockWALService.BlockWALServiceBuilder builder = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .slidingWindowInitialSize(0) - .slidingWindowScaleUnit(4096); - if (!mergeWrite) { - builder.blockSoftLimit(0); - } - final WriteAheadLog wal = builder.build().start(); - recoverAndReset(wal); - - AtomicLong maxFlushedOffset = new AtomicLong(-1); - AtomicLong maxRecordOffset = new AtomicLong(-1); - try { - for (int i = 0; i < recordCount; i++) { - ByteBuf data = TestUtils.random(recordSize); - - final AppendResult appendResult = wal.append(data.retainedDuplicate()); - - if (!mergeWrite) { - assertEquals(i * WALUtil.alignLargeByBlockSize(recordSize), appendResult.recordOffset()); - assertEquals(0, appendResult.recordOffset() % WALUtil.BLOCK_SIZE); - } - appendResult.future().whenComplete((callbackResult, throwable) -> { - assertNull(throwable); - maxFlushedOffset.accumulateAndGet(callbackResult.flushedOffset(), Math::max); - maxRecordOffset.accumulateAndGet(appendResult.recordOffset(), Math::max); - if (!mergeWrite) { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.alignLargeByBlockSize(recordSize)); - } else { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.BLOCK_SIZE); - } - }).whenComplete((callbackResult, throwable) -> { - if (null != throwable) { - throwable.printStackTrace(); - System.exit(1); - } - }); - } - } finally { - wal.shutdownGracefully(); - } - assertTrue(maxFlushedOffset.get() > maxRecordOffset.get(), - "maxFlushedOffset should be greater than maxRecordOffset. 
maxFlushedOffset: " + maxFlushedOffset.get() + ", maxRecordOffset: " + maxRecordOffset.get()); - } - - private static void testSingleThreadAppendWhenOverCapacity0(boolean mergeWrite, - boolean directIO) throws IOException { - final int recordSize = 4096 + 1; - final int recordCount = 100; - long blockDeviceCapacity; - if (!mergeWrite) { - blockDeviceCapacity = recordCount / 3 * WALUtil.alignLargeByBlockSize(recordSize) + WAL_HEADER_TOTAL_CAPACITY; - } else { - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize * recordCount / 3) + WAL_HEADER_TOTAL_CAPACITY; - } - - String path = TestUtils.tempFilePath(); - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(blockDeviceCapacity); - resetBlockDevice(path, blockDeviceCapacity); - } - - BlockWALService.BlockWALServiceBuilder builder = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .slidingWindowInitialSize(0) - .slidingWindowScaleUnit(4096); - if (!mergeWrite) { - builder.blockSoftLimit(0); - } - final WriteAheadLog wal = builder.build().start(); - recoverAndReset(wal); - - AtomicLong maxFlushedOffset = new AtomicLong(-1); - AtomicLong maxRecordOffset = new AtomicLong(-1); - try { - WriteBench.TrimOffset trimOffset = new WriteBench.TrimOffset(); - for (int i = 0; i < recordCount; i++) { - ByteBuf data = TestUtils.random(recordSize); - AppendResult appendResult; - - while (true) { - try { - appendResult = wal.append(data.retainedDuplicate()); - } catch (OverCapacityException e) { - Thread.yield(); - wal.trim(trimOffset.get()).join(); - continue; - } - break; - } - - final long recordOffset = appendResult.recordOffset(); - if (!mergeWrite) { - assertEquals(0, recordOffset % WALUtil.BLOCK_SIZE); - } - trimOffset.appended(recordOffset); - appendResult.future().whenComplete((callbackResult, throwable) -> { - assertNull(throwable); - maxFlushedOffset.accumulateAndGet(callbackResult.flushedOffset(), Math::max); - maxRecordOffset.accumulateAndGet(recordOffset, Math::max); - if (!mergeWrite) { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.alignLargeByBlockSize(recordSize)); - } else { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.BLOCK_SIZE); - } - - trimOffset.flushed(callbackResult.flushedOffset()); - }).whenComplete((callbackResult, throwable) -> { - if (null != throwable) { - throwable.printStackTrace(); - System.exit(1); - } - }); - } - } finally { - wal.shutdownGracefully(); - } - assertTrue(maxFlushedOffset.get() > maxRecordOffset.get(), - "maxFlushedOffset should be greater than maxRecordOffset. 
maxFlushedOffset: " + maxFlushedOffset.get() + ", maxRecordOffset: " + maxRecordOffset.get()); - } - - private static void testMultiThreadAppend0(boolean mergeWrite, - boolean directIO) throws IOException, InterruptedException { - final int recordSize = 4096 + 1; - final int recordCount = 10; - final int threadCount = 8; - final long blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount * threadCount + WAL_HEADER_TOTAL_CAPACITY; - - String path = TestUtils.tempFilePath(); - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - resetBlockDevice(path, blockDeviceCapacity); - } - - BlockWALService.BlockWALServiceBuilder builder = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO); - if (!mergeWrite) { - builder.blockSoftLimit(0); - } - final WriteAheadLog wal = builder.build().start(); - recoverAndReset(wal); - - ExecutorService executorService = Executors.newFixedThreadPool(threadCount); - AtomicLong maxFlushedOffset = new AtomicLong(-1); - AtomicLong maxRecordOffset = new AtomicLong(-1); - try { - for (int t = 0; t < threadCount; t++) { - executorService.submit(() -> Assertions.assertDoesNotThrow(() -> { - for (int i = 0; i < recordCount; i++) { - ByteBuf data = TestUtils.random(recordSize); - - final AppendResult appendResult = wal.append(data.retainedDuplicate()); - - appendResult.future().whenComplete((callbackResult, throwable) -> { - assertNull(throwable); - if (!mergeWrite) { - assertEquals(0, appendResult.recordOffset() % WALUtil.BLOCK_SIZE); - } - maxFlushedOffset.accumulateAndGet(callbackResult.flushedOffset(), Math::max); - maxRecordOffset.accumulateAndGet(appendResult.recordOffset(), Math::max); - if (!mergeWrite) { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.alignLargeByBlockSize(recordSize)); - } else { - assertEquals(0, callbackResult.flushedOffset() % WALUtil.BLOCK_SIZE); - } - }).whenComplete((callbackResult, throwable) -> { - if (null != throwable) { - throwable.printStackTrace(); - System.exit(1); - } - }); - } - })); - } - } finally { - executorService.shutdown(); - assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); - wal.shutdownGracefully(); - } - assertTrue(maxFlushedOffset.get() > maxRecordOffset.get(), - "maxFlushedOffset should be greater than maxRecordOffset. 
maxFlushedOffset: " + maxFlushedOffset.get() + ", maxRecordOffset: " + maxRecordOffset.get()); - } - - private static void testRecoverAfterMergeWrite0(boolean shutdown, boolean overCapacity, - boolean directIO) throws IOException { - final int recordSize = 1024 + 1; - final int recordCount = 100; - long blockDeviceCapacity; - if (overCapacity) { - blockDeviceCapacity = recordSize * recordCount + WAL_HEADER_TOTAL_CAPACITY; - } else { - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount + WAL_HEADER_TOTAL_CAPACITY; - } - String path = TestUtils.tempFilePath(); - - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(blockDeviceCapacity); - resetBlockDevice(path, blockDeviceCapacity); - } - - // Append records - final WriteAheadLog previousWAL = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - recoverAndReset(previousWAL); - List<Long> appended = appendAsync(previousWAL, recordSize, recordCount); - if (shutdown) { - previousWAL.shutdownGracefully(); - } - - // Recover records - final WriteAheadLog wal = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - try { - Iterator<RecoverResult> recover = wal.recover(); - assertNotNull(recover); - - List<Long> recovered = new ArrayList<>(recordCount); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered.add(next.recordOffset()); - } - assertEquals(appended, recovered); - wal.reset().join(); - } finally { - wal.shutdownGracefully(); - } - } - - private static List<Long> appendAsync(WriteAheadLog wal, int recordSize, int recordCount) { - List<Long> appended = new ArrayList<>(recordCount); - List<CompletableFuture<Void>> appendFutures = new LinkedList<>(); - WriteBench.TrimOffset trimOffset = new WriteBench.TrimOffset(); - for (int i = 0; i < recordCount; i++) { - ByteBuf data = TestUtils.random(recordSize); - AppendResult appendResult; - try { - appendResult = wal.append(data.retainedDuplicate()); - } catch (OverCapacityException e) { - long offset = trimOffset.get(); - wal.trim(offset).join(); - appended = appended.stream() - .filter(recordOffset -> recordOffset > offset) - .collect(Collectors.toList()); - i--; - continue; - } - appended.add(appendResult.recordOffset()); - trimOffset.appended(appendResult.recordOffset()); - appendFutures.add(appendResult.future().whenComplete((callbackResult, throwable) -> { - assertNull(throwable); - assertEquals(0, callbackResult.flushedOffset() % WALUtil.BLOCK_SIZE); - trimOffset.flushed(callbackResult.flushedOffset()); - }).whenComplete((callbackResult, throwable) -> { - if (null != throwable) { - throwable.printStackTrace(); - System.exit(1); - } - }).thenApply(ignored -> null)); - } - CompletableFuture.allOf(appendFutures.toArray(new CompletableFuture[0])).join(); - return appended; - } - - public static Stream<Arguments> testRecoverFromDisasterData() { - return Stream.of( - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - -1L, - 50L, - Arrays.asList(0L, 2L, 4L), - Arrays.asList(0L, 2L, 4L), - WALUtil.BLOCK_SIZE - ).toArguments("base"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 0L, - 50L, - Arrays.asList(0L, 2L, 4L), - Arrays.asList(2L, 4L), - WALUtil.BLOCK_SIZE - ).toArguments("trimmed at zero"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 2L, - 50L, - Arrays.asList(0L, 2L, 4L, 6L), - Arrays.asList(4L, 6L), - WALUtil.BLOCK_SIZE - ).toArguments("trimmed"), - new
RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 2L, - 50L, - Arrays.asList(0L, 2L, 4L, 6L, 8L, 10L, 12L, 14L, 16L, 18L, 20L), - Arrays.asList(4L, 6L, 8L, 10L, 12L, 14L, 16L, 18L, 20L), - WALUtil.BLOCK_SIZE - ).toArguments("WAL header flushed slow"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 2L, - 50L, - Arrays.asList(0L, 2L, 8L, 10L, 14L, 20L), - Arrays.asList(8L, 10L, 14L, 20L), - WALUtil.BLOCK_SIZE - ).toArguments("many invalid records"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 2L, - 50L, - Arrays.asList(14L, 8L, 10L, 20L, 0L, 2L), - Arrays.asList(8L, 10L, 14L, 20L), - WALUtil.BLOCK_SIZE - ).toArguments("write in random order"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 20230920L, - 50L, - Arrays.asList(20230900L, 20230910L, 20230916L, 20230920L, 20230930L, 20230940L, 20230950L, 20230960L, 20230970L, 20230980L), - Arrays.asList(20230930L, 20230940L, 20230950L, 20230960L, 20230970L), - WALUtil.BLOCK_SIZE - ).toArguments("big logic offset"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 180L, - 50L, - Arrays.asList(150L, 160L, 170L, 180L, 190L, 200L, 202L, 210L, 220L, 230L, 240L), - Arrays.asList(190L, 200L, 202L, 210L, 220L, 230L), - WALUtil.BLOCK_SIZE - ).toArguments("round robin"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 210L, - 50L, - Arrays.asList(111L, 113L, 115L, 117L, 119L, 120L, 130L, - 210L, 215L, 220L, 230L, 240L, 250L, 260L, 270L, 280L, 290L), - Arrays.asList(215L, 220L, 230L, 240L, 250L, 260L), - WALUtil.BLOCK_SIZE - ).toArguments("overwrite"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - -1L, - 1L, - Arrays.asList(0L, 2L, 5L, 7L), - List.of(0L, 2L), - WALUtil.BLOCK_SIZE - ).toArguments("small window - record size not aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 10L, - 3L, - Arrays.asList(10L, 12L, 15L, 17L, 19L), - List.of(12L, 15L), - WALUtil.BLOCK_SIZE - ).toArguments("invalid record in window - record size not aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE + 1, - 100L, - 10L, - 9L, - Arrays.asList(9L, 14L, 18L, 20L), - List.of(14L, 18L), - WALUtil.BLOCK_SIZE - ).toArguments("trim at an invalid record - record size not aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE, - 100L, - -1L, - 1L, - Arrays.asList(0L, 1L, 3L, 4L, 5L), - List.of(0L, 1L), - WALUtil.BLOCK_SIZE - ).toArguments("small window - record size aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE, - 100L, - 10L, - 3L, - Arrays.asList(10L, 11L, 13L, 14L, 15L, 16L), - List.of(11L, 13L, 14L), - WALUtil.BLOCK_SIZE - ).toArguments("invalid record in window - record size aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE, - 100L, - 10L, - 5L, - Arrays.asList(9L, 11L, 13L, 15L, 16L, 17L), - List.of(11L, 13L, 15L, 16L), - WALUtil.BLOCK_SIZE - ).toArguments("trim at an invalid record - record size aligned"), - new RecoverFromDisasterParam( - WALUtil.BLOCK_SIZE, - 100L, - 10L, - 0L, - Arrays.asList(10L, 11L, 12L, 14L), - List.of(11L, 12L), - WALUtil.BLOCK_SIZE - ).toArguments("zero window"), - new RecoverFromDisasterParam( - 42, - 8192L, - -1L, - 8192L, - Arrays.asList(0L, 42L, 84L), - Arrays.asList(0L, 42L, 84L), - 1 - ).toArguments("merge write - base"), - new RecoverFromDisasterParam( - 42, - 8192L, - 42L, - 8192L, - Arrays.asList(0L, 42L, 84L, 126L), - Arrays.asList(84L, 126L), - 1 - ).toArguments("merge write - trimmed"), - new RecoverFromDisasterParam( - 42, - 
8192L, - 42L, - 8192L, - Arrays.asList(0L, 42L, 42 * 2L, 42 * 4L, 4096L, 4096L + 42L, 4096L + 42 * 3L), - Arrays.asList(42 * 2L, 4096L, 4096L + 42L), - 1 - ).toArguments("merge write - some invalid records"), - new RecoverFromDisasterParam( - 42, - 8192L, - 42L, - 8192L, - Arrays.asList(42L, 42 * 4L, 42 * 2L, 4096L + 42 * 3L, 0L, 4096L, 4096L + 42L), - Arrays.asList(42 * 2L, 4096L, 4096L + 42L), - 1 - ).toArguments("merge write - random order"), - new RecoverFromDisasterParam( - 1000, - 8192L, - 2000L, - 8192L, - Arrays.asList(0L, 1000L, 2000L, 3000L, 4000L, 5000L, 7000L), - Arrays.asList(3000L, 4000L, 5000L), - 1 - ).toArguments("merge write - record in the middle"), - new RecoverFromDisasterParam( - 42, - 8192L, - 8192L + 4096L + 42L, - 8192L, - Arrays.asList(8192L + 4096L, 8192L + 4096L + 42L, 8192L + 4096L + 42 * 2L, 8192L + 4096L + 42 * 4L, 16384L, 16384L + 42L, 16384L + 42 * 3L), - Arrays.asList(8192L + 4096L + 42 * 2L, 16384L, 16384L + 42L), - 1 - ).toArguments("merge write - round robin"), - new RecoverFromDisasterParam( - 1000, - 8192L, - 12000L, - 8192L, - Arrays.asList(1000L, 2000L, 3000L, 4000L, 5000L, 6000L, 7000L, - 9000L, 10000L, 11000L, 12000L, 13000L, 14000L, 15000L), - Arrays.asList(13000L, 14000L, 15000L), - 1 - ).toArguments("merge write - overwrite"), - new RecoverFromDisasterParam( - 42, - 4096L * 20, - -1L, - 4096L, - Arrays.asList(0L, 42L, 42 * 3L, 4096L, 4096L + 42L, 4096L + 42 * 3L, 12288L, 12288L + 42L, 12288L + 42 * 3L, 16384L), - Arrays.asList(0L, 42L, 4096L, 4096L + 42L), - 1 - ).toArguments("merge write - small window"), - new RecoverFromDisasterParam( - 42, - 4096L * 20, - 4096L * 2, - 4096L * 4, - Arrays.asList(4096L * 2, 4096L * 2 + 42L, 4096L * 2 + 42 * 3L, - 4096L * 4, 4096L * 4 + 42L, 4096L * 4 + 42 * 3L, - 4096L * 5, 4096L * 5 + 42L, 4096L * 5 + 42 * 3L, - 4096L * 6, 4096L * 6 + 42L, 4096L * 6 + 42 * 3L, - 4096L * 7, 4096L * 7 + 42L, 4096L * 7 + 42 * 3L, - 4096L * 8), - Arrays.asList(4096L * 2 + 42L, 4096L * 4, 4096L * 4 + 42L, - 4096L * 5, 4096L * 5 + 42L, 4096L * 6, 4096L * 6 + 42L), - 1 - ).toArguments("merge write - invalid record in window"), - new RecoverFromDisasterParam( - 42, - 4096L * 20, - 4096L * 2 + 42 * 2L, - 4096L * 2, - Arrays.asList(4096L * 2, 4096L * 2 + 42L, 4096L * 2 + 42 * 3L, - 4096L * 3, 4096L * 3 + 42L, 4096L * 3 + 42 * 3L, - 4096L * 5, 4096L * 5 + 42L, 4096L * 5 + 42 * 3L, - 4096L * 6, 4096L * 6 + 42L, 4096L * 6 + 42 * 3L, - 4096L * 7), - Arrays.asList(4096L * 3, 4096L * 3 + 42L, 4096L * 5, 4096L * 5 + 42L), - 1 - ).toArguments("merge write - trim at an invalid record"), - new RecoverFromDisasterParam( - 42, - 4096L * 20, - 4096L * 2, - 0L, - Arrays.asList(4096L * 2, 4096L * 2 + 42L, 4096L * 2 + 42 * 3L, - 4096L * 3, 4096L * 3 + 42L, 4096L * 3 + 42 * 3L, - 4096L * 5, 4096L * 5 + 42L, 4096L * 5 + 42 * 3L, - 4096L * 6), - Arrays.asList(4096L * 2 + 42L, 4096L * 3, 4096L * 3 + 42L), - 1 - ).toArguments("merge write - zero window") - ); - } - - /** - * Call {@link WriteAheadLog#recover()} and {@link WriteAheadLog#reset()} to drop all records. - */ - private static void recoverAndReset(WriteAheadLog wal) { - for (Iterator<RecoverResult> it = wal.recover(); it.hasNext(); ) { - it.next().record().release(); - } - wal.reset().join(); - } - - /** - * Write "0"s to the block device to reset it.
- */ - private static void resetBlockDevice(String path, long capacity) throws IOException { - WALChannel channel = WALChannel.builder(path) - .capacity(capacity) - .direct(true) - .build(); - channel.open(); - ByteBuf buf = Unpooled.buffer((int) capacity); - buf.writeZero((int) capacity); - channel.write(buf, 0); - channel.close(); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - public void testSingleThreadAppendBasic(boolean mergeWrite) throws IOException, OverCapacityException { - testSingleThreadAppendBasic0(mergeWrite, false); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - @EnabledOnOs(OS.LINUX) - public void testSingleThreadAppendBasicDirectIO(boolean mergeWrite) throws IOException, OverCapacityException { - testSingleThreadAppendBasic0(mergeWrite, true); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - public void testSingleThreadAppendWhenOverCapacity(boolean mergeWrite) throws IOException { - testSingleThreadAppendWhenOverCapacity0(mergeWrite, false); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - @EnabledOnOs(OS.LINUX) - public void testSingleThreadAppendWhenOverCapacityDirectIO(boolean mergeWrite) throws IOException { - testSingleThreadAppendWhenOverCapacity0(mergeWrite, true); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - public void testMultiThreadAppend(boolean mergeWrite) throws InterruptedException, IOException { - testMultiThreadAppend0(mergeWrite, false); - } - - @ParameterizedTest(name = "Test {index}: mergeWrite={0}") - @ValueSource(booleans = {false, true}) - @EnabledOnOs(OS.LINUX) - public void testMultiThreadAppendDirectIO(boolean mergeWrite) throws InterruptedException, IOException { - testMultiThreadAppend0(mergeWrite, true); - } - - private long append(WriteAheadLog wal, int recordSize) throws OverCapacityException { - final AppendResult appendResult = wal.append(TestUtils.random(recordSize)); - final long recordOffset = appendResult.recordOffset(); - assertEquals(0, recordOffset % WALUtil.BLOCK_SIZE); - appendResult.future().whenComplete((callbackResult, throwable) -> { - assertNull(throwable); - assertTrue(callbackResult.flushedOffset() > recordOffset, "flushedOffset: " + callbackResult.flushedOffset() + ", recordOffset: " + recordOffset); - assertEquals(0, callbackResult.flushedOffset() % WALUtil.BLOCK_SIZE); - }).join(); - return recordOffset; - } - - private List<Long> append(WriteAheadLog wal, int recordSize, int recordCount) { - List<Long> recordOffsets = new ArrayList<>(recordCount); - long offset = 0; - for (int i = 0; i < recordCount; i++) { - try { - offset = append(wal, recordSize); - recordOffsets.add(offset); - } catch (OverCapacityException e) { - wal.trim(offset).join(); - final long trimmedOffset = offset; - recordOffsets = recordOffsets.stream() - .filter(recordOffset -> recordOffset > trimmedOffset) - .collect(Collectors.toList()); - i--; - } - } - return recordOffsets; - } - - private List<Long> appendWithoutTrim(WriteAheadLog wal, int recordSize, - int recordCount) throws OverCapacityException { - List<Long> recordOffsets = new ArrayList<>(recordCount); - for (int i = 0; i < recordCount; i++) { - long offset = append(wal, recordSize); - recordOffsets.add(offset); - } - return recordOffsets; - }
- - @ParameterizedTest(name = "Test {index}: shutdown={0}, overCapacity={1}, recordCount={2}") - @CsvSource({ - "true, false, 10", - "true, true, 9", - "true, true, 10", - "true, true, 11", - - "false, false, 10", - "false, true, 9", - "false, true, 10", - "false, true, 11", - }) - public void testSingleThreadRecover(boolean shutdown, boolean overCapacity, int recordCount) throws IOException { - testSingleThreadRecover0(shutdown, overCapacity, recordCount, false); - } - - @ParameterizedTest(name = "Test {index}: shutdown={0}, overCapacity={1}, recordCount={2}") - @CsvSource({ - "true, false, 10", - "true, true, 9", - "true, true, 10", - "true, true, 11", - - "false, false, 10", - "false, true, 9", - "false, true, 10", - "false, true, 11", - }) - @EnabledOnOs(OS.LINUX) - public void testSingleThreadRecoverDirectIO(boolean shutdown, boolean overCapacity, - int recordCount) throws IOException { - testSingleThreadRecover0(shutdown, overCapacity, recordCount, true); - } - - private void testSingleThreadRecover0(boolean shutdown, boolean overCapacity, int recordCount, - boolean directIO) throws IOException { - final int recordSize = 4096 + 1; - long blockDeviceCapacity; - if (overCapacity) { - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount / 3 + WAL_HEADER_TOTAL_CAPACITY; - } else { - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount + WAL_HEADER_TOTAL_CAPACITY; - } - String path = TestUtils.tempFilePath(); - - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - blockDeviceCapacity = WALUtil.alignLargeByBlockSize(blockDeviceCapacity); - resetBlockDevice(path, blockDeviceCapacity); - } - - // Append records - final WriteAheadLog previousWAL = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - recoverAndReset(previousWAL); - List<Long> appended = append(previousWAL, recordSize, recordCount); - if (shutdown) { - previousWAL.shutdownGracefully(); - } - - // Recover records - final WriteAheadLog wal = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - try { - Iterator<RecoverResult> recover = wal.recover(); - assertNotNull(recover); - - List<Long> recovered = new ArrayList<>(recordCount); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered.add(next.recordOffset()); - } - assertEquals(appended, recovered); - wal.reset().join(); - } finally { - wal.shutdownGracefully(); - } - } - - @ParameterizedTest(name = "Test {index}: shutdown={0}, overCapacity={1}") - @CsvSource({ - "true, false", - "true, true", - "false, false", - "false, true", - }) - public void testRecoverAfterMergeWrite(boolean shutdown, boolean overCapacity) throws IOException { - testRecoverAfterMergeWrite0(shutdown, overCapacity, false); - } - - @ParameterizedTest(name = "Test {index}: shutdown={0}, overCapacity={1}") - @CsvSource({ - "true, false", - "true, true", - "false, false", - "false, true", - }) - @EnabledOnOs(OS.LINUX) - public void testRecoverAfterMergeWriteDirectIO(boolean shutdown, boolean overCapacity) throws IOException { - testRecoverAfterMergeWrite0(shutdown, overCapacity, true); - } - - @Test - public void testAppendAfterRecover() throws IOException, OverCapacityException { - testAppendAfterRecover0(false); - } - - @Test - @EnabledOnOs(OS.LINUX) - public void testAppendAfterRecoverDirectIO() throws IOException, OverCapacityException { - testAppendAfterRecover0(true); - } - - private void testAppendAfterRecover0(boolean directIO) throws IOException, OverCapacityException {
- final int recordSize = 4096 + 1; - String path = TestUtils.tempFilePath(); - - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - resetBlockDevice(path, 1 << 20); - } - - final WriteAheadLog previousWAL = BlockWALService.builder(path, 1 << 20) - .direct(directIO) - .build() - .start(); - recoverAndReset(previousWAL); - // Append 2 records - long appended0 = append(previousWAL, recordSize); - assertEquals(0, appended0); - long appended1 = append(previousWAL, recordSize); - assertEquals(WALUtil.alignLargeByBlockSize(recordSize), appended1); - - final WriteAheadLog wal = BlockWALService.builder(path, 1 << 20) - .direct(directIO) - .build() - .start(); - try { - // Recover records - Iterator<RecoverResult> recover = wal.recover(); - assertNotNull(recover); - - List<Long> recovered = new ArrayList<>(); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered.add(next.recordOffset()); - } - assertEquals(Arrays.asList(appended0, appended1), recovered); - - // Reset after recover - wal.reset().join(); - - // Append another 2 records - long appended2 = append(wal, recordSize); - long appended3 = append(wal, recordSize); - assertEquals(WALUtil.alignLargeByBlockSize(recordSize) + appended2, appended3); - } finally { - wal.shutdownGracefully(); - } - } - - private ByteBuf recordHeader(ByteBuf body, long offset) { - return new SlidingWindowService.RecordHeaderCoreData() - .setMagicCode(BlockWALService.RECORD_HEADER_MAGIC_CODE) - .setRecordBodyLength(body.readableBytes()) - .setRecordBodyOffset(offset + BlockWALService.RECORD_HEADER_SIZE) - .setRecordBodyCRC(WALUtil.crc32(body)) - .marshal(); - } - - private void write(WALChannel walChannel, long logicOffset, int recordSize) throws IOException { - ByteBuf recordBody = TestUtils.random(recordSize - RECORD_HEADER_SIZE); - ByteBuf recordHeader = recordHeader(recordBody, logicOffset); - - CompositeByteBuf record = ByteBufAlloc.compositeByteBuffer(); - record.addComponents(true, recordHeader, recordBody); - - long position = WALUtil.recordOffsetToPosition(logicOffset, walChannel.capacity(), WAL_HEADER_TOTAL_CAPACITY); - walChannel.writeAndFlush(record, position); - } - - private void writeWALHeader(WALChannel walChannel, long trimOffset, long maxLength) throws IOException { - ByteBuf header = new WALHeader(walChannel.capacity(), maxLength) - .updateTrimOffset(trimOffset) - .marshal(); - walChannel.writeAndFlush(header, 0); - }
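- - // Each disaster case below writes records and a crafted header straight through the raw channel, bypassing the WAL, then asserts exactly which logical offsets a fresh WAL recovers.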
- @ParameterizedTest(name = "Test {index} {0}") - @MethodSource("testRecoverFromDisasterData") - public void testRecoverFromDisaster( - String name, - int recordSize, - long capacity, - long trimOffset, - long maxLength, - List<Long> writeOffsets, - List<Long> recoveredOffsets - ) throws IOException { - testRecoverFromDisaster0(name, recordSize, capacity, trimOffset, maxLength, writeOffsets, recoveredOffsets, false); - } - - @ParameterizedTest(name = "Test {index} {0}") - @MethodSource("testRecoverFromDisasterData") - @EnabledOnOs({OS.LINUX}) - public void testRecoverFromDisasterDirectIO( - String name, - int recordSize, - long capacity, - long trimOffset, - long maxLength, - List<Long> writeOffsets, - List<Long> recoveredOffsets - ) throws IOException { - testRecoverFromDisaster0(name, recordSize, capacity, trimOffset, maxLength, writeOffsets, recoveredOffsets, true); - } - - private void testRecoverFromDisaster0( - String name, - int recordSize, - long capacity, - long trimOffset, - long maxLength, - List<Long> writeOffsets, - List<Long> recoveredOffsets, - boolean directIO - ) throws IOException { - String path = TestUtils.tempFilePath(); - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - capacity = WALUtil.alignLargeByBlockSize(capacity); - resetBlockDevice(path, capacity); - } - - WALChannel walChannel; - if (directIO) { - WALBlockDeviceChannel blockDeviceChannel = new WALBlockDeviceChannel(path, capacity); - blockDeviceChannel.unalignedWrite = true; - walChannel = blockDeviceChannel; - } else { - walChannel = WALChannel.builder(path) - .capacity(capacity) - .direct(false) - .build(); - } - - // Simulate disaster - walChannel.open(); - writeWALHeader(walChannel, trimOffset, maxLength); - for (long writeOffset : writeOffsets) { - write(walChannel, writeOffset, recordSize); - } - walChannel.close(); - - final WriteAheadLog wal = BlockWALService.builder(path, capacity) - .direct(directIO) - .build() - .start(); - try { - // Recover records - Iterator<RecoverResult> recover = wal.recover(); - assertNotNull(recover); - - List<Long> recovered = new ArrayList<>(); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered.add(next.recordOffset()); - } - assertEquals(recoveredOffsets, recovered, name); - wal.reset().join(); - } finally { - wal.shutdownGracefully(); - } - } - - @Test - public void testRecoverAfterReset() throws IOException, OverCapacityException { - testRecoverAfterReset0(false); - } - - @Test - @EnabledOnOs({OS.LINUX}) - public void testRecoverAfterResetDirectIO() throws IOException, OverCapacityException { - testRecoverAfterReset0(true); - } - - private void testRecoverAfterReset0(boolean directIO) throws IOException, OverCapacityException { - final int recordSize = 4096 + 1; - final int recordCount = 10; - final long blockDeviceCapacity = WALUtil.alignLargeByBlockSize(recordSize) * recordCount * 2 + WAL_HEADER_TOTAL_CAPACITY; - String path = TestUtils.tempFilePath(); - - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - resetBlockDevice(path, blockDeviceCapacity); - } - - // 1. append and force shutdown - final WriteAheadLog wal1 = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - recoverAndReset(wal1); - List<Long> appended1 = appendWithoutTrim(wal1, recordSize, recordCount); - - // 2. recover and reset - final WriteAheadLog wal2 = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - Iterator<RecoverResult> recover = wal2.recover(); - assertNotNull(recover); - List<Long> recovered1 = new ArrayList<>(recordCount); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered1.add(next.recordOffset()); - } - assertEquals(appended1, recovered1); - wal2.reset().join(); - - // 3. append and force shutdown again - List<Long> appended2 = appendWithoutTrim(wal2, recordSize, recordCount); -
- // 4. recover again - final WriteAheadLog wal3 = BlockWALService.builder(path, blockDeviceCapacity) - .direct(directIO) - .build() - .start(); - recover = wal3.recover(); - assertNotNull(recover); - List<Long> recovered2 = new ArrayList<>(recordCount); - while (recover.hasNext()) { - RecoverResult next = recover.next(); - next.record().release(); - recovered2.add(next.recordOffset()); - } - assertEquals(appended2, recovered2); - } - - @Test - public void testTrimInvalidOffset() throws IOException, OverCapacityException { - final WriteAheadLog wal = BlockWALService.builder(TestUtils.tempFilePath(), 16384) - .build() - .start(); - recoverAndReset(wal); - try { - long appended = append(wal, 42); - Assertions.assertThrows(IllegalArgumentException.class, () -> wal.trim(appended + 4096 + 1).join()); - } finally { - wal.shutdownGracefully(); - } - } - - @Test - public void testWindowGreaterThanCapacity() throws IOException, OverCapacityException { - final WriteAheadLog wal = BlockWALService.builder(TestUtils.tempFilePath(), WALUtil.BLOCK_SIZE * 3L) - .slidingWindowUpperLimit(WALUtil.BLOCK_SIZE * 4L) - .build() - .start(); - recoverAndReset(wal); - try { - append(wal, 42); - Assertions.assertThrows(OverCapacityException.class, () -> append(wal, 42)); - } finally { - wal.shutdownGracefully(); - } - } - - @Test - public void testRecoveryMode() throws IOException, OverCapacityException { - testRecoveryMode0(false); - } - - @Test - @EnabledOnOs({OS.LINUX}) - public void testRecoveryModeDirectIO() throws IOException, OverCapacityException { - testRecoveryMode0(true); - } - - private void testRecoveryMode0(boolean directIO) throws IOException, OverCapacityException { - final long capacity = 1 << 20; - final int nodeId = 10; - final long epoch = 100; - String path = TestUtils.tempFilePath(); - - if (directIO && TEST_BLOCK_DEVICE != null) { - path = TEST_BLOCK_DEVICE; - resetBlockDevice(path, capacity); - } - - // simulate a crash - WriteAheadLog wal1 = BlockWALService.builder(path, capacity) - .nodeId(nodeId) - .epoch(epoch) - .direct(directIO) - .build() - .start(); - recoverAndReset(wal1); - wal1.append(TestUtils.random(4097)).future().join(); - - // open in recovery mode - WriteAheadLog wal2 = BlockWALService.recoveryBuilder(path) - .direct(directIO) - .build() - .start(); - assertEquals(nodeId, wal2.metadata().nodeId()); - assertEquals(epoch, wal2.metadata().epoch()); - // we can recover and reset the WAL - recoverAndReset(wal2); - // but we can't append to or trim it - assertThrows(IllegalStateException.class, () -> wal2.append(TestUtils.random(4097)).future().join()); - assertThrows(IllegalStateException.class, () -> wal2.trim(0).join()); - } - - @Test - public void testCapacityMismatchFileSize() throws IOException { - testCapacityMismatchFileSize0(false); - } - - @Test - @EnabledOnOs({OS.LINUX}) - public void testCapacityMismatchFileSizeDirectIO() throws IOException { - testCapacityMismatchFileSize0(true); - } - - private void testCapacityMismatchFileSize0(boolean directIO) throws IOException { - final long capacity1 = 1 << 20; - final long capacity2 = 1 << 21; - final String path = TestUtils.tempFilePath(); - - // init a WAL with capacity1 - WriteAheadLog wal1 = BlockWALService.builder(path, capacity1) - .direct(directIO) - .build() - .start(); - recoverAndReset(wal1); - wal1.shutdownGracefully(); - - // try to open it with capacity2 - WriteAheadLog wal2 = BlockWALService.builder(path, capacity2) - .direct(directIO) - .build(); - assertThrows(WALCapacityMismatchException.class, wal2::start); - }
-
-    @Test
-    public void testCapacityMismatchInHeader() throws IOException {
-        testCapacityMismatchInHeader0(false);
-    }
-
-    @Test
-    @EnabledOnOs({OS.LINUX})
-    public void testCapacityMismatchInHeaderDirectIO() throws IOException {
-        testCapacityMismatchInHeader0(true);
-    }
-
-    private void testCapacityMismatchInHeader0(boolean directIO) throws IOException {
-        final long capacity1 = 1 << 20;
-        final long capacity2 = 1 << 21;
-        String path = TestUtils.tempFilePath();
-
-        if (directIO && TEST_BLOCK_DEVICE != null) {
-            path = TEST_BLOCK_DEVICE;
-            resetBlockDevice(path, capacity1);
-        }
-
-        // init a WAL with capacity1
-        WriteAheadLog wal1 = BlockWALService.builder(path, capacity1)
-            .direct(directIO)
-            .build()
-            .start();
-        recoverAndReset(wal1);
-        wal1.shutdownGracefully();
-
-        // overwrite the capacity in the header with capacity2
-        WALChannel walChannel = WALChannel.builder(path)
-            .capacity(capacity1)
-            .direct(directIO)
-            .build();
-        walChannel.open();
-        walChannel.writeAndFlush(new WALHeader(capacity2, 42).marshal(), 0);
-        walChannel.close();
-
-        // try to open it with capacity1
-        WriteAheadLog wal2 = BlockWALService.builder(path, capacity1)
-            .direct(directIO)
-            .build();
-        assertThrows(WALCapacityMismatchException.class, wal2::start);
-    }
-
-    @Test
-    public void testRecoveryModeWALFileNotExist() throws IOException {
-        testRecoveryModeWALFileNotExist0(false);
-    }
-
-    @Test
-    @EnabledOnOs({OS.LINUX})
-    public void testRecoveryModeWALFileNotExistDirectIO() throws IOException {
-        testRecoveryModeWALFileNotExist0(true);
-    }
-
-    private void testRecoveryModeWALFileNotExist0(boolean directIO) throws IOException {
-        final String path = TestUtils.tempFilePath();
-
-        WriteAheadLog wal = BlockWALService.recoveryBuilder(path)
-            .direct(directIO)
-            .build();
-        assertThrows(WALNotInitializedException.class, wal::start);
-    }
-
-    @Test
-    public void testRecoveryModeNoHeader() throws IOException {
-        testRecoveryModeNoHeader0(false);
-    }
-
-    @Test
-    @EnabledOnOs({OS.LINUX})
-    public void testRecoveryModeNoHeaderDirectIO() throws IOException {
-        testRecoveryModeNoHeader0(true);
-    }
-
-    private void testRecoveryModeNoHeader0(boolean directIO) throws IOException {
-        final long capacity = 1 << 20;
-        String path = TestUtils.tempFilePath();
-
-        if (directIO && TEST_BLOCK_DEVICE != null) {
-            path = TEST_BLOCK_DEVICE;
-            resetBlockDevice(path, capacity);
-        }
-
-        // init a WAL
-        WriteAheadLog wal1 = BlockWALService.builder(path, capacity)
-            .direct(directIO)
-            .build()
-            .start();
-        recoverAndReset(wal1);
-        wal1.shutdownGracefully();
-
-        // clear the WAL header
-        WALChannel walChannel = WALChannel.builder(path)
-            .capacity(capacity)
-            .direct(directIO)
-            .build();
-        walChannel.open();
-        walChannel.writeAndFlush(Unpooled.buffer(WAL_HEADER_TOTAL_CAPACITY).writeZero(WAL_HEADER_TOTAL_CAPACITY), 0);
-        walChannel.close();
-
-        // try to open it in recovery mode
-        WriteAheadLog wal2 = BlockWALService.recoveryBuilder(path)
-            .direct(directIO)
-            .build();
-        assertThrows(WALNotInitializedException.class, wal2::start);
-    }
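The recovery-mode tests encode the contract of BlockWALService.recoveryBuilder: the WAL can be recovered and reset without knowing its capacity or configuration, while append and trim throw IllegalStateException. A rough sketch of a failover-style consumer, using only calls that appear in the deleted tests (reset() returning a joinable future is an assumption based on the recoverAndReset helper they rely on):

    import com.automq.stream.s3.wal.BlockWALService;
    import com.automq.stream.s3.wal.WriteAheadLog;

    public class RecoveryModeExample {
        public static void main(String[] args) throws Exception {
            // Open another node's WAL after a crash; no capacity needs to be supplied.
            WriteAheadLog wal = BlockWALService.recoveryBuilder("/tmp/wal.data").build().start();
            var it = wal.recover();            // allowed: replay all unconsumed records
            while (it.hasNext()) {
                var result = it.next();
                result.record().release();     // records are pooled buffers; release after use
            }
            wal.reset().join();                // allowed: mark the replayed records as consumed
            // wal.append(...) or wal.trim(...) would throw IllegalStateException here
            wal.shutdownGracefully();
        }
    }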
-
-    private static class RecoverFromDisasterParam {
-        int recordSize;
-        long capacity;
-        // WAL header
-        long trimOffset;
-        long maxLength;
-        // WAL records
-        List<Long> writeOffsets;
-        List<Long> recoveredOffsets;
-
-        public RecoverFromDisasterParam(
-            int recordSize,
-            long capacity,
-            long trimOffset,
-            long maxLength,
-            List<Long> writeOffsets,
-            List<Long> recoveredOffsets,
-            int unit
-        ) {
-            this.recordSize = recordSize;
-            this.capacity = capacity * unit + WAL_HEADER_TOTAL_CAPACITY;
-            this.trimOffset = trimOffset * unit;
-            this.maxLength = maxLength * unit;
-            this.writeOffsets = writeOffsets.stream().map(offset -> offset * unit).collect(Collectors.toList());
-            this.recoveredOffsets = recoveredOffsets.stream().map(offset -> offset * unit).collect(Collectors.toList());
-        }
-
-        public Arguments toArguments(String name) {
-            return Arguments.of(name, recordSize, capacity, trimOffset, maxLength, writeOffsets, recoveredOffsets);
-        }
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/s3/wal/WALHeaderTest.java b/s3stream/src/test/java/com/automq/stream/s3/wal/WALHeaderTest.java
deleted file mode 100644
index 4b241f193..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/wal/WALHeaderTest.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal;
-
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-public class WALHeaderTest {
-
-    @Test
-    public void test() throws UnmarshalException {
-        WALHeader header = new WALHeader(128 * 1024, 100);
-        header.updateTrimOffset(10);
-        header.setLastWriteTimestamp(11);
-        header.setShutdownType(ShutdownType.GRACEFULLY);
-        header.setNodeId(233);
-        header.setEpoch(234);
-
-        WALHeader unmarshal = WALHeader.unmarshal(header.marshal().duplicate());
-        assertEquals(header.getCapacity(), unmarshal.getCapacity());
-        assertEquals(header.getTrimOffset(), unmarshal.getTrimOffset());
-        assertEquals(header.getLastWriteTimestamp(), unmarshal.getLastWriteTimestamp());
-        assertEquals(header.getSlidingWindowMaxLength(), unmarshal.getSlidingWindowMaxLength());
-        assertEquals(header.getShutdownType(), unmarshal.getShutdownType());
-        assertEquals(header.getNodeId(), unmarshal.getNodeId());
-        assertEquals(header.getEpoch(), unmarshal.getEpoch());
-    }
-
-}
diff --git a/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannelTest.java b/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannelTest.java
deleted file mode 100644
index 689ccc87b..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALBlockDeviceChannelTest.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal.util;
-
-import com.automq.stream.s3.TestUtils;
-import com.automq.stream.utils.ThreadUtils;
-import com.automq.stream.utils.Threads;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.CompositeByteBuf;
-import io.netty.buffer.Unpooled;
-import java.io.IOException;
-import java.util.Optional;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.condition.EnabledOnOs;
-import org.junit.jupiter.api.condition.OS;
-
-import static com.automq.stream.s3.wal.util.WALChannelTest.TEST_BLOCK_DEVICE_KEY;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-@Tag("S3Unit")
-@EnabledOnOs(OS.LINUX)
-public class WALBlockDeviceChannelTest {
-
-    static final String TEST_BLOCK_DEVICE = System.getenv(TEST_BLOCK_DEVICE_KEY);
-
-    private String getTestPath() {
-        return Optional.ofNullable(TEST_BLOCK_DEVICE).orElse(TestUtils.tempFilePath());
-    }
-
-    @Test
-    public void testSingleThreadWriteBasic() throws IOException {
-        final int size = 4096 + 1;
-        final int count = 100;
-        final long capacity = WALUtil.alignLargeByBlockSize(size) * count;
-
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), capacity);
-        channel.open();
-
-        for (int i = 0; i < count; i++) {
-            ByteBuf data = TestUtils.random(size);
-            long pos = WALUtil.alignLargeByBlockSize(size) * i;
-            channel.writeAndFlush(data, pos);
-        }
-
-        channel.close();
-    }
-
-    @Test
-    public void testSingleThreadWriteComposite() throws IOException {
-        final int maxSize = 4096 * 4;
-        final int count = 100;
-        final int batch = 10;
-        final long capacity = WALUtil.alignLargeByBlockSize(maxSize) * count;
-
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), capacity);
-        channel.open();
-
-        for (int i = 0; i < count; i += batch) {
-            CompositeByteBuf data = Unpooled.compositeBuffer();
-            for (int j = 0; j < batch; j++) {
-                int size = ThreadLocalRandom.current().nextInt(1, maxSize);
-                data.addComponent(true, TestUtils.random(size));
-            }
-            long pos = WALUtil.alignLargeByBlockSize(maxSize) * i;
-            channel.writeAndFlush(data, pos);
-        }
-
-        channel.close();
-    }
-
-    @Test
-    public void testMultiThreadWrite() throws IOException, InterruptedException {
-        final int size = 4096 + 1;
-        final int count = 1000;
-        final int threads = 8;
-        final long capacity = WALUtil.alignLargeByBlockSize(size) * count;
-
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), capacity);
-        channel.open();
-
-        ExecutorService executor = Threads.newFixedThreadPool(threads,
-            ThreadUtils.createThreadFactory("test-block-device-channel-write-%d", false), null);
-        for (int i = 0; i < count; i++) {
-            final int index = i;
-            executor.submit(() -> {
-                ByteBuf data = TestUtils.random(size);
-                long pos = WALUtil.alignLargeByBlockSize(size) * index;
-                try {
-                    channel.writeAndFlush(data, pos);
-                } catch (IOException e) {
-                    throw new RuntimeException(e);
-                }
-            });
-        }
-        executor.shutdown();
-        assertTrue(executor.awaitTermination(10, TimeUnit.SECONDS));
-
-        channel.close();
-    }
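The write tests above always place records at block-aligned positions computed by WALUtil.alignLargeByBlockSize, and the read tests below round positions down with alignSmallByBlockSize. The helper names come from the deleted code; the arithmetic their usage implies is the usual round-up/round-down to a 4 KiB block, roughly:

    // Sketch of the alignment helpers as their usage in these tests implies
    // (the real WALUtil lives in the s3stream submodule and may differ):
    static final int BLOCK_SIZE = 4096;

    static long alignLargeByBlockSize(long size) { // round up: 4097 -> 8192
        return (size + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
    }

    static long alignSmallByBlockSize(long size) { // round down: 4097 -> 4096
        return size / BLOCK_SIZE * BLOCK_SIZE;
    }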
-
-    @Test
-    public void testWriteNotAlignedBufferSize() throws IOException {
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), 1 << 20);
-        channel.open();
-
-        ByteBuf data = TestUtils.random(42);
-        // It's ok to do this
-        assertDoesNotThrow(() -> channel.writeAndFlush(data, 0));
-
-        channel.close();
-    }
-
-    @Test
-    public void testWriteNotAlignedPosition() throws IOException {
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), 1 << 20);
-        channel.open();
-
-        ByteBuf data = TestUtils.random(4096);
-        assertThrows(AssertionError.class, () -> channel.writeAndFlush(data, 42));
-
-        channel.close();
-    }
-
-    @Test
-    public void testWriteOutOfBound() throws IOException {
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), 4096);
-        channel.open();
-
-        ByteBuf data = TestUtils.random(4096);
-        assertThrows(AssertionError.class, () -> channel.writeAndFlush(data, 8192));
-
-        channel.close();
-    }
-
-    @Test
-    public void testReadBasic() throws IOException {
-        final int size = 4096 + 1;
-        final int count = 100;
-        final long capacity = WALUtil.alignLargeByBlockSize(size) * count;
-        final String path = getTestPath();
-
-        WALBlockDeviceChannel wChannel = new WALBlockDeviceChannel(path, capacity);
-        wChannel.open();
-        WALBlockDeviceChannel rChannel = new WALBlockDeviceChannel(path, capacity);
-        rChannel.open();
-
-        for (int i = 0; i < count; i++) {
-            ByteBuf data = TestUtils.random(size);
-            long pos = ThreadLocalRandom.current().nextLong(0, capacity - size);
-            pos = WALUtil.alignSmallByBlockSize(pos);
-            wChannel.writeAndFlush(data, pos);
-
-            ByteBuf buf = Unpooled.buffer(size);
-            int read = rChannel.read(buf, pos);
-            assert read == size;
-            assert data.equals(buf);
-        }
-
-        rChannel.close();
-        wChannel.close();
-    }
-
-    @Test
-    public void testReadInside() throws IOException {
-        final int size = 4096 * 4 + 1;
-        final int count = 100;
-        final long capacity = WALUtil.alignLargeByBlockSize(size) * count;
-        final String path = getTestPath();
-
-        WALBlockDeviceChannel wChannel = new WALBlockDeviceChannel(path, capacity);
-        wChannel.open();
-        WALBlockDeviceChannel rChannel = new WALBlockDeviceChannel(path, capacity);
-        rChannel.open();
-
-        for (int i = 0; i < count; i++) {
-            ByteBuf data = TestUtils.random(size);
-            long pos = ThreadLocalRandom.current().nextLong(0, capacity - size);
-            pos = WALUtil.alignSmallByBlockSize(pos);
-            wChannel.writeAndFlush(data, pos);
-
-            int start = ThreadLocalRandom.current().nextInt(0, size - 1);
-            int end = ThreadLocalRandom.current().nextInt(start + 1, size);
-            ByteBuf buf = Unpooled.buffer(end - start);
-            int read = rChannel.read(buf, pos + start);
-            assert read == end - start;
-            assert data.slice(start, end - start).equals(buf);
-        }
-
-        rChannel.close();
-        wChannel.close();
-    }
-
-    @Test
-    public void testReadNotAlignedBufferSize() throws IOException {
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), 1 << 20);
-        channel.open();
-
-        ByteBuf data = Unpooled.buffer(42);
-        // It's ok to do this
-        assertDoesNotThrow(() -> channel.read(data, 0));
-
-        channel.close();
-    }
-
-    @Test
-    public void testReadNotAlignedPosition() throws IOException {
-        WALBlockDeviceChannel channel = new WALBlockDeviceChannel(getTestPath(), 1 << 20);
-        channel.open();
-
-        ByteBuf data = Unpooled.buffer(4096);
-        // It's ok to do this
-        assertDoesNotThrow(() -> channel.read(data, 42));
-
-        channel.close();
-    }
-}
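Taken together, these tests document the channel's direct-I/O contract: write positions must be block-aligned and in bounds (violations fail an assertion), buffer sizes are padded internally, and reads tolerate both unaligned sizes and unaligned positions. Condensed into one sketch (TestUtils, Unpooled, and the constructor shape are taken from the tests above; the path is a placeholder):

    // Direct-I/O contract of WALBlockDeviceChannel, as asserted by the deleted tests:
    WALBlockDeviceChannel ch = new WALBlockDeviceChannel("/tmp/wal.data", 1 << 20);
    ch.open();
    ch.writeAndFlush(TestUtils.random(42), 0);            // OK: size padded up to a block
    // ch.writeAndFlush(TestUtils.random(4096), 42);      // AssertionError: unaligned position
    // ch.writeAndFlush(TestUtils.random(4096), 1 << 20); // AssertionError: out of bounds
    ch.read(Unpooled.buffer(42), 42);                     // OK: reads may be fully unaligned
    ch.close();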
diff --git a/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALChannelTest.java b/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALChannelTest.java
deleted file mode 100644
index a254e7f3b..000000000
--- a/s3stream/src/test/java/com/automq/stream/s3/wal/util/WALChannelTest.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.s3.wal.util;
-
-import com.automq.stream.s3.TestUtils;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-
-@Tag("S3Unit")
-public class WALChannelTest {
-    public static final String TEST_BLOCK_DEVICE_KEY = "WAL_TEST_BLOCK_DEVICE";
-
-    WALChannel walChannel;
-
-    @BeforeEach
-    void setUp() {
-        walChannel = WALChannel.builder(String.format("%s/WALChannelUnitTest.data", TestUtils.tempFilePath())).direct(false).capacity(1024 * 1024 * 20).build();
-        try {
-            walChannel.open();
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @AfterEach
-    void tearDown() {
-        walChannel.close();
-    }
-
-    ByteBuffer createRandomTextByteBuffer(int size) {
-        ByteBuffer byteBuffer = ByteBuffer.allocate(size);
-
-        for (int i = 0; i < size; i++) {
-            byteBuffer.put("ABCDEFGH".getBytes()[i % 8]);
-        }
-
-        return byteBuffer.flip();
-    }
-
-    @Test
-    void testWriteAndRead() throws IOException {
-        ByteBuf data = TestUtils.random(1024 * 3);
-        for (int i = 0; i < 100; i++) {
-            try {
-                walChannel.writeAndFlush(data, (long) i * data.readableBytes());
-            } catch (IOException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        final String content = "Hello World";
-        walChannel.writeAndFlush(Unpooled.wrappedBuffer(content.getBytes()), 100);
-
-        ByteBuf readBuffer = Unpooled.buffer(content.length());
-        int read = walChannel.read(readBuffer, 100);
-
-        String readString = new String(readBuffer.array());
-        System.out.println(new String(readBuffer.array()));
-        System.out.println(read);
-
-        assert read == content.length();
-        assert readString.equals(content);
-    }
-}
diff --git a/s3stream/src/test/java/com/automq/stream/utils/FutureTickerTest.java b/s3stream/src/test/java/com/automq/stream/utils/FutureTickerTest.java
deleted file mode 100644
index f81f26803..000000000
--- a/s3stream/src/test/java/com/automq/stream/utils/FutureTickerTest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright 2024, AutoMQ CO.,LTD.
- *
- * Use of this software is governed by the Business Source License
- * included in the file BSL.md
- *
- * As of the Change Date specified in that file, in accordance with
- * the Business Source License, use of this software will be governed
- * by the Apache License, Version 2.0
- */
-
-package com.automq.stream.utils;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNotSame;
-import static org.junit.jupiter.api.Assertions.assertSame;
-
-class FutureTickerTest {
-
-    private FutureTicker ticker = new FutureTicker(10, TimeUnit.MILLISECONDS, Executors.newSingleThreadExecutor());
-
-    @BeforeEach
-    void setUp() {
-        ticker = new FutureTicker(10, TimeUnit.MILLISECONDS, Executors.newSingleThreadExecutor());
-    }
-
-    @Test
-    void testFirstTick() {
-        CompletableFuture<Void> tick = ticker.tick();
-        assertNotNull(tick);
-        assertFalse(tick.isDone());
-    }
-
-    @Test
-    void testTwoTicks() {
-        CompletableFuture<Void> tick1 = ticker.tick();
-        CompletableFuture<Void> tick2 = ticker.tick();
-        assertSame(tick1, tick2);
-    }
-
-    @Test
-    void testDelay() throws InterruptedException {
-        CompletableFuture<Void> tick1 = ticker.tick();
-        // complete the first tick manually to mock the delay
-        // Thread.sleep(20);
-        tick1.complete(null);
-        CompletableFuture<Void> tick2 = ticker.tick();
-        assertFalse(tick2.isDone());
-        assertNotSame(tick1, tick2);
-    }
-}
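The FutureTicker tests describe a simple batching primitive: every call to tick() within one interval returns the same future, and a fresh future is handed out once the previous tick completes. A usage sketch built on the constructor and tick() calls shown above (the flush use case is an illustrative assumption):

    import com.automq.stream.utils.FutureTicker;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class TickerExample {
        public static void main(String[] args) {
            FutureTicker ticker = new FutureTicker(10, TimeUnit.MILLISECONDS, Executors.newSingleThreadExecutor());
            // Callers arriving within the same 10 ms window get the same future,
            // so work chained on it is naturally coalesced into one batch per tick.
            ticker.tick().thenRun(() -> System.out.println("flush batch"));
            ticker.tick().thenRun(() -> System.out.println("same tick, same future"));
        }
    }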
diff --git a/s3stream/src/test/resources/log4j.properties b/s3stream/src/test/resources/log4j.properties
deleted file mode 100644
index 83f41ba48..000000000
--- a/s3stream/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright 2024, AutoMQ CO.,LTD.
-#
-# Use of this software is governed by the Business Source License
-# included in the file BSL.md
-#
-# As of the Change Date specified in that file, in accordance with
-# the Business Source License, use of this software will be governed
-# by the Apache License, Version 2.0
-#
-log4j.rootLogger=OFF, stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n
-log4j.logger.com.automq=WARN
diff --git a/store/src/main/java/com/automq/rocketmq/store/MessageStoreBuilder.java b/store/src/main/java/com/automq/rocketmq/store/MessageStoreBuilder.java
index f7d4dd46a..6c2cda50f 100644
--- a/store/src/main/java/com/automq/rocketmq/store/MessageStoreBuilder.java
+++ b/store/src/main/java/com/automq/rocketmq/store/MessageStoreBuilder.java
@@ -60,7 +60,7 @@ public static MessageStoreImpl build(StoreConfig storeConfig, S3StreamConfig s3S
         // S3 object manager, such as trim expired messages, etc.
         S3Operator operator = new DefaultS3Operator(s3StreamConfig.s3Endpoint(), s3StreamConfig.s3Region(), s3StreamConfig.s3Bucket(),
-            s3StreamConfig.s3ForcePathStyle(), List.of(() -> AwsBasicCredentials.create(s3StreamConfig.s3AccessKey(), s3StreamConfig.s3SecretKey())));
+            s3StreamConfig.s3ForcePathStyle(), List.of(() -> AwsBasicCredentials.create(s3StreamConfig.s3AccessKey(), s3StreamConfig.s3SecretKey())), false);
         S3ObjectOperator objectOperator = new S3ObjectOperatorImpl(operator);
 
         TransactionService transactionService = new TransactionService(storeConfig, timerService);
diff --git a/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java b/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
index 6e6515582..b21222fdb 100644
--- a/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
+++ b/store/src/main/java/com/automq/rocketmq/store/S3StreamStore.java
@@ -88,7 +88,7 @@ public S3StreamStore(StoreConfig storeConfig, S3StreamConfig streamConfig, Store
         S3Operator defaultOperator = new DefaultS3Operator(streamConfig.s3Endpoint(), streamConfig.s3Region(), streamConfig.s3Bucket(),
             streamConfig.s3ForcePathStyle(), List.of(() -> AwsBasicCredentials.create(streamConfig.s3AccessKey(), streamConfig.s3SecretKey())),
-            networkInboundLimiter, networkOutboundLimiter, true);
+            false, networkInboundLimiter, networkOutboundLimiter, true);
         WriteAheadLog writeAheadLog = BlockWALService.builder(s3Config.walPath(), s3Config.walCapacity()).config(s3Config).build();
         S3BlockCache blockCache = new DefaultS3BlockCache(s3Config, objectManager, defaultOperator);
@@ -99,7 +99,7 @@ public S3StreamStore(StoreConfig storeConfig, S3StreamConfig streamConfig, Store
         // Build the compaction manager
         S3Operator compactionOperator = new DefaultS3Operator(streamConfig.s3Endpoint(), streamConfig.s3Region(), streamConfig.s3Bucket(),
             streamConfig.s3ForcePathStyle(), List.of(() -> AwsBasicCredentials.create(streamConfig.s3AccessKey(), streamConfig.s3SecretKey())),
-            networkInboundLimiter, networkOutboundLimiter, true);
+            false, networkInboundLimiter, networkOutboundLimiter, true);
         this.compactionManager = new CompactionManager(s3Config, objectManager, streamManager, compactionOperator);
         this.streamClient = new S3StreamClient(streamManager, storage, objectManager, defaultOperator, s3Config, networkInboundLimiter, networkOutboundLimiter);
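Both call sites now pass an extra boolean to DefaultS3Operator, in different positions for the two overloads. The flag's parameter name is not visible in this patch (it is defined by the s3stream submodule), so the sketch below only mirrors the argument order taken from the hunks above; all identifiers are placeholders:

    // Short overload: the new flag is appended after the credentials providers.
    new DefaultS3Operator(endpoint, region, bucket, forcePathStyle, credentialsProviders,
        false);
    // Long overload: the new flag sits before the bandwidth limiters.
    new DefaultS3Operator(endpoint, region, bucket, forcePathStyle, credentialsProviders,
        false, networkInboundLimiter, networkOutboundLimiter, true);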