From c870864d290b7e1939c9b73c07ef74ec0fd604f4 Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Wed, 26 Jul 2023 18:36:18 +0300
Subject: [PATCH 01/20] introduce statistics for producer

---
 Package.swift                                 |   2 +
 .../Configuration/KafkaConfiguration.swift    |   7 +
 .../KafkaProducerConfiguration.swift          |  11 ++
 Sources/SwiftKafka/KafkaProducerEvent.swift   |   4 +
 .../SwiftKafka/RDKafka/RDKafkaClient.swift    |   8 +
 .../Utilities/KafkaStatistics.swift           |  25 +++
 .../Utilities/KafkaStatisticsJsonModel.swift  | 177 ++++++++++++++++++
 .../SwiftKafkaTests/KafkaConsumerTests.swift  | 102 ++++++++++
 .../SwiftKafkaTests/KafkaProducerTests.swift  |  54 ++++++
 9 files changed, 390 insertions(+)
 create mode 100644 Sources/SwiftKafka/Utilities/KafkaStatistics.swift
 create mode 100644 Sources/SwiftKafka/Utilities/KafkaStatisticsJsonModel.swift

diff --git a/Package.swift b/Package.swift
index 33d6397c..aac18bf8 100644
--- a/Package.swift
+++ b/Package.swift
@@ -47,6 +47,7 @@ let package = Package(
         // The zstd Swift package produces warnings that we cannot resolve:
         // https://github.com/facebook/zstd/issues/3328
         .package(url: "https://github.com/facebook/zstd.git", from: "1.5.0"),
+        .package(url: "https://github.com/swift-extras/swift-extras-json.git", .upToNextMajor(from: "0.6.0")),
     ],
     targets: [
         .target(
@@ -76,6 +77,7 @@ let package = Package(
             .product(name: "NIOCore", package: "swift-nio"),
             .product(name: "ServiceLifecycle", package: "swift-service-lifecycle"),
             .product(name: "Logging", package: "swift-log"),
+            .product(name: "ExtrasJSON", package: "swift-extras-json"),
         ]
     ),
     .systemLibrary(

diff --git a/Sources/SwiftKafka/Configuration/KafkaConfiguration.swift b/Sources/SwiftKafka/Configuration/KafkaConfiguration.swift
index e78bd062..1be45a5a 100644
--- a/Sources/SwiftKafka/Configuration/KafkaConfiguration.swift
+++ b/Sources/SwiftKafka/Configuration/KafkaConfiguration.swift
@@ -206,3 +206,10 @@ public enum KafkaConfiguration {
         public static let v6 = IPAddressFamily(description: "v6")
     }
 }
+
+extension Duration {
+    // Total duration expressed in whole milliseconds.
+    internal var totalMilliseconds: Int64 {
+        self.components.seconds * 1000 + self.components.attoseconds / 1_000_000_000_000_000
+    }
+}
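
Note on the arithmetic above: `Duration` decomposes into whole seconds plus attoseconds (10^-18 s), so dividing the attosecond component by 10^15 yields its millisecond remainder. A quick worked check of `totalMilliseconds` (an illustrative sketch, not part of the patch):

    let interval: Duration = .milliseconds(1500)
    // components: (seconds: 1, attoseconds: 500_000_000_000_000_000)
    // 1 * 1000 + 500_000_000_000_000_000 / 1_000_000_000_000_000 == 1500
    assert(interval.totalMilliseconds == 1500)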
diff --git a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift b/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
index 6130c03e..4fc6e2c4 100644
--- a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
+++ b/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
@@ -20,6 +20,11 @@ public struct KafkaProducerConfiguration {
     /// Default: `.milliseconds(100)`
     public var pollInterval: Duration = .milliseconds(100)

+    /// Interval for librdkafka statistics reports
+    /// 0ms - disabled
+    /// >= 1ms - statistics provided every specified interval
+    public var statisticsInterval: Duration = .zero
+
     /// Maximum timeout for flushing outstanding produce requests when the ``KafkaProducer`` is shutting down.
     /// Default: `10000`
     public var flushTimeoutMilliseconds: Int = 10000 {

@@ -107,6 +112,12 @@ extension KafkaProducerConfiguration {
     internal var dictionary: [String: String] {
         var resultDict: [String: String] = [:]

+        // we only check that it is 0 or >= 1 ms, librdkafka checks for negativity
+        // in both debug and release
+        // FIXME: should we make `get throws` and throw exception instead of assert?
+        assert(self.statisticsInterval == .zero || self.statisticsInterval >= Duration.milliseconds(1), "Statistics interval must be expressed in milliseconds")
+        resultDict["statistics.interval.ms"] = String(self.statisticsInterval.totalMilliseconds)
+
         resultDict["enable.idempotence"] = String(self.enableIdempotence)
         resultDict["queue.buffering.max.messages"] = String(self.queue.bufferingMaxMessages)
         resultDict["queue.buffering.max.kbytes"] = String(self.queue.bufferingMaxKBytes)

diff --git a/Sources/SwiftKafka/KafkaProducerEvent.swift b/Sources/SwiftKafka/KafkaProducerEvent.swift
index 8afbf8e8..f2b88706 100644
--- a/Sources/SwiftKafka/KafkaProducerEvent.swift
+++ b/Sources/SwiftKafka/KafkaProducerEvent.swift
@@ -16,6 +16,8 @@ public enum KafkaProducerEvent: Sendable, Hashable {
     /// A collection of delivery reports received from the Kafka cluster indicating the status of produced messages.
     case deliveryReports([KafkaDeliveryReport])
+    /// Statistics from librdkafka
+    case statistics(KafkaStatistics)
     /// - Important: Always provide a `default` case when switching over this `enum`.
     case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY

     internal init(_ event: RDKafkaClient.KafkaEvent) {
         switch event {
         case .deliveryReport(results: let results):
             self = .deliveryReports(results)
+        case .statistics(let stat):
+            self = .statistics(stat)
         case .consumerMessages:
             fatalError("Cannot cast \(event) to KafkaProducerEvent")
         }

diff --git a/Sources/SwiftKafka/RDKafka/RDKafkaClient.swift b/Sources/SwiftKafka/RDKafka/RDKafkaClient.swift
index a33620b8..8aef17b4 100644
--- a/Sources/SwiftKafka/RDKafka/RDKafkaClient.swift
+++ b/Sources/SwiftKafka/RDKafka/RDKafkaClient.swift
@@ -136,6 +136,7 @@ final class RDKafkaClient: Sendable {
     enum KafkaEvent {
         case deliveryReport(results: [KafkaDeliveryReport])
         case consumerMessages(result: Result<KafkaConsumerMessage, Error>)
+        case statistics(KafkaStatistics)
     }

     /// Poll the event `rd_kafka_queue_t` for new events.
@@ -166,6 +167,8 @@ final class RDKafkaClient: Sendable {
                 self.handleLogEvent(event)
             case .offsetCommit:
                 self.handleOffsetCommitEvent(event)
+            case .statistics:
+                events.append(self.handleStatistics(event))
             case .none:
                 // Finished reading events, return early
                 return events
@@ -217,6 +220,11 @@ final class RDKafkaClient: Sendable {
         // The returned message(s) MUST NOT be freed with rd_kafka_message_destroy().
     }

+    private func handleStatistics(_ event: OpaquePointer?) -> KafkaEvent {
+        let jsonStr = String(cString: rd_kafka_event_stats(event))
+        return .statistics(KafkaStatistics(jsonString: jsonStr))
+    }
+
     /// Handle event of type `RDKafkaEvent.log`.
     ///
     /// - Parameter event: Pointer to underlying `rd_kafka_event_t`.

diff --git a/Sources/SwiftKafka/Utilities/KafkaStatistics.swift b/Sources/SwiftKafka/Utilities/KafkaStatistics.swift
new file mode 100644
index 00000000..2f9be9f2
--- /dev/null
+++ b/Sources/SwiftKafka/Utilities/KafkaStatistics.swift
@@ -0,0 +1,25 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the swift-kafka-gsoc open source project
+//
+// Copyright (c) 2022 Apple Inc. and the swift-kafka-gsoc project authors
+// Licensed under Apache License v2.0
+//
+// See LICENSE.txt for license information
+// See CONTRIBUTORS.txt for the list of swift-kafka-gsoc project authors
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+//===----------------------------------------------------------------------===//
+
+import ExtrasJSON
+
+public struct KafkaStatistics: Sendable, Hashable {
+    public let jsonString: String
+
+    public var json: KafkaStatisticsJson {
+        get throws {
+            return try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8)
+        }
+    }
+}
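
`KafkaStatistics` deliberately stores the raw librdkafka report as a string and decodes it only when `json` is accessed. A minimal sketch of that lazy decode from inside the module (the JSON literal is illustrative; a real report carries many more fields, see the model below):

    let statistics = KafkaStatistics(jsonString: #"{"name": "rdkafka#producer-1", "msg_cnt": 5}"#)
    let decoded = try statistics.json // runs ExtrasJSON's XJSONDecoder on first access
    print(decoded.name ?? "unknown", decoded.msgCnt ?? 0)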
diff --git a/Sources/SwiftKafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/SwiftKafka/Utilities/KafkaStatisticsJsonModel.swift
new file mode 100644
index 00000000..23217a62
--- /dev/null
+++ b/Sources/SwiftKafka/Utilities/KafkaStatisticsJsonModel.swift
@@ -0,0 +1,177 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the swift-kafka-gsoc open source project
+//
+// Copyright (c) 2022 Apple Inc. and the swift-kafka-gsoc project authors
+// Licensed under Apache License v2.0
+//
+// See LICENSE.txt for license information
+// See CONTRIBUTORS.txt for the list of swift-kafka-gsoc project authors
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+//===----------------------------------------------------------------------===//
+
+// This file was generated from JSON Schema using quicktype, do not modify it directly.
+// To parse the JSON, add this file to your project and do:
+//
+//   let statistics = try? newJSONDecoder().decode(KafkaStatisticsJsonModel.self, from: jsonData)
+
+// MARK: - Statistics
+
+public struct KafkaStatisticsJson: Hashable, Codable {
+    let name, clientID, type: String?
+    let ts, time, age, replyq: Int?
+    let msgCnt, msgSize, msgMax, msgSizeMax: Int?
+    let simpleCnt, metadataCacheCnt: Int?
+    let brokers: [String: Broker]?
+    let topics: [String: Topic]?
+    let cgrp: Cgrp?
+    let tx, txBytes, rx, rxBytes: Int?
+    let txmsgs, txmsgBytes, rxmsgs, rxmsgBytes: Int?
+
+    enum CodingKeys: String, CodingKey {
+        case name
+        case clientID = "client_id"
+        case type, ts, time, age, replyq
+        case msgCnt = "msg_cnt"
+        case msgSize = "msg_size"
+        case msgMax = "msg_max"
+        case msgSizeMax = "msg_size_max"
+        case simpleCnt = "simple_cnt"
+        case metadataCacheCnt = "metadata_cache_cnt"
+        case brokers, topics, cgrp, tx
+        case txBytes = "tx_bytes"
+        case rx
+        case rxBytes = "rx_bytes"
+        case txmsgs
+        case txmsgBytes = "txmsg_bytes"
+        case rxmsgs
+        case rxmsgBytes = "rxmsg_bytes"
+    }
+}
+
+// MARK: - Broker
+
+public struct Broker: Hashable, Codable {
+    let name: String?
+    let nodeid: Int?
+    let nodename, source, state: String?
+    let stateage, outbufCnt, outbufMsgCnt, waitrespCnt: Int?
+    let waitrespMsgCnt, tx, txbytes, txerrs: Int?
+    let txretries, txidle, reqTimeouts, rx: Int?
+    let rxbytes, rxerrs, rxcorriderrs, rxpartial: Int?
+    let rxidle, zbufGrow, bufGrow, wakeups: Int?
+    let connects, disconnects: Int?
+    let intLatency, outbufLatency, rtt, throttle: [String: Int]?
+    let req: [String: Int]?
+    let toppars: [String: Toppar]?
+
+    enum CodingKeys: String, CodingKey {
+        case name, nodeid, nodename, source, state, stateage
+        case outbufCnt = "outbuf_cnt"
+        case outbufMsgCnt = "outbuf_msg_cnt"
+        case waitrespCnt = "waitresp_cnt"
+        case waitrespMsgCnt = "waitresp_msg_cnt"
+        case tx, txbytes, txerrs, txretries, txidle
+        case reqTimeouts = "req_timeouts"
+        case rx, rxbytes, rxerrs, rxcorriderrs, rxpartial, rxidle
+        case zbufGrow = "zbuf_grow"
+        case bufGrow = "buf_grow"
+        case wakeups, connects, disconnects
+        case intLatency = "int_latency"
+        case outbufLatency = "outbuf_latency"
+        case rtt, throttle, req, toppars
+    }
+}
+
+// MARK: - Toppar
+
+struct Toppar: Hashable, Codable {
+    let topic: String?
+    let partition: Int?
+
+    enum CodingKeys: String, CodingKey {
+        case topic, partition
+    }
+}
+
+// MARK: - Cgrp
+
+struct Cgrp: Hashable, Codable {
+    let state: String?
+    let stateage: Int?
+    let joinState: String?
+    let rebalanceAge, rebalanceCnt: Int?
+    let rebalanceReason: String?
+    let assignmentSize: Int?
+
+    enum CodingKeys: String, CodingKey {
+        case state, stateage
+        case joinState = "join_state"
+        case rebalanceAge = "rebalance_age"
+        case rebalanceCnt = "rebalance_cnt"
+        case rebalanceReason = "rebalance_reason"
+        case assignmentSize = "assignment_size"
+    }
+}
+
+// MARK: - Topic
+
+struct Topic: Hashable, Codable {
+    let topic: String?
+    let age, metadataAge: Int?
+    let batchsize, batchcnt: [String: Int]?
+    let partitions: [String: Partition]?
+
+    enum CodingKeys: String, CodingKey {
+        case topic, age
+        case metadataAge = "metadata_age"
+        case batchsize, batchcnt, partitions
+    }
+}
+
+// MARK: - Partition
+
+struct Partition: Hashable, Codable {
+    let partition, broker, leader: Int?
+    let desired, unknown: Bool?
+    let msgqCnt, msgqBytes, xmitMsgqCnt, xmitMsgqBytes: Int?
+    let fetchqCnt, fetchqSize: Int?
+    let fetchState: String?
+    let queryOffset, nextOffset, appOffset, storedOffset: Int?
+    let commitedOffset, committedOffset, eofOffset, loOffset: Int?
+    let hiOffset, lsOffset, consumerLag, consumerLagStored: Int?
+    let txmsgs, txbytes, rxmsgs, rxbytes: Int?
+    let msgs, rxVerDrops, msgsInflight, nextACKSeq: Int?
+    let nextErrSeq, ackedMsgid: Int?
+
+    enum CodingKeys: String, CodingKey {
+        case partition, broker, leader, desired, unknown
+        case msgqCnt = "msgq_cnt"
+        case msgqBytes = "msgq_bytes"
+        case xmitMsgqCnt = "xmit_msgq_cnt"
+        case xmitMsgqBytes = "xmit_msgq_bytes"
+        case fetchqCnt = "fetchq_cnt"
+        case fetchqSize = "fetchq_size"
+        case fetchState = "fetch_state"
+        case queryOffset = "query_offset"
+        case nextOffset = "next_offset"
+        case appOffset = "app_offset"
+        case storedOffset = "stored_offset"
+        case commitedOffset = "commited_offset"
+        case committedOffset = "committed_offset"
+        case eofOffset = "eof_offset"
+        case loOffset = "lo_offset"
+        case hiOffset = "hi_offset"
+        case lsOffset = "ls_offset"
+        case consumerLag = "consumer_lag"
+        case consumerLagStored = "consumer_lag_stored"
+        case txmsgs, txbytes, rxmsgs, rxbytes, msgs
+        case rxVerDrops = "rx_ver_drops"
+        case msgsInflight = "msgs_inflight"
+        case nextACKSeq = "next_ack_seq"
+        case nextErrSeq = "next_err_seq"
+        case ackedMsgid = "acked_msgid"
+    }
+}
diff --git a/Tests/SwiftKafkaTests/KafkaConsumerTests.swift b/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
index 6f2fab18..d2591353 100644
--- a/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
+++ b/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
@@ -14,6 +14,7 @@

 import struct Foundation.UUID
 import Logging
+import NIOConcurrencyHelpers
 import ServiceLifecycle
 @testable import SwiftKafka
 import XCTest
@@ -85,4 +86,105 @@ final class KafkaConsumerTests: XCTestCase {
             )
         }
     }
+/*
+    func testConsumerStatistics() async throws {
+        // Set no bootstrap servers to trigger librdkafka configuration warning
+        let uniqueGroupID = UUID().uuidString
+        var config = KafkaConsumerConfiguration(
+            consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"])
+        )
+        config.statisticsInterval = Duration.milliseconds(100)
+
+        let stringJson = NIOLockedValueBox(String())
+        let consumer = try KafkaConsumer(config: config, logger: .kafkaTest)
+
+        guard let statistics = consumer.statistics else {
+            XCTFail("Statistics was not instantiated")
+            return
+        }
+
+        let serviceGroup = ServiceGroup(
+            services: [consumer],
+            configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []),
+            logger: .kafkaTest
+        )
+
+        try await withThrowingTaskGroup(of: Void.self) { group in
+            // Run Task
+            group.addTask {
+                try await serviceGroup.run()
+            }
+
+            // check for librdkafka statistics
+            group.addTask {
+                for try await stat in statistics {
+                    stringJson.withLockedValue {
+                        $0 = stat
+                    }
+                }
+            }
+
+            // Sleep to let the poll loop receive the statistics callback
+            try! await Task.sleep(for: .milliseconds(500))
+
+            // Shutdown the serviceGroup
+            await serviceGroup.triggerGracefulShutdown()
+
+            try await group.next()
+        }
+
+        let stats = stringJson.withLockedValue { $0 }
+        XCTAssertFalse(stats.isEmpty)
+    }
+
+    func testConsumerStatisticsJson() async throws {
+        // Set no bootstrap servers to trigger librdkafka configuration warning
+        let uniqueGroupID = UUID().uuidString
+        var config = KafkaConsumerConfiguration(
+            consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"])
+        )
+        config.statisticsInterval = Duration.milliseconds(100)
+
+        let stringJson = NIOLockedValueBox<KafkaStatisticsJson?>(nil)
+        let consumer = try KafkaConsumer(config: config, logger: .kafkaTest)
+
+        guard let statistics = consumer.statistics else {
+            XCTFail("Statistics was not instantiated")
+            return
+        }
+
+        let serviceGroup = ServiceGroup(
+            services: [consumer],
+            configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []),
+            logger: .kafkaTest
+        )
+
+        try await withThrowingTaskGroup(of: Void.self) { group in
+            // Run Task
+            group.addTask {
+                try await serviceGroup.run()
+            }
+
+            // check for librdkafka statistics
+            group.addTask {
+                for try await stat in KafkaStatisticsJsonSequence(wrappedSequence: statistics) {
+                    stringJson.withLockedValue {
+                        $0 = stat
+                    }
+                }
+            }
+
+            // Sleep to let the poll loop receive the statistics callback
+            try! await Task.sleep(for: .milliseconds(500))
+
+            // Shutdown the serviceGroup
+            await serviceGroup.triggerGracefulShutdown()
+
+            try await group.next()
+        }
+
+        let stats = stringJson.withLockedValue { $0 }
+        XCTAssertNotNil(stats)
+    }
+ */
 }
diff --git a/Tests/SwiftKafkaTests/KafkaProducerTests.swift b/Tests/SwiftKafkaTests/KafkaProducerTests.swift
index a551c386..e7b16d09 100644
--- a/Tests/SwiftKafkaTests/KafkaProducerTests.swift
+++ b/Tests/SwiftKafkaTests/KafkaProducerTests.swift
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//

 import Logging
+import NIOConcurrencyHelpers
 import NIOCore
 import ServiceLifecycle
 @testable import SwiftKafka
 import XCTest
@@ -356,4 +357,57 @@ final class KafkaProducerTests: XCTestCase {

         XCTAssertNil(producerCopy)
     }
+
+    func testProducerStatistics() async throws {
+        self.config.statisticsInterval = Duration.milliseconds(100)
+        self.config.debug = [.all]
+
+        let statistics = NIOLockedValueBox<KafkaStatistics?>(nil)
+        let (producer, events) = try KafkaProducer.makeProducerWithEvents(
+            config: self.config,
+            logger: .kafkaTest
+        )
+
+        let serviceGroup = ServiceGroup(
+            services: [producer],
+            configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []),
+            logger: .kafkaTest
+        )
+
+        try await withThrowingTaskGroup(of: Void.self) { group in
+            // Run Task
+            group.addTask {
+                try await serviceGroup.run()
+            }
+
+            // check for librdkafka statistics
+            group.addTask {
+                for try await e in events {
+                    switch e {
+                    case .statistics(let stat):
+                        statistics.withLockedValue {
+                            $0 = stat
+                        }
+                    default:
+                        break
+                    }
+                }
+            }
+
+            // Sleep to let the poll loop receive the statistics callback
+            try! await Task.sleep(for: .milliseconds(500))
+
+            // Shutdown the serviceGroup
+            await serviceGroup.triggerGracefulShutdown()
+
+            try await group.next()
+        }
+        let stats = statistics.withLockedValue { $0 }
+        guard let stats else {
+            XCTFail("No statistics were received")
+            return
+        }
+        XCTAssertFalse(stats.jsonString.isEmpty)
+        XCTAssertNoThrow(try stats.json)
+    }
 }
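
Taken together, this first patch lets an application opt in by setting a non-zero `statisticsInterval` and watching the producer's event sequence. A minimal usage sketch based on the API above (a `logger` and broker configuration are assumed to be in scope; error handling omitted):

    var config = KafkaProducerConfiguration()
    config.statisticsInterval = .milliseconds(500)

    let (producer, events) = try KafkaProducer.makeProducerWithEvents(config: config, logger: logger)

    Task {
        for try await event in events {
            switch event {
            case .statistics(let statistics):
                print(statistics.jsonString) // raw librdkafka JSON report
            default:
                break // always keep a default case for this enum
            }
        }
    }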
From d9887b955944a53c570ee91f711f30d9da8f64b0 Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Mon, 31 Jul 2023 19:56:24 +0300
Subject: [PATCH 02/20] add statistics to new consumer with events

---
 .../KafkaConsumerConfiguration.swift          |  13 +++
 .../KafkaProducerConfiguration.swift          |  14 ++-
 Sources/SwiftKafka/KafkaConsumer.swift        | 109 ++++++++++++------
 Sources/SwiftKafka/KafkaConsumerEvent.swift   |   6 +-
 Sources/SwiftKafka/KafkaProducer.swift        |  16 ++-
 .../SwiftKafkaTests/KafkaConsumerTests.swift  |  84 +++-----------
 .../SwiftKafkaTests/KafkaProducerTests.swift  |  12 +-
 7 files changed, 135 insertions(+), 119 deletions(-)

diff --git a/Sources/SwiftKafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/SwiftKafka/Configuration/KafkaConsumerConfiguration.swift
index 3b63562a..d6fb84b9 100644
--- a/Sources/SwiftKafka/Configuration/KafkaConsumerConfiguration.swift
+++ b/Sources/SwiftKafka/Configuration/KafkaConsumerConfiguration.swift
@@ -23,6 +23,18 @@ public struct KafkaConsumerConfiguration {
     /// Default: `.milliseconds(100)`
     public var pollInterval: Duration = .milliseconds(100)

+    /// Interval for librdkafka statistics reports
+    /// 0ms - disabled
+    /// >= 1ms - statistics provided every specified interval
+    public var statisticsInterval: Duration = .zero {
+        didSet {
+            precondition(
+                self.statisticsInterval.totalMilliseconds > 0 || self.statisticsInterval == .zero /*self.statisticsInterval.canBeRepresentedAsMilliseconds*/,
+                "Lowest granularity is milliseconds"
+            )
+        }
+    }
+
     /// The strategy used for consuming messages.
     /// See ``KafkaConfiguration/ConsumptionStrategy`` for more information.
     public var consumptionStrategy: KafkaConfiguration.ConsumptionStrategy
@@ -128,6 +140,7 @@ extension KafkaConsumerConfiguration {
             resultDict["group.id"] = groupID
         }

+        resultDict["statistics.interval.ms"] = String(self.statisticsInterval.totalMilliseconds)
         resultDict["session.timeout.ms"] = String(session.timeoutMilliseconds)
         resultDict["heartbeat.interval.ms"] = String(heartbeatIntervalMilliseconds)
         resultDict["max.poll.interval.ms"] = String(maxPollInvervalMilliseconds)

diff --git a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift b/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
index 4fc6e2c4..2213fe2a 100644
--- a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
+++ b/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
@@ -23,7 +23,14 @@ public struct KafkaProducerConfiguration {
     /// Interval for librdkafka statistics reports
     /// 0ms - disabled
     /// >= 1ms - statistics provided every specified interval
-    public var statisticsInterval: Duration = .zero
+    public var statisticsInterval: Duration = .zero {
+        didSet {
+            precondition(
+                self.statisticsInterval.totalMilliseconds > 0 || self.statisticsInterval == .zero /*self.statisticsInterval.canBeRepresentedAsMilliseconds*/,
+                "Lowest granularity is milliseconds"
+            )
+        }
+    }

     /// Maximum timeout for flushing outstanding produce requests when the ``KafkaProducer`` is shutting down.
     /// Default: `10000`
@@ -112,12 +119,7 @@ extension KafkaProducerConfiguration {
     internal var dictionary: [String: String] {
         var resultDict: [String: String] = [:]

-        // we only check that it is 0 or >= 1 ms, librdkafka checks for negativity
-        // in both debug and release
-        // FIXME: should we make `get throws` and throw exception instead of assert?
-        assert(self.statisticsInterval == .zero || self.statisticsInterval >= Duration.milliseconds(1), "Statistics interval must be expressed in milliseconds")
         resultDict["statistics.interval.ms"] = String(self.statisticsInterval.totalMilliseconds)
-
         resultDict["enable.idempotence"] = String(self.enableIdempotence)
         resultDict["queue.buffering.max.messages"] = String(self.queue.bufferingMaxMessages)
         resultDict["queue.buffering.max.kbytes"] = String(self.queue.bufferingMaxKBytes)
diff --git a/Sources/SwiftKafka/KafkaConsumer.swift b/Sources/SwiftKafka/KafkaConsumer.swift
index 16b25679..f3faefd1 100644
--- a/Sources/SwiftKafka/KafkaConsumer.swift
+++ b/Sources/SwiftKafka/KafkaConsumer.swift
@@ -22,6 +22,7 @@ import ServiceLifecycle
 /// `NIOAsyncSequenceProducerDelegate` that closes the producer when
 /// `didTerminate()` is invoked.
 internal struct KafkaConsumerCloseOnTerminate: Sendable {
+    let isMessageSequence: Bool
     let stateMachine: NIOLockedValueBox<KafkaConsumer.StateMachine>
 }

@@ -31,7 +32,7 @@ extension KafkaConsumerCloseOnTerminate: NIOAsyncSequenceProducerDelegate {
     }

     func didTerminate() {
-        self.stateMachine.withLockedValue { $0.messageSequenceTerminated() }
+        self.stateMachine.withLockedValue { $0.messageSequenceTerminated(isMessageSequence: isMessageSequence) }
     }
 }

@@ -121,6 +122,12 @@ public final class KafkaConsumer: Sendable, Service {
         NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure,
         KafkaConsumerCloseOnTerminate
     >
+    typealias ProducerEvents = NIOAsyncSequenceProducer<
+        KafkaConsumerEvent,
+        NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure,
+        KafkaConsumerCloseOnTerminate
+    >
+
     /// The configuration object of the consumer client.
     private let config: KafkaConsumerConfiguration
     /// A logger.
     private let logger: Logger
@@ -146,7 +153,8 @@ public final class KafkaConsumer: Sendable, Service {
         client: RDKafkaClient,
         stateMachine: NIOLockedValueBox<StateMachine>,
         config: KafkaConsumerConfiguration,
-        logger: Logger
+        logger: Logger,
+        eventSource: ProducerEvents.Source? = nil
     ) throws {
         self.config = config
         self.stateMachine = stateMachine
@@ -155,7 +163,7 @@ public final class KafkaConsumer: Sendable, Service {
         let sourceAndSequence = NIOThrowingAsyncSequenceProducer.makeSequence(
             elementType: KafkaConsumerMessage.self,
             backPressureStrategy: NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure(),
-            delegate: KafkaConsumerCloseOnTerminate(stateMachine: self.stateMachine)
+            delegate: KafkaConsumerCloseOnTerminate(isMessageSequence: true, stateMachine: self.stateMachine)
         )

         self.messages = KafkaConsumerMessages(
@@ -166,7 +174,8 @@ public final class KafkaConsumer: Sendable, Service {
         self.stateMachine.withLockedValue {
             $0.initialize(
                 client: client,
-                source: sourceAndSequence.source
+                source: sourceAndSequence.source,
+                eventSource: eventSource
             )
         }

@@ -242,6 +251,11 @@ public final class KafkaConsumer: Sendable, Service {
         if config.enableAutoCommit == false {
             subscribedEvents.append(.offsetCommit)
         }
+// Don't listen to statistics even if configured,
+// as there are no events instantiated.
+// if config.statisticsInterval != .zero {
+//     subscribedEvents.append(.statistics)
+// }

         let client = try RDKafkaClient.makeClient(
             type: .consumer,
             configDictionary: config.dictionary,
             events: subscribedEvents,
             logger: logger
         )

-        let consumer = try KafkaConsumer(
-            client: client,
-            stateMachine: stateMachine,
-            config: config,
-            logger: logger
-        )
-
         let sourceAndSequence = NIOAsyncSequenceProducer.makeSequence(
             elementType: KafkaConsumerEvent.self,
             backPressureStrategy: NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure(),
-            delegate: KafkaConsumerCloseOnTerminate(stateMachine: stateMachine)
+            delegate: KafkaConsumerCloseOnTerminate(isMessageSequence: false, stateMachine: stateMachine)
         )

         let eventsSequence = KafkaConsumerEvents(wrappedSequence: sourceAndSequence.sequence)
+
+        let consumer = try KafkaConsumer(
+            client: client,
+            stateMachine: stateMachine,
+            config: config,
+            logger: logger,
+            eventSource: sourceAndSequence.source
+        )
+
         return (consumer, eventsSequence)
     }
@@ -321,7 +337,7 @@ public final class KafkaConsumer: Sendable, Service {
         while !Task.isCancelled {
             let nextAction = self.stateMachine.withLockedValue { $0.nextPollLoopAction() }
             switch nextAction {
-            case .pollForAndYieldMessage(let client, let source):
+            case .pollForAndYieldMessage(let client, let source, let eventSource):
                 let events = client.eventPoll()
                 for event in events {
                     switch event {
@@ -332,8 +348,11 @@ public final class KafkaConsumer: Sendable, Service {
                             _ = source.yield(message)
                         case .failure(let error):
                             source.finish()
+                            eventSource?.finish()
                             throw error
                         }
+                    case .statistics(let statistics):
+                        _ = eventSource?.yield(.statistics(statistics))
                     default:
                         break // Ignore
                     }
@@ -383,8 +402,9 @@ public final class KafkaConsumer: Sendable, Service {
                 client: client,
                 logger: self.logger
             )
-        case .triggerGracefulShutdownAndFinishSource(let client, let source):
+        case .triggerGracefulShutdownAndFinishSource(let client, let source, let eventSource):
             source.finish()
+            eventSource?.finish()
             self._triggerGracefulShutdown(
                 client: client,
                 logger: self.logger
@@ -428,17 +448,20 @@ extension KafkaConsumer {
         ///
         /// - Parameter client: Client used for handling the connection to the Kafka cluster.
         /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements.
+        /// - Parameter eventSource: ``NIOAsyncSequenceProducer/Source`` used for yielding new events.
         case initializing(
             client: RDKafkaClient,
-            source: Producer.Source
+            source: Producer.Source,
+            eventSource: ProducerEvents.Source?
        )
         /// The ``KafkaConsumer`` is consuming messages.
         ///
         /// - Parameter client: Client used for handling the connection to the Kafka cluster.
         /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements.
+        /// - Parameter eventSource: ``NIOAsyncSequenceProducer/Source`` used for yielding new events.
         case consuming(
             client: RDKafkaClient,
-            source: Producer.Source
+            source: Producer.Source,
+            eventSource: ProducerEvents.Source?
         )
         /// Consumer is still running but the messages asynchronous sequence was terminated.
         /// All incoming messages will be dropped.
@@ -461,14 +484,16 @@ extension KafkaConsumer {
         /// not yet available when the normal initialization occurs.
         mutating func initialize(
             client: RDKafkaClient,
-            source: Producer.Source
+            source: Producer.Source,
+            eventSource: ProducerEvents.Source?
         ) {
             guard case .uninitialized = self.state else {
                 fatalError("\(#function) can only be invoked in state .uninitialized, but was invoked in state \(self.state)")
             }
             self.state = .initializing(
                 client: client,
-                source: source
+                source: source,
+                eventSource: eventSource
             )
         }

@@ -480,7 +505,8 @@ extension KafkaConsumer {
             /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements.
             case pollForAndYieldMessage(
                 client: RDKafkaClient,
-                source: Producer.Source
+                source: Producer.Source,
+                eventSource: ProducerEvents.Source?
             )
             /// The ``KafkaConsumer`` stopped consuming messages or
             /// is in the process of shutting down.
@@ -502,8 +528,8 @@ extension KafkaConsumer {
                 fatalError("\(#function) invoked while still in state \(self.state)")
             case .initializing:
                 fatalError("Subscribe to consumer group / assign to topic partition pair before reading messages")
-            case .consuming(let client, let source):
-                return .pollForAndYieldMessage(client: client, source: source)
+            case .consuming(let client, let source, let eventSource):
+                return .pollForAndYieldMessage(client: client, source: source, eventSource: eventSource)
             case .consumptionStopped(let client):
                 return .pollWithoutYield(client: client)
             case .finishing(let client):
@@ -532,10 +558,11 @@ extension KafkaConsumer {
             switch self.state {
             case .uninitialized:
                 fatalError("\(#function) invoked while still in state \(self.state)")
-            case .initializing(let client, let source):
+            case .initializing(let client, let source, let eventSource):
                 self.state = .consuming(
                     client: client,
-                    source: source
+                    source: source,
+                    eventSource: eventSource
                 )
                 return .setUpConnection(client: client)
             case .consuming, .consumptionStopped, .finishing, .finished:

         /// The messages asynchronous sequence was terminated.
         /// All incoming messages will be dropped.
-        mutating func messageSequenceTerminated() {
+        mutating func messageSequenceTerminated(isMessageSequence: Bool) {
             switch self.state {
             case .uninitialized:
                 fatalError("\(#function) invoked while still in state \(self.state)")
             case .initializing:
                 fatalError("Call to \(#function) before setUpConnection() was invoked")
             case .consumptionStopped:
-                fatalError("messageSequenceTerminated() must not be invoked more than once")
+                if isMessageSequence {
+                    fatalError("messageSequenceTerminated() must not be invoked more than once")
+                }
-            case .consuming(let client, _):
-                self.state = .consumptionStopped(client: client)
+            case .consuming(let client, let source, let eventSource):
+                // Only move to stopped if the message sequence was finished.
+                if isMessageSequence {
+                    self.state = .consumptionStopped(client: client)
+                    // If the message sequence is being terminated, the class deinit is running
+                    // (see the `messages` property); this is the last chance to finish `eventSource`.
+                    eventSource?.finish()
+                } else {
+                    // Messages are still being consumed; only the event source was finished.
+                    // Probably no one wants to listen to events, though dropping them
+                    // might be bad for rebalancing.
+                    self.state = .consuming(client: client, source: source, eventSource: nil)
+                }
             case .finishing, .finished:
                 break
             }

@@ -576,7 +617,7 @@ extension KafkaConsumer {
                 fatalError("Subscribe to consumer group / assign to topic partition pair before committing offsets")
             case .consumptionStopped:
                 fatalError("Cannot store offset when consumption has been stopped")
-            case .consuming(let client, _):
+            case .consuming(let client, _, _):
                 return .storeOffset(client: client)
             case .finishing, .finished:
                 fatalError("\(#function) invoked while still in state \(self.state)")
@@ -607,7 +648,7 @@ extension KafkaConsumer {
                 fatalError("Subscribe to consumer group / assign to topic partition pair before committing offsets")
             case .consumptionStopped:
                 fatalError("Cannot commit when consumption has been stopped")
-            case .consuming(let client, _):
+            case .consuming(let client, _, _):
                 return .commitSync(client: client)
             case .finishing, .finished:
                 return .throwClosedError
@@ -628,7 +669,8 @@ extension KafkaConsumer {
             /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements.
             case triggerGracefulShutdownAndFinishSource(
                 client: RDKafkaClient,
-                source: Producer.Source
+                source: Producer.Source,
+                eventSource: ProducerEvents.Source?
             )
         }

@@ -642,11 +684,12 @@ extension KafkaConsumer {
                 fatalError("\(#function) invoked while still in state \(self.state)")
             case .initializing:
                 fatalError("subscribe() / assign() should have been invoked before \(#function)")
-            case .consuming(let client, let source):
+            case .consuming(let client, let source, let eventSource):
                 self.state = .finishing(client: client)
                 return .triggerGracefulShutdownAndFinishSource(
                     client: client,
-                    source: source
+                    source: source,
+                    eventSource: eventSource
                 )
             case .consumptionStopped(let client):
                 self.state = .finishing(client: client)
diff --git a/Sources/SwiftKafka/KafkaConsumerEvent.swift b/Sources/SwiftKafka/KafkaConsumerEvent.swift
index 287ddd33..75b5bf64 100644
--- a/Sources/SwiftKafka/KafkaConsumerEvent.swift
+++ b/Sources/SwiftKafka/KafkaConsumerEvent.swift
@@ -14,11 +14,15 @@

 /// An enumeration representing events that can be received through the ``KafkaConsumerEvents`` asynchronous sequence.
 public enum KafkaConsumerEvent: Sendable, Hashable {
+    /// Statistics from librdkafka
+    case statistics(KafkaStatistics)
     /// - Important: Always provide a `default` case when switching over this `enum`.
     case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY

-    internal init(_ event: RDKafkaClient.KafkaEvent) {
+    internal init?(_ event: RDKafkaClient.KafkaEvent) {
         switch event {
+        case .statistics(let stat):
+            self = .statistics(stat)
         case .deliveryReport:
             fatalError("Cannot cast \(event) to KafkaConsumerEvent")
         case .consumerMessages:

diff --git a/Sources/SwiftKafka/KafkaProducer.swift b/Sources/SwiftKafka/KafkaProducer.swift
index 5af88d71..af18157d 100644
--- a/Sources/SwiftKafka/KafkaProducer.swift
+++ b/Sources/SwiftKafka/KafkaProducer.swift
@@ -118,10 +118,16 @@ public final class KafkaProducer: Service, Sendable {
     ) throws {
         let stateMachine = NIOLockedValueBox(StateMachine(logger: logger))

+        var subscribedEvents: [RDKafkaEvent] = [.log] // No .deliveryReport here!
+        // Listen to statistics events when statistics are enabled
+        if config.statisticsInterval != .zero {
+            subscribedEvents.append(.statistics)
+        }
+
         let client = try RDKafkaClient.makeClient(
             type: .producer,
             configDictionary: config.dictionary,
-            events: [.log], // No .deliveryReport here!
+            events: subscribedEvents,
             logger: logger
         )
@@ -165,11 +171,17 @@ public final class KafkaProducer: Service, Sendable {
             delegate: KafkaProducerCloseOnTerminate(stateMachine: stateMachine)
         )
         let source = sourceAndSequence.source
+
+        var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport]
+        // Listen to statistics events when statistics are enabled
+        if config.statisticsInterval != .zero {
+            subscribedEvents.append(.statistics)
+        }

         let client = try RDKafkaClient.makeClient(
             type: .producer,
             configDictionary: config.dictionary,
-            events: [.log, .deliveryReport],
+            events: subscribedEvents,
             logger: logger
         )
diff --git a/Tests/SwiftKafkaTests/KafkaConsumerTests.swift b/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
index d2591353..d42b4da1 100644
--- a/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
+++ b/Tests/SwiftKafkaTests/KafkaConsumerTests.swift
@@ -86,22 +86,16 @@ final class KafkaConsumerTests: XCTestCase {
             )
         }
     }
-/*
+
     func testConsumerStatistics() async throws {
-        // Set no bootstrap servers to trigger librdkafka configuration warning
         let uniqueGroupID = UUID().uuidString
         var config = KafkaConsumerConfiguration(
             consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"])
         )
-        config.statisticsInterval = Duration.milliseconds(100)
-
-        let stringJson = NIOLockedValueBox(String())
-        let consumer = try KafkaConsumer(config: config, logger: .kafkaTest)
-
-        guard let statistics = consumer.statistics else {
-            XCTFail("Statistics was not instantiated")
-            return
-        }
+        config.statisticsInterval = Duration.milliseconds(10)
+
+        let statistics = NIOLockedValueBox<KafkaStatistics?>(nil)
+        let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(config: config, logger: .kafkaTest)

         let serviceGroup = ServiceGroup(
             services: [consumer],
             configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []),
             logger: .kafkaTest
         )
@@ -117,74 +111,28 @@ final class KafkaConsumerTests: XCTestCase {

             // check for librdkafka statistics
             group.addTask {
-                for try await stat in statistics {
-                    stringJson.withLockedValue {
-                        $0 = stat
+                for try await event in events {
+                    if case let .statistics(stat) = event {
+                        statistics.withLockedValue {
+                            $0 = stat
+                        }
+                        break
                     }
                 }
             }

-            // Sleep to let the poll loop receive the statistics callback
-            try! await Task.sleep(for: .milliseconds(500))
+            try await group.next()

             // Shutdown the serviceGroup
             await serviceGroup.triggerGracefulShutdown()
-
-            try await group.next()
         }

-        let stats = stringJson.withLockedValue { $0 }
-        XCTAssertFalse(stats.isEmpty)
+        let stats = statistics.withLockedValue { $0 }
+        guard let stats else {
+            XCTFail("No statistics were received")
+            return
+        }
+        XCTAssertFalse(stats.jsonString.isEmpty)
+        XCTAssertNoThrow(try stats.json)
     }
 }
diff --git a/Tests/SwiftKafkaTests/KafkaProducerTests.swift b/Tests/SwiftKafkaTests/KafkaProducerTests.swift
index fbf1b2f4..9e0be7a1 100644
--- a/Tests/SwiftKafkaTests/KafkaProducerTests.swift
+++ b/Tests/SwiftKafkaTests/KafkaProducerTests.swift
@@ -358,7 +358,7 @@ final class KafkaProducerTests: XCTestCase {
     }

     func testProducerStatistics() async throws {
-        self.config.statisticsInterval = Duration.milliseconds(100)
+        self.config.statisticsInterval = Duration.milliseconds(10)
         self.config.debug = [.all]

         let statistics = NIOLockedValueBox<KafkaStatistics?>(nil)
@@ -382,24 +382,18 @@ final class KafkaProducerTests: XCTestCase {
             // check for librdkafka statistics
             group.addTask {
                 for try await e in events {
-                    switch e {
-                    case .statistics(let stat):
+                    if case let .statistics(stat) = e {
                         statistics.withLockedValue {
                             $0 = stat
                         }
-                    default:
                         break
                     }
                 }
             }

-            // Sleep to let the poll loop receive the statistics callback
-            try! await Task.sleep(for: .milliseconds(500))
-
+            try await group.next()
             // Shutdown the serviceGroup
             await serviceGroup.triggerGracefulShutdown()
-
-            try await group.next()
         }
         let stats = statistics.withLockedValue { $0 }
         guard let stats else {
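
With this patch the consumer mirrors the producer: `makeConsumerWithEvents` returns the consumer together with an event sequence that now carries `.statistics`. A minimal sketch, modeled on the updated test (`groupID` and `logger` are assumed to be in scope):

    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: groupID, topics: ["my-topic"])
    )
    config.statisticsInterval = .milliseconds(100)

    let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(config: config, logger: logger)

    Task {
        for try await event in events {
            if case .statistics(let statistics) = event {
                print(statistics.jsonString)
            }
        }
    }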
From d55a7fda49c809189f08fc0514b6c59c655168a4 Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Thu, 10 Aug 2023 16:16:27 +0300
Subject: [PATCH 03/20] fix some artefacts

---
 .../KafkaProducerConfiguration.swift          |   2 +-
 Sources/Kafka/KafkaConsumer.swift             |   2 -
 Sources/Kafka/KafkaConsumerEvent.swift        |   2 +-
 Sources/Kafka/KafkaProducer.swift             |   2 +-
 .../KafkaProducerConfiguration.swift          | 190 ------------------
 5 files changed, 3 insertions(+), 195 deletions(-)
 delete mode 100644 Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift

diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
index 31764c47..9db3fd5d 100644
--- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
@@ -107,7 +107,7 @@ public struct KafkaProducerConfiguration {
     public var isAutoCreateTopicsEnabled: Bool = true

     // MARK: - Common Client Config Properties
-    
+
     /// Client identifier.
     /// Default: `"rdkafka"`
     public var identifier: String = "rdkafka"

diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift
index 1d4cf54c..40728742 100644
--- a/Sources/Kafka/KafkaConsumer.swift
+++ b/Sources/Kafka/KafkaConsumer.swift
@@ -250,8 +250,6 @@ public final class KafkaConsumer: Sendable, Service {
         if configuration.isAutoCommitEnabled == false {
             subscribedEvents.append(.offsetCommit)
         }
-// Don't listen to statistics even if configured,
-// as there are no events instantiated.
         if configuration.statisticsInterval != .zero {
             subscribedEvents.append(.statistics)
         }

diff --git a/Sources/Kafka/KafkaConsumerEvent.swift b/Sources/Kafka/KafkaConsumerEvent.swift
index 5ea5bc09..a4592d3e 100644
--- a/Sources/Kafka/KafkaConsumerEvent.swift
+++ b/Sources/Kafka/KafkaConsumerEvent.swift
@@ -19,7 +19,7 @@ public enum KafkaConsumerEvent: Sendable, Hashable {
     /// - Important: Always provide a `default` case when switching over this `enum`.
     case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY

-    internal init?(_ event: RDKafkaClient.KafkaEvent) {
+    internal init(_ event: RDKafkaClient.KafkaEvent) {
         switch event {
         case .statistics(let stat):
             self = .statistics(stat)

diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift
index 84c66eba..eef531b7 100644
--- a/Sources/Kafka/KafkaProducer.swift
+++ b/Sources/Kafka/KafkaProducer.swift
@@ -119,7 +119,7 @@ public final class KafkaProducer: Service, Sendable {
         let client = try RDKafkaClient.makeClient(
             type: .producer,
             configDictionary: configuration.dictionary,
-            events: [.log],
+            events: [.log], // No .deliveryReport here!
             logger: logger
         )

diff --git a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift b/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
deleted file mode 100644
index b8ccd24a..00000000
--- a/Sources/SwiftKafka/Configuration/KafkaProducerConfiguration.swift
+++ /dev/null
@@ -1,190 +0,0 @@
-//===----------------------------------------------------------------------===//
-//
-// This source file is part of the swift-kafka-gsoc open source project
-//
-// Copyright (c) 2022 Apple Inc. and the swift-kafka-gsoc project authors
-// Licensed under Apache License v2.0
-//
-// See LICENSE.txt for license information
-// See CONTRIBUTORS.txt for the list of swift-kafka-gsoc project authors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-//===----------------------------------------------------------------------===//
-
-public struct KafkaProducerConfiguration {
-    // MARK: - SwiftKafka-specific Config properties
-
-    /// The time between two consecutive polls.
-    /// Effectively controls the rate at which incoming events are consumed.
-    /// Default: `.milliseconds(100)`
-    public var pollInterval: Duration = .milliseconds(100)
-
-
-    /// Maximum timeout for flushing outstanding produce requests when the ``KafkaProducer`` is shutting down.
-    /// Default: `10000`
-    public var flushTimeoutMilliseconds: Int = 10000 {
-        didSet {
-            precondition(
-                0...Int(Int32.max) ~= self.flushTimeoutMilliseconds,
-                "Flush timeout outside of valid range \(0...Int32.max)"
-            )
-        }
-    }
-
-    // MARK: - Producer-specific Config Properties
-
-    /// When set to true, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5), retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo. Producer instantiation will fail if user-supplied configuration is incompatible.
-    /// Default: `false`
-    public var enableIdempotence: Bool = false
-
-    /// Producer queue options.
-    public var queue: KafkaConfiguration.QueueOptions = .init()
-
-    /// How many times to retry sending a failing Message. Note: retrying may cause reordering unless enable.idempotence is set to true.
-    /// Default: `2_147_483_647`
-    public var messageSendMaxRetries: Int = 2_147_483_647
-
-    /// Allow automatic topic creation on the broker when producing to non-existent topics.
-    /// The broker must also be configured with auto.create.topics.enable=true for this configuration to take effect.
-    /// Default: `true`
-    public var allowAutoCreateTopics: Bool = true
-
-    // MARK: - Common Client Config Properties
-
-    /// Client identifier.
-    /// Default: `"rdkafka"`
-    public var clientID: String = "rdkafka"
-
-    /// Initial list of brokers.
-    /// Default: `[]`
-    public var bootstrapServers: [KafkaConfiguration.Broker] = []
-
-    /// Message options.
-    public var message: KafkaConfiguration.MessageOptions = .init()
-
-    /// Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least fetch.max.bytes + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.
-    /// Default: `100_000_000`
-    public var receiveMessageMaxBytes: Int = 100_000_000
-
-    /// Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
-    /// Default: `1_000_000`
-    public var maxInFlightRequestsPerConnection: Int = 1_000_000
-
-    /// Metadata cache max age.
-    /// Default: `900_000`
-    public var metadataMaxAgeMilliseconds: Int = 900_000
-
-    /// Topic metadata options.
-    public var topicMetadata: KafkaConfiguration.TopicMetadataOptions = .init()
-
-    /// Topic denylist.
-    /// Default: `[]`
-    public var topicDenylist: [String] = []
-
-    /// Debug options.
-    /// Default: `[]`
-    public var debug: [KafkaConfiguration.DebugOption] = []
-
-    /// Socket options.
-    public var socket: KafkaConfiguration.SocketOptions = .init()
-
-    /// Broker options.
-    public var broker: KafkaConfiguration.BrokerOptions = .init()
-
-    /// Reconnect options.
-    public var reconnect: KafkaConfiguration.ReconnectOptions = .init()
-
-    /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl).
-    /// Default: `.plaintext`
-    public var securityProtocol: KafkaConfiguration.SecurityProtocol = .plaintext
-
-    public init() {}
-}
-
-// MARK: - KafkaProducerConfiguration + Dictionary
-
-extension KafkaProducerConfiguration {
-    internal var dictionary: [String: String] {
-        var resultDict: [String: String] = [:]
-
-        resultDict["statistics.interval.ms"] = String(self.statisticsInterval.totalMilliseconds)
-        resultDict["enable.idempotence"] = String(self.enableIdempotence)
-        resultDict["queue.buffering.max.messages"] = String(self.queue.bufferingMaxMessages)
-        resultDict["queue.buffering.max.kbytes"] = String(self.queue.bufferingMaxKBytes)
-        resultDict["queue.buffering.max.ms"] = String(self.queue.bufferingMaxMilliseconds)
-        resultDict["message.send.max.retries"] = String(self.messageSendMaxRetries)
-        resultDict["allow.auto.create.topics"] = String(self.allowAutoCreateTopics)
-
-        resultDict["client.id"] = self.clientID
-        resultDict["bootstrap.servers"] = self.bootstrapServers.map(\.description).joined(separator: ",")
-        resultDict["message.max.bytes"] = String(self.message.maxBytes)
-        resultDict["message.copy.max.bytes"] = String(self.message.copyMaxBytes)
-        resultDict["receive.message.max.bytes"] = String(self.receiveMessageMaxBytes)
-        resultDict["max.in.flight.requests.per.connection"] = String(self.maxInFlightRequestsPerConnection)
-        resultDict["metadata.max.age.ms"] = String(self.metadataMaxAgeMilliseconds)
-        resultDict["topic.metadata.refresh.interval.ms"] = String(self.topicMetadata.refreshIntervalMilliseconds)
-        resultDict["topic.metadata.refresh.fast.interval.ms"] = String(self.topicMetadata.refreshFastIntervalMilliseconds)
-        resultDict["topic.metadata.refresh.sparse"] = String(self.topicMetadata.refreshSparse)
-        resultDict["topic.metadata.propagation.max.ms"] = String(self.topicMetadata.propagationMaxMilliseconds)
-        resultDict["topic.blacklist"] = self.topicDenylist.joined(separator: ",")
-        if !self.debug.isEmpty {
-            resultDict["debug"] = self.debug.map(\.description).joined(separator: ",")
-        }
-        resultDict["socket.timeout.ms"] = String(self.socket.timeoutMilliseconds)
-        resultDict["socket.send.buffer.bytes"] = String(self.socket.sendBufferBytes)
-        resultDict["socket.receive.buffer.bytes"] = String(self.socket.receiveBufferBytes)
-        resultDict["socket.keepalive.enable"] = String(self.socket.keepaliveEnable)
-        resultDict["socket.nagle.disable"] = String(self.socket.nagleDisable)
-        resultDict["socket.max.fails"] = String(self.socket.maxFails)
-        resultDict["socket.connection.setup.timeout.ms"] = String(self.socket.connectionSetupTimeoutMilliseconds)
-        resultDict["broker.address.ttl"] = String(self.broker.addressTTL)
-        resultDict["broker.address.family"] = self.broker.addressFamily.description
-        resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoffMilliseconds)
-        resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.backoffMaxMilliseconds)
-
-        // Merge with SecurityProtocol configuration dictionary
-        resultDict.merge(self.securityProtocol.dictionary) { _, _ in
-            fatalError("securityProtocol and \(#file) should not have duplicate keys")
-        }
-
-        return resultDict
-    }
-}
-
-// MARK: - KafkaProducerConfiguration + Hashable
-
-extension KafkaProducerConfiguration: Hashable {}
-
-// MARK: - KafkaProducerConfiguration + Sendable
-
-extension KafkaProducerConfiguration: Sendable {}
-
-// MARK: - KafkaConfiguration + Producer Additions
-
-extension KafkaConfiguration {
-    /// Producer queue options.
-    public struct QueueOptions: Sendable, Hashable {
-        /// Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.
-        /// Default: `100_000`
-        public var bufferingMaxMessages: Int = 100_000
-
-        /// Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
-        /// Default: `1_048_576`
-        public var bufferingMaxKBytes: Int = 1_048_576
-
-        /// Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
-        /// Default: `5`
-        public var bufferingMaxMilliseconds: Int = 5
-
-        public init(
-            bufferingMaxMessages: Int = 100_000,
-            bufferingMaxKBytes: Int = 1_048_576,
-            bufferingMaxMilliseconds: Int = 5
-        ) {
-            self.bufferingMaxMessages = bufferingMaxMessages
-            self.bufferingMaxKBytes = bufferingMaxKBytes
-            self.bufferingMaxMilliseconds = bufferingMaxMilliseconds
-        }
-    }
-}
From 8b4525bf641081baec2ea8406d5dcc35101d5b8c Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Thu, 10 Aug 2023 16:31:26 +0300
Subject: [PATCH 04/20] adjust to KeyRefreshAttempts

---
 .../Configuration/KafkaConfiguration.swift    | 21 +++++++++++++++++++
 .../KafkaConsumerConfiguration.swift          | 13 ++----------
 .../KafkaProducerConfiguration.swift          | 13 ++----------
 Sources/Kafka/KafkaConsumer.swift             |  2 +-
 Sources/Kafka/KafkaProducer.swift             |  2 +-
 .../Kafka/Utilities/Duration+Helpers.swift    |  4 ----
 Tests/KafkaTests/KafkaConsumerTests.swift     |  2 +-
 Tests/KafkaTests/KafkaProducerTests.swift     |  3 +--
 8 files changed, 29 insertions(+), 31 deletions(-)

diff --git a/Sources/Kafka/Configuration/KafkaConfiguration.swift b/Sources/Kafka/Configuration/KafkaConfiguration.swift
index 88798752..5cf57f6d 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration.swift
@@ -283,4 +283,25 @@ public enum KafkaConfiguration {
         /// Use the IPv6 address family.
         public static let v6 = IPAddressFamily(description: "v6")
     }
+
+    /// Minimum time between key refresh attempts.
+    public struct KeyRefreshAttempts: Sendable, Hashable {
+        internal let rawValue: UInt
+
+        private init(rawValue: UInt) {
+            self.rawValue = rawValue
+        }
+
+        /// (Lowest granularity is milliseconds)
+        public static func value(_ value: Duration) -> KeyRefreshAttempts {
+            precondition(
+                value.canBeRepresentedAsMilliseconds,
+                "Lowest granularity is milliseconds"
+            )
+            return .init(rawValue: UInt(value.inMilliseconds))
+        }
+
+        /// Disable automatic key refresh by setting this property.
+        public static let disable: KeyRefreshAttempts = .init(rawValue: 0)
+    }
 }
diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
index 4eb5aee4..1c59b525 100644
--- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
@@ -208,16 +208,7 @@ public struct KafkaConsumerConfiguration {
     public var reconnect: KafkaConfiguration.ReconnectOptions = .init()

     /// Interval for librdkafka statistics reports
-    /// 0ms - disabled
-    /// >= 1ms - statistics provided every specified interval
-    public var statisticsInterval: Duration = .zero {
-        didSet {
-            precondition(
-                self.statisticsInterval.canBeRepresentedAsMilliseconds || self.statisticsInterval == .disabled,
-                "Lowest granularity is milliseconds or disabled"
-            )
-        }
-    }
+    public var statisticsInterval: KafkaConfiguration.KeyRefreshAttempts = .disable

     /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl).
     /// Default: `.plaintext`
     public var securityProtocol: KafkaConfiguration.SecurityProtocol = .plaintext
@@ -283,7 +274,7 @@ extension KafkaConsumerConfiguration {
         resultDict["reconnect.backoff.ms"] = String(reconnect.backoff.rawValue)
         resultDict["reconnect.backoff.max.ms"] = String(reconnect.maximumBackoff.inMilliseconds)

-        resultDict["statistics.interval.ms"] = String(statisticsInterval.inMilliseconds)
+        resultDict["statistics.interval.ms"] = String(statisticsInterval.rawValue)

         // Merge with SecurityProtocol configuration dictionary
         resultDict.merge(securityProtocol.dictionary) { _, _ in

diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
index 9db3fd5d..f1e6396a 100644
--- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift
@@ -162,16 +162,7 @@ public struct KafkaProducerConfiguration {
     public var reconnect: KafkaConfiguration.ReconnectOptions = .init()

     /// Interval for librdkafka statistics reports
-    /// 0ms - disabled
-    /// >= 1ms - statistics provided every specified interval
-    public var statisticsInterval: Duration = .zero {
-        didSet {
-            precondition(
-                self.statisticsInterval.canBeRepresentedAsMilliseconds || self.statisticsInterval == .disabled,
-                "Lowest granularity is milliseconds or disabled"
-            )
-        }
-    }
+    public var statisticsInterval: KafkaConfiguration.KeyRefreshAttempts = .disable

     /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl).
     /// Default: `.plaintext`
@@ -224,7 +215,7 @@ extension KafkaProducerConfiguration {
         resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue)
         resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds)

-        resultDict["statistics.interval.ms"] = String(statisticsInterval.inMilliseconds)
+        resultDict["statistics.interval.ms"] = String(statisticsInterval.rawValue)

         // Merge with SecurityProtocol configuration dictionary
         resultDict.merge(self.securityProtocol.dictionary) { _, _ in

diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift
index 40728742..85981441 100644
--- a/Sources/Kafka/KafkaConsumer.swift
+++ b/Sources/Kafka/KafkaConsumer.swift
@@ -250,7 +250,7 @@ public final class KafkaConsumer: Sendable, Service {
         if configuration.isAutoCommitEnabled == false {
             subscribedEvents.append(.offsetCommit)
         }
-        if configuration.statisticsInterval != .zero {
+        if configuration.statisticsInterval != .disable {
             subscribedEvents.append(.statistics)
         }

diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift
index eef531b7..68ad28fe 100644
--- a/Sources/Kafka/KafkaProducer.swift
+++ b/Sources/Kafka/KafkaProducer.swift
@@ -165,7 +165,7 @@ public final class KafkaProducer: Service, Sendable {
         var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport]
         // Listen to statistics events when statistics are enabled
-        if configuration.statisticsInterval != .disabled {
+        if configuration.statisticsInterval != .disable {
             subscribedEvents.append(.statistics)
         }

diff --git a/Sources/Kafka/Utilities/Duration+Helpers.swift b/Sources/Kafka/Utilities/Duration+Helpers.swift
index 99930d03..d964aafc 100644
--- a/Sources/Kafka/Utilities/Duration+Helpers.swift
+++ b/Sources/Kafka/Utilities/Duration+Helpers.swift
@@ -22,8 +22,4 @@ extension Duration {
     internal var canBeRepresentedAsMilliseconds: Bool {
         return self.inMilliseconds > 0
     }
-
-    internal static var disabled: Duration { // FIXME: public?
-        return .zero
-    }
 }
-        return .zero
-    }
 }
diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift
index 56dd9f4c..3c56cbe6 100644
--- a/Tests/KafkaTests/KafkaConsumerTests.swift
+++ b/Tests/KafkaTests/KafkaConsumerTests.swift
@@ -92,7 +92,7 @@ final class KafkaConsumerTests: XCTestCase {
             consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]),
             bootstrapBrokerAddresses: []
         )
-        config.statisticsInterval = Duration.milliseconds(10)
+        config.statisticsInterval = .value(.milliseconds(10))

         let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(configuration: config, logger: .kafkaTest)

diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift
index 259efb67..c6b2807a 100644
--- a/Tests/KafkaTests/KafkaProducerTests.swift
+++ b/Tests/KafkaTests/KafkaProducerTests.swift
@@ -360,8 +360,7 @@ final class KafkaProducerTests: XCTestCase {
     }

     func testProducerStatistics() async throws {
-        self.config.statisticsInterval = Duration.milliseconds(10)
-        self.config.debugOptions = [.all]
+        self.config.statisticsInterval = .value(.milliseconds(10))

         let (producer, events) = try KafkaProducer.makeProducerWithEvents(
             configuration: self.config,
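
At this point in the series the raw librdkafka statistics still reach the application as events. A minimal consumption sketch for the event API as it stands here (not part of the diffs; the group id, topic, logger label, and interval are placeholders, and the broker list is left empty as in the tests):

    import Kafka
    import Logging

    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: "example-group", topics: ["example-topic"]),
        bootstrapBrokerAddresses: []
    )
    config.statisticsInterval = .value(.milliseconds(100))

    let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(
        configuration: config,
        logger: Logger(label: "statistics-example")
    )
    // `consumer` must be run (e.g. inside a ServiceGroup, as in the tests)
    // for its poll loop to emit any events.
    for try await event in events {
        if case .statistics(let statistics) = event {
            print(statistics.jsonString) // raw librdkafka statistics JSON
        }
    }

The next patch replaces this event-based surface with direct swift-metrics integration.
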
From 23e08fcab1d54ae4f03140ecf8f205a7d119a19b Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Thu, 17 Aug 2023 13:23:59 +0300
Subject: [PATCH 05/20] draft: statistics with metrics

---
 Package.swift                                 |  2 +
 .../KafkaConfiguration+Metrics.swift          | 65 ++++++++++++++
 .../Configuration/KafkaConfiguration.swift    |  3 -
 .../KafkaConsumerConfiguration.swift          |  8 +-
 .../KafkaProducerConfiguration.swift          |  8 +-
 Sources/Kafka/KafkaConsumer.swift             | 89 +++++++------------
 Sources/Kafka/KafkaConsumerEvent.swift        |  7 +-
 Sources/Kafka/KafkaProducer.swift             |  8 +-
 Sources/Kafka/KafkaProducerEvent.swift        |  7 +-
 Sources/Kafka/Utilities/KafkaStatistics.swift | 14 +++
 Tests/KafkaTests/KafkaConsumerTests.swift     | 50 +++++++----
 Tests/KafkaTests/KafkaProducerTests.swift     | 45 ++++++----
 Tests/KafkaTests/Utilities.swift              |  9 ++
 13 files changed, 204 insertions(+), 111 deletions(-)
 create mode 100644 Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift

diff --git a/Package.swift b/Package.swift
index d20afa5e..efc493b2 100644
--- a/Package.swift
+++ b/Package.swift
@@ -48,6 +48,7 @@ let package = Package(
         .package(url: "https://github.com/apple/swift-nio.git", from: "2.55.0"),
         .package(url: "https://github.com/swift-server/swift-service-lifecycle.git", from: "2.0.0-alpha.1"),
         .package(url: "https://github.com/apple/swift-log.git", from: "1.0.0"),
+        .package(url: "https://github.com/apple/swift-metrics", from: "2.4.1"),
         // The zstd Swift package produces warnings that we cannot resolve:
         // https://github.com/facebook/zstd/issues/3328
         .package(url: "https://github.com/facebook/zstd.git", from: "1.5.0"),
@@ -81,6 +82,7 @@ let package = Package(
                 .product(name: "NIOCore", package: "swift-nio"),
                 .product(name: "ServiceLifecycle", package: "swift-service-lifecycle"),
                 .product(name: "Logging", package: "swift-log"),
+                .product(name: "Metrics", package: "swift-metrics"),
                 .product(name: "ExtrasJSON", package: "swift-extras-json"),
             ]
         ),
diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
new file mode 100644
index 00000000..b2363b43
--- /dev/null
+++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
@@ -0,0 +1,65 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the swift-kafka-client open source project
+//
+// Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors
+// Licensed under Apache License v2.0
+//
+// See LICENSE.txt for license information
+// See CONTRIBUTORS.txt for the list of swift-kafka-client project authors
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+//===----------------------------------------------------------------------===//
+
+import Metrics
+
+extension KafkaConfiguration {
+    // MARK: - Metrics
+
+    /// Use to configure metrics.
+    public struct MetricsOptions: Sendable {
+        /// librdkafka's internal monotonic clock (microseconds)
+        public var ts: Gauge?
+        /// Wall clock time in seconds since the epoch
+        public var time: Timer?
+        /// Time since this client instance was created
+        public var age: Timer?
+        /// Number of ops (callbacks, events, etc) waiting in queue for application to serve
+        public var replyQueue: Gauge?
+        /// Current number of messages in producer queues
+        public var msgCount: Gauge?
+        /// Current total size of messages in producer queues
+        public var msgSize: Gauge?
+        /// Threshold: maximum number of messages allowed on the producer queues
+        public var msgMax: Gauge?
+        /// Threshold: maximum total size of messages allowed on the producer queues
+        public var msgSizeMax: Gauge?
+
+        /// Total number of requests sent to Kafka brokers
+        public var tx: Gauge?
+        /// Total number of bytes transmitted to Kafka brokers
+        public var txBytex: Gauge?
+        /// Total number of responses received from Kafka brokers
+        public var rx: Gauge?
+        /// Total number of bytes received from Kafka brokers
+        public var rxBytex: Gauge?
+
+        /// Total number of messages transmitted (produced) to Kafka brokers
+        public var txMessages: Gauge?
+        /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers
+        public var txMessagesBytex: Gauge?
+        /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.
+        public var rxMessages: Gauge?
+        /// Total number of message bytes (including framing) received from Kafka brokers
+        public var rxMessagesBytex: Gauge?
+
+        /// Number of topics in the metadata cache.
+        public var metadataCacheCount: Gauge?
+    }
+
+    public enum Metrics: Sendable {
+        case disable
+        case enable(updateInterval: KafkaConfiguration.KeyRefreshAttempts, options: MetricsOptions)
+    }
+}
diff --git a/Sources/Kafka/Configuration/KafkaConfiguration.swift b/Sources/Kafka/Configuration/KafkaConfiguration.swift
index 5cf57f6d..4923799e 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration.swift
@@ -300,8 +300,5 @@ public enum KafkaConfiguration {
             )
             return .init(rawValue: UInt(value.inMilliseconds))
         }
-
-        /// Disable automatic key refresh by setting this property.
-        public static let disable: KeyRefreshAttempts = .init(rawValue: 0)
     }
 }
diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
index 1c59b525..426462e4 100644
--- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift
@@ -207,8 +207,8 @@ public struct KafkaConsumerConfiguration {
     /// Reconnect options.
public var reconnect: KafkaConfiguration.ReconnectOptions = .init() - /// Interval for librdkafka statistics reports - public var statisticsInterval: KafkaConfiguration.KeyRefreshAttempts = .disable + /// Options for librdkafka metrics updates + public var metrics: KafkaConfiguration.Metrics = .disable /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` @@ -274,7 +274,9 @@ extension KafkaConsumerConfiguration { resultDict["reconnect.backoff.ms"] = String(reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(reconnect.maximumBackoff.inMilliseconds) - resultDict["statistics.interval.ms"] = String(statisticsInterval.rawValue) + if case .enable(let interval, _) = metrics { + resultDict["statistics.interval.ms"] = String(interval.rawValue) + } // Merge with SecurityProtocol configuration dictionary resultDict.merge(securityProtocol.dictionary) { _, _ in diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index f1e6396a..97005a94 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -161,8 +161,8 @@ public struct KafkaProducerConfiguration { /// Reconnect options. public var reconnect: KafkaConfiguration.ReconnectOptions = .init() - /// Interval for librdkafka statistics reports - public var statisticsInterval: KafkaConfiguration.KeyRefreshAttempts = .disable + /// Options for librdkafka metrics updates + public var metrics: KafkaConfiguration.Metrics = .disable /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` @@ -215,7 +215,9 @@ extension KafkaProducerConfiguration { resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds) - resultDict["statistics.interval.ms"] = String(statisticsInterval.rawValue) + if case .enable(let interval, _) = metrics { + resultDict["statistics.interval.ms"] = String(interval.rawValue) + } // Merge with SecurityProtocol configuration dictionary resultDict.merge(self.securityProtocol.dictionary) { _, _ in diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index 85981441..5ccdf6fe 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -22,7 +22,6 @@ import ServiceLifecycle /// `NIOAsyncSequenceProducerDelegate` that terminates the closes the producer when /// `didTerminate()` is invoked. internal struct KafkaConsumerCloseOnTerminate: Sendable { - let isMessageSequence: Bool let stateMachine: NIOLockedValueBox } @@ -32,7 +31,7 @@ extension KafkaConsumerCloseOnTerminate: NIOAsyncSequenceProducerDelegate { } func didTerminate() { - self.stateMachine.withLockedValue { $0.messageSequenceTerminated(isMessageSequence: isMessageSequence) } + self.stateMachine.withLockedValue { $0.messageSequenceTerminated() } } } @@ -153,8 +152,7 @@ public final class KafkaConsumer: Sendable, Service { client: RDKafkaClient, stateMachine: NIOLockedValueBox, configuration: KafkaConsumerConfiguration, - logger: Logger, - eventSource: ProducerEvents.Source? 
= nil + logger: Logger ) throws { self.configuration = configuration self.stateMachine = stateMachine @@ -163,7 +161,7 @@ public final class KafkaConsumer: Sendable, Service { let sourceAndSequence = NIOThrowingAsyncSequenceProducer.makeSequence( elementType: KafkaConsumerMessage.self, backPressureStrategy: NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure(), - delegate: KafkaConsumerCloseOnTerminate(isMessageSequence: true, stateMachine: self.stateMachine) + delegate: KafkaConsumerCloseOnTerminate(stateMachine: self.stateMachine) ) self.messages = KafkaConsumerMessages( @@ -174,8 +172,7 @@ public final class KafkaConsumer: Sendable, Service { self.stateMachine.withLockedValue { $0.initialize( client: client, - source: sourceAndSequence.source, - eventSource: eventSource + source: sourceAndSequence.source ) } @@ -250,7 +247,7 @@ public final class KafkaConsumer: Sendable, Service { if configuration.isAutoCommitEnabled == false { subscribedEvents.append(.offsetCommit) } - if configuration.statisticsInterval != .disable { + if case .enable = configuration.metrics { subscribedEvents.append(.statistics) } @@ -264,7 +261,7 @@ public final class KafkaConsumer: Sendable, Service { let sourceAndSequence = NIOAsyncSequenceProducer.makeSequence( elementType: KafkaConsumerEvent.self, backPressureStrategy: NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure(), - delegate: KafkaConsumerCloseOnTerminate(isMessageSequence: false, stateMachine: stateMachine) + delegate: KafkaConsumerCloseOnTerminate(stateMachine: stateMachine) ) let eventsSequence = KafkaConsumerEvents(wrappedSequence: sourceAndSequence.sequence) @@ -273,8 +270,7 @@ public final class KafkaConsumer: Sendable, Service { client: client, stateMachine: stateMachine, configuration: configuration, - logger: logger, - eventSource: sourceAndSequence.source + logger: logger ) return (consumer, eventsSequence) @@ -334,7 +330,7 @@ public final class KafkaConsumer: Sendable, Service { while !Task.isCancelled { let nextAction = self.stateMachine.withLockedValue { $0.nextPollLoopAction() } switch nextAction { - case .pollForAndYieldMessage(let client, let source, let eventSource): + case .pollForAndYieldMessage(let client, let source): let events = client.eventPoll() for event in events { switch event { @@ -345,11 +341,12 @@ public final class KafkaConsumer: Sendable, Service { _ = source.yield(message) case .failure(let error): source.finish() - eventSource?.finish() throw error } case .statistics(let statistics): - _ = eventSource?.yield(.statistics(statistics)) + if case let .enable(_, options) = self.configuration.metrics { + statistics.fill(options) + } default: break // Ignore } @@ -402,9 +399,8 @@ public final class KafkaConsumer: Sendable, Service { client: client, logger: self.logger ) - case .triggerGracefulShutdownAndFinishSource(let client, let source, let eventSource): + case .triggerGracefulShutdownAndFinishSource(let client, let source): source.finish() - eventSource?.finish() self._triggerGracefulShutdown( client: client, logger: self.logger @@ -448,20 +444,17 @@ extension KafkaConsumer { /// /// - Parameter client: Client used for handling the connection to the Kafka cluster. /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements. - /// - Parameter eventSource: ``NIOAsyncSequenceProducer/Source`` used for yielding new events. case initializing( client: RDKafkaClient, - source: Producer.Source, - eventSource: ProducerEvents.Source? 
+ source: Producer.Source ) /// The ``KafkaConsumer`` is consuming messages. /// /// - Parameter client: Client used for handling the connection to the Kafka cluster. - /// - Parameter eventSource: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements. + /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements. case consuming( client: RDKafkaClient, - source: Producer.Source, - eventSource: ProducerEvents.Source? + source: Producer.Source ) /// Consumer is still running but the messages asynchronous sequence was terminated. /// All incoming messages will be dropped. @@ -484,16 +477,14 @@ extension KafkaConsumer { /// not yet available when the normal initialization occurs. mutating func initialize( client: RDKafkaClient, - source: Producer.Source, - eventSource: ProducerEvents.Source? + source: Producer.Source ) { guard case .uninitialized = self.state else { fatalError("\(#function) can only be invoked in state .uninitialized, but was invoked in state \(self.state)") } self.state = .initializing( client: client, - source: source, - eventSource: eventSource + source: source ) } @@ -505,8 +496,7 @@ extension KafkaConsumer { /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements. case pollForAndYieldMessage( client: RDKafkaClient, - source: Producer.Source, - eventSource: ProducerEvents.Source? + source: Producer.Source ) /// The ``KafkaConsumer`` stopped consuming messages or /// is in the process of shutting down. @@ -528,8 +518,8 @@ extension KafkaConsumer { fatalError("\(#function) invoked while still in state \(self.state)") case .initializing: fatalError("Subscribe to consumer group / assign to topic partition pair before reading messages") - case .consuming(let client, let source, let eventSource): - return .pollForAndYieldMessage(client: client, source: source, eventSource: eventSource) + case .consuming(let client, let source): + return .pollForAndYieldMessage(client: client, source: source) case .consumptionStopped(let client): return .pollWithoutYield(client: client) case .finishing(let client): @@ -558,11 +548,10 @@ extension KafkaConsumer { switch self.state { case .uninitialized: fatalError("\(#function) invoked while still in state \(self.state)") - case .initializing(let client, let source, let eventSource): + case .initializing(let client, let source): self.state = .consuming( client: client, - source: source, - eventSource: eventSource + source: source ) return .setUpConnection(client: client) case .consuming, .consumptionStopped, .finishing, .finished: @@ -572,30 +561,16 @@ extension KafkaConsumer { /// The messages asynchronous sequence was terminated. /// All incoming messages will be dropped. 
- mutating func messageSequenceTerminated(isMessageSequence: Bool) { + mutating func messageSequenceTerminated() { switch self.state { case .uninitialized: fatalError("\(#function) invoked while still in state \(self.state)") case .initializing: fatalError("Call to \(#function) before setUpConnection() was invoked") case .consumptionStopped: - if isMessageSequence { - fatalError("messageSequenceTerminated() must not be invoked more than once") - } - case .consuming(let client, let source, let eventSource): - // only move to stopping if messages sequence was finished - if isMessageSequence { - self.state = .consumptionStopped(client: client) - // If message sequence is being terminated, it means class deinit is called - // see `messages` field, it is last change to call finish for `eventSource` - eventSource?.finish() - } - else { - // Messages are still consuming, only event source was finished - // Ok, probably, noone wants to listen to events, - // though it might be very bad for rebalancing - self.state = .consuming(client: client, source: source, eventSource: nil) - } + fatalError("messageSequenceTerminated() must not be invoked more than once") + case .consuming(let client, _): + self.state = .consumptionStopped(client: client) case .finishing, .finished: break } @@ -617,7 +592,7 @@ extension KafkaConsumer { fatalError("Subscribe to consumer group / assign to topic partition pair before committing offsets") case .consumptionStopped: fatalError("Cannot store offset when consumption has been stopped") - case .consuming(let client, _, _): + case .consuming(let client, _): return .storeOffset(client: client) case .finishing, .finished: fatalError("\(#function) invoked while still in state \(self.state)") @@ -648,7 +623,7 @@ extension KafkaConsumer { fatalError("Subscribe to consumer group / assign to topic partition pair before committing offsets") case .consumptionStopped: fatalError("Cannot commit when consumption has been stopped") - case .consuming(let client, _, _): + case .consuming(let client, _): return .commitSync(client: client) case .finishing, .finished: return .throwClosedError @@ -669,8 +644,7 @@ extension KafkaConsumer { /// - Parameter source: ``NIOAsyncSequenceProducer/Source`` used for yielding new elements. case triggerGracefulShutdownAndFinishSource( client: RDKafkaClient, - source: Producer.Source, - eventSource: ProducerEvents.Source? + source: Producer.Source ) } @@ -684,12 +658,11 @@ extension KafkaConsumer { fatalError("\(#function) invoked while still in state \(self.state)") case .initializing: fatalError("subscribe() / assign() should have been invoked before \(#function)") - case .consuming(let client, let source, let eventSource): + case .consuming(let client, let source): self.state = .finishing(client: client) return .triggerGracefulShutdownAndFinishSource( client: client, - source: source, - eventSource: eventSource + source: source ) case .consumptionStopped(let client): self.state = .finishing(client: client) diff --git a/Sources/Kafka/KafkaConsumerEvent.swift b/Sources/Kafka/KafkaConsumerEvent.swift index a4592d3e..5cb91023 100644 --- a/Sources/Kafka/KafkaConsumerEvent.swift +++ b/Sources/Kafka/KafkaConsumerEvent.swift @@ -14,15 +14,16 @@ /// An enumeration representing events that can be received through the ``KafkaConsumerEvents`` asynchronous sequence. 
public enum KafkaConsumerEvent: Sendable, Hashable { - /// Statistics from librdkafka - case statistics(KafkaStatistics) +// /// Statistics from librdkafka +// case statistics(KafkaStatistics) /// - Important: Always provide a `default` case when switiching over this `enum`. case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY internal init(_ event: RDKafkaClient.KafkaEvent) { switch event { case .statistics(let stat): - self = .statistics(stat) + fatalError("Cannot cast \(event) to KafkaConsumerEvent") +// self = .statistics(stat) case .deliveryReport: fatalError("Cannot cast \(event) to KafkaConsumerEvent") case .consumerMessages: diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index 68ad28fe..65d2d112 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -165,7 +165,7 @@ public final class KafkaProducer: Service, Sendable { var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport] // Listen to statistics events when statistics enabled - if configuration.statisticsInterval != .disable { + if case .enable = configuration.metrics { subscribedEvents.append(.statistics) } @@ -214,6 +214,12 @@ public final class KafkaProducer: Service, Sendable { case .pollAndYield(let client, let source): let events = client.eventPoll() for event in events { + if case let .statistics(kafkaStatistics) = event { + if case let .enable(_, options) = self.configuration.metrics { + kafkaStatistics.fill(options) + } + continue + } let producerEvent = KafkaProducerEvent(event) // Ignore YieldResult as we don't support back pressure in KafkaProducer _ = source?.yield(producerEvent) diff --git a/Sources/Kafka/KafkaProducerEvent.swift b/Sources/Kafka/KafkaProducerEvent.swift index 863fa812..e09f9d68 100644 --- a/Sources/Kafka/KafkaProducerEvent.swift +++ b/Sources/Kafka/KafkaProducerEvent.swift @@ -16,8 +16,8 @@ public enum KafkaProducerEvent: Sendable, Hashable { /// A collection of delivery reports received from the Kafka cluster indicating the status of produced messages. case deliveryReports([KafkaDeliveryReport]) - /// Statistics from librdkafka - case statistics(KafkaStatistics) +// /// Statistics from librdkafka +// case statistics(KafkaStatistics) /// - Important: Always provide a `default` case when switching over this `enum`. 
case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY @@ -26,7 +26,8 @@ public enum KafkaProducerEvent: Sendable, Hashable { case .deliveryReport(results: let results): self = .deliveryReports(results) case .statistics(let stat): - self = .statistics(stat) +// self = .statistics(stat) + fatalError("Cannot cast \(event) to KafkaProducerEvent") case .consumerMessages: fatalError("Cannot cast \(event) to KafkaProducerEvent") } diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift index cb0eaad2..cb37dd25 100644 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ b/Sources/Kafka/Utilities/KafkaStatistics.swift @@ -22,4 +22,18 @@ public struct KafkaStatistics: Sendable, Hashable { return try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) } } + + internal func fill(_ options: KafkaConfiguration.MetricsOptions) { + do { + let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) + if let age = options.age, + let jsonAge = json.age { + age.recordMicroseconds(jsonAge) + } + + // TODO: other metrics + } catch { + fatalError("Statistics json decode error \(error)") + } + } } diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index 3c56cbe6..8b9a875f 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -13,10 +13,12 @@ //===----------------------------------------------------------------------===// import struct Foundation.UUID +import Atomics @testable import Kafka import Logging import ServiceLifecycle import XCTest +import Metrics // For testing locally on Mac, do the following: // @@ -85,14 +87,20 @@ final class KafkaConsumerTests: XCTestCase { ) } } - + func testConsumerStatistics() async throws { let uniqueGroupID = UUID().uuidString var config = KafkaConsumerConfiguration( consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), bootstrapBrokerAddresses: [] ) - config.statisticsInterval = .value(.milliseconds(10)) + + var metricsOptions = KafkaConfiguration.MetricsOptions() + + let handler = MockTimerHandler() + metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) + + config.metrics = .enable(updateInterval: .value(.milliseconds(10)), options: metricsOptions) let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(configuration: config, logger: .kafkaTest) @@ -109,26 +117,30 @@ final class KafkaConsumerTests: XCTestCase { } // check for librdkafka statistics - group.addTask { - var statistics: KafkaStatistics? = nil - for try await event in events { - if case let .statistics(stat) = event { - statistics = stat - break - } - } - guard let statistics else { - XCTFail("stats are not occurred") - return - } - XCTAssertFalse(statistics.jsonString.isEmpty) - XCTAssertNoThrow(try statistics.json) - } - - try await group.next() +// group.addTask { +// var statistics: KafkaStatistics? 
= nil +// for try await event in events { +// if case let .statistics(stat) = event { +// statistics = stat +// break +// } +// } +// guard let statistics else { +// XCTFail("stats are not occurred") +// return +// } +// XCTAssertFalse(statistics.jsonString.isEmpty) +// XCTAssertNoThrow(try statistics.json) +// } + + try await Task.sleep(for: .milliseconds(500)) + +// try await group.next() // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } + let value = handler.duration.load(ordering: .relaxed) + XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index c6b2807a..ebdbe191 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -360,7 +360,14 @@ final class KafkaProducerTests: XCTestCase { } func testProducerStatistics() async throws { - self.config.statisticsInterval = .value(.milliseconds(10)) + var metricsOptions = KafkaConfiguration.MetricsOptions() + + let handler = MockTimerHandler() + metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) + + self.config.metrics = .enable(updateInterval: .value(.milliseconds(10)), options: metricsOptions) + +// self.config.statisticsInterval = .value(.milliseconds(10)) let (producer, events) = try KafkaProducer.makeProducerWithEvents( configuration: self.config, @@ -380,25 +387,27 @@ final class KafkaProducerTests: XCTestCase { } // check for librdkafka statistics - group.addTask { - var statistics: KafkaStatistics? = nil - for try await e in events { - if case let .statistics(stat) = e { - statistics = stat - break - } - } - guard let statistics else { - XCTFail("stats are not occurred") - return - } - XCTAssertFalse(statistics.jsonString.isEmpty) - XCTAssertNoThrow(try statistics.json) - } - - try await group.next() +// group.addTask { +// var statistics: KafkaStatistics? 
= nil +// for try await e in events { +// if case let .statistics(stat) = e { +// statistics = stat +// break +// } +// } +// guard let statistics else { +// XCTFail("stats are not occurred") +// return +// } +// XCTAssertFalse(statistics.jsonString.isEmpty) +// XCTAssertNoThrow(try statistics.json) +// } + try await Task.sleep(for: .milliseconds(500)) +// try await group.next() // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } + let value = handler.duration.load(ordering: .relaxed) + XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/Utilities.swift b/Tests/KafkaTests/Utilities.swift index f7fbfbf8..fd9cccc9 100644 --- a/Tests/KafkaTests/Utilities.swift +++ b/Tests/KafkaTests/Utilities.swift @@ -12,8 +12,10 @@ // //===----------------------------------------------------------------------===// +import Atomics import Logging import NIOConcurrencyHelpers +import Metrics extension Logger { static var kafkaTest: Logger { @@ -98,3 +100,10 @@ internal struct MockLogHandler: LogHandler { } } } + +class MockTimerHandler: TimerHandler { + let duration = ManagedAtomic(0) + func recordNanoseconds(_ duration: Int64) { + self.duration.store(duration, ordering: .relaxed) + } +} From 612a3c4e1c506654933ad4af64fa1f5a25eb9906 Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Thu, 17 Aug 2023 13:47:01 +0300 Subject: [PATCH 06/20] make structures internal --- Sources/Kafka/Utilities/KafkaStatistics.swift | 12 +++--------- .../Kafka/Utilities/KafkaStatisticsJsonModel.swift | 4 ++-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift index cb37dd25..f0b3c907 100644 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ b/Sources/Kafka/Utilities/KafkaStatistics.swift @@ -14,16 +14,10 @@ import ExtrasJSON -public struct KafkaStatistics: Sendable, Hashable { - public let jsonString: String - - public var json: KafkaStatisticsJson { - get throws { - return try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) - } - } +struct KafkaStatistics: Sendable, Hashable { + let jsonString: String - internal func fill(_ options: KafkaConfiguration.MetricsOptions) { + func fill(_ options: KafkaConfiguration.MetricsOptions) { do { let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) if let age = options.age, diff --git a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift index 16c167b0..ceda2b6e 100644 --- a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift +++ b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift @@ -19,7 +19,7 @@ // MARK: - Statistics -public struct KafkaStatisticsJson: Hashable, Codable { +struct KafkaStatisticsJson: Hashable, Codable { let name, clientID, type: String? let ts, time, age, replyq: Int? let msgCnt, msgSize, msgMax, msgSizeMax: Int? @@ -53,7 +53,7 @@ public struct KafkaStatisticsJson: Hashable, Codable { // MARK: - Broker -public struct Broker: Hashable, Codable { +struct Broker: Hashable, Codable { let name: String? let nodeid: Int? let nodename, source, state: String? 
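
With the two patches above applied, the metrics path is usable end to end: the statistics JSON is decoded internally and recorded into whatever swift-metrics instruments the application supplies. A usage sketch against the API at this stage (labels, interval, group id, and topic are placeholders; a swift-metrics backend is assumed to have been bootstrapped via MetricsSystem.bootstrap, for example a Prometheus exporter):

    import Kafka
    import Metrics

    var options = KafkaConfiguration.MetricsOptions()
    options.age = Timer(label: "kafka.consumer.age")            // placeholder label
    options.replyQueue = Gauge(label: "kafka.consumer.replyq")  // placeholder label

    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: "example-group", topics: ["example-topic"]),
        bootstrapBrokerAddresses: []
    )
    config.metrics = .enable(updateInterval: .value(.milliseconds(500)), options: options)

Instruments left nil are skipped by KafkaStatistics.fill(_:); at this stage of the series only the age timer is actually recorded, the remaining fields are still marked TODO. The patches that follow rename this surface: .disable/.enable become .disabled/.enabled and MetricsOptions becomes KafkaMetrics.
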
From 5c10435afbdc04604985ea4e4370d0d9d7119aa2 Mon Sep 17 00:00:00 2001 From: blindspotbounty <127803250+blindspotbounty@users.noreply.github.com> Date: Mon, 21 Aug 2023 11:00:34 +0300 Subject: [PATCH 07/20] Update Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift Co-authored-by: Felix Schlegel --- Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index b2363b43..07871bd7 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -59,7 +59,7 @@ extension KafkaConfiguration { } public enum Metrics: Sendable { - case disable + case disabled case enable(updateInterval: KafkaConfiguration.KeyRefreshAttempts, options: MetricsOptions) } } From 2be2bd9d958ffe9bcfc93213a08b45253fcd0eb9 Mon Sep 17 00:00:00 2001 From: blindspotbounty <127803250+blindspotbounty@users.noreply.github.com> Date: Mon, 21 Aug 2023 11:00:55 +0300 Subject: [PATCH 08/20] Update Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift Co-authored-by: Felix Schlegel --- Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index 426462e4..ad930bcf 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -208,7 +208,7 @@ public struct KafkaConsumerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disable + public var metrics: KafkaConfiguration.Metrics = .disabled /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` From 2cd0f8b2b57a3428dad68f450b21a0818a73277e Mon Sep 17 00:00:00 2001 From: blindspotbounty <127803250+blindspotbounty@users.noreply.github.com> Date: Mon, 21 Aug 2023 11:01:10 +0300 Subject: [PATCH 09/20] Update Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift Co-authored-by: Felix Schlegel --- Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index 07871bd7..d02634d6 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -18,7 +18,7 @@ extension KafkaConfiguration { // MARK: - Metrics /// Use to configure metrics. - public struct MetricsOptions: Sendable { + public struct KafkaMetrics: Sendable { /// librdkafka's internal monotonic clock (microseconds) public var ts: Gauge? 
         /// Wall clock time in seconds since the epoch

From abd97deb6ae442131d7b63e0b98349776bc450e7 Mon Sep 17 00:00:00 2001
From: blindspotbounty <127803250+blindspotbounty@users.noreply.github.com>
Date: Mon, 21 Aug 2023 11:01:19 +0300
Subject: [PATCH 10/20] Update Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift

Co-authored-by: Felix Schlegel 
---
 Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
index d02634d6..c306136c 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
@@ -60,6 +60,6 @@ extension KafkaConfiguration {
 
     public enum Metrics: Sendable {
         case disabled
-        case enable(updateInterval: KafkaConfiguration.KeyRefreshAttempts, options: MetricsOptions)
+        case enabled(updateInterval: KafkaConfiguration.KeyRefreshAttempts, options: MetricsOptions)
     }
 }
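
The review pass below tightens this surface further: the update interval becomes a plain Duration, the abbreviated librdkafka field names are spelled out, and an internal someMetricsSet flag (checked by didSet preconditions on the configurations) rejects enabling metrics without supplying at least one instrument. A sketch of the resulting usage, with placeholder labels; the producer initializer taking only the broker list is an assumption, not shown in these diffs:

    import Kafka
    import Metrics

    var options = KafkaConfiguration.KafkaMetrics()
    options.totalMessagesSent = Gauge(label: "kafka.producer.msgs.sent") // placeholder label
    options.age = Timer(label: "kafka.producer.age")                     // placeholder label

    // Assumed initializer shape; at least one instrument above must be set,
    // otherwise the configuration's didSet precondition fires.
    var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: [])
    config.metrics = .enabled(updateInterval: .milliseconds(250), options: options)
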
From 15284e1fc960b35fdf6ccf89bc5899d9bc562c92 Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Mon, 21 Aug 2023 12:28:19 +0300
Subject: [PATCH 11/20] address review comments

---
 .../KafkaConfiguration+Metrics.swift          | 48 +++--
 .../Configuration/KafkaConfiguration.swift    | 18 --
 .../KafkaConsumerConfiguration.swift          | 22 ++-
 .../KafkaProducerConfiguration.swift          | 22 ++-
 Sources/Kafka/KafkaConsumer.swift             | 11 +-
 Sources/Kafka/KafkaConsumerEvent.swift        |  5 +-
 Sources/Kafka/KafkaProducer.swift             | 29 ++++-
 Sources/Kafka/KafkaProducerEvent.swift        |  5 +-
 Sources/Kafka/Utilities/KafkaStatistics.swift | 82 +++++++-
 .../Utilities/KafkaStatisticsJsonModel.swift  | 177 +++++++++---------
 Tests/KafkaTests/KafkaConsumerTests.swift     | 26 +--
 Tests/KafkaTests/KafkaProducerTests.swift     | 27 +--
 Tests/KafkaTests/Utilities.swift              | 12 +-
 13 files changed, 291 insertions(+), 193 deletions(-)

diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
index c306136c..2152c04d 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
@@ -19,8 +19,28 @@ extension KafkaConfiguration {
 
     /// Use to configure metrics.
     public struct KafkaMetrics: Sendable {
+        internal var someMetricsSet: Bool {
+            self.timestamp != nil ||
+                self.time != nil ||
+                self.age != nil ||
+                self.replyQueue != nil ||
+                self.messageCount != nil ||
+                self.messageSize != nil ||
+                self.messageMax != nil ||
+                self.messageSizeMax != nil ||
+                self.totalRequestsSent != nil ||
+                self.totalBytesSent != nil ||
+                self.totalResponsesRecieved != nil ||
+                self.totalBytesReceived != nil ||
+                self.totalMessagesSent != nil ||
+                self.totalMessagesBytesSent != nil ||
+                self.totalMessagesRecieved != nil ||
+                self.totalMessagesBytesRecieved != nil ||
+                self.metadataCacheCount != nil
+        }
+
         /// librdkafka's internal monotonic clock (microseconds)
-        public var ts: Gauge?
+        public var timestamp: Gauge?
         /// Wall clock time in seconds since the epoch
         public var time: Timer?
         /// Time since this client instance was created
         public var age: Timer?
         /// Number of ops (callbacks, events, etc) waiting in queue for application to serve
         public var replyQueue: Gauge?
         /// Current number of messages in producer queues
-        public var msgCount: Gauge?
+        public var messageCount: Gauge?
         /// Current total size of messages in producer queues
-        public var msgSize: Gauge?
+        public var messageSize: Gauge?
         /// Threshold: maximum number of messages allowed on the producer queues
-        public var msgMax: Gauge?
+        public var messageMax: Gauge?
         /// Threshold: maximum total size of messages allowed on the producer queues
-        public var msgSizeMax: Gauge?
+        public var messageSizeMax: Gauge?
 
         /// Total number of requests sent to Kafka brokers
-        public var tx: Gauge?
+        public var totalRequestsSent: Gauge?
         /// Total number of bytes transmitted to Kafka brokers
-        public var txBytex: Gauge?
+        public var totalBytesSent: Gauge?
         /// Total number of responses received from Kafka brokers
-        public var rx: Gauge?
+        public var totalResponsesRecieved: Gauge?
         /// Total number of bytes received from Kafka brokers
-        public var rxBytex: Gauge?
+        public var totalBytesReceived: Gauge?
 
         /// Total number of messages transmitted (produced) to Kafka brokers
-        public var txMessages: Gauge?
+        public var totalMessagesSent: Gauge?
         /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers
-        public var txMessagesBytex: Gauge?
+        public var totalMessagesBytesSent: Gauge?
         /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.
-        public var rxMessages: Gauge?
+        public var totalMessagesRecieved: Gauge?
         /// Total number of message bytes (including framing) received from Kafka brokers
-        public var rxMessagesBytex: Gauge?
+        public var totalMessagesBytesRecieved: Gauge?
 
         /// Number of topics in the metadata cache.
         public var metadataCacheCount: Gauge?
@@ -60,6 +80,6 @@ extension KafkaConfiguration {
 
     public enum Metrics: Sendable {
         case disabled
-        case enabled(updateInterval: KafkaConfiguration.KeyRefreshAttempts, options: MetricsOptions)
+        case enabled(updateInterval: Duration, options: KafkaMetrics)
     }
 }
diff --git a/Sources/Kafka/Configuration/KafkaConfiguration.swift b/Sources/Kafka/Configuration/KafkaConfiguration.swift
index 4923799e..88798752 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration.swift
@@ -283,22 +283,4 @@ public enum KafkaConfiguration {
         /// Use the IPv6 address family.
         public static let v6 = IPAddressFamily(description: "v6")
     }
-
-    /// Minimum time between key refresh attempts.
- public struct KeyRefreshAttempts: Sendable, Hashable { - internal let rawValue: UInt - - private init(rawValue: UInt) { - self.rawValue = rawValue - } - - /// (Lowest granularity is milliseconds) - public static func value(_ value: Duration) -> KeyRefreshAttempts { - precondition( - value.canBeRepresentedAsMilliseconds, - "Lowest granularity is milliseconds" - ) - return .init(rawValue: UInt(value.inMilliseconds)) - } - } } diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index ad930bcf..9be4c8be 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -208,7 +208,20 @@ public struct KafkaConsumerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disabled + public var metrics: KafkaConfiguration.Metrics = .disabled { + didSet { + if case .enabled(let updateInterval, let options) = metrics { + precondition( + updateInterval.canBeRepresentedAsMilliseconds, + "Lowest granularity is milliseconds" + ) + precondition( + options.someMetricsSet, + "No metrics set but enabled" + ) + } + } + } /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` @@ -274,8 +287,11 @@ extension KafkaConsumerConfiguration { resultDict["reconnect.backoff.ms"] = String(reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(reconnect.maximumBackoff.inMilliseconds) - if case .enable(let interval, _) = metrics { - resultDict["statistics.interval.ms"] = String(interval.rawValue) + switch self.metrics { + case .disabled: + resultDict["statistics.interval.ms"] = "0" + case .enabled(let interval, _): + resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) } // Merge with SecurityProtocol configuration dictionary diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index 97005a94..af773282 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -162,7 +162,20 @@ public struct KafkaProducerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disable + public var metrics: KafkaConfiguration.Metrics = .disabled { + didSet { + if case .enabled(let updateInterval, let options) = metrics { + precondition( + updateInterval.canBeRepresentedAsMilliseconds, + "Lowest granularity is milliseconds" + ) + precondition( + options.someMetricsSet, + "No metrics set but enabled" + ) + } + } + } /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). 
/// Default: `.plaintext` @@ -215,8 +228,11 @@ extension KafkaProducerConfiguration { resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds) - if case .enable(let interval, _) = metrics { - resultDict["statistics.interval.ms"] = String(interval.rawValue) + switch self.metrics { + case .disabled: + resultDict["statistics.interval.ms"] = "0" + case .enabled(let interval, _): + resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) } // Merge with SecurityProtocol configuration dictionary diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index 5ccdf6fe..4cd2e4d8 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -207,6 +207,9 @@ public final class KafkaConsumer: Sendable, Service { if configuration.isAutoCommitEnabled == false { subscribedEvents.append(.offsetCommit) } + if case .enabled = configuration.metrics { + subscribedEvents.append(.statistics) + } let client = try RDKafkaClient.makeClient( type: .consumer, @@ -247,7 +250,7 @@ public final class KafkaConsumer: Sendable, Service { if configuration.isAutoCommitEnabled == false { subscribedEvents.append(.offsetCommit) } - if case .enable = configuration.metrics { + if case .enabled = configuration.metrics { subscribedEvents.append(.statistics) } @@ -344,8 +347,12 @@ public final class KafkaConsumer: Sendable, Service { throw error } case .statistics(let statistics): - if case let .enable(_, options) = self.configuration.metrics { + switch self.configuration.metrics { + case .enabled(_, let options): + assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") statistics.fill(options) + case .disabled: + assert(false, "Unexpected statistics received when metrics disabled") } default: break // Ignore diff --git a/Sources/Kafka/KafkaConsumerEvent.swift b/Sources/Kafka/KafkaConsumerEvent.swift index 5cb91023..2ad58b49 100644 --- a/Sources/Kafka/KafkaConsumerEvent.swift +++ b/Sources/Kafka/KafkaConsumerEvent.swift @@ -14,16 +14,13 @@ /// An enumeration representing events that can be received through the ``KafkaConsumerEvents`` asynchronous sequence. public enum KafkaConsumerEvent: Sendable, Hashable { -// /// Statistics from librdkafka -// case statistics(KafkaStatistics) /// - Important: Always provide a `default` case when switiching over this `enum`. case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY internal init(_ event: RDKafkaClient.KafkaEvent) { switch event { - case .statistics(let stat): + case .statistics: fatalError("Cannot cast \(event) to KafkaConsumerEvent") -// self = .statistics(stat) case .deliveryReport: fatalError("Cannot cast \(event) to KafkaConsumerEvent") case .consumerMessages: diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index 65d2d112..4c584268 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -115,11 +115,17 @@ public final class KafkaProducer: Service, Sendable { logger: Logger ) throws { let stateMachine = NIOLockedValueBox(StateMachine(logger: logger)) + + var subscribedEvents: [RDKafkaEvent] = [.log] // No .deliveryReport here! + + if case .enabled = configuration.metrics { + subscribedEvents.append(.statistics) + } let client = try RDKafkaClient.makeClient( type: .producer, configDictionary: configuration.dictionary, - events: [.log], // No .deliveryReport here! 
+ events: subscribedEvents, logger: logger ) @@ -165,7 +171,7 @@ public final class KafkaProducer: Service, Sendable { var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport] // Listen to statistics events when statistics enabled - if case .enable = configuration.metrics { + if case .enabled = configuration.metrics { subscribedEvents.append(.statistics) } @@ -214,15 +220,20 @@ public final class KafkaProducer: Service, Sendable { case .pollAndYield(let client, let source): let events = client.eventPoll() for event in events { - if case let .statistics(kafkaStatistics) = event { - if case let .enable(_, options) = self.configuration.metrics { - kafkaStatistics.fill(options) + switch event { + case .statistics(let statistics): + switch self.configuration.metrics { + case .enabled(_, let options): + assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") + statistics.fill(options) + case .disabled: + assert(false, "Unexpected statistics received when metrics disabled") } - continue + case .deliveryReport(let reports): + _ = source?.yield(.deliveryReports(reports)) + case .consumerMessages: + fatalError("Unexpected event for producer \(event)") } - let producerEvent = KafkaProducerEvent(event) - // Ignore YieldResult as we don't support back pressure in KafkaProducer - _ = source?.yield(producerEvent) } try await Task.sleep(for: self.configuration.pollInterval) case .flushFinishSourceAndTerminatePollLoop(let client, let source): diff --git a/Sources/Kafka/KafkaProducerEvent.swift b/Sources/Kafka/KafkaProducerEvent.swift index e09f9d68..f6d95228 100644 --- a/Sources/Kafka/KafkaProducerEvent.swift +++ b/Sources/Kafka/KafkaProducerEvent.swift @@ -16,8 +16,6 @@ public enum KafkaProducerEvent: Sendable, Hashable { /// A collection of delivery reports received from the Kafka cluster indicating the status of produced messages. case deliveryReports([KafkaDeliveryReport]) -// /// Statistics from librdkafka -// case statistics(KafkaStatistics) /// - Important: Always provide a `default` case when switching over this `enum`. 
case DO_NOT_SWITCH_OVER_THIS_EXHAUSITVELY @@ -25,8 +23,7 @@ public enum KafkaProducerEvent: Sendable, Hashable { switch event { case .deliveryReport(results: let results): self = .deliveryReports(results) - case .statistics(let stat): -// self = .statistics(stat) + case .statistics: fatalError("Cannot cast \(event) to KafkaProducerEvent") case .consumerMessages: fatalError("Cannot cast \(event) to KafkaProducerEvent") diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift index f0b3c907..75e24bab 100644 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ b/Sources/Kafka/Utilities/KafkaStatistics.swift @@ -17,15 +17,93 @@ import ExtrasJSON struct KafkaStatistics: Sendable, Hashable { let jsonString: String - func fill(_ options: KafkaConfiguration.MetricsOptions) { + func fill(_ options: KafkaConfiguration.KafkaMetrics) { do { let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) + if let timestamp = options.timestamp, + let jsonTimestamp = json.ts { + timestamp.record(jsonTimestamp) + } + + if let time = options.time, + let jsonTime = json.time { + time.record(.init(jsonTime)) + } + if let age = options.age, let jsonAge = json.age { age.recordMicroseconds(jsonAge) } - // TODO: other metrics + if let replyQueue = options.replyQueue, + let jsonReplyQueue = json.replyq { + replyQueue.record(jsonReplyQueue) + } + + if let messageCount = options.messageCount, + let jsonMessageCount = json.msgCnt { + messageCount.record(jsonMessageCount) + } + + if let messageSize = options.messageSize, + let jsonMessageSize = json.msgSize { + messageSize.record(jsonMessageSize) + } + + if let messageMax = options.messageMax, + let jsonMessageMax = json.msgMax { + messageMax.record(jsonMessageMax) + } + + if let messageSizeMax = options.messageSizeMax, + let jsonMessageSizeMax = json.msgSizeMax { + messageSizeMax.record(jsonMessageSizeMax) + } + + if let totalRequestsSent = options.totalRequestsSent, + let jsonTx = json.tx { + totalRequestsSent.record(jsonTx) + } + + if let totalBytesSent = options.totalBytesSent, + let jsonTxBytes = json.txBytes { + totalBytesSent.record(jsonTxBytes) + } + + if let totalResponsesRecieved = options.totalResponsesRecieved, + let jsonRx = json.rx { + totalResponsesRecieved.record(jsonRx) + } + + if let totalBytesReceived = options.totalBytesReceived, + let jsonRxBytes = json.rxBytes { + totalBytesReceived.record(jsonRxBytes) + } + + if let totalMessagesSent = options.totalMessagesSent, + let jsonTxMessages = json.txmsgs { + totalMessagesSent.record(jsonTxMessages) + } + + if let totalMessagesBytesSent = options.totalMessagesBytesSent, + let jsonTxMessagesBytes = json.txmsgBytes { + totalMessagesBytesSent.record(jsonTxMessagesBytes) + } + + if let totalMessagesRecieved = options.totalMessagesRecieved, + let jsonRxMessages = json.rxmsgs { + totalMessagesRecieved.record(jsonRxMessages) + } + + if let totalMessagesBytesRecieved = options.totalMessagesBytesRecieved, + let jsonRxMessagesBytes = json.rxmsgBytes { + totalMessagesBytesRecieved.record(jsonRxMessagesBytes) + } + + if let metadataCacheCount = options.metadataCacheCount, + let jsonMetaDataCacheCount = json.metadataCacheCnt { + metadataCacheCount.record(jsonMetaDataCacheCount) + } } catch { fatalError("Statistics json decode error \(error)") } diff --git a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift index ceda2b6e..67a28bfd 100644 --- 
a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift +++ b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift @@ -24,8 +24,8 @@ struct KafkaStatisticsJson: Hashable, Codable { let ts, time, age, replyq: Int? let msgCnt, msgSize, msgMax, msgSizeMax: Int? let simpleCnt, metadataCacheCnt: Int? - let brokers: [String: Broker]? - let topics: [String: Topic]? +// let brokers: [String: Broker]? +// let topics: [String: Topic]? let cgrp: Cgrp? let tx, txBytes, rx, rxBytes: Int? let txmsgs, txmsgBytes, rxmsgs, rxmsgBytes: Int? @@ -40,7 +40,8 @@ struct KafkaStatisticsJson: Hashable, Codable { case msgSizeMax = "msg_size_max" case simpleCnt = "simple_cnt" case metadataCacheCnt = "metadata_cache_cnt" - case brokers, topics, cgrp, tx +// case brokers, topics + case cgrp, tx case txBytes = "tx_bytes" case rx case rxBytes = "rx_bytes" @@ -53,37 +54,37 @@ struct KafkaStatisticsJson: Hashable, Codable { // MARK: - Broker -struct Broker: Hashable, Codable { - let name: String? - let nodeid: Int? - let nodename, source, state: String? - let stateage, outbufCnt, outbufMsgCnt, waitrespCnt: Int? - let waitrespMsgCnt, tx, txbytes, txerrs: Int? - let txretries, txidle, reqTimeouts, rx: Int? - let rxbytes, rxerrs, rxcorriderrs, rxpartial: Int? - let rxidle, zbufGrow, bufGrow, wakeups: Int? - let connects, disconnects: Int? - let intLatency, outbufLatency, rtt, throttle: [String: Int]? - let req: [String: Int]? - let toppars: [String: Toppar]? - - enum CodingKeys: String, CodingKey { - case name, nodeid, nodename, source, state, stateage - case outbufCnt = "outbuf_cnt" - case outbufMsgCnt = "outbuf_msg_cnt" - case waitrespCnt = "waitresp_cnt" - case waitrespMsgCnt = "waitresp_msg_cnt" - case tx, txbytes, txerrs, txretries, txidle - case reqTimeouts = "req_timeouts" - case rx, rxbytes, rxerrs, rxcorriderrs, rxpartial, rxidle - case zbufGrow = "zbuf_grow" - case bufGrow = "buf_grow" - case wakeups, connects, disconnects - case intLatency = "int_latency" - case outbufLatency = "outbuf_latency" - case rtt, throttle, req, toppars - } -} +//struct Broker: Hashable, Codable { +// let name: String? +// let nodeid: Int? +// let nodename, source, state: String? +// let stateage, outbufCnt, outbufMsgCnt, waitrespCnt: Int? +// let waitrespMsgCnt, tx, txbytes, txerrs: Int? +// let txretries, txidle, reqTimeouts, rx: Int? +// let rxbytes, rxerrs, rxcorriderrs, rxpartial: Int? +// let rxidle, zbufGrow, bufGrow, wakeups: Int? +// let connects, disconnects: Int? +// let intLatency, outbufLatency, rtt, throttle: [String: Int]? +// let req: [String: Int]? +// let toppars: [String: Toppar]? +// +// enum CodingKeys: String, CodingKey { +// case name, nodeid, nodename, source, state, stateage +// case outbufCnt = "outbuf_cnt" +// case outbufMsgCnt = "outbuf_msg_cnt" +// case waitrespCnt = "waitresp_cnt" +// case waitrespMsgCnt = "waitresp_msg_cnt" +// case tx, txbytes, txerrs, txretries, txidle +// case reqTimeouts = "req_timeouts" +// case rx, rxbytes, rxerrs, rxcorriderrs, rxpartial, rxidle +// case zbufGrow = "zbuf_grow" +// case bufGrow = "buf_grow" +// case wakeups, connects, disconnects +// case intLatency = "int_latency" +// case outbufLatency = "outbuf_latency" +// case rtt, throttle, req, toppars +// } +//} // MARK: - Toppars @@ -118,60 +119,60 @@ struct Cgrp: Hashable, Codable { // MARK: - Topic -struct Topic: Hashable, Codable { - let topic: String? - let age, metadataAge: Int? - let batchsize, batchcnt: [String: Int]? - let partitions: [String: Partition]? 
- - enum CodingKeys: String, CodingKey { - case topic, age - case metadataAge = "metadata_age" - case batchsize, batchcnt, partitions - } -} +//struct Topic: Hashable, Codable { +// let topic: String? +// let age, metadataAge: Int? +// let batchsize, batchcnt: [String: Int]? +// let partitions: [String: Partition]? +// +// enum CodingKeys: String, CodingKey { +// case topic, age +// case metadataAge = "metadata_age" +// case batchsize, batchcnt, partitions +// } +//} // MARK: - Partition -struct Partition: Hashable, Codable { - let partition, broker, leader: Int? - let desired, unknown: Bool? - let msgqCnt, msgqBytes, xmitMsgqCnt, xmitMsgqBytes: Int? - let fetchqCnt, fetchqSize: Int? - let fetchState: String? - let queryOffset, nextOffset, appOffset, storedOffset: Int? - let commitedOffset, committedOffset, eofOffset, loOffset: Int? - let hiOffset, lsOffset, consumerLag, consumerLagStored: Int? - let txmsgs, txbytes, rxmsgs, rxbytes: Int? - let msgs, rxVerDrops, msgsInflight, nextACKSeq: Int? - let nextErrSeq, ackedMsgid: Int? - - enum CodingKeys: String, CodingKey { - case partition, broker, leader, desired, unknown - case msgqCnt = "msgq_cnt" - case msgqBytes = "msgq_bytes" - case xmitMsgqCnt = "xmit_msgq_cnt" - case xmitMsgqBytes = "xmit_msgq_bytes" - case fetchqCnt = "fetchq_cnt" - case fetchqSize = "fetchq_size" - case fetchState = "fetch_state" - case queryOffset = "query_offset" - case nextOffset = "next_offset" - case appOffset = "app_offset" - case storedOffset = "stored_offset" - case commitedOffset = "commited_offset" - case committedOffset = "committed_offset" - case eofOffset = "eof_offset" - case loOffset = "lo_offset" - case hiOffset = "hi_offset" - case lsOffset = "ls_offset" - case consumerLag = "consumer_lag" - case consumerLagStored = "consumer_lag_stored" - case txmsgs, txbytes, rxmsgs, rxbytes, msgs - case rxVerDrops = "rx_ver_drops" - case msgsInflight = "msgs_inflight" - case nextACKSeq = "next_ack_seq" - case nextErrSeq = "next_err_seq" - case ackedMsgid = "acked_msgid" - } -} +//struct Partition: Hashable, Codable { +// let partition, broker, leader: Int? +// let desired, unknown: Bool? +// let msgqCnt, msgqBytes, xmitMsgqCnt, xmitMsgqBytes: Int? +// let fetchqCnt, fetchqSize: Int? +// let fetchState: String? +// let queryOffset, nextOffset, appOffset, storedOffset: Int? +// let commitedOffset, committedOffset, eofOffset, loOffset: Int? +// let hiOffset, lsOffset, consumerLag, consumerLagStored: Int? +// let txmsgs, txbytes, rxmsgs, rxbytes: Int? +// let msgs, rxVerDrops, msgsInflight, nextACKSeq: Int? +// let nextErrSeq, ackedMsgid: Int? 
+// +// enum CodingKeys: String, CodingKey { +// case partition, broker, leader, desired, unknown +// case msgqCnt = "msgq_cnt" +// case msgqBytes = "msgq_bytes" +// case xmitMsgqCnt = "xmit_msgq_cnt" +// case xmitMsgqBytes = "xmit_msgq_bytes" +// case fetchqCnt = "fetchq_cnt" +// case fetchqSize = "fetchq_size" +// case fetchState = "fetch_state" +// case queryOffset = "query_offset" +// case nextOffset = "next_offset" +// case appOffset = "app_offset" +// case storedOffset = "stored_offset" +// case commitedOffset = "commited_offset" +// case committedOffset = "committed_offset" +// case eofOffset = "eof_offset" +// case loOffset = "lo_offset" +// case hiOffset = "hi_offset" +// case lsOffset = "ls_offset" +// case consumerLag = "consumer_lag" +// case consumerLagStored = "consumer_lag_stored" +// case txmsgs, txbytes, rxmsgs, rxbytes, msgs +// case rxVerDrops = "rx_ver_drops" +// case msgsInflight = "msgs_inflight" +// case nextACKSeq = "next_ack_seq" +// case nextErrSeq = "next_err_seq" +// case ackedMsgid = "acked_msgid" +// } +//} diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index 8b9a875f..d3f6e542 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -95,12 +95,12 @@ final class KafkaConsumerTests: XCTestCase { bootstrapBrokerAddresses: [] ) - var metricsOptions = KafkaConfiguration.MetricsOptions() + var metricsOptions = KafkaConfiguration.KafkaMetrics() let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - config.metrics = .enable(updateInterval: .value(.milliseconds(10)), options: metricsOptions) + config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(configuration: config, logger: .kafkaTest) @@ -116,31 +116,13 @@ final class KafkaConsumerTests: XCTestCase { try await serviceGroup.run() } - // check for librdkafka statistics -// group.addTask { -// var statistics: KafkaStatistics? 
= nil -// for try await event in events { -// if case let .statistics(stat) = event { -// statistics = stat -// break -// } -// } -// guard let statistics else { -// XCTFail("stats are not occurred") -// return -// } -// XCTAssertFalse(statistics.jsonString.isEmpty) -// XCTAssertNoThrow(try statistics.json) -// } - try await Task.sleep(for: .milliseconds(500)) -// try await group.next() - // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - let value = handler.duration.load(ordering: .relaxed) + var iter = handler.expectation.makeAsyncIterator() + let value = await iter.next() XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index ebdbe191..70e47701 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -360,14 +360,12 @@ final class KafkaProducerTests: XCTestCase { } func testProducerStatistics() async throws { - var metricsOptions = KafkaConfiguration.MetricsOptions() + var metricsOptions = KafkaConfiguration.KafkaMetrics() let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - self.config.metrics = .enable(updateInterval: .value(.milliseconds(10)), options: metricsOptions) - -// self.config.statisticsInterval = .value(.milliseconds(10)) + self.config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) let (producer, events) = try KafkaProducer.makeProducerWithEvents( configuration: self.config, @@ -386,28 +384,13 @@ final class KafkaProducerTests: XCTestCase { try await serviceGroup.run() } - // check for librdkafka statistics -// group.addTask { -// var statistics: KafkaStatistics? = nil -// for try await e in events { -// if case let .statistics(stat) = e { -// statistics = stat -// break -// } -// } -// guard let statistics else { -// XCTFail("stats are not occurred") -// return -// } -// XCTAssertFalse(statistics.jsonString.isEmpty) -// XCTAssertNoThrow(try statistics.json) -// } try await Task.sleep(for: .milliseconds(500)) -// try await group.next() + // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - let value = handler.duration.load(ordering: .relaxed) + var iter = handler.expectation.makeAsyncIterator() + let value = await iter.next() XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/Utilities.swift b/Tests/KafkaTests/Utilities.swift index fd9cccc9..75461e74 100644 --- a/Tests/KafkaTests/Utilities.swift +++ b/Tests/KafkaTests/Utilities.swift @@ -102,8 +102,16 @@ internal struct MockLogHandler: LogHandler { } class MockTimerHandler: TimerHandler { - let duration = ManagedAtomic(0) + let expectation: AsyncStream + private let expectationContinuation: AsyncStream.Continuation + + init() { + var expectationContinuation: AsyncStream.Continuation! 
+ self.expectation = AsyncStream(bufferingPolicy: .bufferingNewest(1)) { expectationContinuation = $0 } + self.expectationContinuation = expectationContinuation + } + func recordNanoseconds(_ duration: Int64) { - self.duration.store(duration, ordering: .relaxed) + _ = self.expectationContinuation.yield(duration) } } From a4ee67807a3c2eacd2015e12c20ea7fb8240dab0 Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Mon, 21 Aug 2023 12:34:35 +0300 Subject: [PATCH 12/20] formatting --- .../KafkaConfiguration+Metrics.swift | 40 +++++++++---------- .../KafkaConsumerConfiguration.swift | 2 +- .../KafkaProducerConfiguration.swift | 6 +-- Sources/Kafka/KafkaConsumer.swift | 4 +- Sources/Kafka/KafkaProducer.swift | 8 ++-- Sources/Kafka/Utilities/KafkaStatistics.swift | 36 ++++++++--------- .../Utilities/KafkaStatisticsJsonModel.swift | 12 +++--- Tests/KafkaTests/KafkaConsumerTests.swift | 24 ++++++----- Tests/KafkaTests/KafkaProducerTests.swift | 16 +++++--- Tests/KafkaTests/Utilities.swift | 3 +- 10 files changed, 79 insertions(+), 72 deletions(-) diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index 2152c04d..5e4c9c58 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -21,23 +21,23 @@ extension KafkaConfiguration { public struct KafkaMetrics: Sendable { internal var someMetricsSet: Bool { self.timestamp != nil || - self.time != nil || - self.age != nil || - self.replyQueue != nil || - self.messageCount != nil || - self.messageSize != nil || - self.messageMax != nil || - self.messageSizeMax != nil || - self.totalRequestsSent != nil || - self.totalBytesSent != nil || - self.totalResponsesRecieved != nil || - self.totalBytesReceived != nil || - self.totalMessagesSent != nil || - self.totalMessagesBytesSent != nil || - self.totalBytesReceived != nil || - self.metadataCacheCount != nil + self.time != nil || + self.age != nil || + self.replyQueue != nil || + self.messageCount != nil || + self.messageSize != nil || + self.messageMax != nil || + self.messageSizeMax != nil || + self.totalRequestsSent != nil || + self.totalBytesSent != nil || + self.totalResponsesRecieved != nil || + self.totalBytesReceived != nil || + self.totalMessagesSent != nil || + self.totalMessagesBytesSent != nil || + self.totalBytesReceived != nil || + self.metadataCacheCount != nil } - + /// librdkafka's internal monotonic clock (microseconds) public var timestamp: Gauge? /// Wall clock time in seconds since the epoch @@ -54,7 +54,7 @@ extension KafkaConfiguration { public var messageMax: Gauge? /// Threshold: maximum total size of messages allowed on the producer queues public var messageSizeMax: Gauge? - + /// Total number of requests sent to Kafka brokers public var totalRequestsSent: Gauge? /// Total number of bytes transmitted to Kafka brokers @@ -63,7 +63,7 @@ extension KafkaConfiguration { public var totalResponsesRecieved: Gauge? /// Total number of bytes received from Kafka brokers public var totalBytesReceived: Gauge? - + /// Total number of messages transmitted (produced) to Kafka brokers public var totalMessagesSent: Gauge? /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers @@ -72,11 +72,11 @@ extension KafkaConfiguration { public var totalMessagesRecieved: Gauge? 
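
A note on usage at this point in the series: each of these fields is an independently optional swift-metrics Gauge, so an application opts into exactly the statistics it wants and everything else goes unreported. A minimal wiring sketch against the `.enabled(updateInterval:options:)` API as it stands in this patch; the gauge labels are placeholders, and the producer configuration initializer is assumed to mirror the consumer one used in the tests:

    import Kafka
    import Metrics

    var options = KafkaConfiguration.KafkaMetrics()
    options.totalRequestsSent = Gauge(label: "kafka.broker.tx")
    options.totalMessagesSent = Gauge(label: "kafka.producer.txmsgs")

    // Initializer assumed; mirrors the consumer configuration in the tests.
    var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: [])
    config.metrics = .enabled(updateInterval: .milliseconds(500), options: options)
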
/// Total number of message bytes (including framing) received from Kafka brokers public var totalMessagesBytesRecieved: Gauge? - + /// Number of topics in the metadata cache. public var metadataCacheCount: Gauge? } - + public enum Metrics: Sendable { case disabled case enabled(updateInterval: Duration, options: KafkaMetrics) diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index 9be4c8be..b5d46177 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -286,7 +286,7 @@ extension KafkaConsumerConfiguration { resultDict["broker.address.family"] = broker.addressFamily.description resultDict["reconnect.backoff.ms"] = String(reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(reconnect.maximumBackoff.inMilliseconds) - + switch self.metrics { case .disabled: resultDict["statistics.interval.ms"] = "0" diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index af773282..36e9827c 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -162,7 +162,7 @@ public struct KafkaProducerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disabled { + public var metrics: KafkaConfiguration.Metrics = .disabled { didSet { if case .enabled(let updateInterval, let options) = metrics { precondition( @@ -176,7 +176,7 @@ public struct KafkaProducerConfiguration { } } } - + /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` public var securityProtocol: KafkaConfiguration.SecurityProtocol = .plaintext @@ -227,7 +227,7 @@ extension KafkaProducerConfiguration { resultDict["broker.address.family"] = self.broker.addressFamily.description resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds) - + switch self.metrics { case .disabled: resultDict["statistics.interval.ms"] = "0" diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index 4cd2e4d8..1c18a3b0 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -275,7 +275,7 @@ public final class KafkaConsumer: Sendable, Service { configuration: configuration, logger: logger ) - + return (consumer, eventsSequence) } @@ -352,7 +352,7 @@ public final class KafkaConsumer: Sendable, Service { assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") statistics.fill(options) case .disabled: - assert(false, "Unexpected statistics received when metrics disabled") + assertionFailure("Unexpected statistics received when metrics disabled") } default: break // Ignore diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index 4c584268..1dc94024 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -115,9 +115,9 @@ public final class KafkaProducer: Service, Sendable { logger: Logger ) throws { let stateMachine = NIOLockedValueBox(StateMachine(logger: logger)) - + var subscribedEvents: [RDKafkaEvent] = [.log] // No .deliveryReport here! 
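
For comparison with the factory variant (`makeProducerWithEvents`), delivery reports still travel through the event sequence while statistics are recorded straight into the configured gauges. Roughly, as a sketch assuming a `config` with metrics enabled and a `logger` in scope:

    let (producer, events) = try KafkaProducer.makeProducerWithEvents(
        configuration: config,
        logger: logger
    )

    try await withThrowingTaskGroup(of: Void.self) { group in
        group.addTask { try await producer.run() }
        group.addTask {
            for try await event in events {
                if case .deliveryReports(let reports) = event {
                    // Statistics no longer appear here; they go to the gauges.
                    logger.info("delivered \(reports.count) message(s)")
                }
            }
        }
        try await group.next()
        group.cancelAll()
    }
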
- + if case .enabled = configuration.metrics { subscribedEvents.append(.statistics) } @@ -168,7 +168,7 @@ public final class KafkaProducer: Service, Sendable { delegate: KafkaProducerCloseOnTerminate(stateMachine: stateMachine) ) let source = sourceAndSequence.source - + var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport] // Listen to statistics events when statistics enabled if case .enabled = configuration.metrics { @@ -227,7 +227,7 @@ public final class KafkaProducer: Service, Sendable { assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") statistics.fill(options) case .disabled: - assert(false, "Unexpected statistics received when metrics disabled") + assertionFailure("Unexpected statistics received when metrics disabled") } case .deliveryReport(let reports): _ = source?.yield(.deliveryReports(reports)) diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift index 75e24bab..e0258ad8 100644 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ b/Sources/Kafka/Utilities/KafkaStatistics.swift @@ -16,7 +16,7 @@ import ExtrasJSON struct KafkaStatistics: Sendable, Hashable { let jsonString: String - + func fill(_ options: KafkaConfiguration.KafkaMetrics) { do { let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) @@ -24,82 +24,82 @@ struct KafkaStatistics: Sendable, Hashable { let jsonTimestamp = json.ts { timestamp.record(jsonTimestamp) } - + if let time = options.time, let jsonTime = json.time { time.record(.init(jsonTime)) } - + if let age = options.age, let jsonAge = json.age { age.recordMicroseconds(jsonAge) } - + if let replyQueue = options.replyQueue, let jsonReplyQueue = json.replyq { replyQueue.record(jsonReplyQueue) } - + if let messageCount = options.messageCount, let jsonMessageCount = json.msgCnt { messageCount.record(jsonMessageCount) } - + if let messageSize = options.messageSize, let jsonMessageSize = json.msgSize { messageSize.record(jsonMessageSize) } - + if let messageMax = options.messageMax, let jsonMessageMax = json.msgMax { - messageMax.record(jsonMessageMax) + messageMax.record(jsonMessageMax) } - + if let messageSizeMax = options.messageSizeMax, let jsonMessageSizeMax = json.msgSizeMax { messageSizeMax.record(jsonMessageSizeMax) } - + if let totalRequestsSent = options.totalRequestsSent, let jsonTx = json.tx { totalRequestsSent.record(jsonTx) } - + if let totalBytesSent = options.totalBytesSent, let jsonTxBytes = json.txBytes { totalBytesSent.record(jsonTxBytes) } - + if let totalResponsesRecieved = options.totalResponsesRecieved, let jsonRx = json.rx { totalResponsesRecieved.record(jsonRx) } - + if let totalBytesReceived = options.totalBytesReceived, let jsonRxBytes = json.rxBytes { totalBytesReceived.record(jsonRxBytes) } - + if let totalMessagesSent = options.totalMessagesSent, let jsonTxMessages = json.txmsgs { totalMessagesSent.record(jsonTxMessages) } - + if let totalMessagesBytesSent = options.totalMessagesBytesSent, let jsonTxMessagesBytes = json.txmsgBytes { totalMessagesBytesSent.record(jsonTxMessagesBytes) } - + if let totalMessagesRecieved = options.totalMessagesRecieved, let jsonRxMessages = json.rxmsgs { totalMessagesRecieved.record(jsonRxMessages) } - + if let totalMessagesBytesRecieved = options.totalMessagesBytesRecieved, let jsonRxMessagesBytes = json.rxmsgBytes { totalMessagesBytesRecieved.record(jsonRxMessagesBytes) } - + if let metadataCacheCount = options.metadataCacheCount, let jsonMetaDataCacheCount = 
json.metadataCacheCnt { metadataCacheCount.record(jsonMetaDataCacheCount) diff --git a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift index 67a28bfd..16a2c553 100644 --- a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift +++ b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift @@ -54,7 +54,7 @@ struct KafkaStatisticsJson: Hashable, Codable { // MARK: - Broker -//struct Broker: Hashable, Codable { +// struct Broker: Hashable, Codable { // let name: String? // let nodeid: Int? // let nodename, source, state: String? @@ -84,7 +84,7 @@ struct KafkaStatisticsJson: Hashable, Codable { // case outbufLatency = "outbuf_latency" // case rtt, throttle, req, toppars // } -//} +// } // MARK: - Toppars @@ -119,7 +119,7 @@ struct Cgrp: Hashable, Codable { // MARK: - Topic -//struct Topic: Hashable, Codable { +// struct Topic: Hashable, Codable { // let topic: String? // let age, metadataAge: Int? // let batchsize, batchcnt: [String: Int]? @@ -130,11 +130,11 @@ struct Cgrp: Hashable, Codable { // case metadataAge = "metadata_age" // case batchsize, batchcnt, partitions // } -//} +// } // MARK: - Partition -//struct Partition: Hashable, Codable { +// struct Partition: Hashable, Codable { // let partition, broker, leader: Int? // let desired, unknown: Bool? // let msgqCnt, msgqBytes, xmitMsgqCnt, xmitMsgqBytes: Int? @@ -175,4 +175,4 @@ struct Cgrp: Hashable, Codable { // case nextErrSeq = "next_err_seq" // case ackedMsgid = "acked_msgid" // } -//} +// } diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index d3f6e542..4f28c02e 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -12,13 +12,13 @@ // //===----------------------------------------------------------------------===// -import struct Foundation.UUID import Atomics +import struct Foundation.UUID @testable import Kafka import Logging +import Metrics import ServiceLifecycle import XCTest -import Metrics // For testing locally on Mac, do the following: // @@ -87,19 +87,19 @@ final class KafkaConsumerTests: XCTestCase { ) } } - + func testConsumerStatistics() async throws { let uniqueGroupID = UUID().uuidString var config = KafkaConsumerConfiguration( consumptionStrategy: .group(id: uniqueGroupID, topics: ["this-topic-does-not-exist"]), bootstrapBrokerAddresses: [] ) - + var metricsOptions = KafkaConfiguration.KafkaMetrics() - + let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - + config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) let (consumer, events) = try KafkaConsumer.makeConsumerWithEvents(configuration: config, logger: .kafkaTest) @@ -116,13 +116,17 @@ final class KafkaConsumerTests: XCTestCase { try await serviceGroup.run() } - try await Task.sleep(for: .milliseconds(500)) + group.addTask { + for await value in handler.expectation { + XCTAssertNotEqual(value, 0) + break + } + } + + try await group.next() // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - var iter = handler.expectation.makeAsyncIterator() - let value = await iter.next() - XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index 70e47701..8c59adf2 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -361,10 +361,10 @@ final class KafkaProducerTests: 
XCTestCase { func testProducerStatistics() async throws { var metricsOptions = KafkaConfiguration.KafkaMetrics() - + let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - + self.config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) let (producer, events) = try KafkaProducer.makeProducerWithEvents( @@ -384,13 +384,17 @@ final class KafkaProducerTests: XCTestCase { try await serviceGroup.run() } - try await Task.sleep(for: .milliseconds(500)) + group.addTask { + for await value in handler.expectation { + XCTAssertNotEqual(value, 0) + break + } + } + + try await group.next() // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - var iter = handler.expectation.makeAsyncIterator() - let value = await iter.next() - XCTAssertNotEqual(value, 0) } } diff --git a/Tests/KafkaTests/Utilities.swift b/Tests/KafkaTests/Utilities.swift index 75461e74..85c1611c 100644 --- a/Tests/KafkaTests/Utilities.swift +++ b/Tests/KafkaTests/Utilities.swift @@ -12,10 +12,9 @@ // //===----------------------------------------------------------------------===// -import Atomics import Logging -import NIOConcurrencyHelpers import Metrics +import NIOConcurrencyHelpers extension Logger { static var kafkaTest: Logger { From 0a0f1b8bf66f41cf30fc62ef22071606a017844e Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Thu, 24 Aug 2023 17:56:10 +0300 Subject: [PATCH 13/20] map gauges in one place --- Sources/Kafka/Utilities/KafkaStatistics.swift | 120 ++++++------------ .../Utilities/KafkaStatisticsJsonModel.swift | 49 +++---- 2 files changed, 64 insertions(+), 105 deletions(-) diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift index e0258ad8..a617f340 100644 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ b/Sources/Kafka/Utilities/KafkaStatistics.swift @@ -13,97 +13,51 @@ //===----------------------------------------------------------------------===// import ExtrasJSON +import Metrics struct KafkaStatistics: Sendable, Hashable { let jsonString: String + + private func record(_ value: T?, to: Gauge?) { + guard let value, + let to else { + return + } + to.record(value) + } + private func recordMircoseconds(_ value: T?, to: Timer?) 
{ + guard let value, + let to else { + return + } + to.recordMicroseconds(value) + } func fill(_ options: KafkaConfiguration.KafkaMetrics) { do { let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) - if let timestamp = options.timestamp, - let jsonTimestamp = json.ts { - timestamp.record(jsonTimestamp) - } - - if let time = options.time, - let jsonTime = json.time { - time.record(.init(jsonTime)) - } - - if let age = options.age, - let jsonAge = json.age { - age.recordMicroseconds(jsonAge) - } - - if let replyQueue = options.replyQueue, - let jsonReplyQueue = json.replyq { - replyQueue.record(jsonReplyQueue) - } - - if let messageCount = options.messageCount, - let jsonMessageCount = json.msgCnt { - messageCount.record(jsonMessageCount) - } - - if let messageSize = options.messageSize, - let jsonMessageSize = json.msgSize { - messageSize.record(jsonMessageSize) - } - - if let messageMax = options.messageMax, - let jsonMessageMax = json.msgMax { - messageMax.record(jsonMessageMax) - } - - if let messageSizeMax = options.messageSizeMax, - let jsonMessageSizeMax = json.msgSizeMax { - messageSizeMax.record(jsonMessageSizeMax) - } - - if let totalRequestsSent = options.totalRequestsSent, - let jsonTx = json.tx { - totalRequestsSent.record(jsonTx) - } - - if let totalBytesSent = options.totalBytesSent, - let jsonTxBytes = json.txBytes { - totalBytesSent.record(jsonTxBytes) - } - - if let totalResponsesRecieved = options.totalResponsesRecieved, - let jsonRx = json.rx { - totalResponsesRecieved.record(jsonRx) - } - - if let totalBytesReceived = options.totalBytesReceived, - let jsonRxBytes = json.rxBytes { - totalBytesReceived.record(jsonRxBytes) - } - - if let totalMessagesSent = options.totalMessagesSent, - let jsonTxMessages = json.txmsgs { - totalMessagesSent.record(jsonTxMessages) - } - - if let totalMessagesBytesSent = options.totalMessagesBytesSent, - let jsonTxMessagesBytes = json.txmsgBytes { - totalMessagesBytesSent.record(jsonTxMessagesBytes) - } - - if let totalMessagesRecieved = options.totalMessagesRecieved, - let jsonRxMessages = json.rxmsgs { - totalMessagesRecieved.record(jsonRxMessages) - } - - if let totalMessagesBytesRecieved = options.totalMessagesBytesRecieved, - let jsonRxMessagesBytes = json.rxmsgBytes { - totalMessagesBytesRecieved.record(jsonRxMessagesBytes) - } + + record(json.timestamp, to: options.timestamp) + recordMircoseconds(json.time, to: options.time) + recordMircoseconds(json.age, to: options.age) + record(json.replyQueue, to: options.replyQueue) + record(json.messageCount, to: options.messageCount) + record(json.messageSize, to: options.messageSize) + record(json.messageMax, to: options.messageMax) + record(json.messageSizeMax, to: options.messageSizeMax) + record(json.totalRequestsSent, to: options.totalRequestsSent) + record(json.totalBytesSent, to: options.totalBytesSent) + + record(json.totalResponsesRecieved, to: options.totalResponsesRecieved) + record(json.totalBytesReceived, to: options.totalBytesReceived) + record(json.totalMessagesSent, to: options.totalMessagesSent) + record(json.totalBytesSent, to: options.totalBytesSent) + + record(json.totalMessagesBytesSent, to: options.totalMessagesBytesSent) + record(json.totalMessagesRecieved, to: options.totalMessagesRecieved) + record(json.totalMessagesBytesRecieved, to: options.totalMessagesBytesRecieved) + record(json.metadataCacheCount, to: options.metadataCacheCount) - if let metadataCacheCount = options.metadataCacheCount, - let jsonMetaDataCacheCount = 
json.metadataCacheCnt { - metadataCacheCount.record(jsonMetaDataCacheCount) - } } catch { fatalError("Statistics json decode error \(error)") } diff --git a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift index 16a2c553..57d66ed4 100644 --- a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift +++ b/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift @@ -21,37 +21,42 @@ struct KafkaStatisticsJson: Hashable, Codable { let name, clientID, type: String? - let ts, time, age, replyq: Int? - let msgCnt, msgSize, msgMax, msgSizeMax: Int? - let simpleCnt, metadataCacheCnt: Int? + let timestamp, time, age, replyQueue: Int? + let messageCount, messageSize, messageMax, messageSizeMax: Int? + let simpleCnt, metadataCacheCount: Int? // let brokers: [String: Broker]? // let topics: [String: Topic]? let cgrp: Cgrp? - let tx, txBytes, rx, rxBytes: Int? - let txmsgs, txmsgBytes, rxmsgs, rxmsgBytes: Int? + let totalRequestsSent, totalBytesSent, totalResponsesRecieved, totalBytesReceived: Int? + let totalMessagesSent, totalMessagesBytesSent, totalMessagesRecieved, totalMessagesBytesRecieved: Int? enum CodingKeys: String, CodingKey { - case name - case clientID = "client_id" - case type, ts, time, age, replyq - case msgCnt = "msg_cnt" - case msgSize = "msg_size" - case msgMax = "msg_max" - case msgSizeMax = "msg_size_max" - case simpleCnt = "simple_cnt" - case metadataCacheCnt = "metadata_cache_cnt" + case name // unused + case clientID = "client_id" // unused + case type // unused + case timestamp = "ts" + case time, age + case replyQueue = "replyq" + case messageCount = "msg_cnt" + case messageSize = "msg_size" + case messageMax = "msg_max" + case messageSizeMax = "msg_size_max" + case simpleCnt = "simple_cnt" // unused + case metadataCacheCount = "metadata_cache_cnt" // case brokers, topics - case cgrp, tx - case txBytes = "tx_bytes" - case rx - case rxBytes = "rx_bytes" - case txmsgs - case txmsgBytes = "txmsg_bytes" - case rxmsgs - case rxmsgBytes = "rxmsg_bytes" + case cgrp + case totalRequestsSent = "tx" + case totalBytesSent = "tx_bytes" + case totalResponsesRecieved = "rx" + case totalBytesReceived = "rx_bytes" + case totalMessagesSent = "txmsgs" + case totalMessagesBytesSent = "txmsg_bytes" + case totalMessagesRecieved = "rxmsgs" + case totalMessagesBytesRecieved = "rxmsg_bytes" } } +// FIXME: for future use // MARK: - Broker // struct Broker: Hashable, Codable { From 5448eb43c403079acaef74fe76fe116f3924f768 Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Wed, 30 Aug 2023 10:49:43 +0300 Subject: [PATCH 14/20] move json mode as rd kafka statistics, misc renaming + docc --- .../KafkaConfiguration+Metrics.swift | 51 ++++++++++++++- .../KafkaConsumerConfiguration.swift | 6 +- .../KafkaProducerConfiguration.swift | 6 +- Sources/Kafka/KafkaConsumer.swift | 13 +--- Sources/Kafka/KafkaProducer.swift | 8 +-- Sources/Kafka/RDKafka/RDKafkaClient.swift | 20 ++++-- .../RDKafkaStatistics.swift} | 61 ++++++++--------- Sources/Kafka/Utilities/KafkaStatistics.swift | 65 ------------------- Tests/KafkaTests/KafkaConsumerTests.swift | 14 ++-- Tests/KafkaTests/KafkaProducerTests.swift | 13 ++-- 10 files changed, 115 insertions(+), 142 deletions(-) rename Sources/Kafka/{Utilities/KafkaStatisticsJsonModel.swift => RDKafka/RDKafkaStatistics.swift} (85%) delete mode 100644 Sources/Kafka/Utilities/KafkaStatistics.swift diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift 
b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index 5e4c9c58..94d6d4a4 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -75,10 +75,59 @@ extension KafkaConfiguration { /// Number of topics in the metadata cache. public var metadataCacheCount: Gauge? + + private static func record(_ value: T?, to: Gauge?) { + guard let value, + let to else { + return + } + to.record(value) + } + + private static func recordMircoseconds(_ value: T?, to: Timer?) { + guard let value, + let to else { + return + } + to.recordMicroseconds(value) + } + + internal func update(with rdKafkaStatistics: RDKafkaStatistics) { + Self.record(rdKafkaStatistics.timestamp, to: self.timestamp) + Self.recordMircoseconds(rdKafkaStatistics.time, to: self.time) + Self.recordMircoseconds(rdKafkaStatistics.age, to: self.age) + Self.record(rdKafkaStatistics.replyQueue, to: self.replyQueue) + Self.record(rdKafkaStatistics.messageCount, to: self.messageCount) + Self.record(rdKafkaStatistics.messageSize, to: self.messageSize) + Self.record(rdKafkaStatistics.messageMax, to: self.messageMax) + Self.record(rdKafkaStatistics.messageSizeMax, to: self.messageSizeMax) + Self.record(rdKafkaStatistics.totalRequestsSent, to: self.totalRequestsSent) + Self.record(rdKafkaStatistics.totalBytesSent, to: self.totalBytesSent) + + Self.record(rdKafkaStatistics.totalResponsesRecieved, to: self.totalResponsesRecieved) + Self.record(rdKafkaStatistics.totalBytesReceived, to: self.totalBytesReceived) + Self.record(rdKafkaStatistics.totalMessagesSent, to: self.totalMessagesSent) + Self.record(rdKafkaStatistics.totalBytesSent, to: self.totalBytesSent) + + Self.record(rdKafkaStatistics.totalMessagesBytesSent, to: self.totalMessagesBytesSent) + Self.record(rdKafkaStatistics.totalMessagesRecieved, to: self.totalMessagesRecieved) + Self.record(rdKafkaStatistics.totalMessagesBytesRecieved, to: self.totalMessagesBytesRecieved) + Self.record(rdKafkaStatistics.metadataCacheCount, to: self.metadataCacheCount) + } } public enum Metrics: Sendable { case disabled - case enabled(updateInterval: Duration, options: KafkaMetrics) + case enabled(updateInterval: Duration, metrics: KafkaMetrics) + + internal func update(with rdKafkaStatistics: RDKafkaStatistics) { + switch self { + case .enabled(_, let metrics): + assert(metrics.someMetricsSet, "Unexpected statistics received when no metrics configured") + metrics.update(with: rdKafkaStatistics) + case .disabled: + assertionFailure("Unexpected statistics received when metrics disabled") + } + } } } diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index b5d46177..b4ebd9cc 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -210,13 +210,13 @@ public struct KafkaConsumerConfiguration { /// Options for librdkafka metrics updates public var metrics: KafkaConfiguration.Metrics = .disabled { didSet { - if case .enabled(let updateInterval, let options) = metrics { + if case .enabled(let updateInterval, let metrics) = metrics { precondition( updateInterval.canBeRepresentedAsMilliseconds, "Lowest granularity is milliseconds" ) precondition( - options.someMetricsSet, + metrics.someMetricsSet, "No metrics set but enabled" ) } @@ -289,7 +289,7 @@ extension KafkaConsumerConfiguration { switch self.metrics { case .disabled: - 
resultDict["statistics.interval.ms"] = "0" + resultDict["statistics.interval.ms"] = "0" // Disables metrics case .enabled(let interval, _): resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) } diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index 36e9827c..023ba1cd 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -164,13 +164,13 @@ public struct KafkaProducerConfiguration { /// Options for librdkafka metrics updates public var metrics: KafkaConfiguration.Metrics = .disabled { didSet { - if case .enabled(let updateInterval, let options) = metrics { + if case .enabled(let updateInterval, let metrics) = metrics { precondition( updateInterval.canBeRepresentedAsMilliseconds, "Lowest granularity is milliseconds" ) precondition( - options.someMetricsSet, + metrics.someMetricsSet, "No metrics set but enabled" ) } @@ -230,7 +230,7 @@ extension KafkaProducerConfiguration { switch self.metrics { case .disabled: - resultDict["statistics.interval.ms"] = "0" + resultDict["statistics.interval.ms"] = "0" // Disables metrics case .enabled(let interval, _): resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) } diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index d7b96c27..5b2a1749 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -130,11 +130,6 @@ public final class KafkaConsumer: Sendable, Service { NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure, KafkaConsumerCloseOnTerminate > - typealias ProducerEvents = NIOAsyncSequenceProducer< - KafkaConsumerEvent, - NIOAsyncSequenceProducerBackPressureStrategies.NoBackPressure, - KafkaConsumerCloseOnTerminate - > /// The configuration object of the consumer client. 
private let configuration: KafkaConsumerConfiguration @@ -354,13 +349,7 @@ public final class KafkaConsumer: Sendable, Service { // We do not support back pressure, we can ignore the yield result _ = source.yield(result) case .statistics(let statistics): - switch self.configuration.metrics { - case .enabled(_, let options): - assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") - statistics.fill(options) - case .disabled: - assertionFailure("Unexpected statistics received when metrics disabled") - } + self.configuration.metrics.update(with: statistics) default: break // Ignore } diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index 37ee3e95..543cdb9d 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -226,13 +226,7 @@ public final class KafkaProducer: Service, Sendable { for event in events { switch event { case .statistics(let statistics): - switch self.configuration.metrics { - case .enabled(_, let options): - assert(options.someMetricsSet, "Unexpected statistics received when no metrics configured") - statistics.fill(options) - case .disabled: - assertionFailure("Unexpected statistics received when metrics disabled") - } + self.configuration.metrics.update(with: statistics) case .deliveryReport(let reports): _ = source?.yield(.deliveryReports(reports)) case .consumerMessages: diff --git a/Sources/Kafka/RDKafka/RDKafkaClient.swift b/Sources/Kafka/RDKafka/RDKafkaClient.swift index db7c8af2..f45eb7b7 100644 --- a/Sources/Kafka/RDKafka/RDKafkaClient.swift +++ b/Sources/Kafka/RDKafka/RDKafkaClient.swift @@ -14,6 +14,7 @@ import Crdkafka import Dispatch +import ExtrasJSON import Logging /// Base class for ``KafkaProducer`` and ``KafkaConsumer``, @@ -311,7 +312,7 @@ final class RDKafkaClient: Sendable { enum KafkaEvent { case deliveryReport(results: [KafkaDeliveryReport]) case consumerMessages(result: Result) - case statistics(KafkaStatistics) + case statistics(RDKafkaStatistics) } /// Poll the event `rd_kafka_queue_t` for new events. @@ -343,7 +344,9 @@ final class RDKafkaClient: Sendable { case .offsetCommit: self.handleOffsetCommitEvent(event) case .statistics: - events.append(self.handleStatistics(event)) + if let forwardEvent = self.handleStatistics(event) { + events.append(forwardEvent) + } case .none: // Finished reading events, return early return events @@ -395,9 +398,18 @@ final class RDKafkaClient: Sendable { // The returned message(s) MUST NOT be freed with rd_kafka_message_destroy(). } - private func handleStatistics(_ event: OpaquePointer?) -> KafkaEvent { + /// Handle event of type `RDKafkaEvent.statistics`. + /// + /// - Parameter event: Pointer to underlying `rd_kafka_event_t`. + private func handleStatistics(_ event: OpaquePointer?) -> KafkaEvent? { let jsonStr = String(cString: rd_kafka_event_stats(event)) - return .statistics(KafkaStatistics(jsonString: jsonStr)) + do { + let json = try XJSONDecoder().decode(RDKafkaStatistics.self, from: jsonStr.utf8) + return .statistics(json) + } catch { + assertionFailure("Error occurred when decoding JSON statistics: \(error) when decoding \(jsonStr)") + } + return nil } /// Handle event of type `RDKafkaEvent.log`. 
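
Two things worth noting about the decode path above: a malformed payload now trips an assertion in debug builds and merely drops the statistics event in release builds, and the model itself is plain Codable with every field optional, so partial payloads decode cleanly. A self-contained illustration; the JSON subset here is made up:

    import ExtrasJSON

    let sample = #"{"ts": 1234, "time": 1696000000, "age": 250000, "replyq": 0}"#
    let stats = try XJSONDecoder().decode(RDKafkaStatistics.self, from: sample.utf8)
    print(stats.age ?? -1)          // 250000
    print(stats.messageCount ?? -1) // -1: absent fields decode as nil
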
diff --git a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift similarity index 85% rename from Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift rename to Sources/Kafka/RDKafka/RDKafkaStatistics.swift index 57d66ed4..b0a2c79f 100644 --- a/Sources/Kafka/Utilities/KafkaStatisticsJsonModel.swift +++ b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift @@ -15,24 +15,24 @@ // This file was generated from JSON Schema using quicktype, do not modify it directly. // To parse the JSON, add this file to your project and do: // -// let statistics = try? newJSONDecoder().decode(KafkaStatisticsJsonModel.self, from: jsonData) +// let statistics = try? newJSONDecoder().decode(RDKafkaStatistics.self, from: jsonData) // MARK: - Statistics -struct KafkaStatisticsJson: Hashable, Codable { +struct RDKafkaStatistics: Hashable, Codable { let name, clientID, type: String? let timestamp, time, age, replyQueue: Int? let messageCount, messageSize, messageMax, messageSizeMax: Int? let simpleCnt, metadataCacheCount: Int? // let brokers: [String: Broker]? // let topics: [String: Topic]? - let cgrp: Cgrp? +// let cgrp: Cgrp? let totalRequestsSent, totalBytesSent, totalResponsesRecieved, totalBytesReceived: Int? let totalMessagesSent, totalMessagesBytesSent, totalMessagesRecieved, totalMessagesBytesRecieved: Int? enum CodingKeys: String, CodingKey { case name // unused - case clientID = "client_id" // unused + case clientID = "client_id" // unused case type // unused case timestamp = "ts" case time, age @@ -44,7 +44,7 @@ struct KafkaStatisticsJson: Hashable, Codable { case simpleCnt = "simple_cnt" // unused case metadataCacheCount = "metadata_cache_cnt" // case brokers, topics - case cgrp +// case cgrp case totalRequestsSent = "tx" case totalBytesSent = "tx_bytes" case totalResponsesRecieved = "rx" @@ -57,6 +57,7 @@ struct KafkaStatisticsJson: Hashable, Codable { } // FIXME: for future use + // MARK: - Broker // struct Broker: Hashable, Codable { @@ -93,34 +94,34 @@ struct KafkaStatisticsJson: Hashable, Codable { // MARK: - Toppars -struct Toppar: Hashable, Codable { - let topic: String? - let partition: Int? - - enum CodingKeys: String, CodingKey { - case topic, partition - } -} +// struct Toppar: Hashable, Codable { +// let topic: String? +// let partition: Int? +// +// enum CodingKeys: String, CodingKey { +// case topic, partition +// } +// } // MARK: - Cgrp -struct Cgrp: Hashable, Codable { - let state: String? - let stateage: Int? - let joinState: String? - let rebalanceAge, rebalanceCnt: Int? - let rebalanceReason: String? - let assignmentSize: Int? - - enum CodingKeys: String, CodingKey { - case state, stateage - case joinState = "join_state" - case rebalanceAge = "rebalance_age" - case rebalanceCnt = "rebalance_cnt" - case rebalanceReason = "rebalance_reason" - case assignmentSize = "assignment_size" - } -} +// struct Cgrp: Hashable, Codable { +// let state: String? +// let stateage: Int? +// let joinState: String? +// let rebalanceAge, rebalanceCnt: Int? +// let rebalanceReason: String? +// let assignmentSize: Int? 
+// +// enum CodingKeys: String, CodingKey { +// case state, stateage +// case joinState = "join_state" +// case rebalanceAge = "rebalance_age" +// case rebalanceCnt = "rebalance_cnt" +// case rebalanceReason = "rebalance_reason" +// case assignmentSize = "assignment_size" +// } +// } // MARK: - Topic diff --git a/Sources/Kafka/Utilities/KafkaStatistics.swift b/Sources/Kafka/Utilities/KafkaStatistics.swift deleted file mode 100644 index a617f340..00000000 --- a/Sources/Kafka/Utilities/KafkaStatistics.swift +++ /dev/null @@ -1,65 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the swift-kafka-gsoc open source project -// -// Copyright (c) 2023 Apple Inc. and the swift-kafka-gsoc project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of swift-kafka-gsoc project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -import ExtrasJSON -import Metrics - -struct KafkaStatistics: Sendable, Hashable { - let jsonString: String - - private func record(_ value: T?, to: Gauge?) { - guard let value, - let to else { - return - } - to.record(value) - } - private func recordMircoseconds(_ value: T?, to: Timer?) { - guard let value, - let to else { - return - } - to.recordMicroseconds(value) - } - - func fill(_ options: KafkaConfiguration.KafkaMetrics) { - do { - let json = try XJSONDecoder().decode(KafkaStatisticsJson.self, from: self.jsonString.utf8) - - record(json.timestamp, to: options.timestamp) - recordMircoseconds(json.time, to: options.time) - recordMircoseconds(json.age, to: options.age) - record(json.replyQueue, to: options.replyQueue) - record(json.messageCount, to: options.messageCount) - record(json.messageSize, to: options.messageSize) - record(json.messageMax, to: options.messageMax) - record(json.messageSizeMax, to: options.messageSizeMax) - record(json.totalRequestsSent, to: options.totalRequestsSent) - record(json.totalBytesSent, to: options.totalBytesSent) - - record(json.totalResponsesRecieved, to: options.totalResponsesRecieved) - record(json.totalBytesReceived, to: options.totalBytesReceived) - record(json.totalMessagesSent, to: options.totalMessagesSent) - record(json.totalBytesSent, to: options.totalBytesSent) - - record(json.totalMessagesBytesSent, to: options.totalMessagesBytesSent) - record(json.totalMessagesRecieved, to: options.totalMessagesRecieved) - record(json.totalMessagesBytesRecieved, to: options.totalMessagesBytesRecieved) - record(json.metadataCacheCount, to: options.metadataCacheCount) - - } catch { - fatalError("Statistics json decode error \(error)") - } - } -} diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index 6cca712d..c9f72dac 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -12,7 +12,6 @@ // //===----------------------------------------------------------------------===// -import Atomics import struct Foundation.UUID @testable import Kafka import Logging @@ -97,15 +96,12 @@ final class KafkaConsumerTests: XCTestCase { let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) + config.metrics = .enabled(updateInterval: .milliseconds(10), metrics: metricsOptions) - let 
(consumer, events) = try KafkaConsumer.makeConsumerWithEvents(configuration: config, logger: .kafkaTest) + let consumer = try KafkaConsumer(configuration: config, logger: .kafkaTest) - let serviceGroup = ServiceGroup( - services: [consumer], - configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), - logger: .kafkaTest - ) + let svcGroupConfig = ServiceGroupConfiguration(services: [consumer], logger: .kafkaTest) + let serviceGroup = ServiceGroup(configuration: svcGroupConfig) try await withThrowingTaskGroup(of: Void.self) { group in // Run Task @@ -119,7 +115,7 @@ final class KafkaConsumerTests: XCTestCase { break } } - + try await group.next() // Shutdown the serviceGroup diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index 197ca65d..81984ae5 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -347,18 +347,15 @@ final class KafkaProducerTests: XCTestCase { let handler = MockTimerHandler() metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - self.config.metrics = .enabled(updateInterval: .milliseconds(10), options: metricsOptions) + self.config.metrics = .enabled(updateInterval: .milliseconds(10), metrics: metricsOptions) - let (producer, events) = try KafkaProducer.makeProducerWithEvents( + let producer = try KafkaProducer( configuration: self.config, logger: .kafkaTest ) - let serviceGroup = ServiceGroup( - services: [producer], - configuration: ServiceGroupConfiguration(gracefulShutdownSignals: []), - logger: .kafkaTest - ) + let svcGroupConfig = ServiceGroupConfiguration(services: [producer], logger: .kafkaTest) + let serviceGroup = ServiceGroup(configuration: svcGroupConfig) try await withThrowingTaskGroup(of: Void.self) { group in // Run Task @@ -372,7 +369,7 @@ final class KafkaProducerTests: XCTestCase { break } } - + try await group.next() // Shutdown the serviceGroup From a83c97066570718d3916533b501fbb5933cac0c4 Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Tue, 10 Oct 2023 13:35:40 +0300 Subject: [PATCH 15/20] address review comments --- Package.swift | 7 +- .../KafkaConfiguration+Metrics.swift | 145 +++++--------- .../KafkaConsumerConfiguration.swift | 23 +-- .../KafkaProducerConfiguration.swift | 23 +-- Sources/Kafka/KafkaConsumer.swift | 4 +- Sources/Kafka/KafkaProducer.swift | 4 +- Sources/Kafka/RDKafka/RDKafkaClient.swift | 8 +- Sources/Kafka/RDKafka/RDKafkaStatistics.swift | 188 +++--------------- Tests/KafkaTests/KafkaConsumerTests.swift | 33 +-- Tests/KafkaTests/KafkaProducerTests.swift | 31 +-- Tests/KafkaTests/Utilities.swift | 15 -- 11 files changed, 136 insertions(+), 345 deletions(-) diff --git a/Package.swift b/Package.swift index 7efa78db..8570f6a5 100644 --- a/Package.swift +++ b/Package.swift @@ -52,7 +52,6 @@ let package = Package( // The zstd Swift package produces warnings that we cannot resolve: // https://github.com/facebook/zstd/issues/3328 .package(url: "https://github.com/facebook/zstd.git", from: "1.5.0"), - .package(url: "https://github.com/swift-extras/swift-extras-json.git", .upToNextMajor(from: "0.6.0")), ], targets: [ .target( @@ -83,7 +82,6 @@ let package = Package( .product(name: "ServiceLifecycle", package: "swift-service-lifecycle"), .product(name: "Logging", package: "swift-log"), .product(name: "Metrics", package: "swift-metrics"), - .product(name: "ExtrasJSON", package: "swift-extras-json"), ] ), .target( @@ -102,7 +100,10 @@ let package = 
Package( ), .testTarget( name: "KafkaTests", - dependencies: ["Kafka"] + dependencies: [ + "Kafka", + .product(name: "MetricsTestKit", package: "swift-metrics") + ] ), .testTarget( name: "IntegrationTests", diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index 94d6d4a4..f04a3590 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -17,64 +17,53 @@ import Metrics extension KafkaConfiguration { // MARK: - Metrics - /// Use to configure metrics. - public struct KafkaMetrics: Sendable { - internal var someMetricsSet: Bool { - self.timestamp != nil || - self.time != nil || - self.age != nil || - self.replyQueue != nil || - self.messageCount != nil || - self.messageSize != nil || - self.messageMax != nil || - self.messageSizeMax != nil || - self.totalRequestsSent != nil || - self.totalBytesSent != nil || - self.totalResponsesRecieved != nil || - self.totalBytesReceived != nil || - self.totalMessagesSent != nil || - self.totalMessagesBytesSent != nil || - self.totalBytesReceived != nil || - self.metadataCacheCount != nil + /// Configuration for the metrics emitted by `SwiftKafka`. + public struct Metrics: Sendable { + internal var enabled: Bool { + self.updateInterval != nil && + (self.queuedOperation != nil || + self.queuedProducerMessages != nil || + self.queuedProducerMessagesSize != nil || + self.totalKafkaBrokerRequests != nil || + self.totalKafkaBrokerBytesSent != nil || + self.totalKafkaBrokerResponses != nil || + self.totalKafkaBrokerResponsesSize != nil || + self.totalKafkaBrokerMessagesSent != nil || + self.totalKafkaBrokerMessagesBytesSent != nil || + self.totalKafkaBrokerMessagesBytesRecieved != nil || + self.topicsInMetadataCache != nil) } + + /// Update interval for statistics. + public var updateInterval: Duration? - /// librdkafka's internal monotonic clock (microseconds) - public var timestamp: Gauge? - /// Wall clock time in seconds since the epoch - public var time: Timer? - /// Time since this client instance was created - public var age: Timer? - /// Number of ops (callbacks, events, etc) waiting in queue for application to serve - public var replyQueue: Gauge? - /// Current number of messages in producer queues - public var messageCount: Gauge? - /// Current total size of messages in producer queues - public var messageSize: Gauge? - /// Threshold: maximum number of messages allowed allowed on the producer queues - public var messageMax: Gauge? - /// Threshold: maximum total size of messages allowed on the producer queues - public var messageSizeMax: Gauge? + /// Number of operations (callbacks, events, etc) waiting in the queue. + public var queuedOperation: Gauge? + /// Current number of queued producer messages. + public var queuedProducerMessages: Gauge? + /// Current total size in bytes of queued producer messages. + public var queuedProducerMessagesSize: Gauge? - /// Total number of requests sent to Kafka brokers - public var totalRequestsSent: Gauge? - /// Total number of bytes transmitted to Kafka brokers - public var totalBytesSent: Gauge? - /// Total number of responses received from Kafka brokers - public var totalResponsesRecieved: Gauge? - /// Total number of bytes received from Kafka brokers - public var totalBytesReceived: Gauge? + /// Total number of requests sent to Kafka brokers. + public var totalKafkaBrokerRequests: Gauge? 
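
With this revision the `.disabled`/`.enabled` enum is gone: metrics count as enabled once `updateInterval` is set and at least one gauge is attached, as the `enabled` computed property above spells out. Configuration collapses to plain property assignments; the label and group id below are placeholders:

    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: "stats-demo", topics: ["demo-topic"]),
        bootstrapBrokerAddresses: []
    )
    config.metrics.updateInterval = .milliseconds(500)
    config.metrics.totalKafkaBrokerRequests = Gauge(label: "kafka.broker.tx")
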
+ /// Total number of bytes transmitted to Kafka brokers. + public var totalKafkaBrokerBytesSent: Gauge? + /// Total number of responses received from Kafka brokers. + public var totalKafkaBrokerResponses: Gauge? + /// Total number of bytes received from Kafka brokers. + public var totalKafkaBrokerResponsesSize: Gauge? - /// Total number of messages transmitted (produced) to Kafka brokers - public var totalMessagesSent: Gauge? - /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers - public var totalMessagesBytesSent: Gauge? + /// Total number of messages transmitted (produced) to Kafka brokers. + public var totalKafkaBrokerMessagesSent: Gauge? + /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers. + public var totalKafkaBrokerMessagesBytesSent: Gauge? /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers. - public var totalMessagesRecieved: Gauge? - /// Total number of message bytes (including framing) received from Kafka brokers - public var totalMessagesBytesRecieved: Gauge? + public var totalKafkaBrokerMessagesRecieved: Gauge? + /// Total number of message bytes (including framing) received from Kafka brokers. + public var totalKafkaBrokerMessagesBytesRecieved: Gauge? /// Number of topics in the metadata cache. - public var metadataCacheCount: Gauge? + public var topicsInMetadataCache: Gauge? private static func record(_ value: T?, to: Gauge?) { guard let value, @@ -84,50 +73,22 @@ extension KafkaConfiguration { to.record(value) } - private static func recordMircoseconds(_ value: T?, to: Timer?) { - guard let value, - let to else { - return - } - to.recordMicroseconds(value) - } - internal func update(with rdKafkaStatistics: RDKafkaStatistics) { - Self.record(rdKafkaStatistics.timestamp, to: self.timestamp) - Self.recordMircoseconds(rdKafkaStatistics.time, to: self.time) - Self.recordMircoseconds(rdKafkaStatistics.age, to: self.age) - Self.record(rdKafkaStatistics.replyQueue, to: self.replyQueue) - Self.record(rdKafkaStatistics.messageCount, to: self.messageCount) - Self.record(rdKafkaStatistics.messageSize, to: self.messageSize) - Self.record(rdKafkaStatistics.messageMax, to: self.messageMax) - Self.record(rdKafkaStatistics.messageSizeMax, to: self.messageSizeMax) - Self.record(rdKafkaStatistics.totalRequestsSent, to: self.totalRequestsSent) - Self.record(rdKafkaStatistics.totalBytesSent, to: self.totalBytesSent) + Self.record(rdKafkaStatistics.queuedOperation, to: self.queuedOperation) + Self.record(rdKafkaStatistics.queuedProducerMessages, to: self.queuedProducerMessages) + Self.record(rdKafkaStatistics.queuedProducerMessagesSize, to: self.queuedProducerMessagesSize) - Self.record(rdKafkaStatistics.totalResponsesRecieved, to: self.totalResponsesRecieved) - Self.record(rdKafkaStatistics.totalBytesReceived, to: self.totalBytesReceived) - Self.record(rdKafkaStatistics.totalMessagesSent, to: self.totalMessagesSent) - Self.record(rdKafkaStatistics.totalBytesSent, to: self.totalBytesSent) + Self.record(rdKafkaStatistics.totalKafkaBrokerRequests, to: self.totalKafkaBrokerRequests) + Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) // TODO: finish with KafkaBroker... 
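
The recording helper above, shown standalone for reference; the free-function name `recordIfPresent` is illustrative, but the guard-both-sides pattern is the same:

    import Metrics

    // Record a sampled value only when both the sample and its gauge exist.
    func recordIfPresent<T: BinaryInteger>(_ value: T?, to gauge: Gauge?) {
        guard let value, let gauge else { return }
        gauge.record(value)
    }

    let txGauge = Gauge(label: "kafka.broker.tx")
    recordIfPresent(42, to: txGauge)        // records 42
    recordIfPresent(Int?.none, to: txGauge) // silently skipped
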
+ Self.record(rdKafkaStatistics.totalKafkaBrokerResponses, to: self.totalKafkaBrokerResponses) + Self.record(rdKafkaStatistics.totalKafkaBrokerResponsesSize, to: self.totalKafkaBrokerResponsesSize) - Self.record(rdKafkaStatistics.totalMessagesBytesSent, to: self.totalMessagesBytesSent) - Self.record(rdKafkaStatistics.totalMessagesRecieved, to: self.totalMessagesRecieved) - Self.record(rdKafkaStatistics.totalMessagesBytesRecieved, to: self.totalMessagesBytesRecieved) - Self.record(rdKafkaStatistics.metadataCacheCount, to: self.metadataCacheCount) - } - } - - public enum Metrics: Sendable { - case disabled - case enabled(updateInterval: Duration, metrics: KafkaMetrics) - - internal func update(with rdKafkaStatistics: RDKafkaStatistics) { - switch self { - case .enabled(_, let metrics): - assert(metrics.someMetricsSet, "Unexpected statistics received when no metrics configured") - metrics.update(with: rdKafkaStatistics) - case .disabled: - assertionFailure("Unexpected statistics received when metrics disabled") - } + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesSent, to: self.totalKafkaBrokerMessagesSent) + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesSent, to: self.totalKafkaBrokerMessagesBytesSent) + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesRecieved, to: self.totalKafkaBrokerMessagesRecieved) + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesRecieved, to: self.totalKafkaBrokerMessagesBytesRecieved) + + Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) } } } diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index b4ebd9cc..8b11a3ec 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -208,20 +208,7 @@ public struct KafkaConsumerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disabled { - didSet { - if case .enabled(let updateInterval, let metrics) = metrics { - precondition( - updateInterval.canBeRepresentedAsMilliseconds, - "Lowest granularity is milliseconds" - ) - precondition( - metrics.someMetricsSet, - "No metrics set but enabled" - ) - } - } - } + public var metrics: KafkaConfiguration.Metrics = .init() /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). 
/// Default: `.plaintext` @@ -287,11 +274,9 @@ extension KafkaConsumerConfiguration { resultDict["reconnect.backoff.ms"] = String(reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(reconnect.maximumBackoff.inMilliseconds) - switch self.metrics { - case .disabled: - resultDict["statistics.interval.ms"] = "0" // Disables metrics - case .enabled(let interval, _): - resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) + if self.metrics.enabled, + let updateInterval = self.metrics.updateInterval { + resultDict["statistics.interval.ms"] = String(updateInterval.inMilliseconds) } // Merge with SecurityProtocol configuration dictionary diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index 023ba1cd..3331f167 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -162,20 +162,7 @@ public struct KafkaProducerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .disabled { - didSet { - if case .enabled(let updateInterval, let metrics) = metrics { - precondition( - updateInterval.canBeRepresentedAsMilliseconds, - "Lowest granularity is milliseconds" - ) - precondition( - metrics.someMetricsSet, - "No metrics set but enabled" - ) - } - } - } + public var metrics: KafkaConfiguration.Metrics = .init() /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` @@ -228,11 +215,9 @@ extension KafkaProducerConfiguration { resultDict["reconnect.backoff.ms"] = String(self.reconnect.backoff.rawValue) resultDict["reconnect.backoff.max.ms"] = String(self.reconnect.maximumBackoff.inMilliseconds) - switch self.metrics { - case .disabled: - resultDict["statistics.interval.ms"] = "0" // Disables metrics - case .enabled(let interval, _): - resultDict["statistics.interval.ms"] = String(interval.inMilliseconds) + if self.metrics.enabled, + let updateInterval = self.metrics.updateInterval { + resultDict["statistics.interval.ms"] = String(updateInterval.inMilliseconds) } // Merge with SecurityProtocol configuration dictionary diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index 5b2a1749..4715cf34 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -211,7 +211,7 @@ public final class KafkaConsumer: Sendable, Service { if configuration.isAutoCommitEnabled == false { subscribedEvents.append(.offsetCommit) } - if case .enabled = configuration.metrics { + if configuration.metrics.enabled { subscribedEvents.append(.statistics) } @@ -254,7 +254,7 @@ public final class KafkaConsumer: Sendable, Service { if configuration.isAutoCommitEnabled == false { subscribedEvents.append(.offsetCommit) } - if case .enabled = configuration.metrics { + if configuration.metrics.enabled { subscribedEvents.append(.statistics) } diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index 543cdb9d..d2c9a4ae 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -118,7 +118,7 @@ public final class KafkaProducer: Service, Sendable { var subscribedEvents: [RDKafkaEvent] = [.log] // No .deliveryReport here! 
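
Since statistics no longer flow through the event stream at all, tests and applications that only want metrics can use the plain initializer instead of `makeProducerWithEvents`. A sketch; the gauge label is a placeholder and `logger` is assumed in scope:

    var config = KafkaProducerConfiguration(bootstrapBrokerAddresses: [])
    config.metrics.updateInterval = .milliseconds(250)
    config.metrics.queuedProducerMessages = Gauge(label: "kafka.producer.msg_cnt")

    let producer = try KafkaProducer(configuration: config, logger: logger)
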
- if case .enabled = configuration.metrics { + if configuration.metrics.enabled { subscribedEvents.append(.statistics) } @@ -164,7 +164,7 @@ public final class KafkaProducer: Service, Sendable { var subscribedEvents: [RDKafkaEvent] = [.log, .deliveryReport] // Listen to statistics events when statistics enabled - if case .enabled = configuration.metrics { + if configuration.metrics.enabled { subscribedEvents.append(.statistics) } diff --git a/Sources/Kafka/RDKafka/RDKafkaClient.swift b/Sources/Kafka/RDKafka/RDKafkaClient.swift index 2bf65d23..0ae19ada 100644 --- a/Sources/Kafka/RDKafka/RDKafkaClient.swift +++ b/Sources/Kafka/RDKafka/RDKafkaClient.swift @@ -14,8 +14,8 @@ import Crdkafka import Dispatch -import ExtrasJSON import Logging +import class Foundation.JSONDecoder /// Base class for ``KafkaProducer`` and ``KafkaConsumer``, /// which is used to handle the connection to the Kafka ecosystem. @@ -404,8 +404,10 @@ final class RDKafkaClient: Sendable { private func handleStatistics(_ event: OpaquePointer?) -> KafkaEvent? { let jsonStr = String(cString: rd_kafka_event_stats(event)) do { - let json = try XJSONDecoder().decode(RDKafkaStatistics.self, from: jsonStr.utf8) - return .statistics(json) + if let jsonData = jsonStr.data(using: .utf8) { + let json = try JSONDecoder().decode(RDKafkaStatistics.self, from: jsonData) + return .statistics(json) + } } catch { assertionFailure("Error occurred when decoding JSON statistics: \(error) when decoding \(jsonStr)") } diff --git a/Sources/Kafka/RDKafka/RDKafkaStatistics.swift b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift index b0a2c79f..9bbd6445 100644 --- a/Sources/Kafka/RDKafka/RDKafkaStatistics.swift +++ b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift @@ -1,12 +1,12 @@ //===----------------------------------------------------------------------===// // -// This source file is part of the swift-kafka-gsoc open source project +// This source file is part of the swift-kafka-client open source project // -// Copyright (c) 2023 Apple Inc. and the swift-kafka-gsoc project authors +// Copyright (c) 2023 Apple Inc. and the swift-kafka-client project authors // Licensed under Apache License v2.0 // // See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of swift-kafka-gsoc project authors +// See CONTRIBUTORS.txt for the list of swift-kafka-client project authors // // SPDX-License-Identifier: Apache-2.0 // @@ -20,165 +20,31 @@ // MARK: - Statistics struct RDKafkaStatistics: Hashable, Codable { - let name, clientID, type: String? - let timestamp, time, age, replyQueue: Int? - let messageCount, messageSize, messageMax, messageSizeMax: Int? - let simpleCnt, metadataCacheCount: Int? -// let brokers: [String: Broker]? -// let topics: [String: Topic]? -// let cgrp: Cgrp? - let totalRequestsSent, totalBytesSent, totalResponsesRecieved, totalBytesReceived: Int? - let totalMessagesSent, totalMessagesBytesSent, totalMessagesRecieved, totalMessagesBytesRecieved: Int? + let queuedOperation: Int? + let queuedProducerMessages: Int? + let queuedProducerMessagesSize: Int? + let topicsInMetadataCache: Int? + let totalKafkaBrokerRequests: Int? + let totalKafkaBrokerBytesSent: Int? + let totalKafkaBrokerResponses: Int? + let totalKafkaBrokerResponsesSize: Int? + let totalKafkaBrokerMessagesSent: Int? + let totalKafkaBrokerMessagesBytesSent: Int? + let totalKafkaBrokerMessagesRecieved: Int? + let totalKafkaBrokerMessagesBytesRecieved: Int? 
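Since every stored property of `RDKafkaStatistics` is optional, the switch above from `XJSONDecoder` to Foundation's `JSONDecoder` tolerates partial statistics payloads. A quick decoding sanity check, assuming the field names librdkafka emits (the sample JSON is invented for illustration; the `CodingKeys` mapping continues just below):

    import Foundation

    let sample = #"{"replyq": 0, "tx": 42, "tx_bytes": 8192, "metadata_cache_cnt": 1}"#
    let stats = try! JSONDecoder().decode(RDKafkaStatistics.self, from: Data(sample.utf8))
    // Keys missing from the payload decode as nil rather than throwing.
    precondition(stats.totalKafkaBrokerRequests == 42 && stats.queuedProducerMessages == nil)
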
enum CodingKeys: String, CodingKey { - case name // unused - case clientID = "client_id" // unused - case type // unused - case timestamp = "ts" - case time, age - case replyQueue = "replyq" - case messageCount = "msg_cnt" - case messageSize = "msg_size" - case messageMax = "msg_max" - case messageSizeMax = "msg_size_max" - case simpleCnt = "simple_cnt" // unused - case metadataCacheCount = "metadata_cache_cnt" -// case brokers, topics -// case cgrp - case totalRequestsSent = "tx" - case totalBytesSent = "tx_bytes" - case totalResponsesRecieved = "rx" - case totalBytesReceived = "rx_bytes" - case totalMessagesSent = "txmsgs" - case totalMessagesBytesSent = "txmsg_bytes" - case totalMessagesRecieved = "rxmsgs" - case totalMessagesBytesRecieved = "rxmsg_bytes" + case queuedOperation = "replyq" + case queuedProducerMessages = "msg_cnt" + case queuedProducerMessagesSize = "msg_size" + case topicsInMetadataCache = "metadata_cache_cnt" + case totalKafkaBrokerRequests = "tx" + case totalKafkaBrokerBytesSent = "tx_bytes" + case totalKafkaBrokerResponses = "rx" + case totalKafkaBrokerResponsesSize = "rx_bytes" + case totalKafkaBrokerMessagesSent = "txmsgs" + case totalKafkaBrokerMessagesBytesSent = "txmsg_bytes" + case totalKafkaBrokerMessagesRecieved = "rxmsgs" + case totalKafkaBrokerMessagesBytesRecieved = "rxmsg_bytes" } } - -// FIXME: for future use - -// MARK: - Broker - -// struct Broker: Hashable, Codable { -// let name: String? -// let nodeid: Int? -// let nodename, source, state: String? -// let stateage, outbufCnt, outbufMsgCnt, waitrespCnt: Int? -// let waitrespMsgCnt, tx, txbytes, txerrs: Int? -// let txretries, txidle, reqTimeouts, rx: Int? -// let rxbytes, rxerrs, rxcorriderrs, rxpartial: Int? -// let rxidle, zbufGrow, bufGrow, wakeups: Int? -// let connects, disconnects: Int? -// let intLatency, outbufLatency, rtt, throttle: [String: Int]? -// let req: [String: Int]? -// let toppars: [String: Toppar]? -// -// enum CodingKeys: String, CodingKey { -// case name, nodeid, nodename, source, state, stateage -// case outbufCnt = "outbuf_cnt" -// case outbufMsgCnt = "outbuf_msg_cnt" -// case waitrespCnt = "waitresp_cnt" -// case waitrespMsgCnt = "waitresp_msg_cnt" -// case tx, txbytes, txerrs, txretries, txidle -// case reqTimeouts = "req_timeouts" -// case rx, rxbytes, rxerrs, rxcorriderrs, rxpartial, rxidle -// case zbufGrow = "zbuf_grow" -// case bufGrow = "buf_grow" -// case wakeups, connects, disconnects -// case intLatency = "int_latency" -// case outbufLatency = "outbuf_latency" -// case rtt, throttle, req, toppars -// } -// } - -// MARK: - Toppars - -// struct Toppar: Hashable, Codable { -// let topic: String? -// let partition: Int? -// -// enum CodingKeys: String, CodingKey { -// case topic, partition -// } -// } - -// MARK: - Cgrp - -// struct Cgrp: Hashable, Codable { -// let state: String? -// let stateage: Int? -// let joinState: String? -// let rebalanceAge, rebalanceCnt: Int? -// let rebalanceReason: String? -// let assignmentSize: Int? -// -// enum CodingKeys: String, CodingKey { -// case state, stateage -// case joinState = "join_state" -// case rebalanceAge = "rebalance_age" -// case rebalanceCnt = "rebalance_cnt" -// case rebalanceReason = "rebalance_reason" -// case assignmentSize = "assignment_size" -// } -// } - -// MARK: - Topic - -// struct Topic: Hashable, Codable { -// let topic: String? -// let age, metadataAge: Int? -// let batchsize, batchcnt: [String: Int]? -// let partitions: [String: Partition]? 
-// -// enum CodingKeys: String, CodingKey { -// case topic, age -// case metadataAge = "metadata_age" -// case batchsize, batchcnt, partitions -// } -// } - -// MARK: - Partition - -// struct Partition: Hashable, Codable { -// let partition, broker, leader: Int? -// let desired, unknown: Bool? -// let msgqCnt, msgqBytes, xmitMsgqCnt, xmitMsgqBytes: Int? -// let fetchqCnt, fetchqSize: Int? -// let fetchState: String? -// let queryOffset, nextOffset, appOffset, storedOffset: Int? -// let commitedOffset, committedOffset, eofOffset, loOffset: Int? -// let hiOffset, lsOffset, consumerLag, consumerLagStored: Int? -// let txmsgs, txbytes, rxmsgs, rxbytes: Int? -// let msgs, rxVerDrops, msgsInflight, nextACKSeq: Int? -// let nextErrSeq, ackedMsgid: Int? -// -// enum CodingKeys: String, CodingKey { -// case partition, broker, leader, desired, unknown -// case msgqCnt = "msgq_cnt" -// case msgqBytes = "msgq_bytes" -// case xmitMsgqCnt = "xmit_msgq_cnt" -// case xmitMsgqBytes = "xmit_msgq_bytes" -// case fetchqCnt = "fetchq_cnt" -// case fetchqSize = "fetchq_size" -// case fetchState = "fetch_state" -// case queryOffset = "query_offset" -// case nextOffset = "next_offset" -// case appOffset = "app_offset" -// case storedOffset = "stored_offset" -// case commitedOffset = "commited_offset" -// case committedOffset = "committed_offset" -// case eofOffset = "eof_offset" -// case loOffset = "lo_offset" -// case hiOffset = "hi_offset" -// case lsOffset = "ls_offset" -// case consumerLag = "consumer_lag" -// case consumerLagStored = "consumer_lag_stored" -// case txmsgs, txbytes, rxmsgs, rxbytes, msgs -// case rxVerDrops = "rx_ver_drops" -// case msgsInflight = "msgs_inflight" -// case nextACKSeq = "next_ack_seq" -// case nextErrSeq = "next_err_seq" -// case ackedMsgid = "acked_msgid" -// } -// } diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index c9f72dac..5a79bacd 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -12,10 +12,12 @@ // //===----------------------------------------------------------------------===// +@testable import CoreMetrics // for MetricsSystem.bootstrapInternal import struct Foundation.UUID @testable import Kafka import Logging import Metrics +import MetricsTestKit import ServiceLifecycle import XCTest @@ -34,6 +36,17 @@ import XCTest // zookeeper-server-start /usr/local/etc/kafka/zookeeper.properties & kafka-server-start /usr/local/etc/kafka/server.properties final class KafkaConsumerTests: XCTestCase { + var metrics: TestMetrics! 
= TestMetrics() + + override func setUp() async throws { + MetricsSystem.bootstrapInternal(self.metrics) + } + + override func tearDown() async throws { + self.metrics = nil + MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance) + } + func testConsumerLog() async throws { let recorder = LogEventRecorder() let mockLogger = Logger(label: "kafka.test.consumer.log") { @@ -91,12 +104,8 @@ final class KafkaConsumerTests: XCTestCase { bootstrapBrokerAddresses: [] ) - var metricsOptions = KafkaConfiguration.KafkaMetrics() - - let handler = MockTimerHandler() - metricsOptions.age = .init(label: "age", dimensions: [], handler: handler) - - config.metrics = .enabled(updateInterval: .milliseconds(10), metrics: metricsOptions) + config.metrics.updateInterval = .milliseconds(100) + config.metrics.queuedOperation = .init(label: "operations") let consumer = try KafkaConsumer(configuration: config, logger: .kafkaTest) @@ -109,17 +118,13 @@ final class KafkaConsumerTests: XCTestCase { try await serviceGroup.run() } - group.addTask { - for await value in handler.expectation { - XCTAssertNotEqual(value, 0) - break - } - } - - try await group.next() + try await Task.sleep(for: .seconds(1)) // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } + + let value = try metrics.expectGauge("operations").lastValue + XCTAssertNotNil(value) } } diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index 81984ae5..3cc7328a 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -12,8 +12,11 @@ // //===----------------------------------------------------------------------===// +@testable import CoreMetrics // for MetricsSystem.bootstrapInternal @testable import Kafka import Logging +import Metrics +import MetricsTestKit import NIOCore import ServiceLifecycle import XCTest @@ -38,6 +41,7 @@ final class KafkaProducerTests: XCTestCase { let kafkaPort: Int = .init(ProcessInfo.processInfo.environment["KAFKA_PORT"] ?? "9092")! var bootstrapBrokerAddress: KafkaConfiguration.BrokerAddress! var config: KafkaProducerConfiguration! + var metrics: TestMetrics! 
= TestMetrics()

     override func setUpWithError() throws {
         self.bootstrapBrokerAddress = KafkaConfiguration.BrokerAddress(
@@ -49,11 +53,16 @@ final class KafkaProducerTests: XCTestCase {
             bootstrapBrokerAddresses: [self.bootstrapBrokerAddress]
         )
         self.config.broker.addressFamily = .v4
+
+        MetricsSystem.bootstrapInternal(self.metrics)
     }

     override func tearDownWithError() throws {
         self.bootstrapBrokerAddress = nil
         self.config = nil
+
+        self.metrics = nil
+        MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance)
     }

     func testSend() async throws {
@@ -342,12 +351,8 @@ final class KafkaProducerTests: XCTestCase {
     }

     func testProducerStatistics() async throws {
-        var metricsOptions = KafkaConfiguration.KafkaMetrics()
-
-        let handler = MockTimerHandler()
-        metricsOptions.age = .init(label: "age", dimensions: [], handler: handler)
-
-        self.config.metrics = .enabled(updateInterval: .milliseconds(10), metrics: metricsOptions)
+        self.config.metrics.updateInterval = .milliseconds(100)
+        self.config.metrics.queuedOperation = .init(label: "operations")

         let producer = try KafkaProducer(
             configuration: self.config,
@@ -362,18 +367,14 @@ final class KafkaProducerTests: XCTestCase {
             group.addTask {
                 try await serviceGroup.run()
             }
-
-            group.addTask {
-                for await value in handler.expectation {
-                    XCTAssertNotEqual(value, 0)
-                    break
-                }
-            }
-
-            try await group.next()
+
+            try await Task.sleep(for: .seconds(1))

             // Shutdown the serviceGroup
             await serviceGroup.triggerGracefulShutdown()
         }
+
+        let value = try metrics.expectGauge("operations").lastValue
+        XCTAssertNotNil(value)
     }
 }
diff --git a/Tests/KafkaTests/Utilities.swift b/Tests/KafkaTests/Utilities.swift
index 85c1611c..eb2eafdb 100644
--- a/Tests/KafkaTests/Utilities.swift
+++ b/Tests/KafkaTests/Utilities.swift
@@ -99,18 +99,3 @@ internal struct MockLogHandler: LogHandler {
     }
 }
-
-class MockTimerHandler: TimerHandler {
-    let expectation: AsyncStream<Int64>
-    private let expectationContinuation: AsyncStream<Int64>.Continuation
-
-    init() {
-        var expectationContinuation: AsyncStream<Int64>.Continuation!
-        self.expectation = AsyncStream(bufferingPolicy: .bufferingNewest(1)) { expectationContinuation = $0 }
-        self.expectationContinuation = expectationContinuation
-    }
-
-    func recordNanoseconds(_ duration: Int64) {
-        _ = self.expectationContinuation.yield(duration)
-    }
-}

From 4ebdf9d2c4bacbcc6bec634d1ab8b121083360cf Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Wed, 11 Oct 2023 19:59:22 +0300
Subject: [PATCH 16/20] remove import Metrics

---
 Tests/KafkaTests/Utilities.swift | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Tests/KafkaTests/Utilities.swift b/Tests/KafkaTests/Utilities.swift
index eb2eafdb..f7fbfbf8 100644
--- a/Tests/KafkaTests/Utilities.swift
+++ b/Tests/KafkaTests/Utilities.swift
@@ -13,7 +13,6 @@
 //===----------------------------------------------------------------------===//

 import Logging
-import Metrics
 import NIOConcurrencyHelpers

 extension Logger {

From 05cf1b9f4bcf2d5a4c688faaf318289ab5e79c29 Mon Sep 17 00:00:00 2001
From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com>
Date: Fri, 20 Oct 2023 17:06:04 +0300
Subject: [PATCH 17/20] divide producer/consumer configuration

---
 .../KafkaConfiguration+Metrics.swift          | 72 ++++++++++++++++---
 .../KafkaConsumerConfiguration.swift          |  2 +-
 .../KafkaProducerConfiguration.swift          |  2 +-
 Sources/Kafka/RDKafka/RDKafkaStatistics.swift |  7 +-
 4 files changed, 65 insertions(+), 18 deletions(-)

diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
index f04a3590..343968e5 100644
--- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
+++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift
@@ -17,8 +17,67 @@ import Metrics
 extension KafkaConfiguration {
     // MARK: - Metrics

-    /// Configuration for the metrics emitted by `SwiftKafka`.
-    public struct Metrics: Sendable {
+    /// Configuration for the consumer metrics emitted by `SwiftKafka`.
+    public struct ConsumerMetrics: Sendable {
+        internal var enabled: Bool {
+            self.updateInterval != nil &&
+                (self.queuedOperation != nil ||
+                self.totalKafkaBrokerRequests != nil ||
+                self.totalKafkaBrokerBytesSent != nil ||
+                self.totalKafkaBrokerResponses != nil ||
+                self.totalKafkaBrokerResponsesSize != nil ||
+                self.totalKafkaBrokerMessagesBytesRecieved != nil ||
+                self.topicsInMetadataCache != nil)
+        }
+
+        /// Update interval for statistics.
+        public var updateInterval: Duration?
+
+        /// Number of operations (callbacks, events, etc) waiting in the queue.
+        public var queuedOperation: Gauge?
+
+        /// Total number of requests sent to Kafka brokers.
+        public var totalKafkaBrokerRequests: Gauge?
+        /// Total number of bytes transmitted to Kafka brokers.
+        public var totalKafkaBrokerBytesSent: Gauge?
+        /// Total number of responses received from Kafka brokers.
+        public var totalKafkaBrokerResponses: Gauge?
+        /// Total number of bytes received from Kafka brokers.
+        public var totalKafkaBrokerResponsesSize: Gauge?
+
+        /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.
+        public var totalKafkaBrokerMessagesRecieved: Gauge?
+        /// Total number of message bytes (including framing) received from Kafka brokers.
+        public var totalKafkaBrokerMessagesBytesRecieved: Gauge?
+
+        /// Number of topics in the metadata cache.
+        public var topicsInMetadataCache: Gauge?
+
+        private static func record<T: BinaryInteger>(_ value: T?, to: Gauge?)
{ + guard let value, + let to else { + return + } + to.record(value) + } + + internal func update(with rdKafkaStatistics: RDKafkaStatistics) { + Self.record(rdKafkaStatistics.queuedOperation, to: self.queuedOperation) + + Self.record(rdKafkaStatistics.totalKafkaBrokerRequests, to: self.totalKafkaBrokerRequests) + Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) + Self.record(rdKafkaStatistics.totalKafkaBrokerResponses, to: self.totalKafkaBrokerResponses) + Self.record(rdKafkaStatistics.totalKafkaBrokerResponsesSize, to: self.totalKafkaBrokerResponsesSize) + + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesRecieved, to: self.totalKafkaBrokerMessagesRecieved) + Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesRecieved, to: self.totalKafkaBrokerMessagesBytesRecieved) + + Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) + } + } + + /// Configuration for the producer metrics emitted by `SwiftKafka`. + public struct ProducerMetrics: Sendable { internal var enabled: Bool { self.updateInterval != nil && (self.queuedOperation != nil || @@ -30,7 +89,6 @@ extension KafkaConfiguration { self.totalKafkaBrokerResponsesSize != nil || self.totalKafkaBrokerMessagesSent != nil || self.totalKafkaBrokerMessagesBytesSent != nil || - self.totalKafkaBrokerMessagesBytesRecieved != nil || self.topicsInMetadataCache != nil) } @@ -57,10 +115,6 @@ extension KafkaConfiguration { public var totalKafkaBrokerMessagesSent: Gauge? /// Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers. public var totalKafkaBrokerMessagesBytesSent: Gauge? - /// Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers. - public var totalKafkaBrokerMessagesRecieved: Gauge? - /// Total number of message bytes (including framing) received from Kafka brokers. - public var totalKafkaBrokerMessagesBytesRecieved: Gauge? /// Number of topics in the metadata cache. public var topicsInMetadataCache: Gauge? @@ -79,14 +133,12 @@ extension KafkaConfiguration { Self.record(rdKafkaStatistics.queuedProducerMessagesSize, to: self.queuedProducerMessagesSize) Self.record(rdKafkaStatistics.totalKafkaBrokerRequests, to: self.totalKafkaBrokerRequests) - Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) // TODO: finish with KafkaBroker... 
+ Self.record(rdKafkaStatistics.totalKafkaBrokerBytesSent, to: self.totalKafkaBrokerBytesSent) Self.record(rdKafkaStatistics.totalKafkaBrokerResponses, to: self.totalKafkaBrokerResponses) Self.record(rdKafkaStatistics.totalKafkaBrokerResponsesSize, to: self.totalKafkaBrokerResponsesSize) Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesSent, to: self.totalKafkaBrokerMessagesSent) Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesSent, to: self.totalKafkaBrokerMessagesBytesSent) - Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesRecieved, to: self.totalKafkaBrokerMessagesRecieved) - Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesRecieved, to: self.totalKafkaBrokerMessagesBytesRecieved) Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) } diff --git a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift index 8b11a3ec..4b23eb50 100644 --- a/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaConsumerConfiguration.swift @@ -208,7 +208,7 @@ public struct KafkaConsumerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .init() + public var metrics: KafkaConfiguration.ConsumerMetrics = .init() /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` diff --git a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift index 3331f167..2a345b02 100644 --- a/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift +++ b/Sources/Kafka/Configuration/KafkaProducerConfiguration.swift @@ -162,7 +162,7 @@ public struct KafkaProducerConfiguration { public var reconnect: KafkaConfiguration.ReconnectOptions = .init() /// Options for librdkafka metrics updates - public var metrics: KafkaConfiguration.Metrics = .init() + public var metrics: KafkaConfiguration.ProducerMetrics = .init() /// Security protocol to use (plaintext, ssl, sasl_plaintext, sasl_ssl). /// Default: `.plaintext` diff --git a/Sources/Kafka/RDKafka/RDKafkaStatistics.swift b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift index 9bbd6445..96ceb4b2 100644 --- a/Sources/Kafka/RDKafka/RDKafkaStatistics.swift +++ b/Sources/Kafka/RDKafka/RDKafkaStatistics.swift @@ -12,12 +12,7 @@ // //===----------------------------------------------------------------------===// -// This file was generated from JSON Schema using quicktype, do not modify it directly. -// To parse the JSON, add this file to your project and do: -// -// let statistics = try? newJSONDecoder().decode(RDKafkaStatistics.self, from: jsonData) - -// MARK: - Statistics +// MARK: - RDKafkaStatistics struct RDKafkaStatistics: Hashable, Codable { let queuedOperation: Int? 
From 3febfcd776b30e33986f06f9b0b4fa4a4ae0b15a Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Fri, 20 Oct 2023 17:07:23 +0300 Subject: [PATCH 18/20] apply swiftformat --- .../KafkaConfiguration+Metrics.swift | 40 +++++++++---------- Sources/Kafka/RDKafka/RDKafkaClient.swift | 2 +- Tests/KafkaTests/KafkaConsumerTests.swift | 4 +- Tests/KafkaTests/KafkaProducerTests.swift | 8 ++-- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift index 343968e5..e9878c99 100644 --- a/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift +++ b/Sources/Kafka/Configuration/KafkaConfiguration+Metrics.swift @@ -22,14 +22,14 @@ extension KafkaConfiguration { internal var enabled: Bool { self.updateInterval != nil && (self.queuedOperation != nil || - self.totalKafkaBrokerRequests != nil || - self.totalKafkaBrokerBytesSent != nil || - self.totalKafkaBrokerResponses != nil || - self.totalKafkaBrokerResponsesSize != nil || - self.totalKafkaBrokerMessagesBytesRecieved != nil || - self.topicsInMetadataCache != nil) + self.totalKafkaBrokerRequests != nil || + self.totalKafkaBrokerBytesSent != nil || + self.totalKafkaBrokerResponses != nil || + self.totalKafkaBrokerResponsesSize != nil || + self.totalKafkaBrokerMessagesBytesRecieved != nil || + self.topicsInMetadataCache != nil) } - + /// Update interval for statistics. public var updateInterval: Duration? @@ -71,27 +71,27 @@ extension KafkaConfiguration { Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesRecieved, to: self.totalKafkaBrokerMessagesRecieved) Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesRecieved, to: self.totalKafkaBrokerMessagesBytesRecieved) - + Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) } } - + /// Configuration for the producer metrics emitted by `SwiftKafka`. public struct ProducerMetrics: Sendable { internal var enabled: Bool { self.updateInterval != nil && (self.queuedOperation != nil || - self.queuedProducerMessages != nil || - self.queuedProducerMessagesSize != nil || - self.totalKafkaBrokerRequests != nil || - self.totalKafkaBrokerBytesSent != nil || - self.totalKafkaBrokerResponses != nil || - self.totalKafkaBrokerResponsesSize != nil || - self.totalKafkaBrokerMessagesSent != nil || - self.totalKafkaBrokerMessagesBytesSent != nil || - self.topicsInMetadataCache != nil) + self.queuedProducerMessages != nil || + self.queuedProducerMessagesSize != nil || + self.totalKafkaBrokerRequests != nil || + self.totalKafkaBrokerBytesSent != nil || + self.totalKafkaBrokerResponses != nil || + self.totalKafkaBrokerResponsesSize != nil || + self.totalKafkaBrokerMessagesSent != nil || + self.totalKafkaBrokerMessagesBytesSent != nil || + self.topicsInMetadataCache != nil) } - + /// Update interval for statistics. public var updateInterval: Duration? 
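With the configuration split in place (patch 17, reformatted above), each client type only exposes the gauges librdkafka can populate for it: `ConsumerMetrics` drops the producer-queue and messages-sent gauges, and `ProducerMetrics` drops the messages-received pair. A consumer-side sketch under the same opt-in rules (the consumption-strategy arguments and label are illustrative assumptions, not taken from this patch):

    import Metrics
    import Kafka

    var config = KafkaConsumerConfiguration(
        consumptionStrategy: .group(id: "stats-demo", topics: ["demo-topic"]),
        bootstrapBrokerAddresses: [KafkaConfiguration.BrokerAddress(host: "localhost", port: 9092)]
    )
    config.metrics.updateInterval = .seconds(1)
    // The property name below keeps the library's own spelling ("Recieved").
    config.metrics.totalKafkaBrokerMessagesRecieved = Gauge(label: "kafka.consumer.messages.received")
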
@@ -139,7 +139,7 @@ extension KafkaConfiguration { Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesSent, to: self.totalKafkaBrokerMessagesSent) Self.record(rdKafkaStatistics.totalKafkaBrokerMessagesBytesSent, to: self.totalKafkaBrokerMessagesBytesSent) - + Self.record(rdKafkaStatistics.topicsInMetadataCache, to: self.topicsInMetadataCache) } } diff --git a/Sources/Kafka/RDKafka/RDKafkaClient.swift b/Sources/Kafka/RDKafka/RDKafkaClient.swift index 0ae19ada..1a811820 100644 --- a/Sources/Kafka/RDKafka/RDKafkaClient.swift +++ b/Sources/Kafka/RDKafka/RDKafkaClient.swift @@ -14,8 +14,8 @@ import Crdkafka import Dispatch -import Logging import class Foundation.JSONDecoder +import Logging /// Base class for ``KafkaProducer`` and ``KafkaConsumer``, /// which is used to handle the connection to the Kafka ecosystem. diff --git a/Tests/KafkaTests/KafkaConsumerTests.swift b/Tests/KafkaTests/KafkaConsumerTests.swift index 5a79bacd..fc3da31a 100644 --- a/Tests/KafkaTests/KafkaConsumerTests.swift +++ b/Tests/KafkaTests/KafkaConsumerTests.swift @@ -46,7 +46,7 @@ final class KafkaConsumerTests: XCTestCase { self.metrics = nil MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance) } - + func testConsumerLog() async throws { let recorder = LogEventRecorder() let mockLogger = Logger(label: "kafka.test.consumer.log") { @@ -123,7 +123,7 @@ final class KafkaConsumerTests: XCTestCase { // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - + let value = try metrics.expectGauge("operations").lastValue XCTAssertNotNil(value) } diff --git a/Tests/KafkaTests/KafkaProducerTests.swift b/Tests/KafkaTests/KafkaProducerTests.swift index 3cc7328a..de083b97 100644 --- a/Tests/KafkaTests/KafkaProducerTests.swift +++ b/Tests/KafkaTests/KafkaProducerTests.swift @@ -53,14 +53,14 @@ final class KafkaProducerTests: XCTestCase { bootstrapBrokerAddresses: [self.bootstrapBrokerAddress] ) self.config.broker.addressFamily = .v4 - + MetricsSystem.bootstrapInternal(self.metrics) } override func tearDownWithError() throws { self.bootstrapBrokerAddress = nil self.config = nil - + self.metrics = nil MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance) } @@ -367,13 +367,13 @@ final class KafkaProducerTests: XCTestCase { group.addTask { try await serviceGroup.run() } - + try await Task.sleep(for: .seconds(1)) // Shutdown the serviceGroup await serviceGroup.triggerGracefulShutdown() } - + let value = try metrics.expectGauge("operations").lastValue XCTAssertNotNil(value) } From af05f5b64832ee2c99db9093082125bbb55bd74d Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:09:17 +0200 Subject: [PATCH 19/20] fix code after conflicts --- Sources/Kafka/KafkaConsumer.swift | 4 +++- Sources/Kafka/KafkaProducer.swift | 2 -- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Sources/Kafka/KafkaConsumer.swift b/Sources/Kafka/KafkaConsumer.swift index c074f41b..78d7b858 100644 --- a/Sources/Kafka/KafkaConsumer.swift +++ b/Sources/Kafka/KafkaConsumer.swift @@ -382,11 +382,13 @@ public final class KafkaConsumer: Sendable, Service { case .pollForEvents(let client): // Event poll to serve any events queued inside of `librdkafka`. 
let events = client.eventPoll() - switch event { + for event in events { + switch event { case .statistics(let statistics): self.configuration.metrics.update(with: statistics) default: break + } } try await Task.sleep(for: self.configuration.pollInterval) case .terminatePollLoop: diff --git a/Sources/Kafka/KafkaProducer.swift b/Sources/Kafka/KafkaProducer.swift index bb59d316..81742f3b 100644 --- a/Sources/Kafka/KafkaProducer.swift +++ b/Sources/Kafka/KafkaProducer.swift @@ -229,8 +229,6 @@ public final class KafkaProducer: Service, Sendable { self.configuration.metrics.update(with: statistics) case .deliveryReport(let reports): _ = source?.yield(.deliveryReports(reports)) - case .consumerMessages: - fatalError("Unexpected event for producer \(event)") } } try await Task.sleep(for: self.configuration.pollInterval) From 8a3caf3859069887fdc4a1f14fb452d7148aad1a Mon Sep 17 00:00:00 2001 From: BlindSpot <127803250+blindspotbounty@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:18:30 +0200 Subject: [PATCH 20/20] fix formatting --- Package.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Package.swift b/Package.swift index eb27c56e..c13229ec 100644 --- a/Package.swift +++ b/Package.swift @@ -97,7 +97,7 @@ let package = Package( name: "KafkaTests", dependencies: [ "Kafka", - .product(name: "MetricsTestKit", package: "swift-metrics") + .product(name: "MetricsTestKit", package: "swift-metrics"), ] ), .testTarget(
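The testing pattern the series settles on deserves one consolidated illustration: bootstrap a `TestMetrics` backend, run the client briefly inside a `ServiceGroup`, then assert on the captured gauge. A condensed sketch mirroring the shipped tests (the "operations" label matches the gauge they register; this is not additional API):

    @testable import CoreMetrics // for MetricsSystem.bootstrapInternal
    import MetricsTestKit
    import XCTest

    let metrics = TestMetrics()
    MetricsSystem.bootstrapInternal(metrics)

    // ... set `config.metrics.updateInterval`, register a gauge labelled
    // "operations", and run the producer or consumer in a ServiceGroup for
    // about a second, as the tests above do ...

    let value = try metrics.expectGauge("operations").lastValue
    XCTAssertNotNil(value)

    // Restore the no-op backend afterwards, as the tearDown methods do.
    MetricsSystem.bootstrapInternal(NOOPMetricsHandler.instance)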