|
| 1 | +import json |
| 2 | +import matplotlib.pyplot as plt |
| 3 | +import sys |
| 4 | +import os |
| 5 | + |
def load_metrics(filename):
    """Load one OpenMessaging Benchmark result file into a plot-friendly dict.

    Parses the flat-array JSON format emitted by the OpenMessaging Benchmark
    tool, where each metric is a list with one entry per reporting interval.

    Args:
        filename: Path to the benchmark result JSON file.

    Returns:
        A dict with keys 'driver', 'time', 'throughput', 'p99_pub_latency'
        and 'p99_e2e_latency', or None if the file does not exist.
        Missing metric keys in the JSON default to empty lists.
    """
    if not os.path.exists(filename):
        # BUG FIX: the original message printed the literal "(unknown)"
        # instead of interpolating the actual filename.
        print(f"Error: File {filename} not found.")
        return None

    with open(filename, 'r') as f:
        data = json.load(f)

    # There is no explicit timestamp in the flat-array format, so we
    # synthesize a 1-based "interval" axis from the number of samples.
    count = len(data.get('publishRate', []))
    time_axis = list(range(1, count + 1))

    return {
        'driver': data.get('driver', 'Unknown'),
        'time': time_axis,
        'throughput': data.get('publishRate', []),
        'p99_pub_latency': data.get('publishLatency99pct', []),
        'p99_e2e_latency': data.get('endToEndLatency99pct', [])
    }
| 28 | + |
def plot_comparison(rabbit_file, kafka_file):
    """Render a two-panel RabbitMQ vs. Kafka comparison and save it as a PNG.

    Top panel: P99 publish latency per benchmark interval.
    Bottom panel: publish throughput per interval, with a reference line at
    the 1000 msg/s target rate.

    Args:
        rabbit_file: Path to the RabbitMQ benchmark result JSON.
        kafka_file: Path to the Kafka benchmark result JSON.

    Side effects:
        Writes 'benchmark_comparison_simple.png' to the current directory
        and prints a confirmation message. Returns silently if either input
        file cannot be loaded (load_metrics already printed the error).
    """
    rabbit_data = load_metrics(rabbit_file)
    kafka_data = load_metrics(kafka_file)

    if not rabbit_data or not kafka_data:
        return

    # Two stacked subplots: latency on top, throughput below.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))

    # Plot 1: P99 Publish Latency (Responsiveness)
    ax1.plot(rabbit_data['time'], rabbit_data['p99_pub_latency'],
             label='RabbitMQ', marker='o', linestyle='-', color='orange')
    ax1.plot(kafka_data['time'], kafka_data['p99_pub_latency'],
             label='Kafka', marker='x', linestyle='--', color='blue')

    ax1.set_title('Hypothesis 1: P99 Publish Latency (Lower is Better)')
    ax1.set_ylabel('Latency (ms)')
    ax1.set_xlabel('Benchmark Interval')
    ax1.grid(True)
    ax1.legend()

    # Plot 2: Throughput Stability
    ax2.plot(rabbit_data['time'], rabbit_data['throughput'],
             label='RabbitMQ', color='orange')
    ax2.plot(kafka_data['time'], kafka_data['throughput'],
             label='Kafka', color='blue')

    # Reference line so deviation from the configured target rate is obvious.
    ax2.axhline(y=1000, color='r', linestyle=':', label='Target Rate (1000 msg/s)')

    ax2.set_title('Throughput Stability (Target: 1000 msg/s)')
    ax2.set_ylabel('Messages / Second')
    ax2.set_xlabel('Benchmark Interval')
    ax2.grid(True)
    ax2.legend()

    plt.tight_layout()
    output_file = 'benchmark_comparison_simple.png'
    plt.savefig(output_file)
    # FIX: release the figure after saving; without this, repeated calls
    # (e.g. from a loop or notebook) accumulate open figures and leak memory.
    plt.close(fig)
    print(f"Successfully generated comparison graph: {output_file}")
| 70 | + |
| 71 | +if __name__ == "__main__": |
| 72 | + if len(sys.argv) < 3: |
| 73 | + print("Usage: python analyze_results_1000.py <rabbitmq_result.json> <kafka_result.json>") |
| 74 | + sys.exit(1) |
| 75 | + |
| 76 | + plot_comparison(sys.argv[1], sys.argv[2]) |
0 commit comments