 import uuid
 
 # Prometheus imports (for backward compatibility)
-from prometheus_client import Counter, Histogram, Gauge, start_http_server
+from prometheus_client import Counter, Histogram, Gauge
 
 # OpenTelemetry imports
 from opentelemetry import metrics as otel_metrics
-from opentelemetry import trace as otel_trace
 from opentelemetry.sdk.metrics import MeterProvider
-from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
-from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
 from opentelemetry.exporter.prometheus import PrometheusMetricReader
 from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
 # Note: RedisInstrumentor import removed to prevent automatic instrumentation
 
 from logger import get_logger
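
For orientation, here is a minimal sketch of how the retained metrics-only imports are typically wired together. This is not the PR's code; the service name, version, and collector endpoint below are placeholder values, and the real module presumably reads them from its own configuration.

```python
from opentelemetry import metrics as otel_metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.prometheus import PrometheusMetricReader

# Placeholder identity and endpoint; the actual values come from the module's config.
resource = Resource.create({"service.name": "redis-service", "service.version": "0.1.0"})

readers = [
    # Exposes metrics for Prometheus-style scraping (backward-compatibility path).
    PrometheusMetricReader(),
    # Pushes metrics to an OpenTelemetry Collector over OTLP/gRPC.
    PeriodicExportingMetricReader(
        OTLPMetricExporter(endpoint="localhost:4317", insecure=True)
    ),
]

otel_metrics.set_meter_provider(MeterProvider(resource=resource, metric_readers=readers))
meter = otel_metrics.get_meter("redis-service", "0.1.0")
```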
@@ -136,18 +132,7 @@ def _setup_opentelemetry(self):
             unit="1"
         )
 
-        # Setup tracing
-        if self.otel_endpoint:
-            trace_exporter = OTLPSpanExporter(
-                endpoint=self.otel_endpoint,
-                insecure=True
-            )
-            trace_provider = TracerProvider(resource=resource)
-            trace_provider.add_span_processor(
-                BatchSpanProcessor(trace_exporter)
-            )
-            otel_trace.set_tracer_provider(trace_provider)
-            self.tracer = otel_trace.get_tracer(self.service_name, self.service_version)
+
 
         # Note: Using manual instrumentation instead of automatic Redis instrumentation
         # to have full control over operation labeling and avoid "BATCH" aggregation
@@ -214,14 +199,7 @@ def _setup_prometheus_metrics(self):
             ['operation'] + base_labels
         )
 
-    def _start_prometheus_server(self):
-        """Start Prometheus metrics server."""
-        try:
-            start_http_server(self.prometheus_port)
-            self.logger.info(f"Prometheus metrics server started on port {self.prometheus_port}")
-        except Exception as e:
-            self.logger.error(f"Failed to start Prometheus server: {e}")
-            self.enable_prometheus = False
+
 
     def record_operation(self, operation: str, duration: float, success: bool, error_type: str = None):
         """Record metrics for a Redis operation."""
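
Given the note above about manual (rather than automatic) Redis instrumentation and the `record_operation` hook kept in this diff, call sites presumably wrap each command along the following lines. This is a hedged sketch: `timed_redis_call`, `metrics`, and `redis_client` are illustrative names, not code from this repository.

```python
import time

def timed_redis_call(metrics, operation: str, func, *args, **kwargs):
    """Manually instrument one Redis call with an explicit operation label.

    `metrics` is assumed to expose record_operation(operation, duration, success,
    error_type) as shown in this diff; everything else here is illustrative.
    """
    start = time.perf_counter()
    try:
        result = func(*args, **kwargs)
        metrics.record_operation(operation, time.perf_counter() - start, success=True)
        return result
    except Exception as exc:
        metrics.record_operation(
            operation,
            time.perf_counter() - start,
            success=False,
            error_type=type(exc).__name__,
        )
        raise

# Example usage: each command keeps its own operation label (GET, SET, ...)
# instead of being folded into a generic "BATCH" bucket by auto-instrumentation.
# value = timed_redis_call(metrics, "GET", redis_client.get, "some-key")
```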
@@ -318,50 +296,9 @@ def update_active_connections(self, count: int):
 
         # Active connections tracked via OpenTelemetry only
 
-    def update_calculated_metrics(self):
-        """Update calculated metrics like throughput, error rate, and average latency."""
-        if not self.enable_otel:
-            return
 
-        with self._lock:
-            # Calculate overall throughput and error rate
-            total_ops = sum(m.total_count for m in self._metrics.values())
-            total_errors = sum(m.error_count for m in self._metrics.values())
 
-            if total_ops > 0:
-                # Calculate current throughput (ops in last interval)
-                current_time = time.time()
-                if hasattr(self, '_last_metrics_update'):
-                    time_diff = current_time - self._last_metrics_update
-                    if time_diff > 0:
-                        ops_diff = total_ops - getattr(self, '_last_total_ops', 0)
-                        current_throughput = ops_diff / time_diff
-                        # Calculated metrics are now handled by the OpenTelemetry Collector
-                        # which exposes them via its Prometheus endpoint
-
-                # Error rate and average latency calculations
-                error_rate = (total_errors / total_ops) * 100
-                # These calculated metrics are available via the collector's Prometheus endpoint
-
-                # Store for next calculation
-                self._last_metrics_update = current_time
-                self._last_total_ops = total_ops
-
-    def create_span(self, operation_name: str, **attributes):
-        """Create an OpenTelemetry span for tracing Redis operations."""
-        if self.enable_otel and hasattr(self, 'tracer'):
-            return self.tracer.start_span(
-                name=f"redis.{operation_name}",
-                attributes={
-                    "db.system": "redis",
-                    "db.operation": operation_name,
-                    **attributes
-                }
-            )
-        else:
-            # Return a no-op context manager if OpenTelemetry is not enabled
-            from contextlib import nullcontext
-            return nullcontext()
+
 
     def get_operation_stats(self, operation: str) -> Dict:
         """Get statistics for a specific operation."""