diff --git a/docs/infrastructure/troubleshooting.md b/docs/infrastructure/troubleshooting.md
index f14d5a0b536..e74445c6994 100644
--- a/docs/infrastructure/troubleshooting.md
+++ b/docs/infrastructure/troubleshooting.md
@@ -8,27 +8,6 @@ For less urgent issues or general support, you can file a bug with [cloudOps](ht
 
 ## Monitoring & metrics
 
-- New Relic
-  - Overview:
-    [prototype](https://rpm.newrelic.com/accounts/677903/applications/7385291) |
-    [stage](https://rpm.newrelic.com/accounts/677903/applications/14179733) |
-    [prod](https://rpm.newrelic.com/accounts/677903/applications/14179757)
-  - Error analytics:
-    [prototype](https://rpm.newrelic.com/accounts/677903/applications/7385291/filterable_errors) |
-    [stage](https://rpm.newrelic.com/accounts/677903/applications/14179733/filterable_errors) |
-    [prod](https://rpm.newrelic.com/accounts/677903/applications/14179757/filterable_errors)
-  - Web transactions:
-    [prototype](https://rpm.newrelic.com/accounts/677903/applications/7385291/transactions?type=app) |
-    [stage](https://rpm.newrelic.com/accounts/677903/applications/14179733/transactions?type=app) |
-    [prod](https://rpm.newrelic.com/accounts/677903/applications/14179757/transactions?type=app)
-  - Non-web transactions (background tasks):
-    [prototype](https://rpm.newrelic.com/accounts/677903/applications/7385291/transactions?type=other&show_browser=false) |
-    [stage](https://rpm.newrelic.com/accounts/677903/applications/14179733/transactions?type=other&show_browser=false) |
-    [prod](https://rpm.newrelic.com/accounts/677903/applications/14179757/transactions?type=other&show_browser=false)
-  - Postgres/Redis client request stats:
-    [prototype](https://rpm.newrelic.com/accounts/677903/applications/7385291/datastores) |
-    [stage](https://rpm.newrelic.com/accounts/677903/applications/14179733/datastores) |
-    [prod](https://rpm.newrelic.com/accounts/677903/applications/14179757/datastores)
 - Google Cloud Console
   - [prod](https://console.cloud.google.com/kubernetes/list?project=moz-fx-treeherder-prod-c739)
   - [all other deployments](https://console.cloud.google.com/kubernetes/list?project=moz-fx-treeherde-nonprod-34ec)
diff --git a/treeherder/config/settings.py b/treeherder/config/settings.py
index bb20f05069b..8773a0c0103 100644
--- a/treeherder/config/settings.py
+++ b/treeherder/config/settings.py
@@ -26,7 +26,6 @@
 LOGGING_LEVEL = env("LOGGING_LEVEL", default="INFO")
 
 NEW_RELIC_INSIGHTS_API_KEY = env("NEW_RELIC_INSIGHTS_API_KEY", default=None)
-NEW_RELIC_INSIGHTS_API_URL = "https://insights-api.newrelic.com/v1/accounts/677903/query"
 
 # Make this unique, and don't share it with anybody.
 SECRET_KEY = env(
diff --git a/treeherder/etl/classification_loader.py b/treeherder/etl/classification_loader.py
index eb756dd8640..b294bebdece 100644
--- a/treeherder/etl/classification_loader.py
+++ b/treeherder/etl/classification_loader.py
@@ -2,7 +2,6 @@
 import re
 
 import environ
-import newrelic.agent
 
 from treeherder.model.models import (
     BugJobMap,
@@ -106,7 +105,6 @@ def get_push(self, task_route):
             raise
 
         try:
-            newrelic.agent.add_custom_attribute("project", project)
 
             repository = Repository.objects.get(name=project)
         except Repository.DoesNotExist:
@@ -114,7 +112,6 @@ def get_push(self, task_route):
             raise
 
         try:
-            newrelic.agent.add_custom_attribute("revision", revision)
 
             revision_field = "revision__startswith" if len(revision) < 40 else "revision"
             filter_kwargs = {"repository": repository, revision_field: revision}
diff --git a/treeherder/etl/job_loader.py b/treeherder/etl/job_loader.py
index 27e343b0d66..1ecc0883ecd 100644
--- a/treeherder/etl/job_loader.py
+++ b/treeherder/etl/job_loader.py
@@ -2,7 +2,6 @@
 import uuid
 
 import jsonschema
-import newrelic.agent
 import slugid
 from django.conf import settings
 
@@ -67,7 +66,6 @@ def process_job(self, pulse_job, root_url):
         try:
             with settings.STATSD_CLIENT.timer("process_job_transform"):
                 project = pulse_job["origin"]["project"]
-                newrelic.agent.add_custom_attribute("project", project)
                 repository = Repository.objects.get(name=project)
 
                 if repository.active_status != "active":
@@ -103,11 +101,6 @@ def validate_revision(self, repository, pulse_job):
         revision_field = "revision__startswith" if len(revision) < 40 else "revision"
         filter_kwargs = {"repository": repository, revision_field: revision}
 
-        if revision_field == "revision__startswith":
-            newrelic.agent.record_custom_event(
-                "short_revision_job_loader",
-                {"error": "Revision <40 chars", "revision": revision, "job": pulse_job},
-            )
 
         if not Push.objects.filter(**filter_kwargs).exists():
             (real_task_id, _) = task_and_retry_ids(pulse_job["taskId"])
diff --git a/treeherder/etl/jobs.py b/treeherder/etl/jobs.py
index 8cb24a5aaf8..bd064e3c2ef 100644
--- a/treeherder/etl/jobs.py
+++ b/treeherder/etl/jobs.py
@@ -5,7 +5,6 @@
 from datetime import datetime
 from hashlib import sha1
 
-import newrelic.agent
 from django.conf import settings
 from django.core.exceptions import ObjectDoesNotExist
 from django.db.utils import IntegrityError
@@ -500,7 +499,6 @@ def store_job_data(repository, original_data):
             # make more fields visible in new relic for the job
             # where we encountered the error
             datum.update(datum.get("job", {}))
-            newrelic.agent.notice_error(attributes=datum)
 
             # skip any jobs that hit errors in these stages.
             continue
diff --git a/treeherder/etl/push_loader.py b/treeherder/etl/push_loader.py
index 05e3a67a5ae..90ea9338cd7 100644
--- a/treeherder/etl/push_loader.py
+++ b/treeherder/etl/push_loader.py
@@ -1,7 +1,6 @@
 import logging
 
 import environ
-import newrelic.agent
 from django.core.exceptions import ObjectDoesNotExist
 
 from treeherder.etl.common import to_timestamp
@@ -20,15 +19,12 @@ class PushLoader:
     def process(self, message_body, exchange, root_url):
         transformer = self.get_transformer_class(exchange)(message_body)
         try:
-            newrelic.agent.add_custom_attribute("url", transformer.repo_url)
-            newrelic.agent.add_custom_attribute("branch", transformer.branch)
             repos = Repository.objects
             if transformer.branch:
                 repos = repos.filter(branch__regex=f"(^|,){transformer.branch}($|,)")
             else:
                 repos = repos.filter(branch=None)
             repo = repos.get(url=transformer.repo_url, active_status="active")
-            newrelic.agent.add_custom_attribute("repository", repo.name)
         except ObjectDoesNotExist:
             repo_info = transformer.get_info()
             repo_info.update(
@@ -37,7 +33,6 @@ def process(self, message_body, exchange, root_url):
                     "branch": transformer.branch,
                 }
             )
-            newrelic.agent.record_custom_event("skip_unknown_repository", repo_info)
             logger.warning(
                 "Skipping unsupported repo: %s %s", transformer.repo_url, transformer.branch
             )
@@ -257,7 +252,6 @@ def transform(self, repository):
         return self.fetch_push(url, repository)
 
     def fetch_push(self, url, repository, sha=None):
-        newrelic.agent.add_custom_attribute("sha", sha)
         logger.debug("fetching for %s %s", repository, url)
 
         # there will only ever be one, with this url
diff --git a/treeherder/etl/pushlog.py b/treeherder/etl/pushlog.py
index 83871677c41..b723505236a 100644
--- a/treeherder/etl/pushlog.py
+++ b/treeherder/etl/pushlog.py
@@ -1,7 +1,6 @@
 import logging
 import traceback
 
-import newrelic.agent
 import requests
 from django.core.cache import cache
 
@@ -123,7 +122,6 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
             try:
                 store_push(repository, self.transform_push(push))
             except Exception:
-                newrelic.agent.notice_error()
                 errors.append(
                     {
                         "project": repository,
diff --git a/treeherder/etl/tasks/pulse_tasks.py b/treeherder/etl/tasks/pulse_tasks.py
index c39637019cc..8c10a5c9037 100644
--- a/treeherder/etl/tasks/pulse_tasks.py
+++ b/treeherder/etl/tasks/pulse_tasks.py
@@ -4,7 +4,6 @@
 
 import asyncio
 
-import newrelic.agent
 from django.conf import settings
 
 from treeherder.etl.classification_loader import ClassificationLoader
@@ -25,8 +24,7 @@ def store_pulse_tasks(
     Fetches tasks from Taskcluster
     """
     loop = asyncio.get_event_loop()
-    newrelic.agent.add_custom_attribute("exchange", exchange)
-    newrelic.agent.add_custom_attribute("routing_key", routing_key)
+
     # handle_message expects messages in this format
     with settings.STATSD_CLIENT.timer("pulse_handle_message"):
         runs = loop.run_until_complete(
@@ -50,8 +48,7 @@ def store_pulse_pushes(
     """
     Fetches the pushes pending from pulse exchanges and loads them.
     """
-    newrelic.agent.add_custom_attribute("exchange", exchange)
-    newrelic.agent.add_custom_attribute("routing_key", routing_key)
+
     PushLoader().process(body, exchange, root_url)
 
 
@@ -66,7 +63,5 @@ def store_pulse_tasks_classification(
     By default, it should listen to the Community cluster as classifications are only
     running there for the moment
     """
-    newrelic.agent.add_custom_attribute("exchange", exchange)
-    newrelic.agent.add_custom_attribute("routing_key", routing_key)
 
     ClassificationLoader().process(pulse_job, root_url)
diff --git a/treeherder/etl/tasks/pushlog_tasks.py b/treeherder/etl/tasks/pushlog_tasks.py
index 20ff3f8cf71..0551a4f8dda 100644
--- a/treeherder/etl/tasks/pushlog_tasks.py
+++ b/treeherder/etl/tasks/pushlog_tasks.py
@@ -1,4 +1,3 @@
-import newrelic.agent
 from celery import shared_task
 
 from treeherder.etl.pushlog import HgPushlogProcess
@@ -19,6 +18,6 @@ def fetch_hg_push_log(repo_name, repo_url):
     """
     Run a HgPushlog etl process
     """
-    newrelic.agent.add_custom_attribute("repo_name", repo_name)
+
     process = HgPushlogProcess()
     process.run(repo_url + "/json-pushes/?full=1&version=2", repo_name)
diff --git a/treeherder/log_parser/artifactbuildercollection.py b/treeherder/log_parser/artifactbuildercollection.py
index a45fc259a2b..3f5a7c9f66d 100644
--- a/treeherder/log_parser/artifactbuildercollection.py
+++ b/treeherder/log_parser/artifactbuildercollection.py
@@ -1,7 +1,5 @@
 import logging
 
-import newrelic.agent
-
 from treeherder.utils.http import make_request
 
 from .artifactbuilders import LogViewerArtifactBuilder, PerformanceDataArtifactBuilder
@@ -86,10 +84,6 @@ def parse(self):
             download_size_in_bytes = int(response.headers.get("Content-Length", -1))
 
             # Temporary annotation of log size to help set thresholds in bug 1295997.
-            newrelic.agent.add_custom_attribute("unstructured_log_size", download_size_in_bytes)
-            newrelic.agent.add_custom_attribute(
-                "unstructured_log_encoding", response.headers.get("Content-Encoding", "None")
-            )
 
             if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
                 raise LogSizeError(f"Download size of {download_size_in_bytes} bytes exceeds limit")
diff --git a/treeherder/log_parser/failureline.py b/treeherder/log_parser/failureline.py
index 96beb2f8860..b1cacb125f4 100644
--- a/treeherder/log_parser/failureline.py
+++ b/treeherder/log_parser/failureline.py
@@ -3,7 +3,6 @@
 from collections import defaultdict
 from itertools import islice
 
-import newrelic.agent
 from django.conf import settings
 from django.db import transaction
 from django.db.utils import DataError, IntegrityError, OperationalError
@@ -119,17 +118,8 @@ def create_group_result(job_log, line):
     # Log to New Relic if it's not in a form we like. We can enter
     # Bugs to upstream to remedy them.
     if "\\" in group_path or len(group_path) > 255:
-        newrelic.agent.record_custom_event(
-            "malformed_test_group",
-            {
-                "message": "Group paths must be relative, with no backslashes and <255 chars",
-                "group": line["group"],
-                "group_path": group_path,
-                "length": len(group_path),
-                "repository": job_log.job.repository,
-                "job_guid": job_log.job.guid,
-            },
-        )
+        pass
+
     else:
         group, _ = Group.objects.get_or_create(name=group_path[:255])
         duration = line.get("duration", 0)
@@ -157,7 +147,6 @@ def create(job_log, log_list):
     for line in log_list:
         action = line["action"]
         if action not in FailureLine.ACTION_LIST:
-            newrelic.agent.record_custom_event("unsupported_failure_line_action", line)
            # Unfortunately, these errors flood the logs, but we want to report any
            # others that we didn't expect. We know about the following action we choose
            # to ignore.
diff --git a/treeherder/log_parser/tasks.py b/treeherder/log_parser/tasks.py
index 827a2bb2e50..657a32766ae 100644
--- a/treeherder/log_parser/tasks.py
+++ b/treeherder/log_parser/tasks.py
@@ -1,6 +1,5 @@
 import logging
 
-import newrelic.agent
 import simplejson as json
 from celery.exceptions import SoftTimeLimitExceeded
 from requests.exceptions import HTTPError
@@ -20,7 +19,6 @@
 
 @retryable_task(name="log-parser", max_retries=10)
 def parse_logs(job_id, job_log_ids, priority):
-    newrelic.agent.add_custom_attribute("job_id", str(job_id))
 
     job = Job.objects.get(id=job_id)
     job_logs = JobLog.objects.filter(id__in=job_log_ids, job=job)
@@ -41,7 +39,6 @@ def parse_logs(job_id, job_log_ids, priority):
     first_exception = None
     completed_names = set()
     for job_log in job_logs:
-        newrelic.agent.add_custom_attribute(f"job_log_{job_log.name}_url", job_log.url)
         logger.info("parser_task for %s", job_log.id)
 
         # Only parse logs which haven't yet been processed or else failed on the last attempt.
@@ -60,16 +57,13 @@ def parse_logs(job_id, job_log_ids, priority):
             parser(job_log)
         except Exception as e:
             if isinstance(e, SoftTimeLimitExceeded):
-                # stop parsing further logs but raise so NewRelic and
+                # stop parsing further logs but raise so
                 # Papertrail will still show output
                 raise
 
             if first_exception is None:
                 first_exception = e
 
-            # track the exception on NewRelic but don't stop parsing future
-            # log lines.
-            newrelic.agent.notice_error()
         else:
             completed_names.add(job_log.name)
 
diff --git a/treeherder/middleware.py b/treeherder/middleware.py
index 308fbad8cd4..b0b325e0d45 100644
--- a/treeherder/middleware.py
+++ b/treeherder/middleware.py
@@ -1,6 +1,5 @@
 import re
 
-import newrelic.agent
 from django.utils.deprecation import MiddlewareMixin
 from whitenoise.middleware import WhiteNoiseMiddleware
 
@@ -69,12 +68,3 @@ def immutable_file_test(self, path, url):
         # bootstrap.min.abda843684d0.js
         return super().immutable_file_test(path, url)
 
-
-class NewRelicMiddleware(MiddlewareMixin):
-    """Adds custom annotations to New Relic web transactions."""
-
-    def process_request(self, request):
-        # The New Relic Python agent only submits the User Agent to APM (for exceptions and
-        # slow transactions), so for use in Insights we have to add it as a customer parameter.
-        if "HTTP_USER_AGENT" in request.META:
-            newrelic.agent.add_custom_attribute("user_agent", request.META["HTTP_USER_AGENT"])
diff --git a/treeherder/model/error_summary.py b/treeherder/model/error_summary.py
index a09238fa576..1e366af7fd5 100644
--- a/treeherder/model/error_summary.py
+++ b/treeherder/model/error_summary.py
@@ -2,7 +2,6 @@
 import logging
 import re
 
-import newrelic.agent
 from django.core.cache import caches
 
 from treeherder.model.models import Bugscache, TextLogError
@@ -174,7 +173,6 @@ def get_error_summary(job, queryset=None):
     try:
         cache.set(cache_key, error_summary, BUG_SUGGESTION_CACHE_TIMEOUT)
     except Exception as e:
-        newrelic.agent.record_custom_event("error caching error_summary for job", job.id)
         logger.error("error caching error_summary for job %s: %s", job.id, e, exc_info=True)
 
     try:
@@ -182,7 +180,6 @@ def get_error_summary(job, queryset=None):
         # TODO: consider reducing this, each date is ~5%, so it will be faster
         lcache.update_db_cache(date, line_cache[date])
     except Exception as e:
-        newrelic.agent.record_custom_event("error caching error_lines for job", job.id)
         logger.error("error caching error_lines for job %s: %s", job.id, e, exc_info=True)
 
     return error_summary
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index 6de26f11329..8bcb3a4b2a2 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -5,7 +5,6 @@
 import warnings
 from hashlib import sha1
 
-import newrelic.agent
 from django.conf import settings
 from django.contrib.auth.models import User
 from django.contrib.postgres.indexes import GinIndex
@@ -21,7 +20,6 @@
 
 from treeherder.webapp.api.utils import REPO_GROUPS, to_timestamp
 
-warnings.filterwarnings("ignore", category=DeprecationWarning, module="newrelic")
 
 logger = logging.getLogger(__name__)
 
@@ -294,7 +292,6 @@ def search(cls, search_term):
             open_recent = [x for x in all_data if x["resolution"] == ""]
             all_others = [x for x in all_data if x["resolution"] != ""]
         except ProgrammingError as e:
-            newrelic.agent.notice_error()
             logger.error(
                 f"Failed to execute FULLTEXT search on Bugscache, error={e}, SQL={recent_qs.query.__str__()}"
             )
@@ -1291,19 +1288,11 @@ def verify_classification(self, classification):
             self.metadata.best_is_verified = True
             self.metadata.save(update_fields=["best_classification", "best_is_verified"])
 
-        # Send event to NewRelic when a verifing an autoclassified failure.
+
         match = self.matches.filter(classified_failure=classification).first()
         if not match:
             return
 
-        newrelic.agent.record_custom_event(
-            "user_verified_classification",
-            {
-                "matcher": match.matcher_name,
-                "job_id": self.id,
-            },
-        )
-
     def get_failure_line(self):
         """Get a related FailureLine instance if one exists."""
         try:
diff --git a/treeherder/perf/alerts.py b/treeherder/perf/alerts.py
index a038867dc02..6dbd96f7ccf 100644
--- a/treeherder/perf/alerts.py
+++ b/treeherder/perf/alerts.py
@@ -4,7 +4,6 @@
 from datetime import datetime
 
 import moz_measure_noise
-import newrelic.agent
 import numpy as np
 from django.conf import settings
 from django.db import transaction
@@ -128,7 +127,6 @@ def generate_new_alerts_in_series(signature):
             )
         except Exception:
             # Fail without breaking the alert computation
-            newrelic.agent.notice_error()
             logger.error("Failed to obtain a noise profile.")
 
         # ignore regressions below the configured regression
diff --git a/treeherder/perf/ingest_data.py b/treeherder/perf/ingest_data.py
index 14b68b7068e..86a9c6f56e5 100644
--- a/treeherder/perf/ingest_data.py
+++ b/treeherder/perf/ingest_data.py
@@ -1,8 +1,6 @@
 import json
 import logging
 
-import newrelic.agent
-
 from treeherder.etl.artifact import serialize_artifact_json_blobs
 from treeherder.etl.perf import store_performance_artifact
 from treeherder.log_parser.utils import validate_perf_data
@@ -19,7 +17,6 @@ def post_perfherder_artifacts(job_log):
     try:
         with make_request(job_log.url, stream=False, timeout=60) as response:
             download_size_in_bytes = int(response.headers.get("Content-Length", -1))
-            newrelic.agent.add_custom_attribute("perf_json_size", download_size_in_bytes)
             if download_size_in_bytes > 0 and download_size_in_bytes > MAX_JSON_SIZE:
                 job_log.update_status(JobLog.SKIPPED_SIZE)
                 logger.warning(
diff --git a/treeherder/perf/tasks.py b/treeherder/perf/tasks.py
index e6543f9b897..5a4bf56bbf1 100644
--- a/treeherder/perf/tasks.py
+++ b/treeherder/perf/tasks.py
@@ -1,6 +1,5 @@
 import logging
 
-import newrelic.agent
 from celery.exceptions import SoftTimeLimitExceeded
 
 from treeherder.model.models import Job, JobLog
@@ -13,7 +12,6 @@
 
 @retryable_task(name="generate-alerts", max_retries=10)
 def generate_alerts(signature_id):
-    newrelic.agent.add_custom_attribute("signature_id", str(signature_id))
     signature = PerformanceSignature.objects.get(id=signature_id)
     generate_new_alerts_in_series(signature)
 
@@ -22,8 +20,6 @@ def ingest_perfherder_data(job_id, job_log_ids):
     from treeherder.perf.ingest_data import post_perfherder_artifacts
 
-    newrelic.agent.add_custom_attribute("job_id", str(job_id))
-
     job = Job.objects.get(id=job_id)
     job_artifacts = JobLog.objects.filter(id__in=job_log_ids, job=job)
 
@@ -38,7 +34,6 @@ def ingest_perfherder_data(job_id, job_log_ids):
         if not job_artifact_name.startswith("perfherder_data"):
             continue
 
-        newrelic.agent.add_custom_attribute(f"job_log_{job_artifact.name}_url", job_artifact.url)
         logger.info("ingest_perfherder_data for %s", job_artifact.id)
 
         if job_artifact.status not in (JobLog.PENDING, JobLog.FAILED):
@@ -53,14 +48,13 @@ def ingest_perfherder_data(job_id, job_log_ids):
             post_perfherder_artifacts(job_artifact)
         except Exception as e:
             if isinstance(e, SoftTimeLimitExceeded):
-                # stop parsing further logs but raise so NewRelic and
+                # stop parsing further logs but raise so
                 # Papertrail will still show output
                 raise
 
             if first_exception is None:
                 first_exception = e
 
-            newrelic.agent.notice_error()
             logger.exception("Failed ingesting perfherder JSON for log %s", job_artifact.id)
 
     if first_exception:
diff --git a/treeherder/services/pulse/consumers.py b/treeherder/services/pulse/consumers.py
index 4a4f4a01449..f197fd93344 100644
--- a/treeherder/services/pulse/consumers.py
+++ b/treeherder/services/pulse/consumers.py
@@ -3,7 +3,6 @@
 import threading
 
 import environ
-import newrelic.agent
 from django.conf import settings
 from kombu import Connection, Exchange, Queue
 from kombu.mixins import ConsumerMixin
@@ -159,7 +158,6 @@ class TaskConsumer(PulseConsumer):
     def bindings(self):
         return TASKCLUSTER_TASK_BINDINGS
 
-    @newrelic.agent.background_task(name="pulse-listener-tasks.on_message", group="Pulse Listener")
     def on_message(self, body, message):
         exchange = message.delivery_info["exchange"]
         routing_key = message.delivery_info["routing_key"]
@@ -184,9 +182,6 @@ def bindings(self):
         )
         return MOZCI_CLASSIFICATION_PRODUCTION_BINDINGS
 
-    @newrelic.agent.background_task(
-        name="pulse-listener-tasks-classification.on_message", group="Pulse Listener"
-    )
     def on_message(self, body, message):
         exchange = message.delivery_info["exchange"]
         routing_key = message.delivery_info["routing_key"]
@@ -209,7 +204,6 @@ def bindings(self):
             rv += GITHUB_PUSH_BINDINGS
         return rv
 
-    @newrelic.agent.background_task(name="pulse-listener-pushes.on_message", group="Pulse Listener")
     def on_message(self, body, message):
         exchange = message.delivery_info["exchange"]
         routing_key = message.delivery_info["routing_key"]
@@ -250,7 +244,6 @@ def bindings(self):
 
         return rv
 
-    @newrelic.agent.background_task(name="pulse-joint-listener.on_message", group="Pulse Listener")
     def on_message(self, body, message):
         exchange = message.delivery_info["exchange"]
         routing_key = message.delivery_info["routing_key"]
diff --git a/treeherder/utils/http.py b/treeherder/utils/http.py
index f7326451694..9260c6fdc7c 100644
--- a/treeherder/utils/http.py
+++ b/treeherder/utils/http.py
@@ -1,4 +1,3 @@
-import newrelic.agent
 import requests
 from django.conf import settings
 
@@ -14,7 +13,6 @@ def make_request(url, method="GET", headers=None, timeout=30, **kwargs):
             "redirects": len(response.history),
             "duration": sum(r.elapsed.total_seconds() for r in response.history),
         }
-        newrelic.agent.record_custom_event("RedirectedRequest", params=params)
 
     response.raise_for_status()
     return response
diff --git a/treeherder/webapp/api/auth.py b/treeherder/webapp/api/auth.py
index 2a106261556..f8b27cd866b 100644
--- a/treeherder/webapp/api/auth.py
+++ b/treeherder/webapp/api/auth.py
@@ -1,6 +1,5 @@
 import logging
 
-import newrelic.agent
 from django.contrib.auth import authenticate, login, logout
 from rest_framework import viewsets
 from rest_framework.decorators import action
@@ -35,7 +34,6 @@ def login(self, request):
             # This indicates an error that may require attention by the
             # Treeherder or Taskcluster teams. Logging this to New Relic to
             # increase visibility.
-            newrelic.agent.notice_error()
             logger.exception("Error", exc_info=ex)
             raise AuthenticationFailed(str(ex))
 
diff --git a/treeherder/webapp/api/csp_report.py b/treeherder/webapp/api/csp_report.py
index 99ce55a6dc9..32a241eefce 100644
--- a/treeherder/webapp/api/csp_report.py
+++ b/treeherder/webapp/api/csp_report.py
@@ -1,7 +1,7 @@
 import json
 import logging
 
-import newrelic.agent
+
 from django.http import HttpResponse, HttpResponseBadRequest
 from django.views.decorators.csrf import csrf_exempt
 from django.views.decorators.http import require_POST
@@ -29,5 +29,4 @@ def csp_report_collector(request):
         return HttpResponseBadRequest("Invalid CSP violation report")
 
     logger.warning("CSP violation: %s", report)
-    newrelic.agent.record_custom_event("CSP violation", report)
     return HttpResponse()
diff --git a/treeherder/webapp/api/push.py b/treeherder/webapp/api/push.py
index 2a780282602..d2756a122c5 100644
--- a/treeherder/webapp/api/push.py
+++ b/treeherder/webapp/api/push.py
@@ -1,7 +1,6 @@
 import datetime
 import logging
 
-import newrelic.agent
 from cache_memoize import cache_memoize
 from django.contrib.postgres.search import SearchQuery
 from django.db.models import Exists, OuterRef, Q
@@ -408,16 +407,6 @@ def health(self, request, project):
         # for the push health API's (total_failures doesn't include known intermittent failures)
         status["testfailed"] = total_failures
 
-        newrelic.agent.record_custom_event(
-            "push_health_need_investigation",
-            {
-                "revision": revision,
-                "repo": repository.name,
-                "needInvestigation": len(push_health_test_failures["needInvestigation"]),
-                "author": push.author,
-            },
-        )
-
         return Response(
             {
                 "revision": revision,
@@ -486,10 +475,7 @@ def decisiontask(self, request, project):
     # TODO: Remove when we no longer support short revisions: Bug 1306707
     def report_if_short_revision(self, param, revision):
         if len(revision) < 40:
-            newrelic.agent.record_custom_event(
-                "short_revision_push_api",
-                {"error": "Revision <40 chars", "param": param, "revision": revision},
-            )
+            pass
 
     @action(detail=False)
     def group_results(self, request, project):
diff --git a/treeherder/workers/task.py b/treeherder/workers/task.py
index 78a37c55203..595434d0e0c 100644
--- a/treeherder/workers/task.py
+++ b/treeherder/workers/task.py
@@ -3,7 +3,6 @@
 from functools import wraps
 
 import jsonschema
-import newrelic.agent
 from celery import shared_task
 from django.db.utils import IntegrityError, ProgrammingError
 
@@ -57,7 +56,6 @@ def inner(*args, **kwargs):
                 params = {
                     "number_of_prior_retries": number_of_prior_retries,
                 }
-                newrelic.agent.notice_error(attributes=params)
                 # Implement exponential backoff with some randomness to prevent
                 # thundering herd type problems. Constant factor chosen so we get
                 # reasonable pause between the fastest retries.