diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 6869cf7fb7..7808f9d18e 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -158,6 +158,7 @@ jobs: echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env + echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env - name: Install dependencies working-directory: frontend run: yarn install diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index 1f5f8e4229..03f3cb92f1 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -146,7 +146,7 @@ services: condition: on-failure query-service: - image: signoz/query-service:0.48.0 + image: signoz/query-service:0.48.1 command: [ "-config=/root/config/prometheus.yml", diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index 075bb3322c..c5c13c5de8 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -164,7 +164,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.48.0} + image: signoz/query-service:${DOCKER_TAG:-0.48.1} container_name: signoz-query-service command: [ @@ -204,7 +204,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.48.0} + image: signoz/frontend:${DOCKER_TAG:-0.48.1} container_name: signoz-frontend restart: on-failure depends_on: diff --git a/deploy/docker/clickhouse-setup/docker-compose.yaml b/deploy/docker/clickhouse-setup/docker-compose.yaml index 72980c15a5..fb7757ba0d 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.yaml @@ -164,7 +164,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.48.0} + image: signoz/query-service:${DOCKER_TAG:-0.48.1} container_name: signoz-query-service command: [ @@ -203,7 +203,7 @@ services: <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.48.0} + image: signoz/frontend:${DOCKER_TAG:-0.48.1} container_name: signoz-frontend restart: on-failure depends_on: diff --git a/deploy/install.sh b/deploy/install.sh index 1d4905b6f6..85c63c248d 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -389,7 +389,7 @@ trap bye EXIT URL="https://api.segment.io/v1/track" HEADER_1="Content-Type: application/json" -HEADER_2="Authorization: Basic NEdtb2E0aXhKQVVIeDJCcEp4c2p3QTFiRWZud0VlUno6" +HEADER_2="Authorization: Basic OWtScko3b1BDR1BFSkxGNlFqTVBMdDVibGpGaFJRQnI=" send_event() { error="" diff --git a/ee/query-service/app/api/api.go b/ee/query-service/app/api/api.go index be0cf1ec36..66b462e167 100644 --- a/ee/query-service/app/api/api.go +++ b/ee/query-service/app/api/api.go @@ -24,7 +24,6 @@ import ( type APIHandlerOptions struct { DataConnector interfaces.DataConnector SkipConfig *basemodel.SkipConfig - PreferDelta bool PreferSpanMetrics bool MaxIdleConns int MaxOpenConns int @@ -53,7 +52,6 @@ func NewAPIHandler(opts APIHandlerOptions) (*APIHandler, error) { baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{ Reader: opts.DataConnector, SkipConfig: opts.SkipConfig, - PerferDelta: opts.PreferDelta, PreferSpanMetrics: opts.PreferSpanMetrics, MaxIdleConns: opts.MaxIdleConns, MaxOpenConns: opts.MaxOpenConns, diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index 75af1d7ebc..9a2c96734f 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -64,7 +64,6 @@ type ServerOptions struct { // alert specific params DisableRules bool RuleRepoURL string - PreferDelta bool PreferSpanMetrics bool MaxIdleConns int MaxOpenConns int @@ -256,7 +255,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { apiOpts := api.APIHandlerOptions{ DataConnector: reader, SkipConfig: skipConfig, - PreferDelta: serverOptions.PreferDelta, PreferSpanMetrics: serverOptions.PreferSpanMetrics, MaxIdleConns: serverOptions.MaxIdleConns, MaxOpenConns: serverOptions.MaxOpenConns, diff --git a/ee/query-service/main.go b/ee/query-service/main.go index 4a8a12af6e..c5a03f4c0f 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -89,7 +89,6 @@ func main() { var cacheConfigPath, fluxInterval string var enableQueryServiceLogOTLPExport bool - var preferDelta bool var preferSpanMetrics bool var maxIdleConns int @@ -100,14 +99,13 @@ func main() { flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") - flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)") flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)") flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)") flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)") 
flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert messages)") flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") - flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)") + flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)") flag.BoolVar(&enableQueryServiceLogOTLPExport, "enable.query.service.log.otlp.export", false, "(enable query service log otlp export)") flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)") @@ -125,7 +123,6 @@ func main() { HTTPHostPort: baseconst.HTTPHostPort, PromConfigPath: promConfigPath, SkipTopLvlOpsPath: skipTopLvlOpsPath, - PreferDelta: preferDelta, PreferSpanMetrics: preferSpanMetrics, PrivateHostPort: baseconst.PrivateHostPort, DisableRules: disableRules, diff --git a/frontend/package.json b/frontend/package.json index 25d32f69df..d78064278a 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -88,6 +88,7 @@ "lucide-react": "0.379.0", "mini-css-extract-plugin": "2.4.5", "papaparse": "5.4.1", + "posthog-js": "1.140.1", "rc-tween-one": "3.0.6", "react": "18.2.0", "react-addons-update": "15.6.3", diff --git a/frontend/public/Logos/azure-aks.svg b/frontend/public/Logos/azure-aks.svg new file mode 100644 index 0000000000..d45672703d --- /dev/null +++ b/frontend/public/Logos/azure-aks.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/azure-app-service.svg b/frontend/public/Logos/azure-app-service.svg new file mode 100644 index 0000000000..54051fc58f --- /dev/null +++ b/frontend/public/Logos/azure-app-service.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/azure-blob-storage.svg b/frontend/public/Logos/azure-blob-storage.svg new file mode 100644 index 0000000000..1650133096 --- /dev/null +++ b/frontend/public/Logos/azure-blob-storage.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/frontend/public/Logos/azure-container-apps.svg b/frontend/public/Logos/azure-container-apps.svg new file mode 100644 index 0000000000..3dd3d4db91 --- /dev/null +++ b/frontend/public/Logos/azure-container-apps.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/azure-functions.svg b/frontend/public/Logos/azure-functions.svg new file mode 100644 index 0000000000..9face30fb9 --- /dev/null +++ b/frontend/public/Logos/azure-functions.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/azure-sql-database-metrics.svg b/frontend/public/Logos/azure-sql-database-metrics.svg new file mode 100644 index 0000000000..fed69970bb --- /dev/null +++ b/frontend/public/Logos/azure-sql-database-metrics.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/azure-vm.svg b/frontend/public/Logos/azure-vm.svg new file mode 100644 index 0000000000..bde2b81881 --- /dev/null +++ b/frontend/public/Logos/azure-vm.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/locales/en/titles.json b/frontend/public/locales/en/titles.json index 8aef9c9af6..f77bf0e85a 100644 --- a/frontend/public/locales/en/titles.json +++ b/frontend/public/locales/en/titles.json @@ -8,6 +8,7 @@ "GET_STARTED_LOGS_MANAGEMENT": "SigNoz | Get Started | Logs", "GET_STARTED_INFRASTRUCTURE_MONITORING": "SigNoz | Get 
Started | Infrastructure", "GET_STARTED_AWS_MONITORING": "SigNoz | Get Started | AWS", + "GET_STARTED_AZURE_MONITORING": "SigNoz | Get Started | AZURE", "TRACE": "SigNoz | Trace", "TRACE_DETAIL": "SigNoz | Trace Detail", "TRACES_EXPLORER": "SigNoz | Traces Explorer", diff --git a/frontend/src/AppRoutes/index.tsx b/frontend/src/AppRoutes/index.tsx index 645974204c..bc2b02d842 100644 --- a/frontend/src/AppRoutes/index.tsx +++ b/frontend/src/AppRoutes/index.tsx @@ -17,6 +17,7 @@ import { NotificationProvider } from 'hooks/useNotifications'; import { ResourceProvider } from 'hooks/useResourceAttribute'; import history from 'lib/history'; import { identity, pick, pickBy } from 'lodash-es'; +import posthog from 'posthog-js'; import { DashboardProvider } from 'providers/Dashboard/Dashboard'; import { QueryBuilderProvider } from 'providers/QueryBuilder'; import { Suspense, useEffect, useState } from 'react'; @@ -38,7 +39,7 @@ import defaultRoutes, { function App(): JSX.Element { const themeConfig = useThemeConfig(); - const { data } = useLicense(); + const { data: licenseData } = useLicense(); const [routes, setRoutes] = useState(defaultRoutes); const { role, isLoggedIn: isLoggedInState, user, org } = useSelector< AppState, @@ -92,10 +93,10 @@ function App(): JSX.Element { }); const isOnBasicPlan = - data?.payload?.licenses?.some( + licenseData?.payload?.licenses?.some( (license) => license.isCurrent && license.planKey === LICENSE_PLAN_KEY.BASIC_PLAN, - ) || data?.payload?.licenses === null; + ) || licenseData?.payload?.licenses === null; const enableAnalytics = (user: User): void => { const orgName = @@ -112,9 +113,7 @@ function App(): JSX.Element { }; const sanitizedIdentifyPayload = pickBy(identifyPayload, identity); - const domain = extractDomain(email); - const hostNameParts = hostname.split('.'); const groupTraits = { @@ -127,10 +126,30 @@ function App(): JSX.Element { }; window.analytics.identify(email, sanitizedIdentifyPayload); - window.analytics.group(domain, groupTraits); - window.clarity('identify', email, name); + + posthog?.identify(email, { + email, + name, + orgName, + tenant_id: hostNameParts[0], + data_region: hostNameParts[1], + tenant_url: hostname, + company_domain: domain, + source: 'signoz-ui', + isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription, + }); + + posthog?.group('company', domain, { + name: orgName, + tenant_id: hostNameParts[0], + data_region: hostNameParts[1], + tenant_url: hostname, + company_domain: domain, + source: 'signoz-ui', + isPaidUser: !!licenseData?.payload?.trialConvertedToSubscription, + }); }; useEffect(() => { @@ -144,10 +163,6 @@ function App(): JSX.Element { !isIdentifiedUser ) { setLocalStorageApi(LOCALSTORAGE.IS_IDENTIFIED_USER, 'true'); - - if (isCloudUserVal) { - enableAnalytics(user); - } } if ( @@ -195,6 +210,11 @@ function App(): JSX.Element { console.error('Failed to parse local storage theme analytics event'); } } + + if (isCloudUserVal && user && user.email) { + enableAnalytics(user); + } + // eslint-disable-next-line react-hooks/exhaustive-deps }, [user]); diff --git a/frontend/src/components/CustomTimePicker/CustomTimePicker.tsx b/frontend/src/components/CustomTimePicker/CustomTimePicker.tsx index 114db17924..a3bb980175 100644 --- a/frontend/src/components/CustomTimePicker/CustomTimePicker.tsx +++ b/frontend/src/components/CustomTimePicker/CustomTimePicker.tsx @@ -287,7 +287,7 @@ function CustomTimePicker({ ) } arrow={false} - trigger="hover" + trigger="click" open={open} onOpenChange={handleOpenChange} style={{ diff 
--git a/frontend/src/components/Logs/AddToQueryHOC.tsx b/frontend/src/components/Logs/AddToQueryHOC.tsx index 609840477b..8391a23b81 100644 --- a/frontend/src/components/Logs/AddToQueryHOC.tsx +++ b/frontend/src/components/Logs/AddToQueryHOC.tsx @@ -2,7 +2,7 @@ import './AddToQueryHOC.styles.scss'; import { Popover } from 'antd'; import { OPERATORS } from 'constants/queryBuilder'; -import { memo, ReactNode, useCallback, useMemo } from 'react'; +import { memo, MouseEvent, ReactNode, useMemo } from 'react'; function AddToQueryHOC({ fieldKey, @@ -10,9 +10,10 @@ function AddToQueryHOC({ onAddToQuery, children, }: AddToQueryHOCProps): JSX.Element { - const handleQueryAdd = useCallback(() => { + const handleQueryAdd = (event: MouseEvent): void => { + event.stopPropagation(); onAddToQuery(fieldKey, fieldValue, OPERATORS.IN); - }, [fieldKey, fieldValue, onAddToQuery]); + }; const popOverContent = useMemo(() => Add to query: {fieldKey}, [ fieldKey, diff --git a/frontend/src/components/TextToolTip/TextToolTip.tsx b/frontend/src/components/TextToolTip/TextToolTip.tsx index 6c8fad783e..13f3c72d73 100644 --- a/frontend/src/components/TextToolTip/TextToolTip.tsx +++ b/frontend/src/components/TextToolTip/TextToolTip.tsx @@ -9,7 +9,6 @@ import { Tooltip } from 'antd'; import { themeColors } from 'constants/theme'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { useMemo } from 'react'; -import { popupContainer } from 'utils/selectPopupContainer'; import { style } from './constant'; @@ -64,7 +63,7 @@ function TextToolTip({ ); return ( - + {useFilledIcon ? ( ) : ( diff --git a/frontend/src/constants/routes.ts b/frontend/src/constants/routes.ts index 243bdd0bba..ef73184a86 100644 --- a/frontend/src/constants/routes.ts +++ b/frontend/src/constants/routes.ts @@ -13,6 +13,7 @@ const ROUTES = { GET_STARTED_INFRASTRUCTURE_MONITORING: '/get-started/infrastructure-monitoring', GET_STARTED_AWS_MONITORING: '/get-started/aws-monitoring', + GET_STARTED_AZURE_MONITORING: '/get-started/azure-monitoring', USAGE_EXPLORER: '/usage-explorer', APPLICATION: '/services', ALL_DASHBOARD: '/dashboard', diff --git a/frontend/src/container/AppLayout/index.tsx b/frontend/src/container/AppLayout/index.tsx index ef7be7eef4..88c6b8f8f6 100644 --- a/frontend/src/container/AppLayout/index.tsx +++ b/frontend/src/container/AppLayout/index.tsx @@ -236,7 +236,8 @@ function AppLayout(props: AppLayoutProps): JSX.Element { pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING || pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING || pathname === ROUTES.GET_STARTED_LOGS_MANAGEMENT || - pathname === ROUTES.GET_STARTED_AWS_MONITORING; + pathname === ROUTES.GET_STARTED_AWS_MONITORING || + pathname === ROUTES.GET_STARTED_AZURE_MONITORING; const [showTrialExpiryBanner, setShowTrialExpiryBanner] = useState(false); diff --git a/frontend/src/container/Header/index.tsx b/frontend/src/container/Header/index.tsx index 191f83e38b..af24bdc4eb 100644 --- a/frontend/src/container/Header/index.tsx +++ b/frontend/src/container/Header/index.tsx @@ -27,6 +27,7 @@ import { import { useSelector } from 'react-redux'; import { NavLink } from 'react-router-dom'; import { AppState } from 'store/reducers'; +import { License } from 'types/api/licenses/def'; import AppReducer from 'types/reducer/app'; import { getFormattedDate, getRemainingDays } from 'utils/timeUtils'; @@ -109,9 +110,13 @@ function HeaderContainer(): JSX.Element { const { data: licenseData, isFetching, status: licenseStatus } = useLicense(); + const licensesStatus: string = + 
licenseData?.payload?.licenses?.find((e: License) => e.isCurrent)?.status ||
+		'';
+
 	const isLicenseActive =
-		licenseData?.payload?.licenses?.find((e) => e.isCurrent)?.status ===
-		LICENSE_PLAN_STATUS.VALID;
+		licensesStatus?.toLocaleLowerCase() ===
+		LICENSE_PLAN_STATUS.VALID.toLocaleLowerCase();
 
 	useEffect(() => {
 		if (
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-installCentralCollector.md
new file mode 100644
index 0000000000..cb78ca5d27
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-installCentralCollector.md
@@ -0,0 +1,111 @@
+## Setup
+
+### Installing with OpenTelemetry Helm Charts
+
+Prior to installation, you must ensure your Kubernetes cluster is ready and that you have the necessary permissions to deploy applications. Follow these steps to use Helm for setting up the Collector:
+
+1. **Add the OpenTelemetry Helm repository:**
+
+```bash
+helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
+```
+
+2. **Prepare the `otel-collector-values.yaml` Configuration**
+
+    #### Azure Event Hub Receiver Configuration
+    If you haven't created the logs Event Hub, you can create one by following the steps in the [Azure Event Hubs documentation](../../bootstrapping/data-ingestion).
+
+    Then replace the placeholder `` with the primary connection string for your Event Hub; it should look something like this:
+
+    ```yaml
+    connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
+    ```
+    The Event Hub docs have a step to create a SAS policy for the event hub and copy the connection string.
+
+    #### Azure Monitor Receiver Configuration
+
+    You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
+
+    1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. You can name it `signoz-central-collector-app`; the redirect URI can be empty.
+    2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
+    3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
+
+    4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
+
+    [Image: Application Overview]
+
+    5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
+
+    6. Ensure you replace the placeholders `` and `` with the appropriate values for your SigNoz Cloud instance.
+
+
+
+Below is an example targeting the SigNoz backend with Azure Monitor receivers configured:
+
+```yaml
+service:
+  pipelines:
+    metrics/am:
+      receivers: [azuremonitor]
+      exporters: [otlp]
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    logs:
+      receivers: [otlp, azureeventhub]
+      processors: [batch]
+      exporters: [otlp]
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  azureeventhub:
+    connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
+    format: "azure"
+  azuremonitor:
+    subscription_id: ""
+    tenant_id: ""
+    client_id: ""
+    client_secret: ""
+    resource_groups: [""]
+    collection_interval: 60s
+processors:
+  batch: {}
+exporters:
+  otlp:
+    endpoint: "ingest..signoz.cloud:443"
+    tls:
+      insecure: false
+    headers:
+      "signoz-access-token": ""
+```
+
+3. **Deploy the OpenTelemetry Collector to your Kubernetes cluster:**
+
+You'll need to prepare a custom configuration file, say `otel-collector-values.yaml`, that matches your environment's specific needs. Replace `` with the Kubernetes namespace where you wish to install the Collector.
+
+```bash
+helm install -n  --create-namespace otel-collector open-telemetry/opentelemetry-collector -f otel-collector-values.yaml
+
+```
+
+For more detail, refer to the [official OpenTelemetry Helm Chart documentation](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector), which offers comprehensive installation instructions and configuration options tailored to your environment's requirements.
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-logs.md
new file mode 100644
index 0000000000..17f80b964a
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-logs.md
@@ -0,0 +1,8 @@
+## Prerequisites
+
+- An AKS cluster
+- Central Collector Setup
+
+&nbsp;
+
+Once you have set up the Central Collector, it will automatically start collecting your Logs.
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-metrics.md
new file mode 100644
index 0000000000..68b8c391b4
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-metrics.md
@@ -0,0 +1,8 @@
+## Prerequisites
+
+- An AKS cluster
+- Central Collector Setup
+
+&nbsp;
+
+Once you have set up the Central Collector, it will automatically start sending your Metrics to SigNoz.
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-setupEventsHub.md
new file mode 100644
index 0000000000..976ef7c813
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-setupEventsHub.md
@@ -0,0 +1,40 @@
+## Overview
+
+Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
+
+Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
+
+## Prerequisites
+
+- An active Azure subscription
+
+## Setup
+
+### 1. Create an Event Hubs Namespace
+
+1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
+2. Fill in the required details:
+    - **Resource group**: Choose or create a new one.
+    - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`.
+    - **Pricing tier**: Based on your logging requirements.
+    - **Region**: Should match the region of the resources you want to monitor.
+    - **Throughput units**: Choose based on logging needs.
+3. Click "Review + create" and then "Create".
+
+### 2. Create an Event Hub
+
+1. Navigate to the Event Hubs namespace you created in the Azure portal.
+2. Click "+ Event Hub" to create a new event hub.
+3. Enter a name, e.g., `logs`, and click "Create".
+
+### 3. Create a SAS Policy and Copy Connection String
+
+1. Navigate to the Event Hub in the Azure portal.
+2. Click "Shared access policies" in the left menu.
+3. Click "Add" to create a new policy named `signozListen`.
+4. Select the "Listen" permission and set the expiration time.
+5. Click "Save".
+6. Copy the *Connection string–primary key*.
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-tracing.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-tracing.md
new file mode 100644
index 0000000000..16a3d8cacb
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AKS/aks-tracing.md
@@ -0,0 +1,16 @@
+## Application-level Tracing
+
+For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector.
+
+&nbsp;
+
+To see how you can instrument applications like FastAPI, NextJS, Node.js, Spring, etc., check out the **Application Monitoring** section available at the start of this onboarding, or check out this [documentation](https://signoz.io/docs/instrumentation/).
+
+## Configure the OpenTelemetry SDK
+
+```bash
+# Set env vars or config file
+export OTEL_EXPORTER_OTLP_ENDPOINT="http://otel-collector.kubelet-otel.svc.cluster.local:4318/"
+```
+
+For application-level traces and metrics, configure your application to use the `kube-dns` name of the **Central Collector** you set up earlier.
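+
+A fuller set of standard OpenTelemetry environment variables can make this hand-off explicit. The sketch below is illustrative rather than part of the official guide; the service name and resource attributes are placeholder assumptions, while the `OTEL_*` variables themselves are standard OpenTelemetry SDK configuration:
+
+```bash
+# Hypothetical example: point an instrumented app at the Central Collector.
+export OTEL_EXPORTER_OTLP_ENDPOINT="http://otel-collector.kubelet-otel.svc.cluster.local:4318/"
+export OTEL_EXPORTER_OTLP_PROTOCOL="http/protobuf"  # OTLP over HTTP, port 4318
+export OTEL_SERVICE_NAME="my-service"               # placeholder service name
+export OTEL_RESOURCE_ATTRIBUTES="deployment.environment=staging"  # optional extra tags
+```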
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-installCentralCollector.md
new file mode 100644
index 0000000000..7963cf9526
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-installCentralCollector.md
@@ -0,0 +1,129 @@
+Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
+
+
+## Download and Install the OpenTelemetry Collector Binary
+
+Please visit the [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/), which provides further guidance on a VM installation.
+
+&nbsp;
+
+## Configure OpenTelemetry Collector
+
+While following the documentation above for installing the OpenTelemetry Collector binary, you must have created a `config.yaml` file. Replace the content of `config.yaml` with the config file below, which includes the **Azure Monitor receiver**.
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  azureeventhub:
+    connection: 
+    format: "azure"
+  azuremonitor:
+    subscription_id: ""
+    tenant_id: ""
+    client_id: ""
+    client_secret: ""
+    resource_groups: [""]
+    collection_interval: 60s
+processors:
+  batch: {}
+exporters:
+  otlp:
+    endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+    tls:
+      insecure: false
+    headers:
+      "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+service:
+  pipelines:
+    metrics/am:
+      receivers: [azuremonitor]
+      exporters: [otlp]
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    logs:
+      receivers: [otlp, azureeventhub]
+      processors: [batch]
+      exporters: [otlp]
+
+```
+**NOTE:**
+Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
+
+```bash
+Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
+```
+
+&nbsp;
+
+## Azure Monitor Receiver Configuration
+
+You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
+
+1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. You can name it `signoz-central-collector-app`; the redirect URI can be empty.
+
+2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
+
+3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
+
+4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
+
+5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
+
+**NOTE:**
+By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file.
+
+&nbsp;
+
+## Run the Collector
+
+With your configuration file ready, you can now start the Collector using the following command:
+
+```bash
+# Runs in background with the configuration we just created
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+&nbsp;
+
+### Open Ports
+
+You will need to open the following ports on your Azure VM:
+- 4317 for gRPC
+- 4318 for HTTP
+
+You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
+
+&nbsp;
+
+### Validating the Deployment
+
+Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
+
+&nbsp;
+
+## Configure DNS Label for Collector
+
+You can add a DNS label to the collector's Public IP address. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
+
+1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector.
+2. Click on the "Configuration" tab.
+3. Enter the DNS label you want to use for the collector.
+4. Click on "Save".
+
+**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-logs.md
new file mode 100644
index 0000000000..e0b650d0b9
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-logs.md
@@ -0,0 +1,33 @@
+Follow these steps if you want to set up logging for your Azure App Service.
+
+&nbsp;
+
+## Prerequisites
+
+- EventHub Setup
+- Central Collector Setup
+
+
+## Setup
+
+1. Navigate to your App Service in the Azure portal
+
+2. Search for "Diagnostic settings" in the left navigation menu
+
+3. Click on "Add Diagnostic Setting"
+
+4. Select the desired log categories to export:
+- HTTP logs
+- App Service Console Logs
+- App Service Application Logs
+- Access Audit Logs
+- IPSecurity Audit logs
+- App Service Platform logs
+
+
+5. Configure the destination details as **"Stream to an Event Hub"** and select the Event Hub namespace and Event Hub name created during the EventHub Setup in the earlier steps.
+
+6. Save the diagnostic settings
+
+
+This will start sending your Azure App Service Logs to SigNoz!
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-metrics.md
new file mode 100644
index 0000000000..06893fda79
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-metrics.md
@@ -0,0 +1,25 @@
+Follow these steps if you want to monitor system metrics like CPU Percentage, Memory Percentage, etc., of your Azure App Service.
+
+&nbsp;
+
+## Prerequisites
+
+- EventHub Setup
+- Central Collector Setup
+
+## Dashboard Example
+
+Once you have completed the prerequisites, you can start monitoring your Azure App Service's system metrics with SigNoz Cloud. Here's how you can do it:
+
+1. Log in to your SigNoz account
+2. Navigate to the Dashboards section, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
+3. Add a Timeseries Panel
+4. In **Metrics**, select `azure_memorypercentage_total`, and in **Avg By**, select the tag `location`
+5. In **Filter**, set `name = `
+6. Hit “Save Changes”, and you now have the Memory Usage of your App Service in a dashboard for reporting and alerting
+
+In this way, you can monitor system metrics of your Azure App Service in SigNoz Cloud.
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/app-service/metrics/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-setupEventsHub.md
new file mode 100644
index 0000000000..67e4ceffc1
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-setupEventsHub.md
@@ -0,0 +1,54 @@
+## Overview
+
+Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
+
+Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
+
+## Prerequisites
+
+- An active Azure subscription
+
+## Setup
+
+### 1. Create an Event Hubs Namespace
+
+1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
+2. Fill in the required details:
+    - **Resource group**: Choose or create a new one.
+    - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`.
+    - **Pricing tier**: Based on your logging requirements.
+    - **Region**: Should match the region of the resources you want to monitor.
+    - **Throughput units**: Choose based on logging needs.
+3. Click "Review + create" and then "Create".
+
+### 2. Create an Event Hub
+
+1. Navigate to the Event Hubs namespace you created in the Azure portal.
+2. Click "+ Event Hub" to create a new event hub.
+3. Enter a name, e.g., `logs`, and click "Create".
+
+### 3. Create a SAS Policy and Copy Connection String
+
+1. Navigate to the Event Hub in the Azure portal.
+2. Click "Shared access policies" in the left menu.
Click "Add" to create a new policy named `signozListen`. +4. Select the "Listen" permission and set the expiration time. +5. Click "Save". +6. Copy the *Connection string–primary key*. + + + + + + diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-tracing.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-tracing.md new file mode 100644 index 0000000000..35e1ba03e6 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/AppService/appService-tracing.md @@ -0,0 +1,29 @@ +## Application level Tracing + +For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector. + +  + +To see how you can instrument your applications like FastAPI, NextJS, Node.js, Spring etc. you can check out the **Application Monitoring** section available at the start of this onboarding or you can checkout this [documentation](https://signoz.io/docs/instrumentation/). + +  + +## Prerequisites + +1. **Azure Subscription & App Service**: You need an active Azure subscription with a running Azure App Service instance. +2. **Central Collector Setup**: Make sure you have set up the Central Collector + +  + +## Configure the OpenTelemetry SDK + +```bash +# Set env vars or config file +export OTEL_EXPORTER_OTLP_ENDPOINT="http://:4318/" +``` + +For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. This Central Collector will automatically forward the collected data to SigNoz. + +  + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/app-service/tracing/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-installCentralCollector.md new file mode 100644 index 0000000000..7963cf9526 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-installCentralCollector.md @@ -0,0 +1,129 @@ +Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it: + + +## Download and Install the OpenTelemetry Collector Binary + +Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation. + +  + +## Configure OpenTelemetry Collector + +While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**. 
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  azureeventhub:
+    connection: 
+    format: "azure"
+  azuremonitor:
+    subscription_id: ""
+    tenant_id: ""
+    client_id: ""
+    client_secret: ""
+    resource_groups: [""]
+    collection_interval: 60s
+processors:
+  batch: {}
+exporters:
+  otlp:
+    endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+    tls:
+      insecure: false
+    headers:
+      "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+service:
+  pipelines:
+    metrics/am:
+      receivers: [azuremonitor]
+      exporters: [otlp]
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    logs:
+      receivers: [otlp, azureeventhub]
+      processors: [batch]
+      exporters: [otlp]
+
+```
+**NOTE:**
+Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
+
+```bash
+Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
+```
+
+&nbsp;
+
+## Azure Monitor Receiver Configuration
+
+You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
+
+1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. You can name it `signoz-central-collector-app`; the redirect URI can be empty.
+
+2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
+
+3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
+
+4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
+
+5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
+
+**NOTE:**
+By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file.
+
+&nbsp;
+
+## Run the Collector
+
+With your configuration file ready, you can now start the Collector using the following command:
+
+```bash
+# Runs in background with the configuration we just created
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+&nbsp;
+
+### Open Ports
+
+You will need to open the following ports on your Azure VM:
+- 4317 for gRPC
+- 4318 for HTTP
+
+You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
+
+&nbsp;
+
+### Validating the Deployment
+
+Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
+
+&nbsp;
+
+## Configure DNS Label for Collector
+
+You can add a DNS label to the collector's Public IP address. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
+
+1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector.
+2. Click on the "Configuration" tab.
+3. Enter the DNS label you want to use for the collector.
+4. Click on "Save".
+
+**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-logs.md
new file mode 100644
index 0000000000..17ea35ddd2
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-logs.md
@@ -0,0 +1,23 @@
+Follow these steps if you want to set up logging for your Azure Blob Storage.
+
+&nbsp;
+
+## Prerequisites
+
+- EventHub Setup
+- Central Collector Setup
+
+## Setup
+
+1. Navigate to the relevant Storage Account in the Azure portal
+2. Search for "Diagnostic settings" in the left navigation menu
+3. Click on `blob` under the storage account
+4. Click on "Add Diagnostic Setting"
+5. Select the desired log categories to export:
+    - Storage Read
+    - Storage Write
+    - Storage Delete
+6. Configure the destination details as "**Stream to an Event Hub**" and select the Event Hub namespace and Event Hub name created during the EventHub Setup
+7. Save the diagnostic settings
+
+That's it! You have successfully set up logging for your Azure Blob Storage.
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-metrics.md
new file mode 100644
index 0000000000..2e9ebeacfa
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-metrics.md
@@ -0,0 +1,28 @@
+Follow these steps if you want to monitor system metrics like Total Requests, Total Ingress/Egress, and Total Errors of your Azure Blob Storage.
+
+&nbsp;
+
+## Prerequisites
+
+- Azure Subscription and Azure Blob storage instance running
+- Central Collector Setup
+
+&nbsp;
+
+## Dashboard Example
+
+Once you have completed the prerequisites, you can start monitoring your Azure Blob Storage's system metrics with SigNoz.
+
+1. Log in to your SigNoz account.
+2. Navigate to the Dashboards, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
+3. Add a Timeseries Panel
+4. In **Metrics**, select `azure_ingress_total`, and in **Avg By**, select the tag `location`
+5. In **Filter**, set `name = `
+6. Hit “Save Changes”. You now have the Total Ingress of your Azure Blob Storage in a dashboard for reporting and alerting
+
+
+That's it! You have successfully set up monitoring for your Azure Blob Storage's system metrics with SigNoz. You can now start creating other panels and dashboards to monitor other Azure Blob Storage metrics.
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-blob-storage/metrics/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-setupEventsHub.md
new file mode 100644
index 0000000000..67e4ceffc1
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/BlobStorage/blobStorage-setupEventsHub.md
@@ -0,0 +1,54 @@
+## Overview
+
+Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
+
+Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
+
+## Prerequisites
+
+- An active Azure subscription
+
+## Setup
+
+### 1. Create an Event Hubs Namespace
+
+1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
+2. Fill in the required details:
+    - **Resource group**: Choose or create a new one.
+    - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`.
+    - **Pricing tier**: Based on your logging requirements.
+    - **Region**: Should match the region of the resources you want to monitor.
+    - **Throughput units**: Choose based on logging needs.
+3. Click "Review + create" and then "Create".
+
+### 2. Create an Event Hub
+
+1. Navigate to the Event Hubs namespace you created in the Azure portal.
+2. Click "+ Event Hub" to create a new event hub.
+3. Enter a name, e.g., `logs`, and click "Create".
+
+### 3. Create a SAS Policy and Copy Connection String
+
+1. Navigate to the Event Hub in the Azure portal.
+2. Click "Shared access policies" in the left menu.
+3. Click "Add" to create a new policy named `signozListen`.
+4. Select the "Listen" permission and set the expiration time.
+5. Click "Save".
+6. Copy the *Connection string–primary key*.
+
+
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-installCentralCollector.md
new file mode 100644
index 0000000000..7963cf9526
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-installCentralCollector.md
@@ -0,0 +1,129 @@
+Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
+
+
+## Download and Install the OpenTelemetry Collector Binary
+
+Please visit the [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/), which provides further guidance on a VM installation.
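+
+As a rough sketch of what that installation looks like on a Linux VM (the release version below is an assumption; pick the current one from the releases page):
+
+```bash
+# Hypothetical example: download and unpack the contrib build of the collector.
+OTELCOL_VERSION="0.90.1"  # assumed version, not prescribed by this guide
+wget "https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v${OTELCOL_VERSION}/otelcol-contrib_${OTELCOL_VERSION}_linux_amd64.tar.gz"
+tar -xzf "otelcol-contrib_${OTELCOL_VERSION}_linux_amd64.tar.gz"
+./otelcol-contrib --version  # sanity check before wiring up config.yaml
+```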
+
+&nbsp;
+
+## Configure OpenTelemetry Collector
+
+While following the documentation above for installing the OpenTelemetry Collector binary, you must have created a `config.yaml` file. Replace the content of `config.yaml` with the config file below, which includes the **Azure Monitor receiver**.
+
+```yaml
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+  azureeventhub:
+    connection: 
+    format: "azure"
+  azuremonitor:
+    subscription_id: ""
+    tenant_id: ""
+    client_id: ""
+    client_secret: ""
+    resource_groups: [""]
+    collection_interval: 60s
+processors:
+  batch: {}
+exporters:
+  otlp:
+    endpoint: "ingest.{{REGION}}.signoz.cloud:443"
+    tls:
+      insecure: false
+    headers:
+      "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
+service:
+  pipelines:
+    metrics/am:
+      receivers: [azuremonitor]
+      exporters: [otlp]
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    logs:
+      receivers: [otlp, azureeventhub]
+      processors: [batch]
+      exporters: [otlp]
+
+```
+**NOTE:**
+Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
+
+```bash
+Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
+```
+
+&nbsp;
+
+## Azure Monitor Receiver Configuration
+
+You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
+
+1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. You can name it `signoz-central-collector-app`; the redirect URI can be empty.
+
+2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
+
+3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
+
+4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
+
+5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
+
+**NOTE:**
+By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file.
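+
+If you prefer the CLI to the portal, a single Azure CLI command can create the service principal and print most of these values. A minimal sketch, assuming the `az` CLI is installed and you have rights to assign roles on the subscription (`<subscription_id>` is a placeholder):
+
+```bash
+# Hypothetical example: create a Reader service principal scoped to the subscription.
+# In the output, appId maps to client_id, password to client_secret, tenant to tenant_id.
+az ad sp create-for-rbac \
+  --name signoz-central-collector-app \
+  --role Reader \
+  --scopes "/subscriptions/<subscription_id>"
+```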
+
+&nbsp;
+
+## Run the Collector
+
+With your configuration file ready, you can now start the Collector using the following command:
+
+```bash
+# Runs in background with the configuration we just created
+./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
+```
+
+&nbsp;
+
+### Open Ports
+
+You will need to open the following ports on your Azure VM:
+- 4317 for gRPC
+- 4318 for HTTP
+
+You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
+
+&nbsp;
+
+### Validating the Deployment
+
+Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
+
+&nbsp;
+
+## Configure DNS Label for Collector
+
+You can add a DNS label to the collector's Public IP address. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
+
+1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector.
+2. Click on the "Configuration" tab.
+3. Enter the DNS label you want to use for the collector.
+4. Click on "Save".
+
+**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-logs.md
new file mode 100644
index 0000000000..98f0c1cb23
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-logs.md
@@ -0,0 +1,28 @@
+Follow these steps if you want to set up logging for your Azure Container App.
+
+&nbsp;
+
+## Prerequisites
+
+- EventHub Setup
+- Central Collector Setup
+
+
+## Setup
+
+1. Navigate to your Container Apps in the Azure portal
+2. Click on "Container Apps Environment" to open the Container Apps Environment
+3. Search for "Diagnostic settings" in the left navigation menu
+4. Click on "Add Diagnostic Setting"
+5. Select the desired log categories to export:
+    - Container App console logs
+    - Container App system logs
+    - Spring App console logs
+
+
+6. Configure the destination details as **"Stream to an Event Hub"** and select the Event Hub namespace and Event Hub name created during the EventHub Setup.
+
+7. Save the diagnostic settings
+
+
+That's it! You have successfully set up logging for your Azure Container App.
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-metrics.md
new file mode 100644
index 0000000000..771e19321d
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-metrics.md
@@ -0,0 +1,27 @@
+Follow these steps if you want to monitor system metrics like CPU Percentage, Memory Percentage, etc., of your Azure Container App.
+
+&nbsp;
+
+## Prerequisites
+
+- Azure subscription and an Azure Container App instance running
+- Central Collector Setup
+
+&nbsp;
+
+## Dashboard Example
+
+Once you have completed the prerequisites, you can start monitoring your Azure Container App's system metrics with SigNoz. Here's how you can do it:
+
+1. Log in to your SigNoz account.
+2. Navigate to the Dashboards, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
+3. Add a Timeseries Panel
+4. In **Metrics**, select `azure_replicas_count`, and in **Avg By**, select the tag `name`
+5. In **Filter**, set `type = Microsoft.App/containerApps`
+6. Hit “Save Changes”. You now have the Replica Count of your Container App in a dashboard for reporting and alerting
+
+In this way, you can monitor system metrics of your Azure Container App in SigNoz!
+
+&nbsp;
+
+If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-container-apps/metrics/#troubleshooting)
\ No newline at end of file
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-setupEventsHub.md
new file mode 100644
index 0000000000..67e4ceffc1
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-setupEventsHub.md
@@ -0,0 +1,54 @@
+## Overview
+
+Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
+
+Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
+
+## Prerequisites
+
+- An active Azure subscription
+
+## Setup
+
+### 1. Create an Event Hubs Namespace
+
+1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
+2. Fill in the required details:
+    - **Resource group**: Choose or create a new one.
+    - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`.
+    - **Pricing tier**: Based on your logging requirements.
+    - **Region**: Should match the region of the resources you want to monitor.
+    - **Throughput units**: Choose based on logging needs.
+3. Click "Review + create" and then "Create".
+
+### 2. Create an Event Hub
+
+1. Navigate to the Event Hubs namespace you created in the Azure portal.
+2. Click "+ Event Hub" to create a new event hub.
+3. Enter a name, e.g., `logs`, and click "Create".
+
+### 3. Create a SAS Policy and Copy Connection String
+
+1. Navigate to the Event Hub in the Azure portal.
+2. Click "Shared access policies" in the left menu.
+3. Click "Add" to create a new policy named `signozListen`.
+4. Select the "Listen" permission and set the expiration time.
+5. Click "Save".
+6. Copy the *Connection string–primary key*.
+
+
+
+
diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-tracing.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-tracing.md
new file mode 100644
index 0000000000..3bcc892e08
--- /dev/null
+++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/ContainerApps/containerApps-tracing.md
@@ -0,0 +1,29 @@
+## Application-level Tracing
+
+For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application.
These SDKs will automatically collect and forward traces to the Central Collector. + +   + +To see how you can instrument applications like FastAPI, NextJS, Node.js, Spring, etc., you can check out the **Application Monitoring** section available at the start of this onboarding, or you can check out this [documentation](https://signoz.io/docs/instrumentation/). + +   + +## Prerequisites + +1. **Azure Subscription & Container App**: You need an active Azure subscription with a running Azure Container App instance. +2. **Central Collector Setup**: Make sure you have set up the Central Collector. + +   + +## Configure the OpenTelemetry SDK + +```bash +# Set env vars or config file +export OTEL_EXPORTER_OTLP_ENDPOINT="http://:4318/" +``` + +For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. The Central Collector will automatically forward the collected data to SigNoz. + +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-container-apps/tracing/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-installCentralCollector.md new file mode 100644 index 0000000000..7963cf9526 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-installCentralCollector.md @@ -0,0 +1,129 @@ +Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it: + + +## Download and Install the OpenTelemetry Collector Binary + +Please visit the [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/), which provides further guidance on a VM installation. + +   + +## Configure OpenTelemetry Collector + +While following the documentation above for installing the OpenTelemetry Collector binary, you must have created a `config.yaml` file. Replace the contents of that `config.yaml` with the config below, which includes the **Azure Monitor receiver**. + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + azureeventhub: + connection: + format: "azure" + azuremonitor: + subscription_id: "" + tenant_id: "" + client_id: "" + client_secret: "" + resource_groups: [""] + collection_interval: 60s +processors: + batch: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" +service: + pipelines: + metrics/am: + receivers: [azuremonitor] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp, azureeventhub] + processors: [batch] + exporters: [otlp] + +``` +**NOTE:** +Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section.
It would look something like this: + +```bash +Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName +``` + +   + +## Azure Monitor Receiver Configuration + +You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor. + +1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. +You can name it `signoz-central-collector-app`; the redirect URI can be left empty. + +2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. Read access can be granted for the full subscription. + +3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`. + +4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You will see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section. + +5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file. + +**NOTE:** +By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file. + +   + +## Run the Collector + +With your configuration file ready, you can now start the Collector using the following command: + +```bash +# Runs in background with the configuration we just created +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid +``` + +   + +### Open Ports + +You will need to open the following ports on your Azure VM: +- 4317 for gRPC +- 4318 for HTTP + +You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports. + +   + +### Validating the Deployment + +Once the Collector is running, verify that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors. + +   + +## Configure DNS Label for Collector + +You can add a DNS label to the collector's Public IP address, which makes it easier to refer to the centralized collector from other services. You can do this by following these steps: + +1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector. +2. Click on the "Configuration" tab. +3. Enter the DNS label you want to use for the collector. +4. Click on "Save". + +**NOTE:** Please take note of the DNS label you have entered. You will need it in the next steps.
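+ +If you prefer the Azure CLI over the portal, the DNS label can also be set on the Public IP directly. This is an optional sketch, not part of the original flow; the resource group and Public IP names below are placeholders you should replace with your own: + +```bash +# Placeholder resource names; replace with your collector's resource group +# and the name of its Public IP resource. +az network public-ip update \ +  --resource-group my-collector-rg \ +  --name my-collector-public-ip \ +  --dns-name signoz-central-collector + +# Print the resulting FQDN to confirm the label was applied. +az network public-ip show \ +  --resource-group my-collector-rg \ +  --name my-collector-public-ip \ +  --query dnsSettings.fqdn --output tsv +```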
+ +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-logs.md new file mode 100644 index 0000000000..73ae89d8b0 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-logs.md @@ -0,0 +1,21 @@ +Follow these steps if you want to set up logging for your Azure Functions. + +   + +## Prerequisites + +- EventHub Setup +- Central Collector Setup + + +## Setup + +1. Navigate to your Azure Function in the Azure portal +2. Search for "Diagnostic settings" in the left navigation menu +3. Click on "Add Diagnostic Setting" +4. Select the desired log categories to export: + - Function App logs +5. Configure the destination details as "**Stream to an Event Hub**" and select the Event Hub namespace and Event Hub name created during the EventHub Setup +6. Save the diagnostic settings + +That's it! You have successfully set up logging for your Azure Function. \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-metrics.md new file mode 100644 index 0000000000..abb92f8303 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-metrics.md @@ -0,0 +1,28 @@ +Follow these steps if you want to monitor system metrics such as CPU percentage and memory percentage of your Azure Functions. + +   + +## Prerequisites + +- Azure subscription and an Azure Function App instance running +- Central Collector Setup + +   + +## Dashboard Example + +Once you have completed the prerequisites, you can start monitoring your Azure Function's system metrics with SigNoz. Here's how you can do it: + +1. Log in to your SigNoz account. +2. Navigate to Dashboards, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/) +3. Add a Timeseries Panel +4. In **Metrics**, select `azure_requests_total`, and in **Avg By** select the tag `location` +5. In Filter, set `name = ` +6. Hit “Save Changes”. You now have the total requests of your Azure Function in a dashboard for reporting and alerting + + +That's it! You have successfully set up monitoring for your Azure Function's system metrics with SigNoz. + +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-fns/metrics/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-setupEventsHub.md new file mode 100644 index 0000000000..67e4ceffc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-setupEventsHub.md @@ -0,0 +1,54 @@ +## Overview + +Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises. + +Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
+ +## Prerequisites + +- An active Azure subscription + +## Setup + +### 1. Create an Event Hubs Namespace + +1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace. +2. Fill in the required details: + - **Resource group**: Choose or create a new one. + - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`. + - **Pricing tier**: Based on your logging requirements. + - **Region**: Should match the region of the resources you want to monitor. + - **Throughput units**: Choose based on logging needs. +3. Click "Review + create" and then "Create". + +### 2. Create an Event Hub + +1. Navigate to the Event Hubs namespace you created in the Azure portal. +2. Click "+ Event Hub" to create a new event hub. +3. Enter a name, e.g., `logs`, and click "Create". + +### 3. Create a SAS Policy and Copy Connection String + +1. Navigate to the Event Hub in the Azure portal. +2. Click "Shared access policies" in the left menu. +3. Click "Add" to create a new policy named `signozListen`. +4. Select the "Listen" permission and set the expiration time. +5. Click "Save". +6. Copy the *Connection string–primary key*. + + + + + + diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-tracing.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-tracing.md new file mode 100644 index 0000000000..c20488159c --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Functions/functions-tracing.md @@ -0,0 +1,29 @@ +## Application-level Tracing + +For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector. + +   + +To see how you can instrument applications like FastAPI, NextJS, Node.js, Spring, etc., you can check out the **Application Monitoring** section available at the start of this onboarding, or you can check out this [documentation](https://signoz.io/docs/instrumentation/). + +   + +## Prerequisites + +1. **Azure Subscription & Function App**: You need an active Azure subscription with a running Azure Function App instance. +2. **Central Collector Setup**: Make sure you have set up the Central Collector. + +   + +## Configure the OpenTelemetry SDK + +```bash +# Set env vars or config file +export OTEL_EXPORTER_OTLP_ENDPOINT="http://:4318/" +``` + +For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. The Central Collector will automatically forward the collected data to SigNoz. + +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-fns/tracing/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-installCentralCollector.md new file mode 100644 index 0000000000..7963cf9526 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-installCentralCollector.md @@ -0,0 +1,129 @@ +Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine.
Here's how to do it: + + +## Download and Install the OpenTelemetry Collector Binary + +Please visit the [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/), which provides further guidance on a VM installation. + +   + +## Configure OpenTelemetry Collector + +While following the documentation above for installing the OpenTelemetry Collector binary, you must have created a `config.yaml` file. Replace the contents of that `config.yaml` with the config below, which includes the **Azure Monitor receiver**. + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + azureeventhub: + connection: + format: "azure" + azuremonitor: + subscription_id: "" + tenant_id: "" + client_id: "" + client_secret: "" + resource_groups: [""] + collection_interval: 60s +processors: + batch: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" +service: + pipelines: + metrics/am: + receivers: [azuremonitor] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp, azureeventhub] + processors: [batch] + exporters: [otlp] + +``` +**NOTE:** +Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this: + +```bash +Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName +``` + +   + +## Azure Monitor Receiver Configuration + +You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor. + +1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. +You can name it `signoz-central-collector-app`; the redirect URI can be left empty. + +2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. Read access can be granted for the full subscription. + +3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`. + +4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You will see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section. + +5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
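+ +As an alternative to the portal flow above, the app registration, role assignment, and client secret can be created in one step with the Azure CLI. This is a sketch under the assumption that the CLI is installed and logged in; the subscription ID below is a placeholder: + +```bash +# Creates the app registration and service principal, assigns the +# built-in "Monitoring Reader" role on the subscription, and returns +# a fresh client secret in the JSON output. +az ad sp create-for-rbac \ +  --name signoz-central-collector-app \ +  --role "Monitoring Reader" \ +  --scopes /subscriptions/00000000-0000-0000-0000-000000000000 +# Output fields map to the collector config: +#   appId    -> client_id +#   password -> client_secret +#   tenant   -> tenant_id +```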
+ +**NOTE:** +By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file. + +   + +## Run the Collector + +With your configuration file ready, you can now start the Collector using the following command: + +```bash +# Runs in background with the configuration we just created +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid +``` + +   + +### Open Ports + +You will need to open the following ports on your Azure VM: +- 4317 for gRPC +- 4318 for HTTP + +You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports. + +   + +### Validating the Deployment + +Once the Collector is running, verify that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors. + +   + +## Configure DNS Label for Collector + +You can add a DNS label to the collector's Public IP address, which makes it easier to refer to the centralized collector from other services. You can do this by following these steps: + +1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector. +2. Click on the "Configuration" tab. +3. Enter the DNS label you want to use for the collector. +4. Click on "Save". + +**NOTE:** Please take note of the DNS label you have entered. You will need it in the next steps. + +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-metrics.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-metrics.md new file mode 100644 index 0000000000..d6da5ffc2e --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-metrics.md @@ -0,0 +1,31 @@ +## Prerequisites + +- Azure subscription and a Database instance running +- Central Collector Setup +- [SQL monitoring profile](https://learn.microsoft.com/en-us/azure/azure-sql/database/sql-insights-enable?view=azuresql#create-sql-monitoring-profile) created to monitor the databases in Azure Monitor + +   + + +## Setup + +Once you have completed the prerequisites, you can start monitoring your Database's system metrics with SigNoz. Here's how you can do it: + +1. Log in to your SigNoz account. +2. Navigate to the Dashboards section, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/) +3. Add a Timeseries Panel +4. In **Metrics**, select `azure_storage_maximum`, and in **Avg By** select the tag `location` +5. In Filter, set `name = ` +6. Hit “Save Changes”. You now have the storage usage of your Database in a dashboard for reporting and alerting + +That's it! You have successfully set up monitoring for your Database's system metrics with SigNoz. + +   + +**NOTE:** +Make sure you have created a SQL monitoring profile in Azure Monitor. If you haven't, follow this guide to [Create SQL Monitoring Profile](https://learn.microsoft.com/en-us/azure/azure-sql/database/sql-insights-enable?view=azuresql#create-sql-monitoring-profile). +You can monitor multiple databases in a single profile.
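+ +Before wiring up the panel, it can help to confirm that Azure Monitor is emitting metrics for the database at all. One way to check, assuming the Azure CLI and a known database resource ID (the ID below is a placeholder), is shown in this sketch: + +```bash +# Lists recent datapoints for a SQL database metric from Azure Monitor. +# storage_percent is one of the standard Azure SQL Database metrics. +az monitor metrics list \ +  --resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Sql/servers/my-server/databases/my-db" \ +  --metric storage_percent \ +  --interval PT5M +``` + +If datapoints show up here but not in SigNoz, the issue is likely in the collector's `azuremonitor` receiver configuration.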
+ +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/db-metrics/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-setupEventsHub.md new file mode 100644 index 0000000000..67e4ceffc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-setupEventsHub.md @@ -0,0 +1,54 @@ +## Overview + +Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises. + +Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features. + +## Prerequisites + +- An active Azure subscription + +## Setup + +### 1. Create an Event Hubs Namespace + +1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace. +2. Fill in the required details: + - **Resource group**: Choose or create a new one. + - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`. + - **Pricing tier**: Based on your logging requirements. + - **Region**: Should match the region of the resources you want to monitor. + - **Throughput units**: Choose based on logging needs. +3. Click "Review + create" and then "Create". + +### 2. Create an Event Hub + +1. Navigate to the Event Hubs namespace you created in the Azure portal. +2. Click "+ Event Hub" to create a new event hub. +3. Enter a name, e.g., `logs`, and click "Create". + +### 3. Create a SAS Policy and Copy Connection String + +1. Navigate to the Event Hub in the Azure portal. +2. Click "Shared access policies" in the left menu. +3. Click "Add" to create a new policy named `signozListen`. +4. Select the "Listen" permission and set the expiration time. +5. Click "Save". +6. Copy the *Connection string–primary key*. + + + + + + diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-hostmetrics-and-logs.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-hostmetrics-and-logs.md new file mode 100644 index 0000000000..e3aba6a9c9 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-hostmetrics-and-logs.md @@ -0,0 +1,134 @@ +## Prerequisites + +- An Azure subscription with an Azure VM and SSH access enabled +- Central Collector Setup + + +### Connect to the VM +The [SSH Keys Guide](https://learn.microsoft.com/en-us/azure/virtual-machines/ssh-keys-portal#connect-to-the-vm) has steps on how to connect to your VM via SSH. + +   + +### Install OpenTelemetry Collector + +Follow the [OpenTelemetry SigNoz documentation](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) to install the OpenTelemetry Collector. + +   + +### Configure Collector + +We send the logs, traces, and metrics to the central collector that we set up in the previous step, rather than to SigNoz directly, in order to adopt a scalable architecture pattern. We recommend using the same pattern in your Azure subscription. + +Replace the content of the `config.yaml` file that you created while installing the collector.
+ +```yaml +receivers: + filelog: + include: [ ] # /var/log/myservice/*.json + operators: + - type: json_parser + timestamp: + parse_from: attributes.time + layout: '%Y-%m-%d %H:%M:%S' + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + hostmetrics: + collection_interval: 60s + scrapers: + cpu: {} + disk: {} + load: {} + filesystem: {} + memory: {} + network: {} + paging: {} + process: + mute_process_name_error: true + mute_process_exe_error: true + mute_process_io_error: true + processes: {} + prometheus: + config: + global: + scrape_interval: 60s + scrape_configs: + - job_name: otel-collector-binary + static_configs: + - targets: + # - localhost:8888 +processors: + batch: + send_batch_size: 1000 + timeout: 10s + # Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md + resourcedetection: + detectors: [env, azure, system] + # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. + timeout: 2s + system: + hostname_sources: [dns, os] +extensions: + health_check: {} + zpages: {} +exporters: + otlp: + endpoint: ":4318" + logging: + verbosity: normal +service: + telemetry: + metrics: + address: 0.0.0.0:8888 + extensions: [health_check, zpages] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics/internal: + receivers: [prometheus, hostmetrics] + processors: [resourcedetection, batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp, filelog] + processors: [batch] + exporters: [otlp] +``` +   + +#### OTLP Exporter Configuration +Make sure to replace `` with the DNS name of the central collector that you set up earlier. + +   + +#### File Logs Receiver Configuration +The file logs receiver needs to be configured with the paths to the log files that you want to stream to SigNoz. You can specify multiple paths by listing them in an array. + +You can also specify globbed path patterns to match multiple log files. For example, `/var/log/myservice/*.json` will match all log files in the `/var/log/myservice` directory with a `.json` extension. + +   + +### Start the OpenTelemetry Collector + +Once we are done with the above configurations, we can run the collector service with the following command: + +```bash +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid +``` + +   + +### Hostmetrics Dashboard + +Once the collector is running, you can access the SigNoz dashboard to view the logs and metrics from your Azure VM. + +Please refer to the [Hostmetrics Dashboard](https://signoz.io/docs/userguide/hostmetrics/) for information on how to import and use the dashboard. + diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-installCentralCollector.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-installCentralCollector.md new file mode 100644 index 0000000000..7963cf9526 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-installCentralCollector.md @@ -0,0 +1,129 @@ +Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine.
Here's how to do it: + + +## Download and Install the OpenTelemetry Collector Binary + +Please visit the [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/), which provides further guidance on a VM installation. + +   + +## Configure OpenTelemetry Collector + +While following the documentation above for installing the OpenTelemetry Collector binary, you must have created a `config.yaml` file. Replace the contents of that `config.yaml` with the config below, which includes the **Azure Monitor receiver**. + +```yaml +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + azureeventhub: + connection: + format: "azure" + azuremonitor: + subscription_id: "" + tenant_id: "" + client_id: "" + client_secret: "" + resource_groups: [""] + collection_interval: 60s +processors: + batch: {} +exporters: + otlp: + endpoint: "ingest.{{REGION}}.signoz.cloud:443" + tls: + insecure: false + headers: + "signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}" +service: + pipelines: + metrics/am: + receivers: [azuremonitor] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + logs: + receivers: [otlp, azureeventhub] + processors: [batch] + exporters: [otlp] + +``` +**NOTE:** +Replace the `` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this: + +```bash +Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName +``` + +   + +## Azure Monitor Receiver Configuration + +You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor. + +1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal. +You can name it `signoz-central-collector-app`; the redirect URI can be left empty. + +2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. Read access can be granted for the full subscription. + +3. There are multiple ways to authenticate the service principal; we will use the client secret option. Follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`. + +4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You will see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section. + +5. To find `subscription_id`, follow the steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate it in the configuration file.
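+ +If you have the Azure CLI available, the same identifiers can be looked up without the portal. A small sketch; the app display name assumes you used the `signoz-central-collector-app` name suggested above: + +```bash +az account show --query id --output tsv        # subscription_id +az account show --query tenantId --output tsv  # tenant_id +az ad app list --display-name signoz-central-collector-app \ +  --query "[0].appId" --output tsv             # client_id +```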
+ +**NOTE:** +By following the above steps, you will get the values for ``, ``, `` and `` which you need to fill in the `config.yaml` file. + +   + +## Run the Collector + +With your configuration file ready, you can now start the Collector using the following command: + +```bash +# Runs in background with the configuration we just created +./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid +``` + +   + +### Open Ports + +You will need to open the following ports on your Azure VM: +- 4317 for gRPC +- 4318 for HTTP + +You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports. + +   + +### Validating the Deployment + +Once the Collector is running, verify that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors. + +   + +## Configure DNS Label for Collector + +You can add a DNS label to the collector's Public IP address, which makes it easier to refer to the centralized collector from other services. You can do this by following these steps: + +1. Go to the Public IP address of the collector. This would be the IP address of the VM, or of the Load Balancer in the case of a Kubernetes or load-balanced collector. +2. Click on the "Configuration" tab. +3. Enter the DNS label you want to use for the collector. +4. Click on "Save". + +**NOTE:** Please take note of the DNS label you have entered. You will need it in the next steps. + +   + +If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting) \ No newline at end of file diff --git a/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-setupEventsHub.md b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-setupEventsHub.md new file mode 100644 index 0000000000..67e4ceffc1 --- /dev/null +++ b/frontend/src/container/OnboardingContainer/Modules/AzureMonitoring/Vm/vm-setupEventsHub.md @@ -0,0 +1,54 @@ +## Overview + +Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises. + +Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features. + +## Prerequisites + +- An active Azure subscription + +## Setup + +### 1. Create an Event Hubs Namespace + +1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace. +2. Fill in the required details: + - **Resource group**: Choose or create a new one. + - **Namespace name**: Enter a unique name, e.g., `-obs-signoz`. + - **Pricing tier**: Based on your logging requirements. + - **Region**: Should match the region of the resources you want to monitor. + - **Throughput units**: Choose based on logging needs. +3. Click "Review + create" and then "Create". + +### 2. Create an Event Hub + +1. Navigate to the Event Hubs namespace you created in the Azure portal. +2. Click "+ Event Hub" to create a new event hub. +3. Enter a name, e.g., `logs`, and click "Create". + +### 3. Create a SAS Policy and Copy Connection String + +1. Navigate to the Event Hub in the Azure portal. +2. Click "Shared access policies" in the left menu. +3. Click "Add" to create a new policy named `signozListen`. +4. Select the "Listen" permission and set the expiration time. +5.
Click "Save". +6. Copy the *Connection string–primary key*. + + + + + + diff --git a/frontend/src/container/OnboardingContainer/Onboarding.styles.scss b/frontend/src/container/OnboardingContainer/Onboarding.styles.scss index 018c9af352..e81679d143 100644 --- a/frontend/src/container/OnboardingContainer/Onboarding.styles.scss +++ b/frontend/src/container/OnboardingContainer/Onboarding.styles.scss @@ -31,7 +31,7 @@ .onboardingHeader { text-align: center; - margin-top: 48px; + margin-top: 24px; } .onboardingHeader h1 { @@ -51,13 +51,14 @@ justify-content: center; gap: 36px; margin: 36px; + flex-wrap: wrap; } .moduleStyles { padding: 0; box-sizing: border-box; cursor: pointer; - width: 400px; + width: 300px; transition: 0.3s; .ant-card-body { diff --git a/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx b/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx index 68e8f0edce..5383f459f9 100644 --- a/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx +++ b/frontend/src/container/OnboardingContainer/OnboardingContainer.tsx @@ -25,6 +25,7 @@ import { DataSourceType } from './Steps/DataSource/DataSource'; import { defaultApplicationDataSource, defaultAwsServices, + defaultAzureServices, defaultInfraMetricsType, defaultLogsType, moduleRouteMap, @@ -32,6 +33,7 @@ import { import { APM_STEPS, AWS_MONITORING_STEPS, + AZURE_MONITORING_STEPS, getSteps, INFRASTRUCTURE_MONITORING_STEPS, LOGS_MANAGEMENT_STEPS, @@ -42,6 +44,7 @@ export enum ModulesMap { LogsManagement = 'LogsManagement', InfrastructureMonitoring = 'InfrastructureMonitoring', AwsMonitoring = 'AwsMonitoring', + AzureMonitoring = 'AzureMonitoring', } export interface ModuleProps { @@ -81,6 +84,12 @@ export const useCases = { desc: 'Monitor your traces, logs and metrics for AWS services like EC2, ECS, EKS etc.', }, + AzureMonitoring: { + id: ModulesMap.AzureMonitoring, + title: 'Azure Monitoring', + desc: + 'Monitor your traces, logs and metrics for Azure services like AKS, Container Apps, App Service etc.', + }, }; export default function Onboarding(): JSX.Element { @@ -172,6 +181,7 @@ export default function Onboarding(): JSX.Element { setSelectedModuleSteps(APM_STEPS); }; + // eslint-disable-next-line sonarjs/cognitive-complexity useEffect(() => { if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) { if (selectedDataSource) { @@ -194,6 +204,13 @@ export default function Onboarding(): JSX.Element { setSelectedModuleSteps(AWS_MONITORING_STEPS); updateSelectedDataSource(defaultAwsServices); } + } else if (selectedModule?.id === ModulesMap.AzureMonitoring) { + if (selectedDataSource) { + setModuleStepsBasedOnSelectedDataSource(selectedDataSource); + } else { + setSelectedModuleSteps(AZURE_MONITORING_STEPS); + updateSelectedDataSource(defaultAzureServices); + } } else if (selectedModule?.id === ModulesMap.APM) { handleAPMSteps(); @@ -240,18 +257,24 @@ export default function Onboarding(): JSX.Element { }; useEffect(() => { - if (location.pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING) { + const { pathname } = location; + + if (pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING) { handleModuleSelect(useCases.APM); updateSelectedDataSource(defaultApplicationDataSource); handleNextStep(); - } else if ( - location.pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING - ) { + } else if (pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING) { handleModuleSelect(useCases.InfrastructureMonitoring); handleNextStep(); - } else if (location.pathname === 
ROUTES.GET_STARTED_LOGS_MANAGEMENT) { + } else if (pathname === ROUTES.GET_STARTED_LOGS_MANAGEMENT) { handleModuleSelect(useCases.LogsManagement); handleNextStep(); + } else if (pathname === ROUTES.GET_STARTED_AWS_MONITORING) { + handleModuleSelect(useCases.AwsMonitoring); + handleNextStep(); + } else if (pathname === ROUTES.GET_STARTED_AZURE_MONITORING) { + handleModuleSelect(useCases.AzureMonitoring); + handleNextStep(); } // eslint-disable-next-line react-hooks/exhaustive-deps }, []); diff --git a/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx b/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx index ce38786dee..6954714342 100644 --- a/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx +++ b/frontend/src/container/OnboardingContainer/Steps/MarkdownStep/MarkdownStep.tsx @@ -2,6 +2,7 @@ import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer'; import { ApmDocFilePaths } from 'container/OnboardingContainer/constants/apmDocFilePaths'; import { AwsMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/awsMonitoringDocFilePaths'; +import { AzureMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/azureMonitoringDocFilePaths'; import { InfraMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/infraMonitoringDocFilePaths'; import { LogsManagementDocFilePaths } from 'container/OnboardingContainer/constants/logsManagementDocFilePaths'; import { @@ -69,6 +70,8 @@ export default function MarkdownStep(): JSX.Element { docFilePaths = InfraMonitoringDocFilePaths; } else if (selectedModule?.id === ModulesMap.AwsMonitoring) { docFilePaths = AwsMonitoringDocFilePaths; + } else if (selectedModule?.id === ModulesMap.AzureMonitoring) { + docFilePaths = AzureMonitoringDocFilePaths; } // @ts-ignore if (docFilePaths && docFilePaths[path]) { diff --git a/frontend/src/container/OnboardingContainer/constants/azureMonitoringDocFilePaths.ts b/frontend/src/container/OnboardingContainer/constants/azureMonitoringDocFilePaths.ts new file mode 100644 index 0000000000..5e867ef6ee --- /dev/null +++ b/frontend/src/container/OnboardingContainer/constants/azureMonitoringDocFilePaths.ts @@ -0,0 +1,82 @@ +import AzureMonitoring_azureAks_setupCentralCollector from '../Modules/AzureMonitoring/AKS/aks-installCentralCollector.md'; +import AzureMonitoring_azureAks_sendLogs from '../Modules/AzureMonitoring/AKS/aks-logs.md'; +import AzureMonitoring_azureAks_sendMetrics from '../Modules/AzureMonitoring/AKS/aks-metrics.md'; +import AzureMonitoring_azureAks_setupAzureEventsHub from '../Modules/AzureMonitoring/AKS/aks-setupEventsHub.md'; +import AzureMonitoring_azureAks_sendTraces from '../Modules/AzureMonitoring/AKS/aks-tracing.md'; +// Azure App Service +import AzureMonitoring_azureAppService_setupCentralCollector from '../Modules/AzureMonitoring/AppService/appService-installCentralCollector.md'; +import AzureMonitoring_azureAppService_sendLogs from '../Modules/AzureMonitoring/AppService/appService-logs.md'; +import AzureMonitoring_azureAppService_sendMetrics from '../Modules/AzureMonitoring/AppService/appService-metrics.md'; +import AzureMonitoring_azureAppService_setupAzureEventsHub from '../Modules/AzureMonitoring/AppService/appService-setupEventsHub.md'; +import AzureMonitoring_azureAppService_sendTraces from '../Modules/AzureMonitoring/AppService/appService-tracing.md'; +// Azure Blob Storage +import AzureMonitoring_azureBlobStorage_setupCentralCollector from 
'../Modules/AzureMonitoring/BlobStorage/blobStorage-installCentralCollector.md'; +import AzureMonitoring_azureBlobStorage_sendLogs from '../Modules/AzureMonitoring/BlobStorage/blobStorage-logs.md'; +import AzureMonitoring_azureBlobStorage_sendMetrics from '../Modules/AzureMonitoring/BlobStorage/blobStorage-metrics.md'; +import AzureMonitoring_azureBlobStorage_setupAzureEventsHub from '../Modules/AzureMonitoring/BlobStorage/blobStorage-setupEventsHub.md'; +// Azure Container Apps +import AzureMonitoring_azureContainerApps_setupCentralCollector from '../Modules/AzureMonitoring/ContainerApps/containerApps-installCentralCollector.md'; +import AzureMonitoring_azureContainerApps_sendLogs from '../Modules/AzureMonitoring/ContainerApps/containerApps-logs.md'; +import AzureMonitoring_azureContainerApps_sendMetrics from '../Modules/AzureMonitoring/ContainerApps/containerApps-metrics.md'; +import AzureMonitoring_azureContainerApps_setupAzureEventsHub from '../Modules/AzureMonitoring/ContainerApps/containerApps-setupEventsHub.md'; +import AzureMonitoring_azureContainerApps_sendTraces from '../Modules/AzureMonitoring/ContainerApps/containerApps-tracing.md'; +// Azure Functions +import AzureMonitoring_azureFunctions_setupCentralCollector from '../Modules/AzureMonitoring/Functions/functions-installCentralCollector.md'; +import AzureMonitoring_azureFunctions_sendLogs from '../Modules/AzureMonitoring/Functions/functions-logs.md'; +import AzureMonitoring_azureFunctions_sendMetrics from '../Modules/AzureMonitoring/Functions/functions-metrics.md'; +import AzureMonitoring_azureFunctions_setupAzureEventsHub from '../Modules/AzureMonitoring/Functions/functions-setupEventsHub.md'; +import AzureMonitoring_azureFunctions_sendTraces from '../Modules/AzureMonitoring/Functions/functions-tracing.md'; +// Azure SQL Database Metrics +import AzureMonitoring_azureSQLDatabaseMetrics_setupCentralCollector from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-installCentralCollector.md'; +import AzureMonitoring_azureSQLDatabaseMetrics_sendMetrics from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-metrics.md'; +import AzureMonitoring_azureSQLDatabaseMetrics_setupAzureEventsHub from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-setupEventsHub.md'; +import AzureMonitoring_azureVm_sendHostmetricsLogs from '../Modules/AzureMonitoring/Vm/vm-hostmetrics-and-logs.md'; +// Azure VM +import AzureMonitoring_azureVm_setupCentralCollector from '../Modules/AzureMonitoring/Vm/vm-installCentralCollector.md'; +import AzureMonitoring_azureVm_setupAzureEventsHub from '../Modules/AzureMonitoring/Vm/vm-setupEventsHub.md'; + +export const AzureMonitoringDocFilePaths = { + // Azure AKS + AzureMonitoring_azureAks_setupCentralCollector, + AzureMonitoring_azureAks_setupAzureEventsHub, + AzureMonitoring_azureAks_sendTraces, + AzureMonitoring_azureAks_sendLogs, + AzureMonitoring_azureAks_sendMetrics, + + // Azure Functions + AzureMonitoring_azureFunctions_setupCentralCollector, + AzureMonitoring_azureFunctions_setupAzureEventsHub, + AzureMonitoring_azureFunctions_sendTraces, + AzureMonitoring_azureFunctions_sendLogs, + AzureMonitoring_azureFunctions_sendMetrics, + + // Azure App Service + AzureMonitoring_azureAppService_setupCentralCollector, + AzureMonitoring_azureAppService_setupAzureEventsHub, + AzureMonitoring_azureAppService_sendTraces, + AzureMonitoring_azureAppService_sendLogs, + AzureMonitoring_azureAppService_sendMetrics, + + // Azure Container Apps + 
AzureMonitoring_azureContainerApps_setupCentralCollector, + AzureMonitoring_azureContainerApps_setupAzureEventsHub, + AzureMonitoring_azureContainerApps_sendTraces, + AzureMonitoring_azureContainerApps_sendLogs, + AzureMonitoring_azureContainerApps_sendMetrics, + + // Azure VM + AzureMonitoring_azureVm_setupCentralCollector, + AzureMonitoring_azureVm_setupAzureEventsHub, + AzureMonitoring_azureVm_sendHostmetricsLogs, + + // Azure SQL Database Metrics + AzureMonitoring_azureSQLDatabaseMetrics_setupCentralCollector, + AzureMonitoring_azureSQLDatabaseMetrics_setupAzureEventsHub, + AzureMonitoring_azureSQLDatabaseMetrics_sendMetrics, + + // Azure Blob Storage + AzureMonitoring_azureBlobStorage_setupCentralCollector, + AzureMonitoring_azureBlobStorage_setupAzureEventsHub, + AzureMonitoring_azureBlobStorage_sendLogs, + AzureMonitoring_azureBlobStorage_sendMetrics, +}; diff --git a/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx b/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx index 57b3a597d8..e4897cda50 100644 --- a/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx +++ b/frontend/src/container/OnboardingContainer/constants/stepsConfig.tsx @@ -35,6 +35,12 @@ export const stepsMap = { deployTaskDefinition: `deployTaskDefinition`, ecsSendLogsData: `ecsSendLogsData`, monitorDashboard: `monitorDashboard`, + setupCentralCollector: `setupCentralCollector`, + setupAzureEventsHub: `setupAzureEventsHub`, + sendTraces: `sendTraces`, + sendLogs: `sendLogs`, + sendMetrics: `sendMetrics`, + sendHostmetricsLogs: `sendHostmetricsLogs`, }; export const DataSourceStep: SelectedModuleStepProps = { @@ -201,3 +207,33 @@ export const MonitorDashboard: SelectedModuleStepProps = { title: 'Monitor using Dashboard ', component: , }; +export const SetupCentralCollectorStep: SelectedModuleStepProps = { + id: stepsMap.setupCentralCollector, + title: 'Setup Central Collector ', + component: , +}; +export const SetupAzureEventsHub: SelectedModuleStepProps = { + id: stepsMap.setupAzureEventsHub, + title: 'Setup EventsHub', + component: , +}; +export const SendTraces: SelectedModuleStepProps = { + id: stepsMap.sendTraces, + title: 'Send Traces', + component: , +}; +export const SendLogs: SelectedModuleStepProps = { + id: stepsMap.sendLogs, + title: 'Send Logs', + component: , +}; +export const SendMetrics: SelectedModuleStepProps = { + id: stepsMap.sendMetrics, + title: 'Send Metrics', + component: , +}; +export const SendHostmetricsLogs: SelectedModuleStepProps = { + id: stepsMap.sendHostmetricsLogs, + title: 'HostMetrics and Logging', + component: , +}; diff --git a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts index 7e5ba3b319..03f92c2a39 100644 --- a/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts +++ b/frontend/src/container/OnboardingContainer/utils/dataSourceUtils.ts @@ -8,6 +8,7 @@ export enum ModulesMap { LogsManagement = 'LogsManagement', InfrastructureMonitoring = 'InfrastructureMonitoring', AwsMonitoring = 'AwsMonitoring', + AzureMonitoring = 'AzureMonitoring', } export const frameworksMap = { @@ -82,6 +83,7 @@ export const frameworksMap = { LogsManagement: {}, InfrastructureMonitoring: {}, AwsMonitoring: {}, + AzureMonitoring: {}, }; export const defaultApplicationDataSource = { @@ -270,6 +272,50 @@ const supportedAwsServices = [ }, ]; +export const defaultAzureServices = { + name: 'VM', + id: 'azureVm', + imgURL: `/Logos/azure-vm.svg`, +}; + 
+const supportedAzureServices = [ + { + name: 'VM', + id: 'azureVm', + imgURL: `/Logos/azure-vm.svg`, + }, + { + name: 'App Service', + id: 'azureAppService', + imgURL: `/Logos/azure-app-service.svg`, + }, + { + name: 'AKS', + id: 'azureAks', + imgURL: `/Logos/azure-aks.svg`, + }, + { + name: 'Azure Functions', + id: 'azureFunctions', + imgURL: `/Logos/azure-functions.svg`, + }, + { + name: 'Azure Container Apps', + id: 'azureContainerApps', + imgURL: `/Logos/azure-container-apps.svg`, + }, + { + name: 'SQL Database Metrics', + id: 'azureSQLDatabaseMetrics', + imgURL: `/Logos/azure-sql-database-metrics.svg`, + }, + { + name: 'Azure Blob Storage', + id: 'azureBlobStorage', + imgURL: `/Logos/azure-blob-storage.svg`, + }, +]; + export const getDataSources = (module: ModuleProps): DataSourceType[] => { if (module.id === ModulesMap.APM) { return supportedLanguages; @@ -283,7 +329,11 @@ export const getDataSources = (module: ModuleProps): DataSourceType[] => { return supportedLogsTypes; } - return supportedAwsServices; + if (module.id === ModulesMap.AwsMonitoring) { + return supportedAwsServices; + } + + return supportedAzureServices; }; export const getSupportedFrameworks = ({ @@ -347,4 +397,5 @@ export const moduleRouteMap = { [ModulesMap.InfrastructureMonitoring]: ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING, [ModulesMap.AwsMonitoring]: ROUTES.GET_STARTED_AWS_MONITORING, + [ModulesMap.AzureMonitoring]: ROUTES.GET_STARTED_AZURE_MONITORING, }; diff --git a/frontend/src/container/OnboardingContainer/utils/getSteps.ts b/frontend/src/container/OnboardingContainer/utils/getSteps.ts index 4ad252fa89..94b2472b02 100644 --- a/frontend/src/container/OnboardingContainer/utils/getSteps.ts +++ b/frontend/src/container/OnboardingContainer/utils/getSteps.ts @@ -22,7 +22,13 @@ import { RestartOtelCollector, RunApplicationStep, SelectMethodStep, + SendHostmetricsLogs, + SendLogs, SendLogsCloudwatch, + SendMetrics, + SendTraces, + SetupAzureEventsHub, + SetupCentralCollectorStep, SetupDaemonService, SetupLogDrains, SetupOtelCollectorStep, @@ -57,6 +63,10 @@ export const INFRASTRUCTURE_MONITORING_STEPS: SelectedModuleStepProps[] = [ export const AWS_MONITORING_STEPS: SelectedModuleStepProps[] = [DataSourceStep]; +export const AZURE_MONITORING_STEPS: SelectedModuleStepProps[] = [ + DataSourceStep, +]; + export const getSteps = ({ selectedDataSource, }: GetStepsProps): SelectedModuleStepProps[] => { @@ -144,6 +154,70 @@ export const getSteps = ({ ]; case 'awsEks': return [DataSourceStep, SetupOtelCollectorStep, MonitorDashboard]; + case 'azureVm': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendHostmetricsLogs, + ]; + // eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureAks': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendTraces, + SendLogs, + SendMetrics, + ]; + // eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureAppService': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendTraces, + SendLogs, + SendMetrics, + ]; + // eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureFunctions': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendTraces, + SendLogs, + SendMetrics, + ]; + // eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureContainerApps': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendTraces, + SendLogs, + SendMetrics, + ]; + // 
eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureBlobStorage': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendLogs, + SendMetrics, + ]; + // eslint-disable-next-line sonarjs/no-duplicated-branches + case 'azureSQLDatabaseMetrics': + return [ + DataSourceStep, + SetupAzureEventsHub, + SetupCentralCollectorStep, + SendMetrics, + ]; default: return [DataSourceStep]; diff --git a/frontend/src/container/SideNav/SideNav.tsx b/frontend/src/container/SideNav/SideNav.tsx index 6cec0448b2..82697d78b0 100644 --- a/frontend/src/container/SideNav/SideNav.tsx +++ b/frontend/src/container/SideNav/SideNav.tsx @@ -152,9 +152,13 @@ function SideNav({ const { t } = useTranslation(''); + const licenseStatus: string = + licenseData?.payload?.licenses?.find((e: License) => e.isCurrent)?.status || + ''; + const isLicenseActive = - licenseData?.payload?.licenses?.find((e: License) => e.isCurrent)?.status === - LICENSE_PLAN_STATUS.VALID; + licenseStatus?.toLocaleLowerCase() === + LICENSE_PLAN_STATUS.VALID.toLocaleLowerCase(); const isEnterprise = licenseData?.payload?.licenses?.some( (license: License) => diff --git a/frontend/src/container/TopNav/DateTimeSelection/config.ts b/frontend/src/container/TopNav/DateTimeSelection/config.ts index 102fe00c43..b46c60bab0 100644 --- a/frontend/src/container/TopNav/DateTimeSelection/config.ts +++ b/frontend/src/container/TopNav/DateTimeSelection/config.ts @@ -112,6 +112,7 @@ export const routesToSkip = [ ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING, ROUTES.GET_STARTED_LOGS_MANAGEMENT, ROUTES.GET_STARTED_AWS_MONITORING, + ROUTES.GET_STARTED_AZURE_MONITORING, ROUTES.VERSION, ROUTES.ALL_DASHBOARD, ROUTES.ORG_SETTINGS, diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts index 7543e02a47..19a3e8c431 100644 --- a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts +++ b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts @@ -181,6 +181,7 @@ export const routesToSkip = [ ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING, ROUTES.GET_STARTED_LOGS_MANAGEMENT, ROUTES.GET_STARTED_AWS_MONITORING, + ROUTES.GET_STARTED_AZURE_MONITORING, ROUTES.VERSION, ROUTES.ALL_DASHBOARD, ROUTES.ORG_SETTINGS, diff --git a/frontend/src/container/TraceDetail/SelectedSpanDetails/Tags/Tag.tsx b/frontend/src/container/TraceDetail/SelectedSpanDetails/Tags/Tag.tsx index 293b6fc993..f913dd6cbb 100644 --- a/frontend/src/container/TraceDetail/SelectedSpanDetails/Tags/Tag.tsx +++ b/frontend/src/container/TraceDetail/SelectedSpanDetails/Tags/Tag.tsx @@ -1,3 +1,5 @@ +import './Tags.styles.scss'; + import { Tooltip } from 'antd'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { Fragment, useMemo } from 'react'; @@ -26,7 +28,12 @@ function Tag({ tags, onToggleHandler, setText }: TagProps): JSX.Element { {tags.key} - value}> + ( (state) => state.globalTime, ); + const { + tree, + firstSpanStartTime, + traceStartTime = minTime, + traceEndTime = maxTime, + } = props; + const { id: traceId } = useParams(); const isDarkMode = useIsDarkMode(); @@ -74,7 +79,7 @@ function SelectedSpanDetails(props: SelectedSpanDetailsProps): JSX.Element { ]; const onLogsHandler = (): void => { - const query = getTraceToLogsQuery(traceId, minTime, maxTime); + const query = getTraceToLogsQuery(traceId, traceStartTime, traceEndTime); history.push( `${ROUTES.LOGS_EXPLORER}?${createQueryParams({ @@ -140,10 +145,14 @@ function SelectedSpanDetails(props: SelectedSpanDetailsProps): 
JSX.Element { interface SelectedSpanDetailsProps { tree?: ITraceTree; firstSpanStartTime: number; + traceStartTime?: number; + traceEndTime?: number; } SelectedSpanDetails.defaultProps = { tree: undefined, + traceStartTime: undefined, + traceEndTime: undefined, }; export interface ModalText { diff --git a/frontend/src/container/TraceDetail/index.tsx b/frontend/src/container/TraceDetail/index.tsx index 4b333e0dad..568ed3c4f4 100644 --- a/frontend/src/container/TraceDetail/index.tsx +++ b/frontend/src/container/TraceDetail/index.tsx @@ -48,6 +48,12 @@ function TraceDetail({ response }: TraceDetailProps): JSX.Element { [response], ); + const traceStartTime = useMemo(() => response[0].startTimestampMillis, [ + response, + ]); + + const traceEndTime = useMemo(() => response[0].endTimestampMillis, [response]); + const urlQuery = useUrlQuery(); const [spanId] = useState(urlQuery.get('spanId')); @@ -260,6 +266,8 @@ function TraceDetail({ response }: TraceDetailProps): JSX.Element { = 0; i--) { const { values } = series[i]; for (let j = 0; j < values.length; j++) { @@ -84,6 +88,9 @@ function getStackedSeries(apiResponse: QueryData[]): QueryData[] { */ function getStackedSeriesQueryFormat(apiResponse: QueryData[]): QueryData[] { const series = cloneDeep(apiResponse); + if (!series) { + return apiResponse; + } for (let i = series.length - 2; i >= 0; i--) { const { values } = series[i]; @@ -102,9 +109,12 @@ function getStackedSeriesQueryFormat(apiResponse: QueryData[]): QueryData[] { function getStackedSeriesYAxis(apiResponse: QueryDataV3[]): QueryDataV3[] { const series = cloneDeep(apiResponse); + if (!series) { + return apiResponse; + } for (let i = 0; i < series.length; i++) { - series[i].series = getStackedSeriesQueryFormat(series[i].series); + series[i].series = getStackedSeriesQueryFormat(series[i].series || []); } return series; diff --git a/frontend/src/pages/TracesExplorer/Filter/SectionContent.tsx b/frontend/src/pages/TracesExplorer/Filter/SectionContent.tsx index 05fc7bb1f3..4cefaaeca0 100644 --- a/frontend/src/pages/TracesExplorer/Filter/SectionContent.tsx +++ b/frontend/src/pages/TracesExplorer/Filter/SectionContent.tsx @@ -4,7 +4,7 @@ import { Button, Card, Checkbox, Input, Tooltip } from 'antd'; import { CheckboxChangeEvent } from 'antd/es/checkbox'; import { ParaGraph } from 'container/Trace/Filters/Panel/PanelBody/Common/styles'; import useDebouncedFn from 'hooks/useDebouncedFunction'; -import { defaultTo, isEmpty } from 'lodash-es'; +import { isArray, isEmpty } from 'lodash-es'; import { ChangeEvent, Dispatch, @@ -17,6 +17,7 @@ import { import { addFilter, AllTraceFilterKeys, + convertToStringArr, FilterType, HandleRunProps, removeFilter, @@ -37,15 +38,14 @@ export function SectionBody(props: SectionBodyProps): JSX.Element { const [searchFilter, setSearchFilter] = useState(''); const [searchText, setSearchText] = useState(''); const [checkedItems, setCheckedItems] = useState( - defaultTo(selectedFilters?.[type]?.values as string[], []), + convertToStringArr(selectedFilters?.[type]?.values), ); const [results, setResults] = useState([]); const [isFetching, setFetching] = useState(false); useEffect( - () => - setCheckedItems(defaultTo(selectedFilters?.[type]?.values as string[], [])), + () => setCheckedItems(convertToStringArr(selectedFilters?.[type]?.values)), [selectedFilters, type], ); @@ -92,17 +92,21 @@ export function SectionBody(props: SectionBodyProps): JSX.Element { if (checked) { addFilter(type, newValue, setSelectedFilters, keys); setCheckedItems((prev) => { - if 
(!prev.includes(newValue)) { - prev.push(newValue); + const arr = prev || []; + if (isArray(arr) && !arr.includes(newValue)) { + arr.push(newValue); } - return prev; + return convertToStringArr(arr); }); } else if (checkedItems.length === 1) { handleRun({ clearByType: type }); setCheckedItems([]); } else { removeFilter(type, newValue, setSelectedFilters, keys); - setCheckedItems((prev) => prev.filter((item) => item !== newValue)); + setCheckedItems((prev) => { + const prevValue = convertToStringArr(prev); + return prevValue.filter((item) => item !== newValue); + }); } }; diff --git a/frontend/src/pages/TracesExplorer/Filter/filterUtils.ts b/frontend/src/pages/TracesExplorer/Filter/filterUtils.ts index 86b52fdbb0..88f604a0dc 100644 --- a/frontend/src/pages/TracesExplorer/Filter/filterUtils.ts +++ b/frontend/src/pages/TracesExplorer/Filter/filterUtils.ts @@ -1,6 +1,5 @@ /* eslint-disable react-hooks/exhaustive-deps */ import { getAttributesValues } from 'api/queryBuilder/getAttributesValues'; -import { isArray } from 'lodash-es'; import { Dispatch, SetStateAction, useEffect, useState } from 'react'; import { BaseAutocompleteData, @@ -41,6 +40,18 @@ export type FilterType = Record< { values: string[] | string; keys: BaseAutocompleteData } >; +export function convertToStringArr( + value: string | string[] | undefined, +): string[] { + if (value) { + if (typeof value === 'string') { + return [value]; + } + return value; + } + return []; +} + export const addFilter = ( filterType: AllTraceFilterKeys, value: string, @@ -62,28 +73,36 @@ export const addFilter = ( 'durationNano', ].includes(filterType); + // Convert value to string array + const valueArray = convertToStringArr(value); + // If previous filters are undefined, initialize them if (!prevFilters) { return ({ - [filterType]: { values: isDuration ? value : [value], keys }, + [filterType]: { values: isDuration ? value : valueArray, keys }, } as unknown) as FilterType; } + // If the filter type doesn't exist, initialize it if (!prevFilters[filterType]?.values.length) { return { ...prevFilters, - [filterType]: { values: isDuration ? value : [value], keys }, + [filterType]: { values: isDuration ? value : valueArray, keys }, }; } + // If the value already exists, don't add it again - if (prevFilters[filterType].values.includes(value)) { + if (convertToStringArr(prevFilters[filterType].values).includes(value)) { return prevFilters; } + // Otherwise, add the value to the existing array return { ...prevFilters, [filterType]: { - values: isDuration ? value : [...prevFilters[filterType].values, value], + values: isDuration + ? value + : [...convertToStringArr(prevFilters[filterType].values), value], keys, }, }; @@ -110,10 +129,8 @@ export const removeFilter = ( return prevFilters; } - const prevValue = prevFilters[filterType]?.values; - const updatedValues = !isArray(prevValue) - ? 
prevValue - : prevValue?.filter((item: any) => item !== value); + const prevValue = convertToStringArr(prevFilters[filterType]?.values); + const updatedValues = prevValue.filter((item: any) => item !== value); if (updatedValues.length === 0) { const { [filterType]: item, ...remainingFilters } = prevFilters; diff --git a/frontend/src/types/api/trace/getTraceItem.ts b/frontend/src/types/api/trace/getTraceItem.ts index 4a6def3896..5f94e0f6d8 100644 --- a/frontend/src/types/api/trace/getTraceItem.ts +++ b/frontend/src/types/api/trace/getTraceItem.ts @@ -15,6 +15,8 @@ export interface PayloadProps { segmentID: string; columns: string[]; isSubTree: boolean; + startTimestampMillis: number; + endTimestampMillis: number; }; } diff --git a/frontend/src/utils/permission/index.ts b/frontend/src/utils/permission/index.ts index 44757e3508..8af1c68f3f 100644 --- a/frontend/src/utils/permission/index.ts +++ b/frontend/src/utils/permission/index.ts @@ -86,6 +86,7 @@ export const routePermission: Record = { GET_STARTED_INFRASTRUCTURE_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'], GET_STARTED_LOGS_MANAGEMENT: ['ADMIN', 'EDITOR', 'VIEWER'], GET_STARTED_AWS_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'], + GET_STARTED_AZURE_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'], WORKSPACE_LOCKED: ['ADMIN', 'EDITOR', 'VIEWER'], BILLING: ['ADMIN', 'EDITOR', 'VIEWER'], SUPPORT: ['ADMIN', 'EDITOR', 'VIEWER'], diff --git a/frontend/webpack.config.js b/frontend/webpack.config.js index 65883594bb..2e5c0d0f4e 100644 --- a/frontend/webpack.config.js +++ b/frontend/webpack.config.js @@ -22,6 +22,7 @@ const plugins = [ template: 'src/index.html.ejs', INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, + POSTHOG_KEY: process.env.POSTHOG_KEY, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, SENTRY_ORG: process.env.SENTRY_ORG, @@ -39,6 +40,7 @@ const plugins = [ FRONTEND_API_ENDPOINT: process.env.FRONTEND_API_ENDPOINT, INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, + POSTHOG_KEY: process.env.POSTHOG_KEY, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, SENTRY_ORG: process.env.SENTRY_ORG, diff --git a/frontend/webpack.config.prod.js b/frontend/webpack.config.prod.js index 9b17d345c9..87ef8b7143 100644 --- a/frontend/webpack.config.prod.js +++ b/frontend/webpack.config.prod.js @@ -27,6 +27,7 @@ const plugins = [ template: 'src/index.html.ejs', INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, + POSTHOG_KEY: process.env.POSTHOG_KEY, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, SENTRY_ORG: process.env.SENTRY_ORG, @@ -49,6 +50,7 @@ const plugins = [ FRONTEND_API_ENDPOINT: process.env.FRONTEND_API_ENDPOINT, INTERCOM_APP_ID: process.env.INTERCOM_APP_ID, SEGMENT_ID: process.env.SEGMENT_ID, + POSTHOG_KEY: process.env.POSTHOG_KEY, CLARITY_PROJECT_ID: process.env.CLARITY_PROJECT_ID, SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN, SENTRY_ORG: process.env.SENTRY_ORG, diff --git a/frontend/yarn.lock b/frontend/yarn.lock index c717a16507..295ae66012 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -8776,6 +8776,11 @@ fb-watchman@^2.0.0: dependencies: bser "2.1.1" +fflate@^0.4.8: + version "0.4.8" + resolved "https://registry.yarnpkg.com/fflate/-/fflate-0.4.8.tgz#f90b82aefbd8ac174213abb338bd7ef848f0f5ae" + integrity 
sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA== + figures@^3.0.0: version "3.2.0" resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" @@ -13700,6 +13705,19 @@ postcss@8.4.38, postcss@^8.0.0, postcss@^8.1.1, postcss@^8.3.7, postcss@^8.4.21, picocolors "^1.0.0" source-map-js "^1.2.0" +posthog-js@1.140.1: + version "1.140.1" + resolved "https://registry.yarnpkg.com/posthog-js/-/posthog-js-1.140.1.tgz#34efc0d326fa5fcf7950106f350fb4f0e73b2da6" + integrity sha512-UeKuAtQSvbzmTCzNVaauku8F194EYwAP33WrRrWZlDlMNbMy7GKcZOgKbr7jZqnha7FlVlHrWk+Rpyr1zCFhPQ== + dependencies: + fflate "^0.4.8" + preact "^10.19.3" + +preact@^10.19.3: + version "10.22.0" + resolved "https://registry.yarnpkg.com/preact/-/preact-10.22.0.tgz#a50f38006ae438d255e2631cbdaf7488e6dd4e16" + integrity sha512-RRurnSjJPj4rp5K6XoP45Ui33ncb7e4H7WiOHVpjbkvqvA3U+N8Z6Qbo0AE6leGYBV66n8EhEaFixvIu3SkxFw== + prelude-ls@^1.2.1: version "1.2.1" resolved "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz" diff --git a/go.mod b/go.mod index 4a52cead08..fbaea1ba45 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f github.com/prometheus/common v0.54.0 github.com/prometheus/prometheus v2.5.0+incompatible github.com/rs/cors v1.11.0 diff --git a/go.sum b/go.sum index cb3f0f902e..04162eb8bd 100644 --- a/go.sum +++ b/go.sum @@ -137,7 +137,6 @@ github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoE github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -625,8 +624,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f h1:h0p1aZ9F5d6IXOygysob3g4B07b+HuVUQC0VJKD8wA4= -github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f/go.mod h1:oa2sAs9tGai3VldabTV0eWejt/O4/OOD7azP8GaikqU= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -673,7 +670,6 @@ github.com/russellhaering/gosaml2 v0.9.0 h1:CNMnH42z/GirrKjdmNrSS6bAAs47F9bPdl4P github.com/russellhaering/gosaml2 v0.9.0/go.mod 
h1:byViER/1YPUa0Puj9ROZblpoq2jsE7h/CJmitzX0geU= github.com/russellhaering/goxmldsig v1.2.0 h1:Y6GTTc9Un5hCxSzVz4UIWQ/zuVwDvzJk80guqzwx6Vg= github.com/russellhaering/goxmldsig v1.2.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -697,7 +693,6 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= @@ -747,7 +742,6 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= diff --git a/pkg/query-service/app/clickhouseReader/reader.go b/pkg/query-service/app/clickhouseReader/reader.go index 320f7fe5b3..20eb11d479 100644 --- a/pkg/query-service/app/clickhouseReader/reader.go +++ b/pkg/query-service/app/clickhouseReader/reader.go @@ -1924,6 +1924,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, userEmail, true, false) } + var startTime, endTime, durationNano uint64 var searchScanResponses []model.SearchSpanDBResponseItem query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable) @@ -1954,6 +1955,15 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc easyjson.Unmarshal([]byte(item.Model), &jsonItem) jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000) searchSpanResponses = append(searchSpanResponses, jsonItem) + if startTime == 0 || jsonItem.TimeUnixNano < startTime { + startTime = jsonItem.TimeUnixNano + } + if endTime == 0 || jsonItem.TimeUnixNano > endTime { + endTime = jsonItem.TimeUnixNano + } + if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano { + durationNano = uint64(jsonItem.DurationNano) + } } end = time.Now() zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start))) @@ -1983,6 +1993,9 @@ func (r *ClickHouseReader) 
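// The SearchTraces hunk below tracks the earliest span timestamp, the latest
// span timestamp, and the longest span duration while unmarshalling, then pads
// the reported trace window by one max-duration on each side. A minimal
// standalone sketch of that bookkeeping, using a hypothetical span type
// (TimeUnixNano holds milliseconds here, mirroring the /1000000 conversion in
// the hunk):
package sketch

// spanItem is a stand-in for the repo's span model; only the two fields the
// window computation needs are included.
type spanItem struct {
	TimeUnixNano uint64 // milliseconds, despite the name
	DurationNano int64  // nanoseconds
}

// traceWindowMillis returns the padded trace start/end in milliseconds, as
// SearchTraces now reports via StartTimestampMillis/EndTimestampMillis.
func traceWindowMillis(spans []spanItem) (startMs, endMs uint64) {
	var start, end, maxDurNano uint64
	for _, s := range spans {
		if start == 0 || s.TimeUnixNano < start {
			start = s.TimeUnixNano
		}
		if end == 0 || s.TimeUnixNano > end {
			end = s.TimeUnixNano
		}
		if d := uint64(s.DurationNano); d > maxDurNano {
			maxDurNano = d
		}
	}
	return start - maxDurNano/1000000, end + maxDurNano/1000000
}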
SearchTraces(ctx context.Context, params *model.Searc } } + searchSpansResult[0].StartTimestampMillis = startTime - (durationNano/1000000) + searchSpansResult[0].EndTimestampMillis = endTime + (durationNano/1000000) + return &searchSpansResult, nil } @@ -3219,7 +3232,7 @@ func (r *ClickHouseReader) GetSamplesInfoInLastHeartBeatInterval(ctx context.Con var totalSamples uint64 - queryStr := fmt.Sprintf("select count() from %s.%s where metric_name not like 'signoz_%%' and timestamp_ms > toUnixTimestamp(now()-toIntervalMinute(%d))*1000;", signozMetricDBName, signozSampleTableName, int(interval.Minutes())) + queryStr := fmt.Sprintf("select count() from %s.%s where metric_name not like 'signoz_%%' and unix_milli > toUnixTimestamp(now()-toIntervalMinute(%d))*1000;", signozMetricDBName, signozSampleTableName, int(interval.Minutes())) r.db.QueryRow(ctx, queryStr).Scan(&totalSamples) @@ -4431,6 +4444,21 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([ } groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float()) } + case **float64, **float32: + val := reflect.ValueOf(v) + if val.IsValid() && !val.IsNil() && !val.Elem().IsNil() { + isValidPoint = true + value := reflect.ValueOf(v).Elem().Elem().Float() + if _, ok := constants.ReservedColumnTargetAliases[colName]; ok || countOfNumberCols == 1 { + point.Value = value + } else { + groupBy = append(groupBy, fmt.Sprintf("%v", value)) + if _, ok := groupAttributes[colName]; !ok { + groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", value)}) + } + groupAttributes[colName] = fmt.Sprintf("%v", value) + } + } case *uint, *uint8, *uint64, *uint16, *uint32: isValidPoint = true if _, ok := constants.ReservedColumnTargetAliases[colName]; ok || countOfNumberCols == 1 { @@ -4442,6 +4470,21 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([ } groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()) } + case **uint, **uint8, **uint64, **uint16, **uint32: + val := reflect.ValueOf(v) + if val.IsValid() && !val.IsNil() && !val.Elem().IsNil() { + isValidPoint = true + value := reflect.ValueOf(v).Elem().Elem().Uint() + if _, ok := constants.ReservedColumnTargetAliases[colName]; ok || countOfNumberCols == 1 { + point.Value = float64(value) + } else { + groupBy = append(groupBy, fmt.Sprintf("%v", value)) + if _, ok := groupAttributes[colName]; !ok { + groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", value)}) + } + groupAttributes[colName] = fmt.Sprintf("%v", value) + } + } case *int, *int8, *int16, *int32, *int64: isValidPoint = true if _, ok := constants.ReservedColumnTargetAliases[colName]; ok || countOfNumberCols == 1 { @@ -4453,6 +4496,21 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([ } groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()) } + case **int, **int8, **int16, **int32, **int64: + val := reflect.ValueOf(v) + if val.IsValid() && !val.IsNil() && !val.Elem().IsNil() { + isValidPoint = true + value := reflect.ValueOf(v).Elem().Elem().Int() + if _, ok := constants.ReservedColumnTargetAliases[colName]; ok || countOfNumberCols == 1 { + point.Value = float64(value) + } else { + groupBy = append(groupBy, fmt.Sprintf("%v", value)) + if _, ok := groupAttributes[colName]; !ok { + groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", value)}) + } + 
groupAttributes[colName] = fmt.Sprintf("%v", value) + } + } case *bool: groupBy = append(groupBy, fmt.Sprintf("%v", *v)) if _, ok := groupAttributes[colName]; !ok { diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index 1f0769bb08..d6c91558a5 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -76,7 +76,6 @@ type APIHandler struct { querier interfaces.Querier querierV2 interfaces.Querier queryBuilder *queryBuilder.QueryBuilder - preferDelta bool preferSpanMetrics bool // temporalityMap is a map of metric name to temporality @@ -106,7 +105,6 @@ type APIHandlerOpts struct { SkipConfig *model.SkipConfig - PerferDelta bool PreferSpanMetrics bool MaxIdleConns int @@ -166,7 +164,6 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) { reader: opts.Reader, appDao: opts.AppDao, skipConfig: opts.SkipConfig, - preferDelta: opts.PerferDelta, preferSpanMetrics: opts.PreferSpanMetrics, temporalityMap: make(map[string]map[v3.Temporality]bool), maxIdleConns: opts.MaxIdleConns, @@ -3016,6 +3013,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request) RespondError(w, apiErrorObj, nil) return } + queryRangeParams.Version = "v3" aH.Respond(w, queryRangeParams) } @@ -3070,6 +3068,14 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que postprocess.FillGaps(result, queryRangeParams) } + if queryRangeParams.CompositeQuery.PanelType == v3.PanelTypeTable && queryRangeParams.FormatForWeb { + if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL { + result = postprocess.TransformToTableForClickHouseQueries(result) + } else if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { + result = postprocess.TransformToTableForBuilderQueries(result, queryRangeParams) + } + } + resp := v3.QueryRangeResponse{ Result: result, } @@ -3318,8 +3324,10 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que } if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder { - result, err = postprocess.PostProcessResult(result, queryRangeParams) + } else if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL && + queryRangeParams.CompositeQuery.PanelType == v3.PanelTypeTable && queryRangeParams.FormatForWeb { + result = postprocess.TransformToTableForClickHouseQueries(result) } if err != nil { @@ -3343,6 +3351,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) { RespondError(w, apiErrorObj, nil) return } + queryRangeParams.Version = "v4" // add temporality for each metric temporalityErr := aH.populateTemporality(r.Context(), queryRangeParams) diff --git a/pkg/query-service/app/server.go b/pkg/query-service/app/server.go index 92b879fcc9..2260045f4d 100644 --- a/pkg/query-service/app/server.go +++ b/pkg/query-service/app/server.go @@ -55,7 +55,6 @@ type ServerOptions struct { // alert specific params DisableRules bool RuleRepoURL string - PreferDelta bool PreferSpanMetrics bool MaxIdleConns int MaxOpenConns int @@ -172,7 +171,6 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) { apiHandler, err := NewAPIHandler(APIHandlerOpts{ Reader: reader, SkipConfig: skipConfig, - PerferDelta: serverOptions.PreferDelta, PreferSpanMetrics: serverOptions.PreferSpanMetrics, MaxIdleConns: serverOptions.MaxIdleConns, MaxOpenConns: serverOptions.MaxOpenConns, diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go index 793ce25bf2..3063e07b12 100644 --- 
a/pkg/query-service/main.go +++ b/pkg/query-service/main.go @@ -37,7 +37,6 @@ func main() { var ruleRepoURL, cacheConfigPath, fluxInterval string var cluster string - var preferDelta bool var preferSpanMetrics bool var maxIdleConns int @@ -47,11 +46,10 @@ func main() { flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") - flag.BoolVar(&preferDelta, "prefer-delta", false, "(prefer delta over cumulative metrics)") flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)") flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") - flag.StringVar(&fluxInterval, "flux-interval", "5m", "(cache config to use)") + flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)") flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") // Allow using the consistent naming with the signoz collector flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')") @@ -71,7 +69,6 @@ func main() { HTTPHostPort: constants.HTTPHostPort, PromConfigPath: promConfigPath, SkipTopLvlOpsPath: skipTopLvlOpsPath, - PreferDelta: preferDelta, PreferSpanMetrics: preferSpanMetrics, PrivateHostPort: constants.PrivateHostPort, DisableRules: disableRules, diff --git a/pkg/query-service/model/response.go b/pkg/query-service/model/response.go index 7a3d948ebb..5ad5ea54ef 100644 --- a/pkg/query-service/model/response.go +++ b/pkg/query-service/model/response.go @@ -212,9 +212,11 @@ type ServiceOverviewItem struct { } type SearchSpansResult struct { - Columns []string `json:"columns"` - Events [][]interface{} `json:"events"` - IsSubTree bool `json:"isSubTree"` + StartTimestampMillis uint64 `json:"startTimestampMillis"` + EndTimestampMillis uint64 `json:"endTimestampMillis"` + Columns []string `json:"columns"` + Events [][]interface{} `json:"events"` + IsSubTree bool `json:"isSubTree"` } type GetFilterSpansResponseItem struct { diff --git a/pkg/query-service/model/v3/v3.go b/pkg/query-service/model/v3/v3.go index 2a12c8e1fa..7e6daa3751 100644 --- a/pkg/query-service/model/v3/v3.go +++ b/pkg/query-service/model/v3/v3.go @@ -354,6 +354,8 @@ type QueryRangeParamsV3 struct { CompositeQuery *CompositeQuery `json:"compositeQuery"` Variables map[string]interface{} `json:"variables,omitempty"` NoCache bool `json:"noCache"` + Version string `json:"-"` + FormatForWeb bool `json:"formatForWeb,omitempty"` } type PromQuery struct { @@ -986,10 +988,24 @@ type QueryRangeResponse struct { Result []*Result `json:"result"` } +type TableColumn struct { + Name string `json:"name"` +} + +type TableRow struct { + Data []interface{} `json:"data"` +} + +type Table struct { + Columns []*TableColumn `json:"columns"` + Rows []*TableRow `json:"rows"` +} + type Result struct { - QueryName string `json:"queryName"` - Series []*Series `json:"series"` - List []*Row `json:"list"` + QueryName string `json:"queryName,omitempty"` + Series []*Series `json:"series,omitempty"` + List []*Row `json:"list,omitempty"` + Table *Table `json:"table,omitempty"` } type 
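// Result now optionally carries a Table (used when FormatForWeb is set for
// table panels), and the new omitempty tags keep unused shapes out of the
// payload. A standalone sketch of the resulting wire format, using mirror
// structs rather than the repo's own types:
package main

import (
	"encoding/json"
	"fmt"
)

type TableColumn struct {
	Name string `json:"name"`
}

type TableRow struct {
	Data []interface{} `json:"data"`
}

type Table struct {
	Columns []*TableColumn `json:"columns"`
	Rows    []*TableRow    `json:"rows"`
}

type Result struct {
	QueryName string `json:"queryName,omitempty"`
	Table     *Table `json:"table,omitempty"`
}

func main() {
	res := Result{
		Table: &Table{
			Columns: []*TableColumn{{Name: "service"}, {Name: "A"}},
			Rows:    []*TableRow{{Data: []interface{}{"frontend", 10.0}}},
		},
	}
	out, _ := json.Marshal(res)
	fmt.Println(string(out))
	// prints: {"table":{"columns":[{"name":"service"},{"name":"A"}],"rows":[{"data":["frontend",10]}]}}
}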
LogsLiveTailClient struct { diff --git a/pkg/query-service/postprocess/process.go b/pkg/query-service/postprocess/process.go index fc35b404de..1f9ace33eb 100644 --- a/pkg/query-service/postprocess/process.go +++ b/pkg/query-service/postprocess/process.go @@ -86,6 +86,13 @@ func PostProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam if queryRangeParams.CompositeQuery.FillGaps { FillGaps(result, queryRangeParams) } + + if queryRangeParams.FormatForWeb && + queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder && + queryRangeParams.CompositeQuery.PanelType == v3.PanelTypeTable { + result = TransformToTableForBuilderQueries(result, queryRangeParams) + } + return result, nil } diff --git a/pkg/query-service/postprocess/table.go b/pkg/query-service/postprocess/table.go new file mode 100644 index 0000000000..1599bf37be --- /dev/null +++ b/pkg/query-service/postprocess/table.go @@ -0,0 +1,299 @@ +package postprocess + +import ( + "fmt" + "sort" + "strings" + + "go.signoz.io/signoz/pkg/query-service/constants" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func getAutoColNameForQuery(queryName string, params *v3.QueryRangeParamsV3) string { + q := params.CompositeQuery.BuilderQueries[queryName] + if q.DataSource == v3.DataSourceTraces || q.DataSource == v3.DataSourceLogs { + if q.AggregateAttribute.Key != "" { + return fmt.Sprintf("%s(%s)", q.AggregateOperator, q.AggregateAttribute.Key) + } + return string(q.AggregateOperator) + } else if q.DataSource == v3.DataSourceMetrics { + if q.SpaceAggregation != "" && params.Version == "v4" { + return fmt.Sprintf("%s(%s)", q.SpaceAggregation, q.AggregateAttribute.Key) + } + return fmt.Sprintf("%s(%s)", q.AggregateOperator, q.AggregateAttribute.Key) + } + return queryName +} + +func TransformToTableForBuilderQueries(results []*v3.Result, params *v3.QueryRangeParamsV3) []*v3.Result { + if len(results) == 0 { + return []*v3.Result{} + } + + // Sort results by QueryName + sort.Slice(results, func(i, j int) bool { + return results[i].QueryName < results[j].QueryName + }) + + // Create a map to store all unique labels + seen := make(map[string]struct{}) + labelKeys := []string{} + for _, result := range results { + for _, series := range result.Series { + for _, labels := range series.LabelsArray { + for key := range labels { + if _, ok := seen[key]; !ok { + seen[key] = struct{}{} + labelKeys = append(labelKeys, key) + } + } + } + } + } + + // Create columns + // There will be one column for each label key and one column for each query name + columns := make([]*v3.TableColumn, 0, len(labelKeys)+len(results)) + for _, key := range labelKeys { + columns = append(columns, &v3.TableColumn{Name: key}) + } + for _, result := range results { + columns = append(columns, &v3.TableColumn{Name: result.QueryName}) + } + + // Create a map to store unique rows + rowMap := make(map[string]*v3.TableRow) + + for _, result := range results { + for _, series := range result.Series { + if len(series.Points) == 0 { + continue + } + + // Create a key for the row based on labels + var keyParts []string + rowData := make([]interface{}, len(columns)) + for i, key := range labelKeys { + value := "n/a" + for _, labels := range series.LabelsArray { + if v, ok := labels[key]; ok { + value = v + break + } + } + keyParts = append(keyParts, fmt.Sprintf("%s=%s", key, value)) + rowData[i] = value + } + rowKey := strings.Join(keyParts, ",") + + // Get or create the row + row, ok := rowMap[rowKey] + if !ok { + row = &v3.TableRow{Data: rowData} + 
rowMap[rowKey] = row + } + + // Add the value for this query + for i, col := range columns { + if col.Name == result.QueryName { + row.Data[i] = series.Points[0].Value + break + } + } + } + } + + // Convert rowMap to a slice of TableRows + rows := make([]*v3.TableRow, 0, len(rowMap)) + for _, row := range rowMap { + for i, value := range row.Data { + if value == nil { + row.Data[i] = "n/a" + } + } + rows = append(rows, row) + } + + // Get sorted query names + queryNames := make([]string, 0, len(params.CompositeQuery.BuilderQueries)) + for queryName := range params.CompositeQuery.BuilderQueries { + queryNames = append(queryNames, queryName) + } + sort.Strings(queryNames) + + // Sort rows based on OrderBy from BuilderQueries + sortRows(rows, columns, params.CompositeQuery.BuilderQueries, queryNames) + + for _, column := range columns { + if _, exists := params.CompositeQuery.BuilderQueries[column.Name]; exists { + column.Name = getAutoColNameForQuery(column.Name, params) + } + } + + // Create the final result + tableResult := v3.Result{ + Table: &v3.Table{ + Columns: columns, + Rows: rows, + }, + } + + return []*v3.Result{&tableResult} +} + +func sortRows(rows []*v3.TableRow, columns []*v3.TableColumn, builderQueries map[string]*v3.BuilderQuery, queryNames []string) { + sort.SliceStable(rows, func(i, j int) bool { + for _, queryName := range queryNames { + query := builderQueries[queryName] + orderByList := query.OrderBy + if len(orderByList) == 0 { + // If no orderBy is specified, sort by value in descending order + orderByList = []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}} + } + for _, orderBy := range orderByList { + name := orderBy.ColumnName + if name == constants.SigNozOrderByValue { + name = queryName + } + colIndex := -1 + for k, col := range columns { + if col.Name == name { + colIndex = k + break + } + } + if colIndex == -1 { + continue + } + + valI := rows[i].Data[colIndex] + valJ := rows[j].Data[colIndex] + + // Handle "n/a" values + if valI == "n/a" && valJ == "n/a" { + continue + } + + // Compare based on the data type + switch v := valI.(type) { + case float64: + switch w := valJ.(type) { + case float64: + if v != w { + return (v < w) == (orderBy.Order == "asc") + } + default: + // For any other type, sort float64 first + return orderBy.Order == "asc" + } + case string: + switch w := valJ.(type) { + case float64: + // If types are different, sort numbers before strings + return orderBy.Order != "asc" + case string: + if v != w { + return (v < w) == (orderBy.Order == "asc") + } + default: + // For any other type, sort strings before bools + return orderBy.Order == "asc" + } + case bool: + switch w := valJ.(type) { + case float64, string: + // If types are different, sort bools after numbers and strings + return orderBy.Order != "asc" + case bool: + if v != w { + return (!v && w) == (orderBy.Order == "asc") + } + } + } + } + } + return false + }) +} + +func TransformToTableForClickHouseQueries(results []*v3.Result) []*v3.Result { + if len(results) == 0 { + return []*v3.Result{} + } + + // Sort results by QueryName + sort.Slice(results, func(i, j int) bool { + return results[i].QueryName < results[j].QueryName + }) + + // Create a map to store all unique labels + seen := make(map[string]struct{}) + labelKeys := []string{} + for _, result := range results { + for _, series := range result.Series { + for _, labels := range series.LabelsArray { + for key := range labels { + if _, ok := seen[key]; !ok { + seen[key] = struct{}{} + labelKeys = 
append(labelKeys, key) + } + } + } + } + } + + // Create columns + // Why don't we have a column for each query name? + // Because we don't know if the query is an aggregation query or a non-aggregation query + // So we create a column for each query name that has at least one point + columns := make([]*v3.TableColumn, 0) + for _, key := range labelKeys { + columns = append(columns, &v3.TableColumn{Name: key}) + } + for _, result := range results { + if len(result.Series) > 0 && len(result.Series[0].Points) > 0 { + columns = append(columns, &v3.TableColumn{Name: result.QueryName}) + } + } + + rows := make([]*v3.TableRow, 0) + for _, result := range results { + for _, series := range result.Series { + + // Create a key for the row based on labels + rowData := make([]interface{}, len(columns)) + for i, key := range labelKeys { + value := "n/a" + for _, labels := range series.LabelsArray { + if v, ok := labels[key]; ok { + value = v + break + } + } + rowData[i] = value + } + + // Get or create the row + row := &v3.TableRow{Data: rowData} + + // Add the value for this query + for i, col := range columns { + if col.Name == result.QueryName && len(series.Points) > 0 { + row.Data[i] = series.Points[0].Value + break + } + } + rows = append(rows, row) + } + } + + // Create the final result + tableResult := v3.Result{ + Table: &v3.Table{ + Columns: columns, + Rows: rows, + }, + } + + return []*v3.Result{&tableResult} +} diff --git a/pkg/query-service/postprocess/table_test.go b/pkg/query-service/postprocess/table_test.go new file mode 100644 index 0000000000..6e8f588a5f --- /dev/null +++ b/pkg/query-service/postprocess/table_test.go @@ -0,0 +1,668 @@ +package postprocess + +import ( + "encoding/json" + "reflect" + "testing" + + "go.signoz.io/signoz/pkg/query-service/constants" + v3 "go.signoz.io/signoz/pkg/query-service/model/v3" +) + +func TestSortRows(t *testing.T) { + tests := []struct { + name string + rows []*v3.TableRow + columns []*v3.TableColumn + builderQueries map[string]*v3.BuilderQuery + queryNames []string + expected []*v3.TableRow + }{ + { + name: "Sort by single numeric query, ascending order", + rows: []*v3.TableRow{ + {Data: []interface{}{"service2", 20.0}}, + {Data: []interface{}{"service1", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "asc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0}}, + {Data: []interface{}{"service2", 20.0}}, + {Data: []interface{}{"service3", 30.0}}, + }, + }, + { + name: "Sort by single numeric query, descending order", + rows: []*v3.TableRow{ + {Data: []interface{}{"service2", 20.0}}, + {Data: []interface{}{"service1", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service3", 30.0}}, + {Data: []interface{}{"service2", 20.0}}, + {Data: []interface{}{"service1", 10.0}}, + }, + }, + { + name: "Sort by single string query, ascending order", + rows: []*v3.TableRow{ + {Data: []interface{}{"service2", "b"}}, + {Data: []interface{}{"service1", "c"}}, + {Data: []interface{}{"service3", "a"}}, 
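// Note on these sortRows fixtures: cells are compared by type as well as by
// value. Numbers order among themselves, strings compare byte-wise (so
// "Apple" < "Cherry" < "banana"), and when the two cells have different types
// the convention in sortRows above is numbers before strings before bools in
// ascending order, with the preference flipping for descending. Two "n/a"
// cells compare equal and fall through to the next orderBy key. The n/a and
// mixed-type cases below pin down exactly that behavior.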
+ }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service3", "a"}}, + {Data: []interface{}{"service2", "b"}}, + {Data: []interface{}{"service1", "c"}}, + }, + }, + { + name: "Sort with n/a values", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0, "n/a"}}, + {Data: []interface{}{"service2", "n/a", 15.0}}, + {Data: []interface{}{"service3", 30.0, 25.0}}, + {Data: []interface{}{"service4", "n/a", "n/a"}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + {Name: "B"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "asc"}}}, + "B": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}}, + }, + queryNames: []string{"A", "B"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0, "n/a"}}, + {Data: []interface{}{"service3", 30.0, 25.0}}, + {Data: []interface{}{"service4", "n/a", "n/a"}}, + {Data: []interface{}{"service2", "n/a", 15.0}}, + }, + }, + { + name: "Sort with different data types", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", "string", 10.0, true}}, + {Data: []interface{}{"service2", 20.0, "string", false}}, + {Data: []interface{}{"service3", true, 30.0, "string"}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + {Name: "B"}, + {Name: "C"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "asc"}}}, + "B": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}}, + "C": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "asc"}}}, + }, + queryNames: []string{"A", "B", "C"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service2", 20.0, "string", false}}, + {Data: []interface{}{"service1", "string", 10.0, true}}, + {Data: []interface{}{"service3", true, 30.0, "string"}}, + }, + }, + { + name: "Sort with SigNozOrderByValue", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: constants.SigNozOrderByValue, Order: "desc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service3", 30.0}}, + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + }, + }, + { + name: "Sort by multiple queries with mixed types", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0, "b", true}}, + {Data: []interface{}{"service2", 20.0, "a", false}}, + {Data: []interface{}{"service3", 10.0, "c", true}}, + {Data: []interface{}{"service4", 20.0, "b", false}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + {Name: "B"}, + {Name: "C"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + "B": {OrderBy: []v3.OrderBy{{ColumnName: "B", Order: "desc"}}}, + "C": {OrderBy: []v3.OrderBy{{ColumnName: "C", Order: "asc"}}}, + }, + queryNames: []string{"A", "B", "C"}, + expected: []*v3.TableRow{ + {Data: 
[]interface{}{"service3", 10.0, "c", true}}, + {Data: []interface{}{"service1", 10.0, "b", true}}, + {Data: []interface{}{"service4", 20.0, "b", false}}, + {Data: []interface{}{"service2", 20.0, "a", false}}, + }, + }, + { + name: "Sort with all n/a values", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", "n/a", "n/a"}}, + {Data: []interface{}{"service2", "n/a", "n/a"}}, + {Data: []interface{}{"service3", "n/a", "n/a"}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + {Name: "B"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + "B": {OrderBy: []v3.OrderBy{{ColumnName: "B", Order: "desc"}}}, + }, + queryNames: []string{"A", "B"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service1", "n/a", "n/a"}}, + {Data: []interface{}{"service2", "n/a", "n/a"}}, + {Data: []interface{}{"service3", "n/a", "n/a"}}, + }, + }, + { + name: "Sort with negative numbers", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", -10.0}}, + {Data: []interface{}{"service2", 20.0}}, + {Data: []interface{}{"service3", -30.0}}, + {Data: []interface{}{"service4", 0.0}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service3", -30.0}}, + {Data: []interface{}{"service1", -10.0}}, + {Data: []interface{}{"service4", 0.0}}, + {Data: []interface{}{"service2", 20.0}}, + }, + }, + { + name: "Sort with mixed case strings", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", "Apple"}}, + {Data: []interface{}{"service2", "banana"}}, + {Data: []interface{}{"service3", "Cherry"}}, + {Data: []interface{}{"service4", "date"}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service1", "Apple"}}, + {Data: []interface{}{"service3", "Cherry"}}, + {Data: []interface{}{"service2", "banana"}}, + {Data: []interface{}{"service4", "date"}}, + }, + }, + { + name: "Sort with empty strings", + rows: []*v3.TableRow{ + {Data: []interface{}{"service1", ""}}, + {Data: []interface{}{"service2", "b"}}, + {Data: []interface{}{"service3", ""}}, + {Data: []interface{}{"service4", "a"}}, + }, + columns: []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + }, + builderQueries: map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + }, + queryNames: []string{"A"}, + expected: []*v3.TableRow{ + {Data: []interface{}{"service1", ""}}, + {Data: []interface{}{"service3", ""}}, + {Data: []interface{}{"service4", "a"}}, + {Data: []interface{}{"service2", "b"}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sortRows(tt.rows, tt.columns, tt.builderQueries, tt.queryNames) + if !reflect.DeepEqual(tt.rows, tt.expected) { + exp, _ := json.Marshal(tt.expected) + got, _ := json.Marshal(tt.rows) + t.Errorf("sortRows() = %v, want %v", string(got), string(exp)) + } + }) + } +} + +func TestSortRowsWithEmptyQueries(t *testing.T) { + rows := []*v3.TableRow{ + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + } + columns := 
[]*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + } + builderQueries := map[string]*v3.BuilderQuery{} + queryNames := []string{} + + sortRows(rows, columns, builderQueries, queryNames) + + // Expect the original order to be maintained + expected := []*v3.TableRow{ + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + } + + if !reflect.DeepEqual(rows, expected) { + t.Errorf("sortRows() with empty queries = %v, want %v", rows, expected) + } +} + +func TestSortRowsWithInvalidColumnName(t *testing.T) { + rows := []*v3.TableRow{ + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + } + columns := []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + } + builderQueries := map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "InvalidColumn", Order: "asc"}}}, + } + queryNames := []string{"A"} + + sortRows(rows, columns, builderQueries, queryNames) + + // Expect the original order to be maintained + expected := []*v3.TableRow{ + {Data: []interface{}{"service1", 20.0}}, + {Data: []interface{}{"service2", 10.0}}, + {Data: []interface{}{"service3", 30.0}}, + } + + if !reflect.DeepEqual(rows, expected) { + t.Errorf("sortRows() with invalid column name = %v, want %v", rows, expected) + } +} + +func TestSortRowsStability(t *testing.T) { + rows := []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0, "a"}}, + {Data: []interface{}{"service2", 10.0, "b"}}, + {Data: []interface{}{"service3", 10.0, "c"}}, + } + columns := []*v3.TableColumn{ + {Name: "service_name"}, + {Name: "A"}, + {Name: "B"}, + } + builderQueries := map[string]*v3.BuilderQuery{ + "A": {OrderBy: []v3.OrderBy{{ColumnName: "A", Order: "asc"}}}, + } + queryNames := []string{"A"} + + sortRows(rows, columns, builderQueries, queryNames) + + // Expect the original order to be maintained for equal values + expected := []*v3.TableRow{ + {Data: []interface{}{"service1", 10.0, "a"}}, + {Data: []interface{}{"service2", 10.0, "b"}}, + {Data: []interface{}{"service3", 10.0, "c"}}, + } + + if !reflect.DeepEqual(rows, expected) { + t.Errorf("sortRows() stability test failed = %v, want %v", rows, expected) + } +} + +func TestTransformToTableForClickHouseQueries(t *testing.T) { + tests := []struct { + name string + input []*v3.Result + expected []*v3.Result + }{ + { + name: "Empty input", + input: []*v3.Result{}, + expected: []*v3.Result{}, + }, + { + name: "Single result with one series", + input: []*v3.Result{ + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend"}, + }, + Points: []v3.Point{ + {Value: 10.0}, + }, + }, + }, + }, + }, + expected: []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "A"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"frontend", 10.0}}, + }, + }, + }, + }, + }, + { + name: "Multiple results with multiple series", + input: []*v3.Result{ + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend", "env": "prod"}, + }, + Points: []v3.Point{ + {Value: 10.0}, + }, + }, + { + LabelsArray: []map[string]string{ + {"service": "backend", "env": "prod"}, + }, + Points: []v3.Point{ + {Value: 20.0}, + }, + }, + }, + }, + { + QueryName: "B", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend", "env": "prod"}, + }, + Points: []v3.Point{ + {Value: 15.0}, + 
}, + }, + { + LabelsArray: []map[string]string{ + {"service": "backend", "env": "prod"}, + }, + Points: []v3.Point{ + {Value: 25.0}, + }, + }, + }, + }, + }, + expected: []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "env"}, + {Name: "A"}, + {Name: "B"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"frontend", "prod", 10.0, nil}}, + {Data: []interface{}{"backend", "prod", 20.0, nil}}, + {Data: []interface{}{"frontend", "prod", nil, 15.0}}, + {Data: []interface{}{"backend", "prod", nil, 25.0}}, + }, + }, + }, + }, + }, + { + name: "Results with missing labels", + input: []*v3.Result{ + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend"}, + }, + Points: []v3.Point{ + {Value: 10.0}, + }, + }, + }, + }, + { + QueryName: "B", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"env": "prod"}, + }, + Points: []v3.Point{ + {Value: 20.0}, + }, + }, + }, + }, + }, + expected: []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "env"}, + {Name: "A"}, + {Name: "B"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"frontend", "n/a", 10.0, nil}}, + {Data: []interface{}{"n/a", "prod", nil, 20.0}}, + }, + }, + }, + }, + }, + { + name: "Results with empty series", + input: []*v3.Result{ + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend"}, + }, + Points: []v3.Point{ + {Value: 10.0}, + }, + }, + }, + }, + { + QueryName: "B", + Series: []*v3.Series{}, + }, + }, + expected: []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "A"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"frontend", 10.0}}, + }, + }, + }, + }, + }, + { + name: "Results with empty points", + input: []*v3.Result{ + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend"}, + }, + Points: []v3.Point{}, + }, + }, + }, + { + QueryName: "B", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "backend"}, + }, + Points: []v3.Point{ + {Value: 20.0}, + }, + }, + }, + }, + }, + expected: []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "B"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"frontend", nil}}, + {Data: []interface{}{"backend", 20.0}}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := TransformToTableForClickHouseQueries(tt.input) + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("TransformToTableForClickHouseQueries() = %v, want %v", result, tt.expected) + } + }) + } +} + +func TestTransformToTableForClickHouseQueriesSorting(t *testing.T) { + input := []*v3.Result{ + { + QueryName: "B", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "frontend"}, + }, + Points: []v3.Point{ + {Value: 10.0}, + }, + }, + }, + }, + { + QueryName: "A", + Series: []*v3.Series{ + { + LabelsArray: []map[string]string{ + {"service": "backend"}, + }, + Points: []v3.Point{ + {Value: 20.0}, + }, + }, + }, + }, + } + + expected := []*v3.Result{ + { + Table: &v3.Table{ + Columns: []*v3.TableColumn{ + {Name: "service"}, + {Name: "A"}, + {Name: "B"}, + }, + Rows: []*v3.TableRow{ + {Data: []interface{}{"backend", 20.0, nil}}, + {Data: []interface{}{"frontend", nil, 10.0}}, + }, + }, + }, + } + + result := TransformToTableForClickHouseQueries(input) + if 
!reflect.DeepEqual(result, expected) { + t.Errorf("TransformToTableForClickHouseQueries() sorting test failed. Got %v, want %v", result, expected) + } +} diff --git a/pkg/query-service/telemetry/ignored.go b/pkg/query-service/telemetry/ignored.go index c0a739e9ee..f91ec7966c 100644 --- a/pkg/query-service/telemetry/ignored.go +++ b/pkg/query-service/telemetry/ignored.go @@ -10,7 +10,7 @@ func EnabledPaths() map[string]struct{} { func ignoreEvents(event string, attributes map[string]interface{}) bool { - if event == TELEMETRY_EVENT_ACTIVE_USER || event == TELEMETRY_EVENT_ACTIVE_USER_PH { + if event == TELEMETRY_EVENT_ACTIVE_USER { for attr_key, attr_val := range attributes { if attr_key == "any" && attr_val.(int8) == 0 { diff --git a/pkg/query-service/telemetry/telemetry.go b/pkg/query-service/telemetry/telemetry.go index 22be2a8648..9b75259296 100644 --- a/pkg/query-service/telemetry/telemetry.go +++ b/pkg/query-service/telemetry/telemetry.go @@ -11,7 +11,6 @@ import ( "testing" "time" - ph "github.com/posthog/posthog-go" "gopkg.in/segmentio/analytics-go.v3" "go.signoz.io/signoz/pkg/query-service/constants" @@ -26,7 +25,6 @@ const ( TELEMETRY_EVENT_USER = "User" TELEMETRY_EVENT_INPRODUCT_FEEDBACK = "InProduct Feedback Submitted" TELEMETRY_EVENT_NUMBER_OF_SERVICES = "Number of Services" - TELEMETRY_EVENT_NUMBER_OF_SERVICES_PH = "Number of Services V2" TELEMETRY_EVENT_HEART_BEAT = "Heart Beat" TELEMETRY_EVENT_ORG_SETTINGS = "Org Settings" DEFAULT_SAMPLING = 0.1 @@ -44,7 +42,6 @@ const ( TELEMETRY_EVENT_QUERY_RANGE_API = "Query Range API" TELEMETRY_EVENT_DASHBOARDS_ALERTS = "Dashboards/Alerts Info" TELEMETRY_EVENT_ACTIVE_USER = "Active User" - TELEMETRY_EVENT_ACTIVE_USER_PH = "Active User V2" TELEMETRY_EVENT_USER_INVITATION_SENT = "User Invitation Sent" TELEMETRY_EVENT_USER_INVITATION_ACCEPTED = "User Invitation Accepted" TELEMETRY_EVENT_SUCCESSFUL_DASHBOARD_PANEL_QUERY = "Successful Dashboard Panel Query" @@ -69,8 +66,21 @@ var SAAS_EVENTS_LIST = map[string]struct{}{ TELEMETRY_EVENT_TRACE_DETAIL_API: {}, } -const api_key = "4Gmoa4ixJAUHx2BpJxsjwA1bEfnwEeRz" -const ph_api_key = "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w" +var OSS_EVENTS_LIST = map[string]struct{}{ + TELEMETRY_EVENT_NUMBER_OF_SERVICES: {}, + TELEMETRY_EVENT_HEART_BEAT: {}, + TELEMETRY_EVENT_LANGUAGE: {}, + TELEMETRY_EVENT_ENVIRONMENT: {}, + TELEMETRY_EVENT_DASHBOARDS_ALERTS: {}, + TELEMETRY_EVENT_ACTIVE_USER: {}, + TELEMETRY_EVENT_PATH: {}, + TELEMETRY_EVENT_ORG_SETTINGS: {}, + TELEMETRY_LICENSE_CHECK_FAILED: {}, + TELEMETRY_LICENSE_UPDATED: {}, + TELEMETRY_LICENSE_ACT_FAILED: {}, +} + +const api_key = "9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr" const IP_NOT_FOUND_PLACEHOLDER = "NA" const DEFAULT_NUMBER_OF_SERVICES = 6 @@ -110,13 +120,13 @@ func (telemetry *Telemetry) CheckSigNozSignals(postData *v3.QueryRangeParamsV3) if postData.CompositeQuery.QueryType == v3.QueryTypeBuilder { for _, query := range postData.CompositeQuery.BuilderQueries { - if query.DataSource == v3.DataSourceLogs && len(query.Filters.Items) > 0 { + if query.DataSource == v3.DataSourceLogs && query.Filters != nil && len(query.Filters.Items) > 0 { signozLogsUsed = true } else if query.DataSource == v3.DataSourceMetrics && !strings.Contains(query.AggregateAttribute.Key, "signoz_") && len(query.AggregateAttribute.Key) > 0 { signozMetricsUsed = true - } else if query.DataSource == v3.DataSourceTraces && len(query.Filters.Items) > 0 { + } else if query.DataSource == v3.DataSourceTraces && query.Filters != nil && len(query.Filters.Items) > 0 { signozTracesUsed = true } 
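// The added query.Filters != nil guards above are load-bearing: Filters is a
// pointer field that builder queries may omit, so the previous
// len(query.Filters.Items) dereferenced a nil pointer and panicked for
// filterless queries. The same idiom as a small helper (a sketch; assumes the
// v3.FilterSet shape used by BuilderQuery):
func hasFilterItems(f *v3.FilterSet) bool {
	// Check the pointer before touching Items; len of a nil slice is safe,
	// but f.Items on a nil f is not.
	return f != nil && len(f.Items) > 0
}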
} @@ -159,9 +169,8 @@ func (telemetry *Telemetry) AddActiveLogsUser() { } type Telemetry struct { - operator analytics.Client + ossOperator analytics.Client saasOperator analytics.Client - phOperator ph.Client ipAddress string userEmail string isEnabled bool @@ -188,11 +197,10 @@ func createTelemetry() { } telemetry = &Telemetry{ - operator: analytics.New(api_key), - phOperator: ph.New(ph_api_key), - ipAddress: getOutboundIP(), - rateLimits: make(map[string]int8), - activeUser: make(map[string]int8), + ossOperator: analytics.New(api_key), + ipAddress: getOutboundIP(), + rateLimits: make(map[string]int8), + activeUser: make(map[string]int8), } telemetry.minRandInt = 0 telemetry.maxRandInt = int(1 / DEFAULT_SAMPLING) @@ -392,18 +400,16 @@ func (a *Telemetry) IdentifyUser(user *model.User) { }) } - a.operator.Enqueue(analytics.Identify{ + a.ossOperator.Enqueue(analytics.Identify{ UserId: a.ipAddress, Traits: analytics.NewTraits().SetName(user.Name).SetEmail(user.Email).Set("ip", a.ipAddress), }) // Updating a groups properties - a.phOperator.Enqueue(ph.GroupIdentify{ - Type: "companyDomain", - Key: a.getCompanyDomain(), - Properties: ph.NewProperties(). - Set("companyDomain", a.getCompanyDomain()), + a.ossOperator.Enqueue(analytics.Group{ + UserId: a.ipAddress, + GroupId: a.getCompanyDomain(), + Traits: analytics.NewTraits().Set("company_domain", a.getCompanyDomain()), }) - } func (a *Telemetry) SetCountUsers(countUsers int8) { @@ -520,33 +526,19 @@ func (a *Telemetry) SendEvent(event string, data map[string]interface{}, userEma }) } - a.operator.Enqueue(analytics.Track{ - Event: event, - UserId: userId, - Properties: properties, - }) - - if event == TELEMETRY_EVENT_NUMBER_OF_SERVICES { - - a.phOperator.Enqueue(ph.Capture{ - DistinctId: userId, - Event: TELEMETRY_EVENT_NUMBER_OF_SERVICES_PH, - Properties: ph.Properties(properties), - Groups: ph.NewGroups(). - Set("companyDomain", a.getCompanyDomain()), - }) + _, isOSSEvent := OSS_EVENTS_LIST[event] - } - if event == TELEMETRY_EVENT_ACTIVE_USER { - - a.phOperator.Enqueue(ph.Capture{ - DistinctId: userId, - Event: TELEMETRY_EVENT_ACTIVE_USER_PH, - Properties: ph.Properties(properties), - Groups: ph.NewGroups(). - Set("companyDomain", a.getCompanyDomain()), + if a.ossOperator != nil && isOSSEvent { + a.ossOperator.Enqueue(analytics.Track{ + Event: event, + UserId: userId, + Properties: properties, + Context: &analytics.Context{ + Extra: map[string]interface{}{ + "groupId": a.getCompanyDomain(), + }, + }, }) - } }
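// With PostHog removed, OSS telemetry flows through a single Segment client:
// only events in the new OSS_EVENTS_LIST are forwarded, and the company
// domain rides along as a groupId in the track context (and as an
// analytics.Group on identify) instead of a PostHog group capture. A
// condensed sketch of that gating, assuming the same
// gopkg.in/segmentio/analytics-go.v3 API used in the hunks above:
package sketch

import analytics "gopkg.in/segmentio/analytics-go.v3"

func sendOSSEvent(client analytics.Client, allowed map[string]struct{}, event, userID, companyDomain string, properties analytics.Properties) {
	if client == nil {
		return
	}
	// Drop anything not explicitly allowlisted for OSS telemetry.
	if _, ok := allowed[event]; !ok {
		return
	}
	client.Enqueue(analytics.Track{
		Event:      event,
		UserId:     userID,
		Properties: properties,
		Context: &analytics.Context{
			Extra: map[string]interface{}{"groupId": companyDomain},
		},
	})
}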