"
+
+active_playbooks:
+ - name: "restart_loop_reporter"
+ action_params:
+ restart_reason: "CrashLoopBackOff"
+ - name: "python_profiler"
\ No newline at end of file
diff --git a/playbooks/alerts_integration.py b/playbooks/alerts_integration.py
new file mode 100644
index 000000000..de21b452f
--- /dev/null
+++ b/playbooks/alerts_integration.py
@@ -0,0 +1,131 @@
+from robusta.api import *
+from node_cpu_analysis import do_node_cpu_analysis
+
+
+class GenParams(BaseModel):
+ name: str
+ params: Dict[Any,Any] = None
+
+class Silencer:
+ params: Dict[Any,Any]
+
+ def __init__(self, params: Dict[Any,Any]):
+ self.params = params
+
+ def silence(self, alert: PrometheusKubernetesAlert) -> bool:
+ pass
+
+
+class NodeRestartSilencer(Silencer):
+
+ post_restart_silence: int = 300
+
+ def __init__(self, params: Dict[Any, Any]):
+ super().__init__(params)
+ if params and params.get("post_restart_silence"):
+ self.post_restart_silence = self.params.get("post_restart_silence")
+
+ def silence(self, alert: PrometheusKubernetesAlert) -> bool:
+ if not alert.obj or not alert.obj.kind == "Pod":
+ return False # NodeRestartSilencer only silences pod alerts
+
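+ # silence pod alerts for post_restart_silence seconds after the node became Ready (or was created)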
+ node: Node = Node.readNode(alert.obj.spec.nodeName).obj
+ if not node:
+ logging.warning(f"Node {alert.obj.spec.nodeName} not found for NodeRestartSilencer for {alert}")
+ return False
+
+ last_transition_times = [condition.lastTransitionTime for condition in node.status.conditions if condition.type == "Ready"]
+ if last_transition_times and last_transition_times[0]:
+ node_start_time_str = last_transition_times[0]
+ else: # if no ready time, take creation time
+ node_start_time_str = node.metadata.creationTimestamp
+
+ node_start_time = datetime.strptime(node_start_time_str, '%Y-%m-%dT%H:%M:%SZ')
+ return datetime.utcnow().timestamp() < (node_start_time.timestamp() + self.post_restart_silence)
+
+
+class Enricher:
+ params: Dict[Any, Any] = None
+
+ def __init__(self, params: Dict[Any,Any]):
+ self.params = params
+
+ def enrich(self, alert: PrometheusKubernetesAlert):
+ pass
+
+
+class DefaultEnricher (Enricher):
+
+ def enrich(self, alert: PrometheusKubernetesAlert):
+ alert_name = alert.alert.labels.get("alertname", "")
+ labels = alert.alert.labels
+ annotations = alert.alert.annotations
+
+ if "summary" in annotations:
+ alert.report_title = f'{alert_name}: {annotations["summary"]}'
+ else:
+ alert.report_title = alert_name
+
+ alert.report_attachment_blocks.append(TableBlock(labels.items(), ["label", "value"]))
+ if "description" in annotations:
+ alert.report_attachment_blocks.append(MarkdownBlock(annotations["description"]))
+
+
+class NodeCPUEnricher (Enricher):
+
+ def enrich(self, alert: PrometheusKubernetesAlert):
+ alert.report_blocks.extend(do_node_cpu_analysis(alert.obj.metadata.name))
+
+DEFAULT_ENRICHER = "AlertDefaults"
+
+silencers = {}
+silencers["NodeRestartSilencer"] = NodeRestartSilencer
+
+enrichers = {}
+enrichers[DEFAULT_ENRICHER] = DefaultEnricher
+enrichers["NodeCPUAnalysis"] = NodeCPUEnricher
+
+
+class AlertConfig(BaseModel):
+ alert_name: str
+ silencers: List[GenParams] = []
+ enrichers: List[GenParams] = []
+
+
+class AlertsIntegrationParams(BaseModel):
+ slack_channel: str
+ default_enricher: str = DEFAULT_ENRICHER
+ alerts_config: List[AlertConfig]
+
+
+def default_alert_config(alert_name, config: AlertsIntegrationParams) -> AlertConfig:
+ return AlertConfig(alert_name=alert_name, silencers=[], enrichers=[GenParams(name=config.default_enricher)])
+
+@on_pod_prometheus_alert(status="firing")
+def alerts_integration(alert: PrometheusKubernetesAlert, config: AlertsIntegrationParams):
+ logging.info(f'running alerts_integration alert - alert: {alert.alert} pod: {alert.obj.metadata.name if alert.obj is not None else "None!"}')
+
+ alert.slack_channel = config.slack_channel
+ alert_name = alert.alert.labels.get("alertname")
+ alert_configs = [alert_config for alert_config in config.alerts_config if alert_config.alert_name == alert_name]
+ if not alert_configs:
+ alert_configs = [default_alert_config(alert_name, config)]
+
+ for alert_config in alert_configs:
+ for silencer_config in alert_config.silencers:
+ silencer_class = silencers.get(silencer_config.name)
+ if silencer_class is None:
+ logging.error(f"Silencer {silencer_config.name} for alert {alert_name} does not exist. Silence not enforced")
+ continue
+ if silencer_class(silencer_config.params).silence(alert):
+ return
+
+ for enricher_config in alert_config.enrichers:
+ enricher_class = enrichers.get(enricher_config.name)
+ if enricher_class is None:
+ logging.error(f"Enricher {enricher_config.name} for alert {alert_name} does not exist. No enrichment")
+ continue
+ enricher_class(enricher_config.params).enrich(alert)
+
+ if alert.report_blocks or alert.report_title or alert.report_attachment_blocks:
+ send_to_slack(alert)
diff --git a/playbooks/autoscaler.py b/playbooks/autoscaler.py
new file mode 100644
index 000000000..9b118d73e
--- /dev/null
+++ b/playbooks/autoscaler.py
@@ -0,0 +1,55 @@
+from math import ceil
+
+from robusta.api import *
+
+HPA_NAME = "hpa_name"
+NAMESPACE = "namespace"
+MAX_REPLICAS = "max_replicas"
+SLACK_CHANNEL = "slack_channel"
+
+class HPALimitParams(BaseModel):
+ increase_pct : int = 20
+ slack_channel: str
+
+@on_report_callback
+def scale_hpa_callback(event: ReportCallbackEvent):
+ context = json.loads(event.source_context)
+ hpa_name = context[HPA_NAME]
+ hpa_ns = context[NAMESPACE]
+ hpa : HorizontalPodAutoscaler = HorizontalPodAutoscaler.readNamespacedHorizontalPodAutoscaler(hpa_name, hpa_ns).obj
+ new_max_replicas = int(context[MAX_REPLICAS])
+ hpa.spec.maxReplicas = new_max_replicas
+ hpa.replaceNamespacedHorizontalPodAutoscaler(hpa_name, hpa_ns)
+ event.report_title = f"Max replicas for HPA *{hpa_name}* in namespace *{hpa_ns}* updated to: *{new_max_replicas}*"
+ event.slack_channel=context[SLACK_CHANNEL]
+ send_to_slack(event)
+
+
+@on_horizontalpodautoscaler_update
+def alert_on_hpa_reached_limit(event : HorizontalPodAutoscalerEvent, action_params : HPALimitParams):
+ logging.info(f'running alert_on_hpa_reached_limit: {event.obj.metadata.name} ns: {event.obj.metadata.namespace}')
+
+ hpa = event.obj
+ if hpa.status.currentReplicas == event.old_obj.status.currentReplicas:
+ return # run only when number of replicas change
+
+ if hpa.status.desiredReplicas != hpa.spec.maxReplicas:
+ return # hasn't reached the max replicas limit yet
+
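+ # divide the reported CPU utilization by the replica count (guarding against zero replicas)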
+ avg_cpu = int(hpa.status.currentCPUUtilizationPercentage / (hpa.status.currentReplicas if hpa.status.currentReplicas > 0 else 1))
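+ # suggest raising maxReplicas by increase_pct percent, rounded up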
+ new_max_replicas_suggestion = ceil((action_params.increase_pct + 100) * hpa.spec.maxReplicas / 100)
+ choices = {
+ f'Update HPA max replicas to: {new_max_replicas_suggestion}': scale_hpa_callback,
+ }
+ context = {
+ HPA_NAME: hpa.metadata.name,
+ NAMESPACE: hpa.metadata.namespace,
+ MAX_REPLICAS: new_max_replicas_suggestion,
+ SLACK_CHANNEL: action_params.slack_channel
+ }
+
+ event.report_title = f"HPA *{event.obj.metadata.name}* in namespace *{event.obj.metadata.namespace}* reached max replicas: *{hpa.spec.maxReplicas}*"
+ event.slack_channel = action_params.slack_channel
+ event.report_blocks.extend([MarkdownBlock(f"Current avg cpu utilization: *{avg_cpu} %* -- (usage vs requested)"),
+ CallbackBlock(choices, context)])
+ send_to_slack(event)
diff --git a/playbooks/chaos_engineering.py b/playbooks/chaos_engineering.py
new file mode 100644
index 000000000..3352e50f9
--- /dev/null
+++ b/playbooks/chaos_engineering.py
@@ -0,0 +1,13 @@
+from robusta.api import *
+import time
+
+
+@on_manual_trigger
+def generate_high_cpu(event: ManualTriggerEvent):
+ logging.info("starting high cpu")
+ dep = RobustaDeployment.from_image("stress-test", "jfusterm/stress", "stress --cpu 100")
+ dep: RobustaDeployment = dep.createNamespacedDeployment(dep.metadata.namespace).obj
+ time.sleep(60)
+ logging.info("stopping high cpu")
+ RobustaDeployment.deleteNamespacedDeployment(dep.metadata.name, dep.metadata.namespace)
+ logging.info("done")
diff --git a/playbooks/configuration_ab_testing.py b/playbooks/configuration_ab_testing.py
new file mode 100644
index 000000000..888947183
--- /dev/null
+++ b/playbooks/configuration_ab_testing.py
@@ -0,0 +1,50 @@
+from robusta.api import *
+
+
+class ConfigurationSet(BaseModel):
+ config_set_name: str
+ config_items: Dict[str, str] = {}
+
+
+class ABTestingParams(BaseModel):
+ grafana_url: str = "http://kube-prometheus-stack-1616314181-grafana.default.svc"
+ grafana_api_key: str
+ grafana_dashboard_uid: str
+ slack_channel: str
+ api_version: str = "v1"
+ kind: str
+ name: str
+ namespace: str = "default"
+ configuration_sets: List[ConfigurationSet]
+
+ def pre_deploy_func(self, trigger_params : TriggerParams):
+ trigger_params.repeat = len(self.configuration_sets)
+
+@on_recurring_trigger(seconds_delay=None)
+def config_ab_testing(event: RecurringTriggerEvent, action_params: ABTestingParams):
+ """Change configuration according to pre-defined configuration sets."""
+ if len(action_params.configuration_sets) <= event.recurrence:
+ logging.error(f"No matching configuration set for recurrence {event.recurrence}")
+ return
+
+ next_config_set = action_params.configuration_sets[event.recurrence]
+ object_class = get_api_version(action_params.api_version).get(action_params.kind)
+ if object_class is None:
+ logging.error(f"No matching tested kind {action_params.kind}")
+ return
+
+ # TODO: get rid of this ugly hack once we have a generic read() function on hikaru classes
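+ # e.g. for kind "Service" this resolves to Service.readNamespacedService(name, namespace)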
+ reader_function = getattr(object_class, f"readNamespaced{object_class.kind}")
+ tested_object = reader_function(action_params.name, action_params.namespace).obj
+ for attribute_name, attribute_value in next_config_set.config_items.items():
+ update_item_attr(tested_object, attribute_name, attribute_value)
+
+ update_function = getattr(tested_object, f"patchNamespaced{object_class.kind}")
+ update_function(action_params.name, action_params.namespace)
+
+ grafana_message = f"configuration:{next_config_set.config_set_name}\n"
+ for attribute_name, attribute_value in next_config_set.config_items.items():
+ grafana_message += f"{attribute_name} : {attribute_value}\n"
+ grafana_message += "\n"
+ grafana = Grafana(action_params.grafana_api_key, action_params.grafana_url)
+ grafana.add_line_to_dashboard(action_params.grafana_dashboard_uid, grafana_message, tags=["AB Testing"])
diff --git a/playbooks/cpu_alerts.py b/playbooks/cpu_alerts.py
new file mode 100644
index 000000000..b99e25439
--- /dev/null
+++ b/playbooks/cpu_alerts.py
@@ -0,0 +1,37 @@
+from robusta.api import *
+
+
+class HighCpuConfig(BaseModel):
+ slack_channel: str
+
+
+@on_report_callback
+def high_cpu_delete_confirmation_handler(event: ReportCallbackEvent):
+ logging.info(f'high_cpu_delete_confirmation_handler {event.context}')
+
+
+@on_report_callback
+def high_cpu_profile_confirmation_handler(event: ReportCallbackEvent):
+ logging.info(f'high_cpu_profile_confirmation_handler {event.context}')
+
+
+@on_pod_prometheus_alert(alert_name="HighCPUAlert", status="firing")
+def slack_confirmation_on_cpu(event: PrometheusKubernetesAlert, config: HighCpuConfig):
+ logging.info(f'running slack_confirmation_on_cpu alert - alert: {event.alert} pod: {event.obj}')
+
+ choices = {
+ 'delete pod': high_cpu_delete_confirmation_handler,
+ 'profile pod': high_cpu_profile_confirmation_handler
+ }
+ context = {
+ 'pod_name': event.obj.metadata.name,
+ 'namespace': event.obj.metadata.namespace
+ }
+
+ event.report_title = f"Pod {event.obj.metadata.name} has high cpu"
+ event.slack_channel = config.slack_channel
+ event.report_blocks.extend([
+ CallbackBlock(choices, context)
+ ])
+
+ send_to_slack(event)
diff --git a/playbooks/deployment_babysitter.py b/playbooks/deployment_babysitter.py
new file mode 100644
index 000000000..0ebaa5721
--- /dev/null
+++ b/playbooks/deployment_babysitter.py
@@ -0,0 +1,52 @@
+# TODO: we should attach a full yaml diff when the deployment spec (not status) changes
+# options for generating a human-readable diff:
+# * python_diff = "\n".join([x for x in unified_diff(before.splitlines(), after.splitlines(), fromfile="old", tofile="new")])
+# * https://github.com/google/diff-match-patch/wiki/Language:-Python (see output format here: https://neil.fraser.name/software/diff_match_patch/demos/diff.html)
+# * https://github.com/wagoodman/diff2HtmlCompare
+# * https://github.com/GerHobbelt/google-diff-match-patch
+from typing import Tuple
+from hikaru.meta import DiffDetail, DiffType
+from robusta.api import *
+
+
+class DeploymentBabysitterConfig(BaseModel):
+ slack_channel: str
+ fields_to_monitor: Tuple[str] = (
+ "status.readyReplicas",
+ "message",
+ "reason",
+ "spec"
+ )
+
+
+# TODO: filter out all the managed fields crap
+def babysitter_should_include_diff(diff_detail: DiffDetail, config: DeploymentBabysitterConfig):
+ return any(substring in diff_detail.formatted_path for substring in config.fields_to_monitor)
+
+
+def babysitter_get_blocks(diffs: List[DiffDetail]):
+ num_additions = len([d for d in diffs if d.diff_type == DiffType.ADDED])
+ num_subtractions = len([d for d in diffs if d.diff_type == DiffType.REMOVED])
+ num_modifications = len(diffs) - num_additions - num_subtractions
+ blocks = [
+ MarkdownBlock(f"{num_additions} fields added. {num_subtractions} fields removed. {num_modifications} fields changed")
+ ]
+ for d in diffs:
+ blocks.extend([DividerBlock(),
+ MarkdownBlock(f"*{d.formatted_path}*: {d.other_value} :arrow_right: {d.value}")])
+ return blocks
+
+
+@on_deployment_all_changes
+def deployment_babysitter(event: DeploymentEvent, config: DeploymentBabysitterConfig):
+ """Track changes to a deployment and send the changes in slack."""
+ if event.operation == K8sOperationType.UPDATE:
+ all_diffs = event.obj.diff(event.old_obj)
+ filtered_diffs = list(filter(lambda x: babysitter_should_include_diff(x, config), all_diffs))
+ if len(filtered_diffs) == 0:
+ return
+ event.report_attachment_blocks.extend(babysitter_get_blocks(filtered_diffs))
+
+ event.report_title = f"Deployment {event.obj.metadata.name} {event.operation.value}d in namespace {event.obj.metadata.namespace}"
+ event.slack_channel = config.slack_channel
+ send_to_slack(event)
diff --git a/playbooks/disk_benchmark.py b/playbooks/disk_benchmark.py
new file mode 100644
index 000000000..d60883dae
--- /dev/null
+++ b/playbooks/disk_benchmark.py
@@ -0,0 +1,57 @@
+from robusta.api import *
+
+class DiskBenchmarkParams(BaseModel):
+ pvc_name: str = "robusta-disk-benchmark"
+ test_seconds: int = 20
+ namespace: str = "robusta"
+ disk_size: str = "10Gi"
+ storage_class_name: str
+
+def format_float_per2(f_param):
+ return "{:.2f}".format(f_param)
+
+@on_manual_trigger
+def disk_benchmark(event : ManualTriggerEvent):
+
+ action_params = DiskBenchmarkParams(**event.data)
+ pvc = PersistentVolumeClaim(
+ metadata=ObjectMeta(name=action_params.pvc_name, namespace=action_params.namespace),
+ spec=PersistentVolumeClaimSpec(
+ accessModes=['ReadWriteOnce'],
+ storageClassName=action_params.storage_class_name,
+ resources=ResourceRequirements(
+ requests={
+ "storage": action_params.disk_size
+ }
+ )
+ ))
+ try:
+ pvc.createNamespacedPersistentVolumeClaim(action_params.namespace)
+ pv_name = "robusta-benchmark-pv"
+ image = "us-central1-docker.pkg.dev/arabica-300319/devel/robusta-fio-benchmark"
+ name = "robusta-fio-disk-benchmark"
+ mount_path = "/robusta/data"
+ spec = PodSpec(
+ volumes=[Volume(name=pv_name,
+ persistentVolumeClaim=PersistentVolumeClaimVolumeSource(claimName=action_params.pvc_name))],
+ containers=[Container(name=name,
+ image=image,
+ imagePullPolicy="Always",
+ volumeMounts=[VolumeMount(mountPath=mount_path, name=pv_name)],
+ args=["--directory", mount_path, "--output-format", "json", "--group_reporting", "--runtime", f"{action_params.test_seconds}", "/jobs/rand-rw.fio"])],
+ restartPolicy="Never"
+ )
+
+
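+ # the captured job output may use single quotes, so normalize it to valid JSON before parsing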
+ json_output = json.loads(RobustaJob.run_simple_job_spec(spec, name, 120 + action_params.test_seconds).replace("'","\""))
+ job = json_output["jobs"][0]
+
+ logging.info(f"\nfio benchmark:\n"
+ f"Total Time: {action_params.test_seconds} Sec\n"
+ f"Read Band Width: {format_float_per2(job['read']['bw'])} KB \n"
+ f"Read IO Ops/Sec: {format_float_per2(job['read']['iops'])}\n"
+ f"Write Band Width: {format_float_per2(job['write']['bw'])} KB \n"
+ f"Write Ops/Sec: {format_float_per2(job['write']['iops'])}\n ")
+
+ finally:
+ pvc.deleteNamespacedPersistentVolumeClaim(name=action_params.pvc_name, namespace=action_params.namespace)
\ No newline at end of file
diff --git a/playbooks/event_publisher.py b/playbooks/event_publisher.py
new file mode 100644
index 000000000..f9c922632
--- /dev/null
+++ b/playbooks/event_publisher.py
@@ -0,0 +1,37 @@
+from robusta.api import *
+
+class PrometheusAlertParams(BaseModel):
+ alert_name: str
+ pod_name: str
+ status: str = "firing"
+ description: str = "simulated prometheus alert"
+ namespace: str = "default"
+
+# Usage: curl -X POST -F 'alert_name=HighCPUAlert' -F 'pod_name=robusta-runner-5d6f654bf9-jm2hx' -F 'namespace=robusta' -F 'trigger_name=prometheus_alert' http://localhost:5000/api/trigger
+# or: robusta trigger prometheus_alert alert_name=HighCPUAlert pod_name=robusta-runner-5d6f654bf9-jm2hx namespace=robusta
+@on_manual_trigger
+def prometheus_alert(event: ManualTriggerEvent):
+ prometheus_event_data = PrometheusAlertParams(**event.data)
+
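+ # build a synthetic Prometheus alert payload and run it through the regular playbook pipeline below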
+ prometheus_event = PrometheusEvent(**{
+ "status": prometheus_event_data.status,
+ "description": prometheus_event_data.description,
+ "externalURL": "",
+ "groupKey": "{}/{}:{}",
+ "version": "1",
+ "receiver": "robusta receiver",
+ "alerts": [{
+ "status": prometheus_event_data.status,
+ "endsAt": datetime.now(),
+ "startsAt": datetime.now(),
+ "generatorURL" : "",
+ "labels": {
+ "pod": prometheus_event_data.pod_name,
+ "namespace": prometheus_event_data.namespace,
+ "alertname": prometheus_event_data.alert_name
+ },
+ "annotations": {}
+ }],
+ })
+
+ run_playbooks(prometheus_cloud_event(prometheus_event))
diff --git a/playbooks/git_change_audit.py b/playbooks/git_change_audit.py
new file mode 100644
index 000000000..c52d67ac7
--- /dev/null
+++ b/playbooks/git_change_audit.py
@@ -0,0 +1,45 @@
+from robusta.api import *
+
+from pydantic.main import BaseModel
+
+
+class GitAuditParams(BaseModel):
+ cluster_name: str
+ git_https_url: str
+ git_user: str
+ git_password: str
+
+ def __str__(self):
+ return f"cluster_name={self.cluster_name} git_https_url={self.git_https_url} git_user=***** git_password=*****"
+
+
+def git_safe_name(name):
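+ # replace any character that is not alphanumeric or '-' so the result is safe to use as a git path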
+ return re.sub("[^0-9a-zA-Z\\-]+", "-", name)
+
+skipped_kinds = ["Event"]
+
+@on_kubernetes_any_all_changes
+def git_change_audit(event : KubernetesAnyEvent, action_params: GitAuditParams):
+ """Save a configuration copy to git"""
+ if event.obj.kind in skipped_kinds:
+ return
+
+ if len(event.obj.metadata.ownerReferences) != 0:
+ return # not handling runtime objects
+
+ git_repo = GitRepoManager.get_git_repo(action_params.git_https_url, action_params.git_user, action_params.git_password)
+ name = f"{git_safe_name(event.obj.metadata.name)}.yaml"
+ path = f"{git_safe_name(action_params.cluster_name)}/{git_safe_name(event.obj.metadata.namespace)}"
+
+ if event.operation == K8sOperationType.DELETE:
+ git_repo.delete_push(path, name)
+ elif event.operation == K8sOperationType.CREATE:
+ obj_yaml = hikaru.get_yaml(event.obj.spec)
+ git_repo.commit_push(obj_yaml, path, name, f"Create {event.obj.kind} named {event.obj.metadata.name} on namespace {event.obj.metadata.namespace}")
+ else: # update
+ obj_yaml = hikaru.get_yaml(event.obj.spec)
+ old_obj_yaml = ""
+ if event.old_obj is not None:
+ old_obj_yaml = hikaru.get_yaml(event.old_obj.spec)
+ if obj_yaml != old_obj_yaml: # we have a change in the spec
+ git_repo.commit_push(obj_yaml, path, name, f"Update {event.obj.kind} named {event.obj.metadata.name} on namespace {event.obj.metadata.namespace}")
diff --git a/playbooks/grafana_enrichment.py b/playbooks/grafana_enrichment.py
new file mode 100644
index 000000000..6b09b2278
--- /dev/null
+++ b/playbooks/grafana_enrichment.py
@@ -0,0 +1,45 @@
+from robusta.api import *
+
+
+class Params(BaseModel):
+ grafana_url: str = None
+ grafana_api_key: str
+ grafana_dashboard_uid: str
+
+
+@on_deployment_update
+def add_deployment_lines_to_grafana(event: DeploymentEvent, action_params: Params):
+ """
+ Add annotations to grafana whenever a new application version is deployed so that you can easily see changes in performance.
+ """
+ new_images = event.obj.get_images()
+ old_images = event.old_obj.get_images()
+ if new_images == old_images:
+ return
+
+ msg = ""
+ if new_images.keys() != old_images.keys():
+ msg = f"number or names of images changed
new{new_images}
old{old_images}
"
+ else:
+ for name in new_images:
+ if new_images[name] != old_images[name]:
+ msg += f"image name:{name}
new tag:{new_images[name]}
old tag{old_images[name]}
"
+
+ grafana = Grafana(action_params.grafana_api_key, action_params.grafana_url)
+ grafana.add_line_to_dashboard(action_params.grafana_dashboard_uid, msg, tags=[event.obj.metadata.name])
+
+
+@on_pod_create
+def test_pod_orm(event : PodEvent):
+ logging.info('running test_pod_orm')
+ pod = event.obj
+
+ images = [container.image for container in event.obj.spec.containers]
+ logging.info(f'pod images are {images}')
+
+ exec_resp = pod.exec("ls -l /")
+ logging.info(f'pod ls / command: {exec_resp}')
+
+ logging.info(f'deleting pod {pod.metadata.name}')
+ RobustaPod.deleteNamespacedPod(pod.metadata.name, pod.metadata.namespace)
+ logging.info(f'pod deleted')
diff --git a/playbooks/networking.py b/playbooks/networking.py
new file mode 100644
index 000000000..2753c180a
--- /dev/null
+++ b/playbooks/networking.py
@@ -0,0 +1,11 @@
+from robusta.api import *
+
+class PingParams (BaseModel):
+ hostname: str
+
+
+@on_manual_trigger
+def incluster_ping(event: ManualTriggerEvent):
+ action_params = PingParams(**event.data)
+ output = RobustaJob.run_simple_job("nicolaka/netshoot", f"ping -c 1 {action_params.hostname}", 60)
+ print("got output", output)
diff --git a/playbooks/node_cpu_analysis.py b/playbooks/node_cpu_analysis.py
new file mode 100644
index 000000000..a2b4012b3
--- /dev/null
+++ b/playbooks/node_cpu_analysis.py
@@ -0,0 +1,162 @@
+import textwrap
+from collections import OrderedDict
+
+import cairosvg
+import pygal
+from pygal.style import DarkStyle as ChosenStyle
+from hikaru.model import *
+from robusta.api import *
+from prometheus_api_client import PrometheusConnect
+
+
+class NodeCPUAnalysisParams(BaseModel):
+ prometheus_url: str = None
+ node: str = ""
+ slack_channel: str = ""
+
+
+# TODO: should we move this to the robusta framework?
+class NodeAnalyzer:
+
+ # TODO: perhaps we should handle this more elegantly by first loading all the data into a pandas dataframe
+ # and then slicing it different ways
+ def __init__(self, node_name: str, prometheus_url: str, range_size="5m"):
+ self.node_name = node_name
+ self.range_size = range_size
+ self.node: Node = Node.readNode(node_name).obj
+ self.internal_ip = next(addr.address for addr in self.node.status.addresses if addr.type == "InternalIP")
+ if prometheus_url is None:
+ prometheus_url = find_prometheus_url()
+ self.prom = PrometheusConnect(url=prometheus_url, disable_ssl=True)
+
+ def get_total_cpu_usage(self, other_method=False):
+ """
+ Gets the total cpu usage for the node, including both containers and everything running on the host directly
+ :return: a float between 0 and 1 representing the percentage of total cpus used
+ """
+ if other_method:
+ return self._query(
+ f'rate(container_cpu_usage_seconds_total{{node="{self.node_name}",pod="",id="/"}}[{self.range_size}]) '
+ f'/ scalar(sum (machine_cpu_cores{{node="{self.node_name}"}}))')
+
+ # the instance here refers to the node as identified by its internal IP
+ # we average by the instance to account for multiple cpus and still return a number between 0-1
+ return self._query(f'1'
+ f'- avg by(instance)(rate('
+ f' node_cpu_seconds_total{{mode=~"idle", instance=~"{self.internal_ip}:.*"}}[{self.range_size}]'
+ f'))'
+ f'- avg by(instance)(rate('
+ f' node_cpu_seconds_total{{mode=~"iowait", instance=~"{self.internal_ip}:.*"}}[{self.range_size}]'
+ f'))'
+ )
+
+ def get_total_containerized_cpu_usage(self):
+ query = self._build_query_for_containerized_cpu_usage(True, True)
+ return self._query(query)
+
+ def get_per_pod_cpu_usage(self, threshold=0.0, normalize_by_cpu_count=True):
+ """
+ Gets the cpu usage of each pod on a node
+ :param threshold: only return pods with a cpu above threshold
+ :param normalize_by_cpu_count: should we divide by the number of cpus so that the result is in the range 0-1 regardless of cpu count?
+ :return: a dict of {[pod_name] : [cpu_usage in the 0-1 range] }
+ """
+ query = self._build_query_for_containerized_cpu_usage(False, normalize_by_cpu_count)
+ result = self.prom.custom_query(query)
+ print("result is", result)
+ pod_value_pairs = [(r["metric"]["pod"], float(r["value"][1])) for r in result]
+ pod_value_pairs = [(k, v) for (k, v) in pod_value_pairs if v >= threshold]
+ pod_value_pairs.sort(key=lambda x: x[1], reverse=True)
+ pod_to_cpu = OrderedDict(pod_value_pairs)
+ return pod_to_cpu
+
+ def get_per_pod_cpu_request(self):
+ query = f'sum by (pod)(kube_pod_container_resource_requests_cpu_cores{{node="{self.node_name}"}})'
+ result = self.prom.custom_query(query)
+ print("result is", result)
+ return dict((r["metric"]["pod"], float(r["value"][1])) for r in result)
+
+ def _query(self, query):
+ """
+ Runs a simple query returning a single metric and returns that metric
+ """
+ print(f"running query: {query}")
+ result = self.prom.custom_query(query)
+ print("result is", result)
+ return float(result[0]["value"][1])
+
+ def _build_query_for_containerized_cpu_usage(self, total, normalized_by_cpu_count):
+ if total:
+ grouping = ""
+ else:
+ grouping = "by (pod)"
+
+ if normalized_by_cpu_count:
+ # we divide by the number of machine_cpu_cores to return a result in the 0-1 range regardless of cpu count
+ normalization = f'/ scalar(sum (machine_cpu_cores{{node="{self.node_name}"}}))'
+ else:
+ normalization = ''
+
+ # note: it is important to set either image!="" or image="" because otherwise we count everything twice -
+ # once for the whole pod (image="") and once for each container (image!="")
+ return f'sum(rate(' \
+ f' container_cpu_usage_seconds_total{{node="{self.node_name}",pod!="",image!=""}}[{self.range_size}]' \
+ f')) {grouping} {normalization}'
+
+
+def do_node_cpu_analysis(node: str, prometheus_url: str = None) -> List[BaseBlock]:
+ analyzer = NodeAnalyzer(node, prometheus_url)
+
+ threshold = 0.005
+ total_cpu_usage = analyzer.get_total_cpu_usage()
+ total_container_cpu_usage = analyzer.get_total_containerized_cpu_usage()
+ non_container_cpu_usage = total_cpu_usage - total_container_cpu_usage
+ per_pod_usage_normalized = analyzer.get_per_pod_cpu_usage()
+ per_pod_usage_unbounded = analyzer.get_per_pod_cpu_usage(threshold=threshold, normalize_by_cpu_count=False)
+ per_pod_request = analyzer.get_per_pod_cpu_request()
+ all_pod_names = list(set(per_pod_usage_unbounded.keys()).union(per_pod_request.keys()))
+
+ treemap = pygal.Treemap(style=ChosenStyle)
+ treemap.title = f'CPU Usage on Node {node}'
+ treemap.value_formatter = lambda x: f"{int(x * 100)}%"
+ treemap.add("Non-container usage", [non_container_cpu_usage])
+ treemap.add("Free CPU", [1 - total_cpu_usage])
+ for (pod_name, cpu_usage) in per_pod_usage_normalized.items():
+ treemap.add(pod_name, [cpu_usage])
+
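+ # sentinel for pods missing usage or request data; the bar chart's value formatter renders it as "no data"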
+ MISSING_VALUE = -0.001
+ bar_chart = pygal.Bar(x_label_rotation=-40, style=ChosenStyle)
+ bar_chart.title = f'Actual Vs Requested vCPUs on Node {node}'
+ bar_chart.x_labels = all_pod_names
+ bar_chart.value_formatter = lambda x: f"{x:.2f} vCPU" if x != MISSING_VALUE else "no data"
+ bar_chart.add('Actual CPU Usage',
+ [per_pod_usage_unbounded.get(pod_name, MISSING_VALUE) for pod_name in all_pod_names])
+ bar_chart.add('CPU Request', [per_pod_request.get(pod_name, MISSING_VALUE) for pod_name in all_pod_names])
+
+ return [
+ MarkdownBlock(f"_*Quick explanation:* High CPU typically occurs if you define pod CPU "
+ f"requests incorrectly and Kubernetes schedules too many pods on one node. "
+ f"If this is the case, update your pod CPU requests to more accurate numbers"
+ f"using guidance from the attached graphs_"),
+ DividerBlock(),
+ MarkdownBlock(textwrap.dedent(f"""\
+ *Total CPU usage on node:* {int(total_cpu_usage * 100)}%
+ *Container CPU usage on node:* {int(total_container_cpu_usage * 100)}%
+ *Non-container CPU usage on node:* {int(non_container_cpu_usage * 100)}%
+ """)),
+ DividerBlock(),
+ MarkdownBlock(f"*Per Pod Usage* (pods with under {threshold * 100:0.1f}% CPU usage aren't shown):"),
+ ListBlock([f"{k}: *{v * 100:0.1f}%*" for (k, v) in per_pod_usage_normalized.items() if v >= threshold]),
+ MarkdownBlock("All percentages are between 0% and 100% regardless of the number of CPUs."),
+ FileBlock("treemap.svg", treemap.render()),
+ FileBlock("usage_vs_requested.svg", bar_chart.render()),
+ ]
+
+
+@on_manual_trigger
+def node_cpu_analysis(event: ManualTriggerEvent):
+ params = NodeCPUAnalysisParams(**event.data)
+ event.report_title = f"Node CPU Usage Report for {params.node}"
+ event.slack_channel = params.slack_channel
+ event.report_blocks = do_node_cpu_analysis(params.node, params.prometheus_url)
+ send_to_slack(event)
diff --git a/playbooks/persistent_data.py b/playbooks/persistent_data.py
new file mode 100644
index 000000000..3a94888b7
--- /dev/null
+++ b/playbooks/persistent_data.py
@@ -0,0 +1,23 @@
+# TODO: turn this into a bot that prints statistics/a graph of changes at the end of the day/week
+# on what changed the most
+from robusta.api import *
+
+
+class DeploymentChangeCounter(BaseModel):
+ changes_per_deployment: Dict[str, int] = {}
+
+
+PERSISTENT_DATA_NAME = "test_persistency"
+
+
+@on_deployment_update
+def count_pod_creations(event: DeploymentEvent):
+ logging.info("we got an event... sending it to slack")
+ with get_persistent_data(PERSISTENT_DATA_NAME, DeploymentChangeCounter) as data:
+ name = event.obj.metadata.name
+ value = data.changes_per_deployment.get(name, 0)
+ data.changes_per_deployment[name] = value + 1
+
+ event.report_title = f"DeploymentChangeCounter: {data.changes_per_deployment}"
+ event.slack_channel = "general"
+ send_to_slack(event)
diff --git a/playbooks/pod_troubleshooting.py b/playbooks/pod_troubleshooting.py
new file mode 100644
index 000000000..1aea0134d
--- /dev/null
+++ b/playbooks/pod_troubleshooting.py
@@ -0,0 +1,70 @@
+# playbooks for peeking inside running pods
+from cairosvg import svg2png
+
+from robusta.api import *
+
+
+class StartProfilingParams(BaseModel):
+ namespace: str = "default"
+ seconds: int = 2
+ process_name: str = ""
+ slack_channel: str
+ pod_name: str
+
+
+@on_manual_trigger
+def python_profiler(event: ManualTriggerEvent):
+ # This should use ephemeral containers, but they aren't in GA yet. To enable them on GCP for example,
+ # you need to create a brand new cluster. Therefore we're sticking with regular containers for now
+ action_params = StartProfilingParams(**event.data)
+ pod = RobustaPod.find_pod(action_params.pod_name, action_params.namespace)
+ processes = pod.get_processes()
+
+ debugger = pod.create_debugger_pod()
+
+ try:
+ for proc in processes:
+ cmd = " ".join(proc.cmdline)
+ if action_params.process_name not in cmd:
+ logging.info(f"skipping process because it doesn't match process_name. {cmd}")
+ continue
+ elif "python" not in proc.exe:
+ logging.info(f"skipping process because it doesn't look like a python process. {cmd}")
+ continue
+
+ filename = "/profile.svg"
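+ # record a flame graph SVG with py-spy (30 samples/sec); --nonblocking samples without pausing the target process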
+ pyspy_output = debugger.exec(f"py-spy record --duration={action_params.seconds} --pid={proc.pid} --rate 30 --nonblocking -o {filename}")
+ if "Error:" in pyspy_output:
+ continue
+
+ svg = debugger.exec(f"cat {filename}")
+ event.report_blocks.append(FileBlock(f"{cmd}.svg", svg))
+
+ event.slack_channel = action_params.slack_channel
+ event.report_title = f"Profile results for {pod.metadata.name} in namespace {pod.metadata.namespace}:"
+ send_to_slack(event)
+ finally:
+ debugger.deleteNamespacedPod(debugger.metadata.name, debugger.metadata.namespace)
+
+
+class PodInfoParams(BaseModel):
+ pod_name: str
+ namespace: str = "default"
+
+
+@on_manual_trigger
+def pod_ps(event: ManualTriggerEvent):
+ action_params = PodInfoParams(**event.data)
+ logging.info(f"getting info for: {action_params}")
+
+ pod: RobustaPod = RobustaPod.find_pod(action_params.pod_name, action_params.namespace)
+ for proc in pod.get_processes():
+ print(f"{proc.pid}\t{proc.exe}\t{proc.cmdline}")
+
+
+class PythonStackDumpParams(BaseModel):
+ namespace: str = "default"
+ process_name: str = ""
+ slack_channel: str
+ pod_name: str
+
diff --git a/playbooks/requirements.txt b/playbooks/requirements.txt
new file mode 100644
index 000000000..3ccd0a441
--- /dev/null
+++ b/playbooks/requirements.txt
@@ -0,0 +1,7 @@
+cairosvg
+flask
+kubernetes
+prometheus-api-client
+pygal
+tinycss
+cssselect
\ No newline at end of file
diff --git a/playbooks/restart_loop_reporter.py b/playbooks/restart_loop_reporter.py
new file mode 100644
index 000000000..2b2f43465
--- /dev/null
+++ b/playbooks/restart_loop_reporter.py
@@ -0,0 +1,41 @@
+import logging
+from robusta.api import *
+
+
+class RestartLoopParams(BaseModel):
+ restart_reason: str = None
+ slack_channel: str = "general"
+ rate_limit: int = 3600
+
+
+def get_crashing_containers(status: PodStatus, config: RestartLoopParams) -> List[ContainerStatus]:
+ return [container_status for container_status in status.containerStatuses if
+ container_status.state.waiting is not None and container_status.restartCount > 1 # report only after the 2nd restart and get previous logs
+ and (config.restart_reason is None or container_status.state.waiting.reason == config.restart_reason)]
+
+
+@on_pod_update
+def restart_loop_reporter(event: PodEvent, config: RestartLoopParams):
+ crashed_container_statuses = get_crashing_containers(event.obj.status, config)
+
+ if len(crashed_container_statuses) == 0:
+ return # no matching containers
+
+ pod = event.obj
+ pod_name = pod.metadata.name
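+ # skip if a report for this pod (keyed by name + namespace) was already sent within the last rate_limit seconds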
+ if not RateLimiter.mark_and_test("restart_loop_reporter", pod_name + pod.metadata.namespace, config.rate_limit):
+ return
+
+ for container_status in crashed_container_statuses:
+ event.report_blocks.append(MarkdownBlock(f'*{container_status.name} restart* count: {container_status.restartCount}'))
+ container_log = pod.get_logs(container_status.name, previous=True)
+ if container_log:
+ event.report_blocks.append(FileBlock(f"{pod_name}.txt", container_log))
+ else:
+ event.report_blocks.append(MarkdownBlock(f"Container logs unavailable for container: {container_status.name}"))
+ logging.error(f"could not fetch logs from container: {container_status.name}. logs were {container_log}")
+
+ event.report_title = f"Crashing pod {pod.metadata.name} in namespace {pod.metadata.namespace} ({container_status.state.waiting.reason})"
+ event.slack_channel = config.slack_channel
+ send_to_slack(event)
+
diff --git a/playbooks/stress_tests.py b/playbooks/stress_tests.py
new file mode 100644
index 000000000..d6bae9f92
--- /dev/null
+++ b/playbooks/stress_tests.py
@@ -0,0 +1,17 @@
+from robusta.api import *
+
+
+class StressTestParams (BaseModel):
+ slack_channel: str
+ n: int = 1000
+ url: str
+
+@on_manual_trigger
+def http_stress_test(event: ManualTriggerEvent):
+ action_params = StressTestParams(**event.data)
+ # TODO: remove timeout?
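+ # run the 'hey' HTTP load generator as a one-off in-cluster job with a 120 second timeout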
+ output = RobustaJob.run_simple_job("williamyeh/hey", f"/hey -n {action_params.n} {action_params.url}", 120)
+ event.slack_channel = action_params.slack_channel
+ event.report_title = f"Done running stress test with {action_params.n} http requests for url {action_params.url}"
+ event.report_blocks.append(FileBlock("result.txt", output))
+ send_to_slack(event)
diff --git a/research/hiakru_test.py b/research/hiakru_test.py
new file mode 100644
index 000000000..2138c3687
--- /dev/null
+++ b/research/hiakru_test.py
@@ -0,0 +1,8 @@
+from hikaru.model import *
+from kubernetes import config
+
+config.load_kube_config()
+try:
+ x= ConfigMap.readNamespacedConfigMap("doesnt-exist", "robusta").obj
+except Exception as e:
+ pass
diff --git a/research/volume_backups/.gitignore b/research/volume_backups/.gitignore
new file mode 100644
index 000000000..34a4bc094
--- /dev/null
+++ b/research/volume_backups/.gitignore
@@ -0,0 +1,2 @@
+controller/
+crds/
\ No newline at end of file
diff --git a/research/volume_backups/setup_snapshot_crds.sh b/research/volume_backups/setup_snapshot_crds.sh
new file mode 100644
index 000000000..af45831e6
--- /dev/null
+++ b/research/volume_backups/setup_snapshot_crds.sh
@@ -0,0 +1,10 @@
+mkdir crds
+wget https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/53469c21962339229dd150cbba50c34359acec73/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml -O crds/a.yaml
+wget https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/53469c21962339229dd150cbba50c34359acec73/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml -O crds/b.yaml
+wget https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/53469c21962339229dd150cbba50c34359acec73/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml -O crds/c.yaml
+kubectl apply -f crds
+
+mkdir controller
+wget https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml -O controller/a.yaml
+wget https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/master/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml -O controller/b.yaml
+kubectl apply -f controller
diff --git a/research/volume_backups/snapshot.yaml b/research/volume_backups/snapshot.yaml
new file mode 100644
index 000000000..59c916738
--- /dev/null
+++ b/research/volume_backups/snapshot.yaml
@@ -0,0 +1,15 @@
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshotClass
+metadata:
+ name: snapshot-class
+driver: pd.csi.storage.gke.io
+deletionPolicy: Delete
+---
+apiVersion: snapshot.storage.k8s.io/v1beta1
+kind: VolumeSnapshot
+metadata:
+ name: test-snapshot
+spec:
+ volumeSnapshotClassName: snapshot-class
+ source:
+ persistentVolumeClaimName: prometheus-data
\ No newline at end of file
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 000000000..45118f9b8
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,2 @@
+This directory is for developer scripts only.
+Everything that should be packaged and installed via pip is in src/
\ No newline at end of file
diff --git a/scripts/__init__.py b/scripts/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/scripts/generate_kubernetes_code.py b/scripts/generate_kubernetes_code.py
new file mode 100755
index 000000000..25f936132
--- /dev/null
+++ b/scripts/generate_kubernetes_code.py
@@ -0,0 +1,172 @@
+import argparse
+import os
+import textwrap
+import inflection
+from typing import TextIO
+
+KUBERNETES_VERSIONS = ["v1", "v2beta1", "v2beta2"]
+KUBERNETES_RESOURCES = ["Pod", "ReplicaSet", "DaemonSet", "Deployment", "Service", "ConfigMap", "Event", "HorizontalPodAutoscaler"]
+TRIGGER_TYPES = {
+ "create": "K8sOperationType.CREATE",
+ "update": "K8sOperationType.UPDATE",
+ "delete": "K8sOperationType.DELETE",
+ "all_changes": "None",
+}
+
+CUSTOM_SUBCLASSES = {
+ "Pod": "RobustaPod",
+ "Deployment": "RobustaDeployment"
+}
+CUSTOM_SUBCLASSES_NAMES_STR = ",".join(CUSTOM_SUBCLASSES.values())
+
+COMMON_PREFIX = """# This file was autogenerated. Do not edit.\n\n"""
+
+
+def get_model_class(k8s_resource_name: str) -> str:
+ if k8s_resource_name in CUSTOM_SUBCLASSES:
+ return CUSTOM_SUBCLASSES[k8s_resource_name]
+ return k8s_resource_name
+
+
+def autogenerate_events(f: TextIO):
+ f.write(COMMON_PREFIX)
+ f.write(textwrap.dedent(f"""\
+ from dataclasses import dataclass
+ from typing import Union
+ from ..base_event import K8sBaseEvent
+ from ..custom_models import {CUSTOM_SUBCLASSES_NAMES_STR}
+ """))
+
+ for version in KUBERNETES_VERSIONS:
+ for resource in KUBERNETES_RESOURCES:
+ f.write(textwrap.dedent(f"""\
+ from hikaru.model.rel_1_16.{version} import {resource} as {version}{resource}
+ """))
+
+
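+ # build a Union[...] type across all API versions for each resource; custom subclasses (RobustaPod, RobustaDeployment) are used as-is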
+ all_versioned_resources = set()
+ for resource in KUBERNETES_RESOURCES:
+ if resource in CUSTOM_SUBCLASSES:
+ model_class_str = get_model_class(resource)
+ all_versioned_resources.add(model_class_str)
+ else:
+ version_resources = [f"{version}{resource}" for version in KUBERNETES_VERSIONS]
+ model_class_str = f"Union[{','.join(version_resources)}]"
+ all_versioned_resources = all_versioned_resources.union(set(version_resources))
+
+ f.write(textwrap.dedent(f"""\
+
+ @dataclass
+ class {resource}Event (K8sBaseEvent):
+ obj: {model_class_str}
+ old_obj: {model_class_str}
+
+ """))
+
+ # add the KubernetesAnyEvent
+ f.write(textwrap.dedent(f"""\
+
+ @dataclass
+ class KubernetesAnyEvent (K8sBaseEvent):
+ obj: {f"Union[{','.join(all_versioned_resources)}]"}
+ old_obj: {f"Union[{','.join(all_versioned_resources)}]"}
+
+ """))
+
+ mappers = [f"'{r}': {r}Event" for r in KUBERNETES_RESOURCES]
+ mappers_str = ",\n ".join(mappers)
+ f.write(f"\nKIND_TO_EVENT_CLASS = {{\n {mappers_str}\n}}\n")
+
+
+def autogenerate_models(f: TextIO, version : str):
+ f.write(COMMON_PREFIX)
+ f.write(textwrap.dedent(f"""\
+ from hikaru.model.rel_1_16.{version} import *
+ from ...custom_models import {CUSTOM_SUBCLASSES_NAMES_STR}
+
+
+ """))
+
+ mappers = [f"'{r}': {get_model_class(r)}" for r in KUBERNETES_RESOURCES]
+ mappers_str = ",\n ".join(mappers)
+ f.write(f"KIND_TO_MODEL_CLASS = {{\n {mappers_str}\n}}\n")
+
+
+def autogenerate_versioned_models(f: TextIO):
+ f.write(COMMON_PREFIX)
+ for version in KUBERNETES_VERSIONS:
+
+ f.write(textwrap.dedent(f"""\
+ from .{version}.models import KIND_TO_MODEL_CLASS as {version}
+ """))
+
+ mappers = [f"'{version}': {version}" for version in KUBERNETES_VERSIONS]
+ mappers_str = ",\n ".join(mappers)
+
+ f.write(f"VERSION_KIND_TO_MODEL_CLASS = {{\n {mappers_str}\n}}\n")
+ f.write(textwrap.dedent(f"""\
+
+
+ def get_api_version(apiVersion: str):
+ if "/" in apiVersion:
+ apiVersion = apiVersion.split("/")[1]
+ return VERSION_KIND_TO_MODEL_CLASS.get(apiVersion)
+ """))
+
+
+
+def autogenerate_triggers(f: TextIO):
+ f.write(COMMON_PREFIX)
+ f.write(textwrap.dedent("""\
+ from ....utils.decorators import doublewrap
+ from ..base_triggers import register_k8s_playbook, register_k8s_any_playbook
+ from ..base_event import K8sOperationType
+
+
+ """))
+
+ for resource in KUBERNETES_RESOURCES:
+ f.write(f"# {resource} Triggers\n")
+ for trigger_name, operation_type in TRIGGER_TYPES.items():
+ f.write(textwrap.dedent(f"""\
+ @doublewrap
+ def on_{resource.lower()}_{trigger_name}(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, '{resource}', {operation_type}, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+ """))
+
+ f.write(f"# Kubernetes Any Triggers\n")
+ for trigger_name, operation_type in TRIGGER_TYPES.items():
+ f.write(textwrap.dedent(f"""\
+ @doublewrap
+ def on_kubernetes_any_{trigger_name}(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_any_playbook(func, {operation_type}, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+ """))
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Autogenerate kubernetes models, events, and triggers')
+ parser.add_argument('directory', type=str, help='output directory')
+ args = parser.parse_args()
+
+ # generate versioned events and models
+ for version in KUBERNETES_VERSIONS:
+ dir_path = os.path.join(args.directory, version)
+ os.makedirs(dir_path, exist_ok=True)
+ with open(os.path.join(dir_path, "models.py"), "w") as f:
+ autogenerate_models(f, version)
+
+ # generate all version unions
+ with open(os.path.join(args.directory, "events.py"), "w") as f:
+ autogenerate_events(f)
+ with open(os.path.join(args.directory, "models.py"), "w") as f:
+ autogenerate_versioned_models(f)
+ with open(os.path.join(args.directory, "triggers.py"), "w") as f:
+ autogenerate_triggers(f)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/generate_playbook_descriptions.py b/scripts/generate_playbook_descriptions.py
new file mode 100644
index 000000000..9235d2ff2
--- /dev/null
+++ b/scripts/generate_playbook_descriptions.py
@@ -0,0 +1,64 @@
+import argparse
+import importlib
+import inspect
+import os
+import glob
+
+from pydantic import BaseModel
+
+from src.robusta import get_function_params_class
+from src.robusta import TriggerParams
+from src.robusta.runner import install_requirements
+
+
+class PlaybookDescription(BaseModel):
+ function_name: str
+ builtin_trigger_params: TriggerParams
+ docs: str = None
+ src: str
+ src_file: str
+ action_params: dict = None
+
+
+def get_params_schema(func):
+ action_params = get_function_params_class(func)
+ if action_params is None:
+ return None
+ return action_params.schema()
+
+
+def load_scripts(scripts_root):
+ install_requirements(os.path.join(scripts_root, 'requirements.txt'))
+
+ python_files = glob.glob(f'{scripts_root}/*.py')
+
+ for script in python_files:
+ print(f'loading playbooks {script}')
+ filename = os.path.basename(script)
+ (module_name, ext) = os.path.splitext(filename)
+ spec = importlib.util.spec_from_file_location(module_name, script)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
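+ # playbook functions are those tagged with a __playbook attribute (set by the @on_* trigger decorators)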
+ playbooks = inspect.getmembers(module, lambda f: inspect.isfunction(f) and getattr(f, "__playbook", None) is not None)
+ for _, func in playbooks:
+ description = PlaybookDescription(
+ function_name=func.__name__,
+ builtin_trigger_params=func.__playbook["default_trigger_params"],
+ docs=inspect.getdoc(func),
+ src=inspect.getsource(func),
+ src_file=inspect.getsourcefile(func),
+ action_params=get_params_schema(func),
+ )
+ print(description.json(), "\n\n")
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Generate playbook descriptions')
+ parser.add_argument('directory', type=str, help='directory containing the playbooks')
+ args = parser.parse_args()
+ load_scripts(args.directory)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/start_telepresence_forwarding.sh b/scripts/start_telepresence_forwarding.sh
new file mode 100644
index 000000000..9b293e1b5
--- /dev/null
+++ b/scripts/start_telepresence_forwarding.sh
@@ -0,0 +1,11 @@
+set -o xtrace
+telepresence connect
+telepresence intercept -n robusta robusta-runner --port 5000:http --env-file example.env
+# on WSL we also run socat to forward traffic from wsl to windows
+if grep -qi microsoft /proc/version; then
+ # put your Windows public IP here, but don't use the Windows WSL IP because it doesn't work :(
+ socat tcp-listen:5000,fork tcp:192.168.14.97:5000
+fi
+
+telepresence leave robusta-runner-robusta
+
diff --git a/skaffold.yaml b/skaffold.yaml
new file mode 100644
index 000000000..22fa673d9
--- /dev/null
+++ b/skaffold.yaml
@@ -0,0 +1,40 @@
+apiVersion: skaffold/v2beta1
+kind: Config
+metadata:
+ name: robusta
+build:
+ artifacts:
+ - image: us-central1-docker.pkg.dev/arabica-300319/devel/robusta-runner
+ context: src
+ docker:
+ dockerfile: Dockerfile
+ local:
+ push: true
+ concurrency: 0
+deploy:
+ kustomize:
+ paths: ["./deployment/dev"]
+
+portForward:
+- resourceType: deployment
+ resourceName: robusta-runner
+ port: 5000
+ localPort: 5000
+
+
+profiles:
+- name: prod
+ deploy:
+ kustomize:
+ paths: [ "./deployment/prod" ]
+
+- name: apple-m1-dev
+ build:
+ artifacts:
+ - image: us-central1-docker.pkg.dev/arabica-300319/devel/robusta-runner
+ context: src
+ custom:
+ buildCommand: ./build_on_apple_m1.sh
+ local:
+ push: true
+ concurrency: 0
\ No newline at end of file
diff --git a/src/Dockerfile b/src/Dockerfile
new file mode 100644
index 000000000..ee3e21c5c
--- /dev/null
+++ b/src/Dockerfile
@@ -0,0 +1,30 @@
+# see https://pythonspeed.com/articles/alpine-docker-python/ for the reason we don't use alpine
+FROM python:3.8-slim
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends git socat wget curl libcairo2 python3-dev libffi-dev \
+ && apt-get purge -y --auto-remove \
+ && rm -rf /var/lib/apt/lists/*
+
+# install a custom version of socat with readline enabled
+RUN wget https://launchpad.net/~ionel-mc/+archive/ubuntu/socat/+build/15532886/+files/socat_1.7.3.2-2ubuntu2ionelmc2~ppa1_amd64.deb
+RUN dpkg -i socat_1.7.3.2-2ubuntu2ionelmc2~ppa1_amd64.deb
+
+ENV CUSTOM_PLAYBOOKS_ROOT=/etc/robusta/config
+ENV ENV_TYPE=DEV
+
+# we install the project requirements and install the app in separate stages to optimize docker layer caching
+RUN mkdir /app
+RUN pip3 install --upgrade pip
+RUN pip3 install poetry==1.1.6
+RUN poetry config virtualenvs.create false
+COPY pyproject.toml /app
+COPY poetry.lock /app
+WORKDIR /app
+RUN bash -c "pip3 install --requirement <(poetry export --dev --format requirements.txt --without-hashes)"
+
+COPY . /app
+
+RUN pip3 install --use-feature=in-tree-build .
+
+# -u disables stdout buffering https://stackoverflow.com/questions/107705/disable-output-buffering
+CMD [ "python3", "-u", "-m", "robusta.runner.main"]
\ No newline at end of file
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/build_on_apple_m1.sh b/src/build_on_apple_m1.sh
new file mode 100755
index 000000000..673fbf709
--- /dev/null
+++ b/src/build_on_apple_m1.sh
@@ -0,0 +1,5 @@
+docker buildx build \
+ --platform linux/amd64 \
+ --tag $IMAGE \
+ --push \
+ $BUILD_CONTEXT
\ No newline at end of file
diff --git a/src/poetry.lock b/src/poetry.lock
new file mode 100644
index 000000000..1a8f0308e
--- /dev/null
+++ b/src/poetry.lock
@@ -0,0 +1,1184 @@
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "autopep8"
+version = "1.5.7"
+description = "A tool that automatically formats Python code to conform to the PEP 8 style guide"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pycodestyle = ">=2.7.0"
+toml = "*"
+
+[[package]]
+name = "better-exceptions"
+version = "0.3.3"
+description = "Pretty and helpful exceptions, automatically"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+
+[[package]]
+name = "black"
+version = "21.6b0"
+description = "The uncompromising code formatter."
+category = "dev"
+optional = false
+python-versions = ">=3.6.2"
+
+[package.dependencies]
+appdirs = "*"
+click = ">=7.1.2"
+mypy-extensions = ">=0.4.3"
+pathspec = ">=0.8.1,<1"
+regex = ">=2020.1.8"
+toml = ">=0.10.1"
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"]
+python2 = ["typed-ast (>=1.4.2)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "cachetools"
+version = "4.2.2"
+description = "Extensible memoizing collections and decorators"
+category = "main"
+optional = false
+python-versions = "~=3.5"
+
+[[package]]
+name = "cairocffi"
+version = "1.2.0"
+description = "cffi-based cairo bindings for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+cffi = ">=1.1.0"
+
+[package.extras]
+doc = ["sphinx", "sphinx-rtd-theme"]
+test = ["pytest-runner", "pytest-cov", "pytest-flake8", "pytest-isort"]
+xcb = ["xcffib (>=0.3.2)"]
+
+[[package]]
+name = "cairosvg"
+version = "2.5.2"
+description = "A Simple SVG Converter based on Cairo"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+cairocffi = "*"
+cssselect2 = "*"
+defusedxml = "*"
+pillow = "*"
+tinycss2 = "*"
+
+[package.extras]
+doc = ["sphinx", "sphinx-rtd-theme"]
+test = ["pytest-runner", "pytest-cov", "pytest-flake8", "pytest-isort"]
+
+[[package]]
+name = "certifi"
+version = "2021.5.30"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "cffi"
+version = "1.14.5"
+description = "Foreign Function Interface for Python calling C code."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "chardet"
+version = "4.0.0"
+description = "Universal encoding detector for Python 2 and 3"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "click"
+version = "7.1.2"
+description = "Composable command line interface toolkit"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "colorama"
+version = "0.4.4"
+description = "Cross-platform colored terminal text."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "colorlog"
+version = "5.0.1"
+description = "Add colours to the output of Python's logging module."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+
+[[package]]
+name = "cssselect2"
+version = "0.4.1"
+description = "cssselect2"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+tinycss2 = "*"
+webencodings = "*"
+
+[package.extras]
+doc = ["sphinx", "sphinx-rtd-theme"]
+test = ["pytest", "pytest-cov", "pytest-flake8", "pytest-isort", "coverage"]
+
+[[package]]
+name = "defusedxml"
+version = "0.7.1"
+description = "XML bomb protection for Python stdlib modules"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "dulwich"
+version = "0.20.23"
+description = "Python Git Library"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+certifi = "*"
+urllib3 = ">=1.24.1"
+
+[package.extras]
+fastimport = ["fastimport"]
+https = ["urllib3[secure] (>=1.24.1)"]
+pgp = ["gpg"]
+watch = ["pyinotify"]
+
+[[package]]
+name = "dunamai"
+version = "1.5.5"
+description = "Dynamic version generation"
+category = "dev"
+optional = false
+python-versions = ">=3.5,<4.0"
+
+[[package]]
+name = "flask"
+version = "1.1.4"
+description = "A simple framework for building complex web applications."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.dependencies]
+click = ">=5.1,<8.0"
+itsdangerous = ">=0.24,<2.0"
+Jinja2 = ">=2.10.1,<3.0"
+Werkzeug = ">=0.15,<2.0"
+
+[package.extras]
+dev = ["pytest", "coverage", "tox", "sphinx", "pallets-sphinx-themes", "sphinxcontrib-log-cabinet", "sphinx-issues"]
+docs = ["sphinx", "pallets-sphinx-themes", "sphinxcontrib-log-cabinet", "sphinx-issues"]
+dotenv = ["python-dotenv"]
+
+[[package]]
+name = "google-auth"
+version = "1.31.0"
+description = "Google Authentication Library"
+category = "main"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*"
+
+[package.dependencies]
+cachetools = ">=2.0.0,<5.0"
+pyasn1-modules = ">=0.2.1"
+rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""}
+six = ">=1.9.0"
+
+[package.extras]
+aiohttp = ["requests (>=2.20.0,<3.0.0dev)", "aiohttp (>=3.6.2,<4.0.0dev)"]
+pyopenssl = ["pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
+
+[[package]]
+name = "grafana-api"
+version = "1.0.3"
+description = "Yet another Python library for Grafana API"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+requests = ">=2.23.0"
+
+[package.extras]
+test = ["codecov (>=2.1.0)", "coverage (>=5.2.0)", "unittest-xml-reporting (>=3.0.0)", "requests-mock (>=1.8.0)"]
+
+[[package]]
+name = "hikaru"
+version = "0.4b0"
+description = "Hikaru allows you to smoothly move between Kubernetes YAML, Python objects, and Python source, in any direction"
+category = "dev"
+optional = false
+python-versions = "*"
+develop = false
+
+[package.dependencies]
+autopep8 = ">=1.5.5"
+black = ">=20.8b1"
+kubernetes = "12.0.1"
+"ruamel.yaml" = ">=0.16.12"
+
+[package.source]
+type = "git"
+url = "https://github.com/aantn/hikaru.git"
+reference = "fix_datetimes"
+resolved_reference = "00caa9075a9915b44b04695bc5b9ff78b9caf6e8"
+
+[[package]]
+name = "idna"
+version = "2.10"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "inflection"
+version = "0.5.1"
+description = "A port of Ruby on Rails inflector to Python"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "itsdangerous"
+version = "1.1.0"
+description = "Various helpers to pass data to untrusted environments and back."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "jinja2"
+version = "2.11.3"
+description = "A very fast and expressive template engine."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.dependencies]
+MarkupSafe = ">=0.23"
+
+[package.extras]
+i18n = ["Babel (>=0.8)"]
+
+[[package]]
+name = "kubernetes"
+version = "12.0.1"
+description = "Kubernetes python client"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+certifi = ">=14.05.14"
+google-auth = ">=1.0.1"
+python-dateutil = ">=2.5.3"
+pyyaml = ">=3.12"
+requests = "*"
+requests-oauthlib = "*"
+six = ">=1.9.0"
+urllib3 = ">=1.24.2"
+websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.0 || >=0.43.0"
+
+[package.extras]
+adal = ["adal (>=1.0.2)"]
+
+[[package]]
+name = "manhole"
+version = "1.8.0"
+description = "Manhole is in-process service that will accept unix domain socket connections and present the"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "markupsafe"
+version = "2.0.1"
+description = "Safely add untrusted strings to HTML/XML markup."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "mypy-extensions"
+version = "0.4.3"
+description = "Experimental type system extensions for programs checked with the mypy typechecker."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "oauthlib"
+version = "3.1.1"
+description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+rsa = ["cryptography (>=3.0.0,<4)"]
+signals = ["blinker (>=1.4.0)"]
+signedtoken = ["cryptography (>=3.0.0,<4)", "pyjwt (>=2.0.0,<3)"]
+
+[[package]]
+name = "pathspec"
+version = "0.8.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "pillow"
+version = "8.2.0"
+description = "Python Imaging Library (Fork)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "pyasn1"
+version = "0.4.8"
+description = "ASN.1 types and codecs"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.2.8"
+description = "A collection of ASN.1-based protocols modules."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.5.0"
+
+[[package]]
+name = "pycodestyle"
+version = "2.7.0"
+description = "Python style guide checker"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "pycparser"
+version = "2.20"
+description = "C parser in Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "pydantic"
+version = "1.8.2"
+description = "Data validation and settings management using python 3.6 type hinting"
+category = "main"
+optional = false
+python-versions = ">=3.6.1"
+
+[package.dependencies]
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.1"
+description = "Extensions to the standard Python datetime module"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "pyyaml"
+version = "5.4.1"
+description = "YAML parser and emitter for Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+
+[[package]]
+name = "regex"
+version = "2021.4.4"
+description = "Alternative regular expression module, to replace re."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "requests"
+version = "2.25.1"
+description = "Python HTTP for Humans."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+chardet = ">=3.0.2,<5"
+idna = ">=2.5,<3"
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"]
+socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
+
+[[package]]
+name = "requests-oauthlib"
+version = "1.3.0"
+description = "OAuthlib authentication support for Requests."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[package.dependencies]
+oauthlib = ">=3.0.0"
+requests = ">=2.0.0"
+
+[package.extras]
+rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
+
+[[package]]
+name = "rsa"
+version = "4.7.2"
+description = "Pure-Python RSA implementation"
+category = "main"
+optional = false
+python-versions = ">=3.5, <4"
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
+[[package]]
+name = "ruamel.yaml"
+version = "0.17.9"
+description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order"
+category = "dev"
+optional = false
+python-versions = ">=3"
+
+[package.dependencies]
+"ruamel.yaml.clib" = {version = ">=0.1.2", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.10\""}
+
+[package.extras]
+docs = ["ryd"]
+jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"]
+
+[[package]]
+name = "ruamel.yaml.clib"
+version = "0.2.2"
+description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "slack-bolt"
+version = "1.6.1"
+description = "The Bolt Framework for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+slack-sdk = ">=3.5.0,<4"
+
+[package.extras]
+adapter = ["boto3 (<=2)", "moto (<2)", "bottle (>=0.12,<1)", "boddle (>=0.2,<0.3)", "chalice (>=1.22.4,<2)", "click (>=7,<8)", "CherryPy (>=18,<19)", "Django (>=3,<4)", "falcon (>=2,<3)", "fastapi (<1)", "Flask (>=1,<2)", "Werkzeug (<2)", "pyramid (>=1,<2)", "sanic (>=20,<21)", "starlette (>=0.13,<1)", "requests (>=2,<3)", "tornado (>=6,<7)", "uvicorn (<1)", "gunicorn (>=20,<21)", "websocket-client (>=0.57,<1)"]
+async = ["aiohttp (>=3,<4)", "websockets (>=8,<9)"]
+testing = ["pytest (>=5,<6)", "pytest-cov (>=2,<3)", "pytest-asyncio (<1)", "aiohttp (>=3,<4)", "Flask-Sockets (>=0.2,<1)", "Werkzeug (<2)", "black (==21.5b1)"]
+
+[[package]]
+name = "slack-sdk"
+version = "3.6.0"
+description = "The Slack API Platform SDK for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.extras]
+optional = ["aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "SQLAlchemy (>=1,<2)", "websockets (>=9.1,<10)", "websocket-client (>=0.57,<1)"]
+testing = ["pytest (>=5.4,<6)", "pytest-asyncio (<1)", "Flask-Sockets (>=0.2,<1)", "pytest-cov (>=2,<3)", "codecov (>=2,<3)", "flake8 (>=3,<4)", "black (==21.5b1)", "psutil (>=5,<6)", "databases (>=0.3)"]
+
+[[package]]
+name = "tabulate"
+version = "0.8.9"
+description = "Pretty-print tabular data"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.extras]
+widechars = ["wcwidth"]
+
+[[package]]
+name = "tinycss2"
+version = "1.1.0"
+description = "tinycss2"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+webencodings = ">=0.4"
+
+[package.extras]
+doc = ["sphinx", "sphinx-rtd-theme"]
+test = ["pytest", "pytest-cov", "pytest-flake8", "pytest-isort", "coverage"]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+category = "dev"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "typer"
+version = "0.3.2"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+click = ">=7.1.1,<7.2.0"
+
+[package.extras]
+test = ["pytest-xdist (>=1.32.0,<2.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "mypy (==0.782)", "black (>=19.10b0,<20.0b0)", "isort (>=5.0.6,<6.0.0)", "shellingham (>=1.3.0,<2.0.0)", "pytest (>=4.4.0,<5.4.0)", "pytest-cov (>=2.10.0,<3.0.0)", "coverage (>=5.2,<6.0)"]
+all = ["colorama (>=0.4.3,<0.5.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)"]
+doc = ["mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=5.4.0,<6.0.0)", "markdown-include (>=0.5.1,<0.6.0)"]
+
+[[package]]
+name = "typing-extensions"
+version = "3.10.0.0"
+description = "Backported and Experimental Type Hints for Python 3.5+"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "urllib3"
+version = "1.26.5"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+
+[package.extras]
+brotli = ["brotlipy (>=0.6.0)"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "watchdog"
+version = "2.1.2"
+description = "Filesystem events monitoring"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"]
+
+[[package]]
+name = "webencodings"
+version = "0.5.1"
+description = "Character encoding aliases for legacy web content"
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "websocket-client"
+version = "1.1.0"
+description = "WebSocket client for Python with low level API options"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "werkzeug"
+version = "1.0.1"
+description = "The comprehensive WSGI web application library."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.extras]
+dev = ["pytest", "pytest-timeout", "coverage", "tox", "sphinx", "pallets-sphinx-themes", "sphinx-issues"]
+watchdog = ["watchdog"]
+
+[metadata]
+lock-version = "1.1"
+python-versions = "^3.8"
+content-hash = "1474b7f871a089c5c5055d3621f1bb252443e674612bf91167c5ca2298dea127"
+
+[metadata.files]
+appdirs = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+autopep8 = [
+ {file = "autopep8-1.5.7-py2.py3-none-any.whl", hash = "sha256:aa213493c30dcdac99537249ee65b24af0b2c29f2e83cd8b3f68760441ed0db9"},
+ {file = "autopep8-1.5.7.tar.gz", hash = "sha256:276ced7e9e3cb22e5d7c14748384a5cf5d9002257c0ed50c0e075b68011bb6d0"},
+]
+better-exceptions = [
+ {file = "better_exceptions-0.3.3-py3-none-any.whl", hash = "sha256:9c70b1c61d5a179b84cd2c9d62c3324b667d74286207343645ed4306fdaad976"},
+ {file = "better_exceptions-0.3.3-py3.8.egg", hash = "sha256:bf111d0c9994ac1123f29c24907362bed2320a86809c85f0d858396000667ce2"},
+ {file = "better_exceptions-0.3.3.tar.gz", hash = "sha256:e4e6bc18444d5f04e6e894b10381e5e921d3d544240418162c7db57e9eb3453b"},
+]
+black = [
+ {file = "black-21.6b0-py3-none-any.whl", hash = "sha256:dfb8c5a069012b2ab1e972e7b908f5fb42b6bbabcba0a788b86dc05067c7d9c7"},
+ {file = "black-21.6b0.tar.gz", hash = "sha256:dc132348a88d103016726fe360cb9ede02cecf99b76e3660ce6c596be132ce04"},
+]
+cachetools = [
+ {file = "cachetools-4.2.2-py3-none-any.whl", hash = "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001"},
+ {file = "cachetools-4.2.2.tar.gz", hash = "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff"},
+]
+cairocffi = [
+ {file = "cairocffi-1.2.0.tar.gz", hash = "sha256:9a979b500c64c8179fec286f337e8fe644eca2f2cd05860ce0b62d25f22ea140"},
+]
+cairosvg = [
+ {file = "CairoSVG-2.5.2-py3-none-any.whl", hash = "sha256:98c276b7e4f0caf01e5c7176765c104ffa1aa1461d63b2053b04ab663cf7052b"},
+ {file = "CairoSVG-2.5.2.tar.gz", hash = "sha256:b0b9929cf5dba005178d746a8036fcf0025550f498ca54db61873322384783bc"},
+]
+certifi = [
+ {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"},
+ {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"},
+]
+cffi = [
+ {file = "cffi-1.14.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991"},
+ {file = "cffi-1.14.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1"},
+ {file = "cffi-1.14.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa"},
+ {file = "cffi-1.14.5-cp27-cp27m-win32.whl", hash = "sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3"},
+ {file = "cffi-1.14.5-cp27-cp27m-win_amd64.whl", hash = "sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5"},
+ {file = "cffi-1.14.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482"},
+ {file = "cffi-1.14.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6"},
+ {file = "cffi-1.14.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045"},
+ {file = "cffi-1.14.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa"},
+ {file = "cffi-1.14.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406"},
+ {file = "cffi-1.14.5-cp35-cp35m-win32.whl", hash = "sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369"},
+ {file = "cffi-1.14.5-cp35-cp35m-win_amd64.whl", hash = "sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315"},
+ {file = "cffi-1.14.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24ec4ff2c5c0c8f9c6b87d5bb53555bf267e1e6f70e52e5a9740d32861d36b6f"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c3f39fa737542161d8b0d680df2ec249334cd70a8f420f71c9304bd83c3cbed"},
+ {file = "cffi-1.14.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:681d07b0d1e3c462dd15585ef5e33cb021321588bebd910124ef4f4fb71aef55"},
+ {file = "cffi-1.14.5-cp36-cp36m-win32.whl", hash = "sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53"},
+ {file = "cffi-1.14.5-cp36-cp36m-win_amd64.whl", hash = "sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813"},
+ {file = "cffi-1.14.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06d7cd1abac2ffd92e65c0609661866709b4b2d82dd15f611e602b9b188b0b69"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f861a89e0043afec2a51fd177a567005847973be86f709bbb044d7f42fc4e05"},
+ {file = "cffi-1.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc5a8e069b9ebfa22e26d0e6b97d6f9781302fe7f4f2b8776c3e1daea35f1adc"},
+ {file = "cffi-1.14.5-cp37-cp37m-win32.whl", hash = "sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62"},
+ {file = "cffi-1.14.5-cp37-cp37m-win_amd64.whl", hash = "sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4"},
+ {file = "cffi-1.14.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c468b622ed31d408fea2346bec5bbffba2cc44226302a0de1ade9f5ea3d373"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06db6321b7a68b2bd6df96d08a5adadc1fa0e8f419226e25b2a5fbf6ccc7350f"},
+ {file = "cffi-1.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:293e7ea41280cb28c6fcaaa0b1aa1f533b8ce060b9e701d78511e1e6c4a1de76"},
+ {file = "cffi-1.14.5-cp38-cp38-win32.whl", hash = "sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e"},
+ {file = "cffi-1.14.5-cp38-cp38-win_amd64.whl", hash = "sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396"},
+ {file = "cffi-1.14.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bf1ac1984eaa7675ca8d5745a8cb87ef7abecb5592178406e55858d411eadc0"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:df5052c5d867c1ea0b311fb7c3cd28b19df469c056f7fdcfe88c7473aa63e333"},
+ {file = "cffi-1.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24a570cd11895b60829e941f2613a4f79df1a27344cbbb82164ef2e0116f09c7"},
+ {file = "cffi-1.14.5-cp39-cp39-win32.whl", hash = "sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396"},
+ {file = "cffi-1.14.5-cp39-cp39-win_amd64.whl", hash = "sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d"},
+ {file = "cffi-1.14.5.tar.gz", hash = "sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c"},
+]
+chardet = [
+ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"},
+ {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"},
+]
+click = [
+ {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"},
+ {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"},
+]
+colorama = [
+ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
+ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
+]
+colorlog = [
+ {file = "colorlog-5.0.1-py2.py3-none-any.whl", hash = "sha256:4e6be13d9169254e2ded6526a6a4a1abb8ac564f2fa65b310a98e4ca5bea2c04"},
+ {file = "colorlog-5.0.1.tar.gz", hash = "sha256:f17c013a06962b02f4449ee07cfdbe6b287df29efc2c9a1515b4a376f4e588ea"},
+]
+cssselect2 = [
+ {file = "cssselect2-0.4.1-py3-none-any.whl", hash = "sha256:2f4a9f20965367bae459e3bb42561f7927e0cfe5b7ea1692757cf67ef5d7dace"},
+ {file = "cssselect2-0.4.1.tar.gz", hash = "sha256:93fbb9af860e95dd40bf18c3b2b6ed99189a07c0f29ba76f9c5be71344664ec8"},
+]
+defusedxml = [
+ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
+ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
+]
+dulwich = [
+ {file = "dulwich-0.20.23-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:17c556b81a9593003b40c60bf80698802834458ee4c969ccf9a55077de876785"},
+ {file = "dulwich-0.20.23-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fad33179c8c3a5782740865151a4011c4665bc6e356a200c9543637a180254e1"},
+ {file = "dulwich-0.20.23-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a4fa6221978437508d7974a8239a21c6291b35230409427a0de8a67b40905140"},
+ {file = "dulwich-0.20.23-cp36-cp36m-win_amd64.whl", hash = "sha256:7a3992324e896afcb262fb384345f9a36364f8f9be848779041a15897897ec23"},
+ {file = "dulwich-0.20.23-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:c4cd03c793e11c8ad3c96f296a908837483f82746b3aa8c680389cd5ca3c25a6"},
+ {file = "dulwich-0.20.23-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53a5db0f85cf5868766b92c03278ca0ba36bccf2076cfaed4b91c034a8dbc967"},
+ {file = "dulwich-0.20.23-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7bc04283563d0f0b56b0686ad55702108950d4ed66c8ce4216400f5046b77d59"},
+ {file = "dulwich-0.20.23-cp37-cp37m-win_amd64.whl", hash = "sha256:5fa3db56cc86fe4e16b9645b12b2a9e19ae7263817e97a2cee744b5062769c55"},
+ {file = "dulwich-0.20.23-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:89606403c43e4d78ce549f5ba5d82c2d263027ec3ea7f51873861d9f511a0362"},
+ {file = "dulwich-0.20.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02439319d764d2df194a81f96ad65d5ad4e09e5b10c9d0ed22a28aa2ca8aec4f"},
+ {file = "dulwich-0.20.23-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d410093f9b37089c8199853f8442c9577fc2b7bd985b6568a32461d15397ad67"},
+ {file = "dulwich-0.20.23-cp38-cp38-win_amd64.whl", hash = "sha256:c0a2def5f6d3293a9d13dc0ce4f4674525fdf94644909fa56b57a3d2cf642fad"},
+ {file = "dulwich-0.20.23-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:8d982f98f692acb7614f60d701b5eaf10f05dddb0482182ef3aae2fed0f5df83"},
+ {file = "dulwich-0.20.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645429801e9f4dd25e08fa6cb72175af2a9e7e396fb6d0983ab98f8af4d6d9ab"},
+ {file = "dulwich-0.20.23-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:992767878cca3997454d397aa60d9df456b7204e64322dbec3c76b32a4bae070"},
+ {file = "dulwich-0.20.23-cp39-cp39-win_amd64.whl", hash = "sha256:ebdff115e6fc45b957322d9f5197ef947eb3cf523449612228fb3b1f6133acb6"},
+ {file = "dulwich-0.20.23.tar.gz", hash = "sha256:402e56b5c07f040479d1188e5c2f406e2c006aa3943080155d4c6d05e5fca865"},
+]
+dunamai = [
+ {file = "dunamai-1.5.5-py3-none-any.whl", hash = "sha256:525ac30db6ca4f8e48b9f198c2e8fbc2a9ce3ea189768361c621ea635212ee49"},
+ {file = "dunamai-1.5.5.tar.gz", hash = "sha256:32f30db71e8fd1adeb42fac45c04433680e47a28298447cd30304e0bba95a7dd"},
+]
+flask = [
+ {file = "Flask-1.1.4-py2.py3-none-any.whl", hash = "sha256:c34f04500f2cbbea882b1acb02002ad6fe6b7ffa64a6164577995657f50aed22"},
+ {file = "Flask-1.1.4.tar.gz", hash = "sha256:0fbeb6180d383a9186d0d6ed954e0042ad9f18e0e8de088b2b419d526927d196"},
+]
+google-auth = [
+ {file = "google-auth-1.31.0.tar.gz", hash = "sha256:154f7889c5d679a6f626f36adb12afbd4dbb0a9a04ec575d989d6ba79c4fd65e"},
+ {file = "google_auth-1.31.0-py2.py3-none-any.whl", hash = "sha256:6d47c79b5d09fbc7e8355fd9594cc4cf65fdde5d401c63951eaac4baa1ba2ae1"},
+]
+grafana-api = [
+ {file = "grafana_api-1.0.3-py2.py3-none-any.whl", hash = "sha256:30caef227025ecdb5662b242aaae921b89b58959566d83a5948523c8138e98ce"},
+ {file = "grafana_api-1.0.3.tar.gz", hash = "sha256:d541ea1a5b7efafa56c5f04053ed42b2d972a6aa313bc4881dd192aeb0706e2e"},
+]
+hikaru = []
+idna = [
+ {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"},
+ {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"},
+]
+inflection = [
+ {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"},
+ {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"},
+]
+itsdangerous = [
+ {file = "itsdangerous-1.1.0-py2.py3-none-any.whl", hash = "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749"},
+ {file = "itsdangerous-1.1.0.tar.gz", hash = "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19"},
+]
+jinja2 = [
+ {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"},
+ {file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"},
+]
+kubernetes = [
+ {file = "kubernetes-12.0.1-py2.py3-none-any.whl", hash = "sha256:23c85d8571df8f56e773f1a413bc081537536dc47e2b5e8dc2e6262edb2c57ca"},
+ {file = "kubernetes-12.0.1.tar.gz", hash = "sha256:ec52ea01d52e2ec3da255992f7e859f3a76f2bdb51cf65ba8cd71dfc309d8daa"},
+]
+manhole = [
+ {file = "manhole-1.8.0-py2.py3-none-any.whl", hash = "sha256:a17f62f9d47bd9a53438f655d351d7d2b0d6e9d805665fc775e4cb6cbbb4440b"},
+ {file = "manhole-1.8.0.tar.gz", hash = "sha256:bada20a25b547b395d472e2e08928f0437df26bbdbda4797c55863198e29a21f"},
+]
+markupsafe = [
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"},
+ {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"},
+]
+mypy-extensions = [
+ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
+ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
+]
+oauthlib = [
+ {file = "oauthlib-3.1.1-py2.py3-none-any.whl", hash = "sha256:42bf6354c2ed8c6acb54d971fce6f88193d97297e18602a3a886603f9d7730cc"},
+ {file = "oauthlib-3.1.1.tar.gz", hash = "sha256:8f0215fcc533dd8dd1bee6f4c412d4f0cd7297307d43ac61666389e3bc3198a3"},
+]
+pathspec = [
+ {file = "pathspec-0.8.1-py2.py3-none-any.whl", hash = "sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d"},
+ {file = "pathspec-0.8.1.tar.gz", hash = "sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd"},
+]
+pillow = [
+ {file = "Pillow-8.2.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:dc38f57d8f20f06dd7c3161c59ca2c86893632623f33a42d592f097b00f720a9"},
+ {file = "Pillow-8.2.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a013cbe25d20c2e0c4e85a9daf438f85121a4d0344ddc76e33fd7e3965d9af4b"},
+ {file = "Pillow-8.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8bb1e155a74e1bfbacd84555ea62fa21c58e0b4e7e6b20e4447b8d07990ac78b"},
+ {file = "Pillow-8.2.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c5236606e8570542ed424849f7852a0ff0bce2c4c8d0ba05cc202a5a9c97dee9"},
+ {file = "Pillow-8.2.0-cp36-cp36m-win32.whl", hash = "sha256:12e5e7471f9b637762453da74e390e56cc43e486a88289995c1f4c1dc0bfe727"},
+ {file = "Pillow-8.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5afe6b237a0b81bd54b53f835a153770802f164c5570bab5e005aad693dab87f"},
+ {file = "Pillow-8.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:cb7a09e173903541fa888ba010c345893cd9fc1b5891aaf060f6ca77b6a3722d"},
+ {file = "Pillow-8.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0d19d70ee7c2ba97631bae1e7d4725cdb2ecf238178096e8c82ee481e189168a"},
+ {file = "Pillow-8.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:083781abd261bdabf090ad07bb69f8f5599943ddb539d64497ed021b2a67e5a9"},
+ {file = "Pillow-8.2.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:c6b39294464b03457f9064e98c124e09008b35a62e3189d3513e5148611c9388"},
+ {file = "Pillow-8.2.0-cp37-cp37m-win32.whl", hash = "sha256:01425106e4e8cee195a411f729cff2a7d61813b0b11737c12bd5991f5f14bcd5"},
+ {file = "Pillow-8.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3b570f84a6161cf8865c4e08adf629441f56e32f180f7aa4ccbd2e0a5a02cba2"},
+ {file = "Pillow-8.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:031a6c88c77d08aab84fecc05c3cde8414cd6f8406f4d2b16fed1e97634cc8a4"},
+ {file = "Pillow-8.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:66cc56579fd91f517290ab02c51e3a80f581aba45fd924fcdee01fa06e635812"},
+ {file = "Pillow-8.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6c32cc3145928c4305d142ebec682419a6c0a8ce9e33db900027ddca1ec39178"},
+ {file = "Pillow-8.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:624b977355cde8b065f6d51b98497d6cd5fbdd4f36405f7a8790e3376125e2bb"},
+ {file = "Pillow-8.2.0-cp38-cp38-win32.whl", hash = "sha256:5cbf3e3b1014dddc45496e8cf38b9f099c95a326275885199f427825c6522232"},
+ {file = "Pillow-8.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:463822e2f0d81459e113372a168f2ff59723e78528f91f0bd25680ac185cf797"},
+ {file = "Pillow-8.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:95d5ef984eff897850f3a83883363da64aae1000e79cb3c321915468e8c6add5"},
+ {file = "Pillow-8.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b91c36492a4bbb1ee855b7d16fe51379e5f96b85692dc8210831fbb24c43e484"},
+ {file = "Pillow-8.2.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d68cb92c408261f806b15923834203f024110a2e2872ecb0bd2a110f89d3c602"},
+ {file = "Pillow-8.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f217c3954ce5fd88303fc0c317af55d5e0204106d86dea17eb8205700d47dec2"},
+ {file = "Pillow-8.2.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5b70110acb39f3aff6b74cf09bb4169b167e2660dabc304c1e25b6555fa781ef"},
+ {file = "Pillow-8.2.0-cp39-cp39-win32.whl", hash = "sha256:a7d5e9fad90eff8f6f6106d3b98b553a88b6f976e51fce287192a5d2d5363713"},
+ {file = "Pillow-8.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:238c197fc275b475e87c1453b05b467d2d02c2915fdfdd4af126145ff2e4610c"},
+ {file = "Pillow-8.2.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:0e04d61f0064b545b989126197930807c86bcbd4534d39168f4aa5fda39bb8f9"},
+ {file = "Pillow-8.2.0-pp36-pypy36_pp73-manylinux2010_i686.whl", hash = "sha256:63728564c1410d99e6d1ae8e3b810fe012bc440952168af0a2877e8ff5ab96b9"},
+ {file = "Pillow-8.2.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:c03c07ed32c5324939b19e36ae5f75c660c81461e312a41aea30acdd46f93a7c"},
+ {file = "Pillow-8.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:4d98abdd6b1e3bf1a1cbb14c3895226816e666749ac040c4e2554231068c639b"},
+ {file = "Pillow-8.2.0-pp37-pypy37_pp73-manylinux2010_i686.whl", hash = "sha256:aac00e4bc94d1b7813fe882c28990c1bc2f9d0e1aa765a5f2b516e8a6a16a9e4"},
+ {file = "Pillow-8.2.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:22fd0f42ad15dfdde6c581347eaa4adb9a6fc4b865f90b23378aa7914895e120"},
+ {file = "Pillow-8.2.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:e98eca29a05913e82177b3ba3d198b1728e164869c613d76d0de4bde6768a50e"},
+ {file = "Pillow-8.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8b56553c0345ad6dcb2e9b433ae47d67f95fc23fe28a0bde15a120f25257e291"},
+ {file = "Pillow-8.2.0.tar.gz", hash = "sha256:a787ab10d7bb5494e5f76536ac460741788f1fbce851068d73a87ca7c35fc3e1"},
+]
+pyasn1 = [
+ {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"},
+ {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"},
+ {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"},
+ {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"},
+ {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
+ {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"},
+ {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"},
+ {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"},
+ {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"},
+ {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"},
+ {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"},
+ {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"},
+ {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
+]
+pyasn1-modules = [
+ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
+ {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"},
+ {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"},
+ {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"},
+ {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"},
+ {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
+ {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"},
+ {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"},
+ {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"},
+ {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"},
+ {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"},
+ {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"},
+ {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
+]
+pycodestyle = [
+ {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"},
+ {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"},
+]
+pycparser = [
+ {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"},
+ {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"},
+]
+pydantic = [
+ {file = "pydantic-1.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739"},
+ {file = "pydantic-1.8.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4"},
+ {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e"},
+ {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840"},
+ {file = "pydantic-1.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b"},
+ {file = "pydantic-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20"},
+ {file = "pydantic-1.8.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb"},
+ {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1"},
+ {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23"},
+ {file = "pydantic-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287"},
+ {file = "pydantic-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd"},
+ {file = "pydantic-1.8.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505"},
+ {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e"},
+ {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820"},
+ {file = "pydantic-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3"},
+ {file = "pydantic-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316"},
+ {file = "pydantic-1.8.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62"},
+ {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f"},
+ {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b"},
+ {file = "pydantic-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3"},
+ {file = "pydantic-1.8.2-py3-none-any.whl", hash = "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"},
+ {file = "pydantic-1.8.2.tar.gz", hash = "sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b"},
+]
+python-dateutil = [
+ {file = "python-dateutil-2.8.1.tar.gz", hash = "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"},
+ {file = "python_dateutil-2.8.1-py2.py3-none-any.whl", hash = "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a"},
+]
+pyyaml = [
+ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8"},
+ {file = "PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win32.whl", hash = "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win32.whl", hash = "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf"},
+ {file = "PyYAML-5.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win32.whl", hash = "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696"},
+ {file = "PyYAML-5.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win32.whl", hash = "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db"},
+ {file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"},
+]
+regex = [
+ {file = "regex-2021.4.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a"},
+ {file = "regex-2021.4.4-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7"},
+ {file = "regex-2021.4.4-cp36-cp36m-win32.whl", hash = "sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29"},
+ {file = "regex-2021.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79"},
+ {file = "regex-2021.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e"},
+ {file = "regex-2021.4.4-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439"},
+ {file = "regex-2021.4.4-cp37-cp37m-win32.whl", hash = "sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d"},
+ {file = "regex-2021.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3"},
+ {file = "regex-2021.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f"},
+ {file = "regex-2021.4.4-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87"},
+ {file = "regex-2021.4.4-cp38-cp38-win32.whl", hash = "sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac"},
+ {file = "regex-2021.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2"},
+ {file = "regex-2021.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c"},
+ {file = "regex-2021.4.4-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042"},
+ {file = "regex-2021.4.4-cp39-cp39-win32.whl", hash = "sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6"},
+ {file = "regex-2021.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07"},
+ {file = "regex-2021.4.4.tar.gz", hash = "sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb"},
+]
+requests = [
+ {file = "requests-2.25.1-py2.py3-none-any.whl", hash = "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e"},
+ {file = "requests-2.25.1.tar.gz", hash = "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"},
+]
+requests-oauthlib = [
+ {file = "requests-oauthlib-1.3.0.tar.gz", hash = "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a"},
+ {file = "requests_oauthlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d"},
+ {file = "requests_oauthlib-1.3.0-py3.7.egg", hash = "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc"},
+]
+rsa = [
+ {file = "rsa-4.7.2-py3-none-any.whl", hash = "sha256:78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2"},
+ {file = "rsa-4.7.2.tar.gz", hash = "sha256:9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9"},
+]
+"ruamel.yaml" = [
+ {file = "ruamel.yaml-0.17.9-py3-none-any.whl", hash = "sha256:8873a6f5516e0d848c92418b0b006519c0566b6cd0dcee7deb9bf399e2bd204f"},
+ {file = "ruamel.yaml-0.17.9.tar.gz", hash = "sha256:374373b4743aee9f6d9f40bea600fe020a7ac7ae36b838b4a6a93f72b584a14c"},
+]
+"ruamel.yaml.clib" = [
+ {file = "ruamel.yaml.clib-0.2.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:28116f204103cb3a108dfd37668f20abe6e3cafd0d3fd40dba126c732457b3cc"},
+ {file = "ruamel.yaml.clib-0.2.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:daf21aa33ee9b351f66deed30a3d450ab55c14242cfdfcd377798e2c0d25c9f1"},
+ {file = "ruamel.yaml.clib-0.2.2-cp27-cp27m-win32.whl", hash = "sha256:30dca9bbcbb1cc858717438218d11eafb78666759e5094dd767468c0d577a7e7"},
+ {file = "ruamel.yaml.clib-0.2.2-cp27-cp27m-win_amd64.whl", hash = "sha256:f6061a31880c1ed6b6ce341215336e2f3d0c1deccd84957b6fa8ca474b41e89f"},
+ {file = "ruamel.yaml.clib-0.2.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:73b3d43e04cc4b228fa6fa5d796409ece6fcb53a6c270eb2048109cbcbc3b9c2"},
+ {file = "ruamel.yaml.clib-0.2.2-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:53b9dd1abd70e257a6e32f934ebc482dac5edb8c93e23deb663eac724c30b026"},
+ {file = "ruamel.yaml.clib-0.2.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:839dd72545ef7ba78fd2aa1a5dd07b33696adf3e68fae7f31327161c1093001b"},
+ {file = "ruamel.yaml.clib-0.2.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1236df55e0f73cd138c0eca074ee086136c3f16a97c2ac719032c050f7e0622f"},
+ {file = "ruamel.yaml.clib-0.2.2-cp35-cp35m-win32.whl", hash = "sha256:b1e981fe1aff1fd11627f531524826a4dcc1f26c726235a52fcb62ded27d150f"},
+ {file = "ruamel.yaml.clib-0.2.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4e52c96ca66de04be42ea2278012a2342d89f5e82b4512fb6fb7134e377e2e62"},
+ {file = "ruamel.yaml.clib-0.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a873e4d4954f865dcb60bdc4914af7eaae48fb56b60ed6daa1d6251c72f5337c"},
+ {file = "ruamel.yaml.clib-0.2.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ab845f1f51f7eb750a78937be9f79baea4a42c7960f5a94dde34e69f3cce1988"},
+ {file = "ruamel.yaml.clib-0.2.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:2fd336a5c6415c82e2deb40d08c222087febe0aebe520f4d21910629018ab0f3"},
+ {file = "ruamel.yaml.clib-0.2.2-cp36-cp36m-win32.whl", hash = "sha256:e9f7d1d8c26a6a12c23421061f9022bb62704e38211fe375c645485f38df34a2"},
+ {file = "ruamel.yaml.clib-0.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:2602e91bd5c1b874d6f93d3086f9830f3e907c543c7672cf293a97c3fabdcd91"},
+ {file = "ruamel.yaml.clib-0.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:44c7b0498c39f27795224438f1a6be6c5352f82cb887bc33d962c3a3acc00df6"},
+ {file = "ruamel.yaml.clib-0.2.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:8e8fd0a22c9d92af3a34f91e8a2594eeb35cba90ab643c5e0e643567dc8be43e"},
+ {file = "ruamel.yaml.clib-0.2.2-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:75f0ee6839532e52a3a53f80ce64925ed4aed697dd3fa890c4c918f3304bd4f4"},
+ {file = "ruamel.yaml.clib-0.2.2-cp37-cp37m-win32.whl", hash = "sha256:464e66a04e740d754170be5e740657a3b3b6d2bcc567f0c3437879a6e6087ff6"},
+ {file = "ruamel.yaml.clib-0.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:52ae5739e4b5d6317b52f5b040b1b6639e8af68a5b8fd606a8b08658fbd0cab5"},
+ {file = "ruamel.yaml.clib-0.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4df5019e7783d14b79217ad9c56edf1ba7485d614ad5a385d1b3c768635c81c0"},
+ {file = "ruamel.yaml.clib-0.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5254af7d8bdf4d5484c089f929cb7f5bafa59b4f01d4f48adda4be41e6d29f99"},
+ {file = "ruamel.yaml.clib-0.2.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8be05be57dc5c7b4a0b24edcaa2f7275866d9c907725226cdde46da09367d923"},
+ {file = "ruamel.yaml.clib-0.2.2-cp38-cp38-win32.whl", hash = "sha256:74161d827407f4db9072011adcfb825b5258a5ccb3d2cd518dd6c9edea9e30f1"},
+ {file = "ruamel.yaml.clib-0.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:058a1cc3df2a8aecc12f983a48bda99315cebf55a3b3a5463e37bb599b05727b"},
+ {file = "ruamel.yaml.clib-0.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6ac7e45367b1317e56f1461719c853fd6825226f45b835df7436bb04031fd8a"},
+ {file = "ruamel.yaml.clib-0.2.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:b4b0d31f2052b3f9f9b5327024dc629a253a83d8649d4734ca7f35b60ec3e9e5"},
+ {file = "ruamel.yaml.clib-0.2.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:1f8c0a4577c0e6c99d208de5c4d3fd8aceed9574bb154d7a2b21c16bb924154c"},
+ {file = "ruamel.yaml.clib-0.2.2-cp39-cp39-win32.whl", hash = "sha256:46d6d20815064e8bb023ea8628cfb7402c0f0e83de2c2227a88097e239a7dffd"},
+ {file = "ruamel.yaml.clib-0.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6c0a5dc52fc74eb87c67374a4e554d4761fd42a4d01390b7e868b30d21f4b8bb"},
+ {file = "ruamel.yaml.clib-0.2.2.tar.gz", hash = "sha256:2d24bd98af676f4990c4d715bcdc2a60b19c56a3fb3a763164d2d8ca0e806ba7"},
+]
+six = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+slack-bolt = [
+ {file = "slack_bolt-1.6.1-py2.py3-none-any.whl", hash = "sha256:3589b3c98c584256b3a8a4e64a3152db5b0f11e5b03cd9f441b29c9879323be8"},
+ {file = "slack_bolt-1.6.1.tar.gz", hash = "sha256:b0e6c18b74332c0cb3f4174ec7847fbfbbe4f8370f91bc8c006f0c9d01c05beb"},
+]
+slack-sdk = [
+ {file = "slack_sdk-3.6.0-py2.py3-none-any.whl", hash = "sha256:e1b257923a1ef88b8620dd3abff94dc5b3eee16ef37975d101ba9e60123ac3af"},
+ {file = "slack_sdk-3.6.0.tar.gz", hash = "sha256:195f044e02a2844579a7a26818ce323e85dde8de224730c859644918d793399e"},
+]
+tabulate = [
+ {file = "tabulate-0.8.9-py3-none-any.whl", hash = "sha256:d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4"},
+ {file = "tabulate-0.8.9.tar.gz", hash = "sha256:eb1d13f25760052e8931f2ef80aaf6045a6cceb47514db8beab24cded16f13a7"},
+]
+tinycss2 = [
+ {file = "tinycss2-1.1.0-py3-none-any.whl", hash = "sha256:0353b5234bcaee7b1ac7ca3dea7e02cd338a9f8dcbb8f2dcd32a5795ec1e5f9a"},
+ {file = "tinycss2-1.1.0.tar.gz", hash = "sha256:fbdcac3044d60eb85fdb2aa840ece43cf7dbe798e373e6ee0be545d4d134e18a"},
+]
+toml = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
+typer = [
+ {file = "typer-0.3.2-py3-none-any.whl", hash = "sha256:ba58b920ce851b12a2d790143009fa00ac1d05b3ff3257061ff69dbdfc3d161b"},
+ {file = "typer-0.3.2.tar.gz", hash = "sha256:5455d750122cff96745b0dec87368f56d023725a7ebc9d2e54dd23dc86816303"},
+]
+typing-extensions = [
+ {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"},
+ {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"},
+ {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"},
+]
+urllib3 = [
+ {file = "urllib3-1.26.5-py2.py3-none-any.whl", hash = "sha256:753a0374df26658f99d826cfe40394a686d05985786d946fbe4165b5148f5a7c"},
+ {file = "urllib3-1.26.5.tar.gz", hash = "sha256:a7acd0977125325f516bda9735fa7142b909a8d01e8b2e4c8108d0984e6e0098"},
+]
+watchdog = [
+ {file = "watchdog-2.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:581e3548159fe7d2a9f377a1fbcb41bdcee46849cca8ab803c7ac2e5e04ec77c"},
+ {file = "watchdog-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:edcd9ef3fd460bb8a98eb1fcf99941e9fd9f275f45f1a82cb1359ec92975d647"},
+ {file = "watchdog-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d34ce2261f118ecd57eedeef95fc2a495fc4a40b3ed7b3bf0bd7a8ccc1ab4f8f"},
+ {file = "watchdog-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:668391e6c32742d76e5be5db6bf95c455fa4b3d11e76a77c13b39bccb3a47a72"},
+ {file = "watchdog-2.1.2-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6ef9fe57162c4c361692620e1d9167574ba1975ee468b24051ca11c9bba6438e"},
+ {file = "watchdog-2.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:58ebb1095ee493008a7789d47dd62e4999505d82be89fc884d473086fccc6ebd"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:91387ee2421f30b75f7ff632c9d48f76648e56bf346a7c805c0a34187a93aab4"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:a6471517315a8541a943c00b45f1d252e36898a3ae963d2d52509b89a50cb2b9"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_i686.whl", hash = "sha256:a42e6d652f820b2b94cd03156c62559a2ea68d476476dfcd77d931e7f1012d4a"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:3d6405681471ebe0beb3aa083998c4870e48b57f8afdb45ea1b5957cc5cf1014"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:598d772beeaf9c98d0df946fbabf0c8365dd95ea46a250c224c725fe0c4730bc"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:4b219d46d89cfa49af1d73175487c14a318a74cb8c5442603fd13c6a5b418c86"},
+ {file = "watchdog-2.1.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:188145185c08c73c56f1478ccf1f0f0f85101191439679b35b6b100886ce0b39"},
+ {file = "watchdog-2.1.2-py3-none-win32.whl", hash = "sha256:255a32d44bbbe62e52874ff755e2eefe271b150e0ec240ad7718a62a7a7a73c4"},
+ {file = "watchdog-2.1.2-py3-none-win_amd64.whl", hash = "sha256:1a62a4671796dc93d1a7262286217d9e75823c63d4c42782912d39a506d30046"},
+ {file = "watchdog-2.1.2-py3-none-win_ia64.whl", hash = "sha256:104266a778906ae0e971368d368a65c4cd032a490a9fca5ba0b78c6c7ae11720"},
+ {file = "watchdog-2.1.2.tar.gz", hash = "sha256:0237db4d9024859bea27d0efb59fe75eef290833fd988b8ead7a879b0308c2db"},
+]
+webencodings = [
+ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
+ {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
+]
+websocket-client = [
+ {file = "websocket-client-1.1.0.tar.gz", hash = "sha256:b68e4959d704768fa20e35c9d508c8dc2bbc041fd8d267c0d7345cffe2824568"},
+ {file = "websocket_client-1.1.0-py2.py3-none-any.whl", hash = "sha256:e5c333bfa9fa739538b652b6f8c8fc2559f1d364243c8a689d7c0e1d41c2e611"},
+]
+werkzeug = [
+ {file = "Werkzeug-1.0.1-py2.py3-none-any.whl", hash = "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43"},
+ {file = "Werkzeug-1.0.1.tar.gz", hash = "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c"},
+]
diff --git a/src/pyproject.toml b/src/pyproject.toml
new file mode 100644
index 000000000..3f44c6b8a
--- /dev/null
+++ b/src/pyproject.toml
@@ -0,0 +1,44 @@
+[tool.poetry]
+name = "robusta-cli"
+version = "0.0.0"
+description = ""
+authors = ["Natan Yellin "]
+packages = [
+ { include = "robusta"},
+]
+
+[tool.poetry.scripts]
+robusta = "robusta.cli.main:app"
+
+[tool.poetry.dependencies]
+python = "^3.8"
+typer = "^0.3.2"
+Flask = "^1.1.2"
+colorlog = "^5.0.1"
+pydantic = "^1.8.1"
+kubernetes = "^12.0.1"
+grafana-api = "^1.0.3"
+slack-bolt = "^1.5.0"
+inflection = "^0.5.1"
+manhole = "^1.8.0"
+PyYAML = "^5.4.1"
+watchdog = "^2.1.0"
+dulwich = "^0.20.23"
+better-exceptions = "^0.3.3"
+CairoSVG = "^2.5.2"
+tabulate = "^0.8.9"
+
+[tool.poetry.dev-dependencies]
+dunamai = "^1.5.5"
+hikaru = {git = "https://github.com/aantn/hikaru.git", rev = "fix_datetimes"}
+
+[build-system]
+#requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+# https://github.com/mtkennerly/poetry-dynamic-versioning
+# we can use this in github actions by running `poetry run poetry-dynamic-versioning`
+#[tool.poetry-dynamic-versioning]
+#vcs = "git"
+#pattern = "^(?P\\d+\\.\\d+\\.\\d+)"
diff --git a/src/robusta/__init__.py b/src/robusta/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/_version.py b/src/robusta/_version.py
new file mode 100644
index 000000000..3aa0d7b3c
--- /dev/null
+++ b/src/robusta/_version.py
@@ -0,0 +1 @@
+__version__ = "0.0.0"
\ No newline at end of file
diff --git a/src/robusta/api/__init__.py b/src/robusta/api/__init__.py
new file mode 100644
index 000000000..152856ebe
--- /dev/null
+++ b/src/robusta/api/__init__.py
@@ -0,0 +1,19 @@
+from pydantic import BaseModel
+from ..core.active_playbooks import run_playbooks
+from ..core.reporting.callbacks import *
+from ..integrations.kubernetes.custom_models import *
+from ..integrations.kubernetes.autogenerated.triggers import *
+from ..integrations.kubernetes.autogenerated.events import *
+from ..integrations.kubernetes.autogenerated.models import *
+from ..integrations.prometheus.triggers import *
+from ..integrations.prometheus.incoming_handler import prometheus_cloud_event
+from ..integrations.prometheus.models import *
+from ..integrations.prometheus.utils import *
+from ..integrations.slack.sender import *
+from ..integrations.grafana import *
+from ..integrations.manual.triggers import *
+from ..integrations.scheduled.triggers import *
+from ..integrations.git.git_repo_manager import *
+from ..core.persistency.in_memory import get_persistent_data
+from ..utils.rate_limiter import RateLimiter
+from ..runner.object_updater import *
diff --git a/src/robusta/cli/__init__.py b/src/robusta/cli/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/cli/main.py b/src/robusta/cli/main.py
new file mode 100644
index 000000000..053c4c837
--- /dev/null
+++ b/src/robusta/cli/main.py
@@ -0,0 +1,169 @@
+import os
+import subprocess
+import time
+import uuid
+from contextlib import contextmanager
+from importlib.metadata import version as get_module_version
+from typing import List, Optional
+from zipfile import ZipFile
+
+import typer
+import requests
+
+from robusta._version import __version__
+
+
+app = typer.Typer()
+
+SLACK_INTEGRATION_SERVICE_ADDRESS = os.environ.get('SLACK_INTEGRATION_SERVICE_ADDRESS', "https://robusta.dev/integrations/slack/get-token")
+EXAMPLES_BUCKET_URL = f"https://storage.googleapis.com/robusta-public/{__version__}"
+DOWNLOAD_URL = f"https://gist.githubusercontent.com/arikalon1/1196dd6496707d42d85d96f7e6b5d000/raw/robusta-{__version__}.yaml"
+CRASHPOD_YAML = "https://gist.githubusercontent.com/arikalon1/4fad3cee4c6921679c513a953cd615ce/raw/crashpod.yaml"
+
+def exec_in_robusta_runner(cmd, tries=1, time_between_attempts=10, error_msg="error running cmd"):
+ cmd = ["kubectl", "exec", "-n", "robusta", "-it", "deploy/robusta-runner", "--", "bash", "-c", cmd]
+ for _ in range(tries-1):
+ try:
+ return subprocess.check_call(cmd)
+ except Exception as e:
+ typer.echo(f"{error_msg}")
+ time.sleep(time_between_attempts)
+ return subprocess.check_call(cmd)
+
+
+def download_file(url, local_path):
+ response = requests.get(url)
+ response.raise_for_status()
+ with open(local_path, "wb") as f:
+ f.write(response.content)
+
+
+def log_title(title):
+ typer.echo("="*70)
+ typer.echo(title)
+ typer.echo("=" * 70)
+
+
+def replace_in_file(path, original, replacement):
+ with open(path) as r:
+ text = r.read()
+ if original not in text:
+ raise Exception(f"Cannot replace text {original} in file {path} because it was not found")
+ text = text.replace(original, replacement)
+ with open(path, "w") as w:
+ w.write(text)
+
+
+@contextmanager
+def fetch_runner_logs(all_logs=False):
+ start = time.time()
+ try:
+ yield
+ finally:
+ log_title("Fetching logs...")
+ if all_logs:
+ subprocess.check_call(f"kubectl logs -n robusta deployment/robusta-runner", shell=True)
+ else:
+ subprocess.check_call(f"kubectl logs -n robusta deployment/robusta-runner --since={int(time.time() - start + 1)}s", shell=True)
+
+def wait_for_slack_api_key(id: str) -> str:
+ while True:
+ try:
+ response_json = requests.get(f"{SLACK_INTEGRATION_SERVICE_ADDRESS}?id={id}").json()
+ if response_json['token']:
+ return str(response_json['token'])
+ time.sleep(0.5)
+ except Exception as e:
+ log_title(f"Error getting slack token {e}")
+
+
+@app.command()
+def install(slack_api_key: str = None):
+ """install robusta into your cluster"""
+ filename = "robusta.yaml"
+ download_file(DOWNLOAD_URL, filename)
+
+ if slack_api_key is None and typer.confirm("do you want to configure slack integration? this is HIGHLY recommended.", default=True):
+ id = str(uuid.uuid4())
+ typer.launch(f"https://robusta.dev/integrations/slack?id={id}")
+ slack_api_key = wait_for_slack_api_key(id)
+
+ if slack_api_key is not None:
+ replace_in_file(filename, "", slack_api_key.strip())
+
+ with fetch_runner_logs(all_logs=True):
+ log_title("Installing")
+ subprocess.check_call(["kubectl", "apply", "-f", filename])
+ log_title("Waiting for resources to be ready")
+ subprocess.check_call(["kubectl", "rollout", "-n", "robusta", "status", "deployments/robusta-runner"])
+ # subprocess.run(["kubectl", "wait", "-n", "robusta", "pods", "--all", "--for", "condition=available"])
+ # TODO: if this is an upgrade there can still be old pods in a terminating state, and then we will fetch
+ # logs from the wrong pod...
+ time.sleep(5) # wait a few extra seconds for logs to be written
+
+ log_title("Done")
+
+
+@app.command()
+def deploy(playbooks_directory: str):
+ """deploy playbooks"""
+ log_title("Updating playbooks...")
+ with fetch_runner_logs():
+ subprocess.check_call(f'kubectl create configmap -n robusta robusta-config --from-file {playbooks_directory} -o yaml --dry-run | kubectl apply -f -', shell=True)
+ subprocess.check_call(f'kubectl annotate pods -n robusta --all --overwrite "playbooks-last-modified={time.time()}"', shell=True)
+ time.sleep(5) # wait five seconds for the runner to actually reload the playbooks
+ log_title("Done!")
+
+
+@app.command()
+def trigger(trigger_name: str, param: Optional[List[str]] = typer.Argument(None, help="data to send to playbook (can be used multiple times)", metavar="key=value")):
+ """trigger a manually run playbook"""
+ log_title("Triggering playbook...")
+ trigger_params = " ".join([f"-F '{p}'" for p in param])
+ with fetch_runner_logs():
+ cmd = f"curl -X POST -F 'trigger_name={trigger_name}' {trigger_params} http://localhost:5000/api/trigger"
+ exec_in_robusta_runner(cmd, tries=3,
+ error_msg="Cannot trigger playbook - usually this means Robusta just started. Will try again")
+ typer.echo("\n")
+ log_title("Done!")
+
+
+@app.command()
+def examples():
+ """download example playbooks"""
+ filename = "example-playbooks.zip"
+ download_file(f'{EXAMPLES_BUCKET_URL}/{filename}', filename)
+ with ZipFile(filename, "r") as zip_file:
+ zip_file.extractall()
+
+ slack_channel = typer.prompt("which slack channel should I send notifications to?")
+ replace_in_file("playbooks/active_playbooks.yaml", "", slack_channel)
+
+ typer.echo("examples downloaded into the playbooks/ directory")
+
+
+@app.command()
+def playground():
+ """open a python playground - useful when writing playbooks"""
+ exec_in_robusta_runner("socat readline unix-connect:/tmp/manhole-1")
+
+
+@app.command()
+def version():
+ """show the version of the local robusta-cli"""
+ typer.echo(get_module_version("robusta-cli"))
+
+
+@app.command()
+def demo():
+ """deliberately deploy a crashing pod to kubernetes so you can test robusta's response"""
+ log_title("Deploying a crashing pod to kubernetes...")
+ with fetch_runner_logs():
+ subprocess.check_call(f'kubectl apply -f {CRASHPOD_YAML}', shell=True)
+ time.sleep(10)
+ subprocess.check_call(f'kubectl delete -n robusta deployment crashpod', shell=True)
+ log_title("Done!")
+
+
+if __name__ == "__main__":
+ app()
diff --git a/src/robusta/core/__init__.py b/src/robusta/core/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/core/active_playbooks.py b/src/robusta/core/active_playbooks.py
new file mode 100644
index 000000000..8b3ee5523
--- /dev/null
+++ b/src/robusta/core/active_playbooks.py
@@ -0,0 +1,70 @@
+import inspect
+import logging
+from collections import defaultdict
+from typing import Callable
+from sys import exc_info
+
+import better_exceptions
+from ..core.model.cloud_event import CloudEvent
+
+
+class PlaybookWrapper:
+
+ def __init__(self, wrapper, playbook_id):
+ self.wrapper = wrapper
+ self.playbook_id = playbook_id
+
+
+active_playbooks = defaultdict(list) # maps trigger types to active playbooks
+playbook_inventory = {}
+
+
+def get_playbook_inventory():
+ return playbook_inventory
+
+
+def get_active_playbooks():
+ return active_playbooks
+
+
+def get_function_params_class(func: Callable):
+ """Inspects a playbook function's signature and returns the type of the param class if it exists"""
+ func_signature = inspect.signature(func)
+ if len(func_signature.parameters) == 1:
+ return None
+ parameter_name = list(func_signature.parameters)[1]
+ return func_signature.parameters[parameter_name].annotation
+
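+# For example (hypothetical playbook, not defined in this repo): given
+#     def restart_reporter(event: PodEvent, params: RestartParams): ...
+# get_function_params_class returns the RestartParams annotation of the second parameter; a playbook
+# that accepts only the event argument returns None.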
+
+def register_playbook(func, deploy_func, default_trigger_params):
+ get_playbook_inventory()[func.__name__] = {
+ 'func': func,
+ 'default_trigger_params': default_trigger_params,
+ 'deploy_func': deploy_func,
+ 'action_params': get_function_params_class(func),
+ }
+ func.__playbook = playbook_inventory[func.__name__]
+
+
+def clear_playbook_inventory():
+ playbook_inventory.clear()
+
+
+def activate_playbook(trigger_type, wrapper, func, playbook_id):
+ logging.info(f'adding handler {func} playbook_id {playbook_id}')
+ active_playbooks[trigger_type.name].append(PlaybookWrapper(wrapper, playbook_id))
+
+
+def run_playbooks(cloud_event: CloudEvent):
+ # TODO: Ideally we would do the conversion to a concrete event class here so that we pass the same event
+ # object to all playbooks that are triggered and they can each add stuff to the reporting blocks
+ description = cloud_event.data["description"].replace("\n", "")
+ logging.debug(f'received cloud event: {description}')
+ handlers = active_playbooks[cloud_event.type]
+ logging.debug(f'relevant handlers: {handlers}')
+ for playbook_wrapper in handlers:
+ try:
+ playbook_wrapper.wrapper(cloud_event)
+ except Exception as e:
+ _, _, traceback = exc_info()
+ msg = "\n".join(better_exceptions.format_exception(e.__class__, e, traceback))
+ logging.exception(f"got exception running handler: {msg}")
diff --git a/src/robusta/core/model/__init__.py b/src/robusta/core/model/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/core/model/cloud_event.py b/src/robusta/core/model/cloud_event.py
new file mode 100644
index 000000000..e85f50a31
--- /dev/null
+++ b/src/robusta/core/model/cloud_event.py
@@ -0,0 +1,14 @@
+from datetime import datetime
+from pydantic import BaseModel
+from typing import Dict, Any
+
+# for deserializing incoming events in the cloudevent format
+class CloudEvent(BaseModel):
+ specversion : str
+ type : str
+ source: str
+ subject: str
+ id: str
+ time: datetime
+ datacontenttype: str
+ data: Dict[Any,Any]
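+
+# Illustrative payload (hypothetical values): an incoming event deserializes roughly as
+#     CloudEvent(specversion="1.0", type="SCHEDULED_TRIGGER", source="scheduler", subject="scheduled trigger",
+#                id="42", time=datetime.now(), datacontenttype="application/json",
+#                data={"description": "example event"})
+# run_playbooks() then dispatches it to the handlers registered for its `type` field.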
diff --git a/src/robusta/core/model/events.py b/src/robusta/core/model/events.py
new file mode 100644
index 000000000..e57ab855d
--- /dev/null
+++ b/src/robusta/core/model/events.py
@@ -0,0 +1,28 @@
+from enum import Enum
+from typing import List, Any, Optional
+from dataclasses import dataclass, field
+
+from ..reporting.blocks import BaseBlock
+
+
+class EventType(Enum):
+ KUBERNETES_TOPOLOGY_CHANGE = 1
+ PROMETHEUS = 2
+ MANUAL_TRIGGER = 3
+ SCHEDULED_TRIGGER = 4
+
+
+# Right now:
+# 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/
+# 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)
+# once the pydantic PR that addresses those issues is merged, this should be a pydantic class
+@dataclass
+class BaseEvent:
+ # TODO: just like you can add generic reporting blocks, should we allow attaching persistent context too?
+ report_blocks: List[BaseBlock] = field(default_factory=list)
+ # some chat APIs allow attachment blocks which are formatted differently
+ report_attachment_blocks: List[BaseBlock] = field(default_factory=list)
+ report_title: str = ""
+ report_title_hidden: bool = False
+ slack_channel: Optional[str] = None
+ slack_mentions: List[str] = field(default_factory=list) # TODO: expand this to a broader concept of all humans related to this event
diff --git a/src/robusta/core/model/playbook_deploy_config.py b/src/robusta/core/model/playbook_deploy_config.py
new file mode 100644
index 000000000..c62b96467
--- /dev/null
+++ b/src/robusta/core/model/playbook_deploy_config.py
@@ -0,0 +1,10 @@
+from pydantic import BaseModel
+from typing import Optional
+
+from .trigger_params import TriggerParams
+
+
+class PlaybookDeployConfig(BaseModel):
+ name: str = None
+ trigger_params: Optional[TriggerParams] = TriggerParams()
+ action_params: Optional[dict] = {}
\ No newline at end of file
diff --git a/src/robusta/core/model/playbook_hash.py b/src/robusta/core/model/playbook_hash.py
new file mode 100644
index 000000000..919928c2e
--- /dev/null
+++ b/src/robusta/core/model/playbook_hash.py
@@ -0,0 +1,11 @@
+import hashlib
+
+from ...utils.function_hashes import get_function_hash
+from ...core.model.trigger_params import TriggerParams
+
+
+def playbook_hash(func, trigger_params : TriggerParams, action_params):
+ hash_input = f"{get_function_hash(func)}" + \
+ ("None" if trigger_params is None else trigger_params.json()) + \
+ ("None" if action_params is None else action_params.json())
+ return hashlib.md5(hash_input.encode()).hexdigest()
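+
+# Note: the id combines a hash of the playbook function with the serialized trigger and action params,
+# so the same playbook deployed twice with different params yields two distinct playbook ids.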
diff --git a/src/robusta/core/model/runner_config.py b/src/robusta/core/model/runner_config.py
new file mode 100644
index 000000000..01530ae99
--- /dev/null
+++ b/src/robusta/core/model/runner_config.py
@@ -0,0 +1,11 @@
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+from .playbook_deploy_config import PlaybookDeployConfig
+
+
+class RunnerConfig(BaseModel):
+ global_config: Optional[dict] = {}
+ active_playbooks: Optional[List[PlaybookDeployConfig]] = []
+
diff --git a/src/robusta/core/model/trigger_params.py b/src/robusta/core/model/trigger_params.py
new file mode 100644
index 000000000..6c7dcbf63
--- /dev/null
+++ b/src/robusta/core/model/trigger_params.py
@@ -0,0 +1,16 @@
+from pydantic import BaseModel
+from ...integrations.kubernetes.base_event import K8sOperationType
+
+
+class TriggerParams(BaseModel):
+ trigger_name: str = None
+ alert_name: str = None
+ pod_name_prefix: str = None
+ instance_name_prefix: str = None
+ name_prefix: str = None
+ namespace_prefix: str = None
+ status: str = None
+ kind: str = None
+ operation: K8sOperationType = None
+ repeat: int = None
+ seconds_delay: int = None
diff --git a/src/robusta/core/persistency/__init__.py b/src/robusta/core/persistency/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/core/persistency/in_memory.py b/src/robusta/core/persistency/in_memory.py
new file mode 100644
index 000000000..4ba046b45
--- /dev/null
+++ b/src/robusta/core/persistency/in_memory.py
@@ -0,0 +1,17 @@
+# dummy persistence driver
+from contextlib import contextmanager
+from pydantic import BaseModel
+from typing import Type, TypeVar, Dict, ContextManager
+
+persistent_data: Dict[str, BaseModel] = {}
+
+# TODO: we probably want some form of locking for this so two playbooks can't edit the same data at the same time
+T = TypeVar('T', bound=BaseModel)
+@contextmanager
+def get_persistent_data(name: str, cls: Type[T]) -> ContextManager[T]:
+ try:
+ data = persistent_data.get(name, cls())
+ yield data
+ finally:
+ # write data back
+ persistent_data[name] = data
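+
+# Illustrative usage (PodCounter is a hypothetical playbook model):
+#     class PodCounter(BaseModel):
+#         restarts: int = 0
+#     with get_persistent_data("pod-counter", PodCounter) as data:
+#         data.restarts += 1
+# the mutated model is written back to the in-memory store when the block exits.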
diff --git a/src/robusta/core/persistency/scheduled_jobs_states_dal.py b/src/robusta/core/persistency/scheduled_jobs_states_dal.py
new file mode 100644
index 000000000..39dd986c9
--- /dev/null
+++ b/src/robusta/core/persistency/scheduled_jobs_states_dal.py
@@ -0,0 +1,62 @@
+import json
+import logging
+from typing import List
+from threading import Lock
+
+import kubernetes
+from hikaru.model import ObjectMeta
+
+from ...core.schedule.model import JobState
+from ...integrations.kubernetes.autogenerated.v1.models import ConfigMap
+from ...runner.not_found_exception import NotFoundException
+
+CONFIGMAP_NAME = "jobs-states"
+CONFIGMAP_NAMESPACE = "robusta"
+mutex = Lock()
+
+def load_config_map() -> ConfigMap:
+ return ConfigMap.readNamespacedConfigMap(CONFIGMAP_NAME, CONFIGMAP_NAMESPACE).obj
+
+def init_scheduler_dal():
+ try:
+ load_config_map()
+ except kubernetes.client.exceptions.ApiException as e:
+ # we only want to catch exceptions because the config map doesn't exist
+ if e.reason != 'Not Found':
+ raise
+ # the job states configmap doesn't exist, so create it
+ mutex.acquire()
+ try:
+ conf_map = ConfigMap(metadata=ObjectMeta(name=CONFIGMAP_NAME, namespace=CONFIGMAP_NAMESPACE))
+ conf_map.createNamespacedConfigMap(conf_map.metadata.namespace)
+ logging.info(f"created jobs states configmap {CONFIGMAP_NAME} {CONFIGMAP_NAMESPACE}")
+ finally:
+ mutex.release()
+
+init_scheduler_dal()
+
+def save_scheduled_job_state(job_state : JobState):
+ mutex.acquire()
+ try:
+ confMap = load_config_map()
+ confMap.data[job_state.params.playbook_id] = job_state.json()
+ confMap.replaceNamespacedConfigMap(confMap.metadata.name, confMap.metadata.namespace)
+ finally:
+ mutex.release()
+
+def get_scheduled_job_state(playbook_id : str) -> JobState:
+ state_data = load_config_map().data.get(playbook_id)
+ return JobState(**json.loads(state_data)) if state_data is not None else None
+
+def del_scheduled_job_state(playbook_id : str):
+ mutex.acquire()
+ try:
+ confMap = load_config_map()
+ if confMap.data.get(playbook_id) is not None:
+ del confMap.data[playbook_id]
+ confMap.replaceNamespacedConfigMap(confMap.metadata.name, confMap.metadata.namespace)
+ finally:
+ mutex.release()
+
+def list_scheduled_jobs_states() -> List[JobState]:
+ return [get_scheduled_job_state(pid) for pid in load_config_map().data.keys()]
\ No newline at end of file
diff --git a/src/robusta/core/reporting/__init__.py b/src/robusta/core/reporting/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/core/reporting/blocks.py b/src/robusta/core/reporting/blocks.py
new file mode 100644
index 000000000..8531278b0
--- /dev/null
+++ b/src/robusta/core/reporting/blocks.py
@@ -0,0 +1,71 @@
+# TODO: add a KubernetesBlock for rendering Kubernetes object in a standard way
+# Notes on how we define all the classes below:
+# 1. We use pydantic and not dataclasses so that field types are validated
+# 2. We add __init__ methods ourselves for convenience. Without our own __init__ method, something like
+# HeaderBlock("foo") doesn't work. Only HeaderBlock(text="foo") would be allowed by pydantic.
+from typing import List, Callable, Dict, Any, Iterable
+
+from pydantic import BaseModel
+from tabulate import tabulate
+
+
+class BaseBlock (BaseModel):
+ hidden: bool = False
+
+
+class MarkdownBlock (BaseBlock):
+ text: str
+
+ def __init__(self, text: str):
+ super().__init__(text=text)
+
+
+class DividerBlock (BaseBlock):
+ pass
+
+
+class FileBlock (BaseBlock):
+ filename: str
+ contents: bytes
+
+ def __init__(self, filename: str, contents: bytes):
+ super().__init__(filename=filename, contents=contents)
+
+
+class HeaderBlock (BaseBlock):
+ text: str
+
+ def __init__(self, text: str):
+ super().__init__(text=text)
+
+
+class ListBlock (BaseBlock):
+ items: List[str]
+
+ def __init__(self, items: List[str]):
+ super().__init__(items=items)
+
+ def to_markdown(self) -> MarkdownBlock:
+ mrkdwn = [f"* {item}" for item in self.items]
+ return MarkdownBlock("\n".join(mrkdwn))
+
+
+class TableBlock (BaseBlock):
+ rows: Iterable[Iterable[str]]
+ headers: Iterable[str] = ()
+
+ def __init__(self, rows: Iterable[Iterable[str]], headers: Iterable[str] = ()):
+ super().__init__(rows=rows, headers=headers)
+
+ def to_markdown(self) -> MarkdownBlock:
+ table = tabulate(self.rows, headers=self.headers, tablefmt="fancy_grid")
+ return MarkdownBlock(f"```\n{table}\n```")
+
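+# Illustrative usage (hypothetical rows): TableBlock([["cpu", "90%"], ["memory", "512Mi"]],
+# headers=["resource", "usage"]).to_markdown() renders the rows as a fancy_grid table wrapped in a
+# code fence, so it can be sent as preformatted text.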
+
+class CallbackBlock (BaseBlock):
+ choices: Dict[str, Callable]
+ context: Dict[str, Any] = {}
+
+ def __init__(self, choices: Dict[str, Callable], context: Dict[str, Any]):
+ super().__init__(choices=choices, context=context)
+
diff --git a/src/robusta/core/reporting/callbacks.py b/src/robusta/core/reporting/callbacks.py
new file mode 100644
index 000000000..e642c0c3e
--- /dev/null
+++ b/src/robusta/core/reporting/callbacks.py
@@ -0,0 +1,71 @@
+# This file contains internal wiring for report callbacks
+# Playbook writers don't need to be familiar with this - they should use the API in callbacks.py and not worry about details
+import inspect
+import logging
+from dataclasses import dataclass
+from typing import Callable, Any
+
+from pydantic import BaseModel
+
+from ..model.events import BaseEvent
+from ...utils.function_hashes import get_function_hash
+from ...utils.decorators import doublewrap
+
+class PlaybookCallbackRequest (BaseModel):
+ func_name: str
+ func_file: str
+ func_hash: str
+ context: str
+
+ @classmethod
+ def create_for_func(cls, func: Callable, context: Any):
+ return cls(func_name=func.__name__, func_file=inspect.getsourcefile(func), func_hash=get_function_hash(func), context=context)
+
+
+class CallbackRegistry:
+
+ def __init__(self):
+ self.callbacks = {}
+
+ def register_callback(self, func: Callable):
+ key = self._get_callback_key_for_func(func)
+ if key in self.callbacks:
+ logging.warning(f"overriding existing callback in registry; func={func}")
+ self.callbacks[key] = func
+
+ def is_callback_in_registry(self, func: Callable):
+ key = self._get_callback_key_for_func(func)
+ return key in self.callbacks and self.callbacks[key] == func
+
+ def lookup_callback(self, callback_request: PlaybookCallbackRequest):
+ key = (callback_request.func_name, callback_request.func_file)
+ if key not in self.callbacks:
+ return None
+
+ func = self.callbacks[key]
+ if callback_request.func_hash != get_function_hash(func):
+ logging.warning("callback hash doesn't match! calling a different version of the function than the original one!")
+ return func
+
+ @staticmethod
+ def _get_callback_key_for_func(func: Callable):
+ return func.__name__, inspect.getsourcefile(func)
+
+
+# TODO: make this something more generic which isn't slack specific
+@dataclass
+class ReportCallbackEvent(BaseEvent):
+ source_channel_name: str = ""
+ source_channel_id: str = ""
+ source_user_id: str = ""
+ source_context: str = ""
+ source_message: str = ""
+
+
+callback_registry = CallbackRegistry()
+
+
+@doublewrap
+def on_report_callback(func):
+ callback_registry.register_callback(func)
+ return func
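+
+# Illustrative usage (hypothetical callback): decorating a function registers it in callback_registry,
+#     @on_report_callback
+#     def unsilence_alert(event: ReportCallbackEvent):
+#         ...
+# so a serialized PlaybookCallbackRequest can later be resolved back to unsilence_alert via lookup_callback().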
diff --git a/src/robusta/core/reporting/utils.py b/src/robusta/core/reporting/utils.py
new file mode 100644
index 000000000..a00651b9c
--- /dev/null
+++ b/src/robusta/core/reporting/utils.py
@@ -0,0 +1,13 @@
+from .blocks import *
+import cairosvg
+
+def add_pngs_for_all_svgs(blocks: List[FileBlock]):
+ new_blocks = blocks.copy()
+ for b in blocks:
+ if not isinstance(b, FileBlock):
+ continue
+ if not b.filename.endswith(".svg"):
+ continue
+ conversion = cairosvg.svg2png(bytestring=b.contents)
+ new_blocks.append(FileBlock(b.filename.replace(".svg", ".png"), conversion))
+ return new_blocks
diff --git a/src/robusta/core/schedule/model.py b/src/robusta/core/schedule/model.py
new file mode 100644
index 000000000..df835535d
--- /dev/null
+++ b/src/robusta/core/schedule/model.py
@@ -0,0 +1,24 @@
+from enum import Enum
+
+from pydantic import BaseModel
+
+
+class JobStatus(Enum):
+ NEW = 1
+ RUNNING = 2
+ DONE = 3
+
+class SchedulingType(Enum):
+ FIXED_DELAY_REPEAT = 1
+
+class SchedulingParams(BaseModel):
+ playbook_id: str = None
+ repeat: int = None
+ seconds_delay: int = None
+
+class JobState(BaseModel):
+ exec_count: int = 0
+ job_status: JobStatus = JobStatus.NEW
+ sched_type: SchedulingType = SchedulingType.FIXED_DELAY_REPEAT
+ last_exec_time_sec: int = 0
+ params: SchedulingParams
diff --git a/src/robusta/core/schedule/scheduler.py b/src/robusta/core/schedule/scheduler.py
new file mode 100644
index 000000000..11d717d80
--- /dev/null
+++ b/src/robusta/core/schedule/scheduler.py
@@ -0,0 +1,109 @@
+import os
+import threading
+import time, logging
+import uuid
+from collections import defaultdict
+from datetime import datetime
+
+from ...core.active_playbooks import run_playbooks
+from ...core.model.cloud_event import CloudEvent
+from ...core.model.events import EventType
+from ...core.persistency.scheduled_jobs_states_dal import save_scheduled_job_state, \
+ del_scheduled_job_state, get_scheduled_job_state, list_scheduled_jobs_states
+from ...core.schedule.model import JobState, JobStatus
+from ...core.model.trigger_params import TriggerParams
+from ...integrations.scheduled.models import SchedulerEvent
+
+INITIAL_SCHEDULE_DELAY_SEC = int(os.environ.get("INITIAL_SCHEDULE_DELAY_SEC", 5))
+
+
+scheduled_jobs = defaultdict(None)
+
+def is_scheduled(playbook_id):
+ return scheduled_jobs.get(playbook_id) is not None
+
+
+def schedule_job(delay, playbook_id, func, kwargs):
+ job = threading.Timer(delay, func, kwargs=kwargs)
+ scheduled_jobs[playbook_id] = job
+ job.start()
+
+
+def recurrence_job(job_state : JobState):
+ logging.info(f"running recurrence job playbook_id {job_state.params.playbook_id}")
+ params = job_state.params
+
+ if job_state.job_status == JobStatus.NEW:
+ job_state.job_status = JobStatus.RUNNING
+ job_state.last_exec_time_sec = round(time.time())
+
+ cloud_event = CloudEvent(specversion='1.0',
+ type=EventType.SCHEDULED_TRIGGER.name,
+ source=EventType.SCHEDULED_TRIGGER.name,
+ subject='scheduled trigger',
+ id=str(uuid.uuid4()),
+ time=datetime.now(),
+ datacontenttype='application/json',
+ data=SchedulerEvent(**{
+ "description": f"scheduled recurrence playbook event {params.playbook_id}",
+ "playbook_id": params.playbook_id,
+ "recurrence": job_state.exec_count
+ }),
+ )
+ try:
+ run_playbooks(cloud_event)
+ except Exception:
+ logging.exception(f"failed to execute recurring job. playbook_id {params.playbook_id} exec_count {job_state.exec_count}")
+
+ job_state.exec_count += 1
+ if job_state.exec_count == params.repeat:
+ job_state.job_status = JobStatus.DONE
+ save_scheduled_job_state(job_state) # save the job state before unscheduling the job (to avoid a race condition on configuration reload)
+ del scheduled_jobs[params.playbook_id]
+ logging.info(f"Scheduled recurrence job done. playbook_id {params.playbook_id} recurrence {job_state.exec_count}")
+ else:
+ save_scheduled_job_state(job_state)
+ schedule_job(params.seconds_delay, params.playbook_id, recurrence_job, {"job_state": job_state})
+
+
+def schedule_trigger(playbook_id : str, trigger_params : TriggerParams):
+ if is_scheduled(playbook_id):
+ logging.info(f"playbook {playbook_id} already scheduled")
+ return # playbook is already scheduled, no need to re-schedule. (this is a reload playbooks scenario)
+ job_state = get_scheduled_job_state(playbook_id)
+ if job_state is None: # no earlier job state, create one
+ job_state = JobState(**{"params": {
+ "playbook_id": playbook_id,
+ "repeat": trigger_params.repeat,
+ "seconds_delay": trigger_params.seconds_delay
+ }})
+ save_scheduled_job_state(job_state)
+ elif job_state.job_status == JobStatus.DONE:
+ logging.info(f"Scheduled recurring already job done. Skipping scheduling. playbook {playbook_id}")
+ return
+
+ next_delay = calc_job_delay_for_next_run(job_state)
+ logging.info(f"scheduling recurring trigger for playbook {playbook_id} repeat {trigger_params.repeat} delay {trigger_params.seconds_delay} will run in {next_delay}")
+ schedule_job(next_delay, playbook_id, recurrence_job, {"job_state": job_state})
+
+
+def unschedule_trigger(playbook_id):
+ job = scheduled_jobs.get(playbook_id)
+ if job is not None:
+ job.cancel()
+ del scheduled_jobs[playbook_id]
+ del_scheduled_job_state(playbook_id)
+
+
+def unschedule_deleted_playbooks(active_playbook_ids : set):
+ for job_state in list_scheduled_jobs_states():
+ if job_state.params.playbook_id not in active_playbook_ids:
+ logging.info(f"unscheduling deleted playbook {job_state.params.playbook_id}")
+ unschedule_trigger(job_state.params.playbook_id)
+
+
+def calc_job_delay_for_next_run(job_state):
+ if job_state.job_status == JobStatus.NEW:
+ return INITIAL_SCHEDULE_DELAY_SEC
+ return max(job_state.last_exec_time_sec + job_state.params.seconds_delay - round(time.time()), INITIAL_SCHEDULE_DELAY_SEC)
+
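+# Worked example (assuming the default INITIAL_SCHEDULE_DELAY_SEC of 5): for a job with seconds_delay=60
+# that last ran 45 seconds ago, the next run is scheduled in max(60 - 45, 5) = 15 seconds; a job that is
+# already overdue falls back to the 5 second floor.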
diff --git a/src/robusta/core/triggers.py b/src/robusta/core/triggers.py
new file mode 100644
index 000000000..2c5452899
--- /dev/null
+++ b/src/robusta/core/triggers.py
@@ -0,0 +1,81 @@
+import copy
+import logging
+
+from .model.trigger_params import TriggerParams
+from .model.playbook_hash import playbook_hash
+
+from ..core.active_playbooks import get_active_playbooks, get_playbook_inventory
+from ..core.model.runner_config import RunnerConfig
+from ..core.schedule.scheduler import unschedule_deleted_playbooks
+from ..integrations.prometheus.incoming_handler import *
+
+def clear_active_playbooks():
+ get_active_playbooks().clear()
+
+
+class DeployCommand:
+ def __init__(self, deploy_func, func, trigger_params, action_params=None):
+ self.deploy_func = deploy_func
+ self.func = func
+ self.trigger_params = trigger_params
+ self.action_params = action_params
+ self.playbook_id = playbook_hash(func, trigger_params, action_params)
+ if getattr(action_params, "pre_deploy_func", None) is not None:
+ action_params.pre_deploy_func(trigger_params)
+
+
+def deploy_playbook_config(runner_config: RunnerConfig):
+ deploy_commands = []
+ for playbook_config in runner_config.active_playbooks:
+ playbook_definition = get_playbook_inventory().get(playbook_config.name)
+ if playbook_definition is None:
+ logging.error(
+ f'playbook definition not found. skipping. {playbook_config.name}') # TODO - should we continue, or not run at all??
+ continue
+
+ runtime_trigger_params = copy.deepcopy(playbook_definition['default_trigger_params'])
+ # first override defaults with global config
+ runtime_trigger_params = get_merged_global_config(runner_config.global_config, runtime_trigger_params)
+ # then override the result with trigger_params specific config
+ runtime_trigger_params = get_merged_config(playbook_config.trigger_params, runtime_trigger_params)
+ deploy_func = playbook_definition['deploy_func']
+ if playbook_definition['action_params'] is None:
+ deploy_commands.append(DeployCommand(deploy_func, playbook_definition['func'], runtime_trigger_params))
+ else:
+ # in case we have params, we have to apply the global config on it as well
+ playbook_config.action_params = merge_global_params(runner_config.global_config, playbook_config.action_params)
+ deploy_commands.append(DeployCommand(deploy_func, playbook_definition['func'], runtime_trigger_params,
+ playbook_definition['action_params'](**playbook_config.action_params)))
+
+ new_playbook_ids = set([deploy_command.playbook_id for deploy_command in deploy_commands])
+ clear_active_playbooks()
+
+ # unschedule playbooks that no longer exist
+ unschedule_deleted_playbooks(new_playbook_ids)
+
+ for deploy_command in deploy_commands:
+ if deploy_command.action_params is None:
+ deploy_command.deploy_func(deploy_command.func, deploy_command.trigger_params)
+ else:
+ deploy_command.deploy_func(deploy_command.func, deploy_command.trigger_params, deploy_command.action_params)
+
+
+def merge_global_params(global_config: dict, config_params: dict) -> dict:
+ merged = global_config.copy()
+ merged.update(config_params)
+ return merged
+
+
+def get_merged_global_config(global_config: dict, config_defaults: TriggerParams) -> TriggerParams:
+ for attribute in global_config.keys():
+ if global_config.get(attribute) is not None and hasattr(config_defaults, attribute):
+ setattr(config_defaults, attribute, global_config.get(attribute))
+ return config_defaults
+
+
+def get_merged_config(config_overrides: TriggerParams, config_defaults: TriggerParams) -> TriggerParams:
+ for attribute in config_overrides.__fields_set__:
+ if getattr(config_overrides, attribute) is not None:
+ setattr(config_defaults, attribute, getattr(config_overrides, attribute))
+ return config_defaults
\ No newline at end of file
diff --git a/src/robusta/integrations/__init__.py b/src/robusta/integrations/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/git/__init__.py b/src/robusta/integrations/git/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/git/git_repo.py b/src/robusta/integrations/git/git_repo.py
new file mode 100644
index 000000000..01f2a4687
--- /dev/null
+++ b/src/robusta/integrations/git/git_repo.py
@@ -0,0 +1,78 @@
+import logging
+import os
+import shutil
+import threading
+import traceback
+from datetime import datetime
+
+from dulwich import porcelain
+
+GIT_DIR_NAME = "robusta-git-audit"
+REPO_LOCAL_BASE_DIR = os.path.join(os.environ.get("REPO_LOCAL_BASE_DIR", "/app"), GIT_DIR_NAME)
+try:
+ os.makedirs(REPO_LOCAL_BASE_DIR, exist_ok=True)
+except Exception as e:
+ print(f"Failed to create git audit base path {REPO_LOCAL_BASE_DIR}", traceback.print_exc())
+ raise e
+
+class GitRepo:
+
+ def __init__(self, https_repo_url : str, username : str, password : str):
+ url_parts = https_repo_url.split("://")
+ self.repo_lock = threading.RLock()
+ self.https_repo_url = https_repo_url
+ self.repo_url = f"{url_parts[0]}://{username}:{password}@{url_parts[1]}"
+ self.repo_name = os.path.splitext(os.path.basename(https_repo_url))[0]
+ self.repo_local_path = os.path.join(REPO_LOCAL_BASE_DIR, self.repo_name)
+ self.init_repo()
+
+ def init_repo(self):
+ with self.repo_lock:
+ if os.path.exists(self.repo_local_path):
+ logging.info(f"Deleting local repo before init {self.repo_local_path}")
+ shutil.rmtree(self.repo_local_path)
+
+ logging.info(f"Cloning git repo {self.https_repo_url}. repo name {self.repo_name}")
+ self.repo = porcelain.clone(self.repo_url, self.repo_local_path)
+
+ def commit(self, file_data : str, file_path : str, file_name, commit_message: str = "Robusta Git"):
+ with self.repo_lock:
+ file_local_path = os.path.join(self.repo_local_path, file_path)
+ try:
+ os.makedirs(file_local_path, exist_ok=True)
+ git_file_name = os.path.join(file_local_path, file_name)
+ with open(git_file_name, 'w') as git_file:
+ git_file.write(file_data)
+
+ porcelain.add(self.repo, git_file_name)
+ porcelain.commit(self.repo, commit_message)
+ except Exception as e:
+ logging.error(f"Commit file failed {self.repo_local_path} {file_path} {file_name}", traceback.print_exc())
+ raise e
+
+ def push(self):
+ with self.repo_lock:
+ porcelain.push(self.repo)
+
+ def commit_push(self, file_data : str, file_path : str, file_name, commit_message: str):
+ with self.repo_lock:
+ self.commit(file_data, file_path, file_name, commit_message)
+ self.push()
+
+ def delete(self, file_path: str, file_name):
+ with self.repo_lock:
+ file_local_path = os.path.join(self.repo_local_path, file_path)
+ if not os.path.exists(file_local_path): # file not in repo. Might have been added before the audit playbook was configured
+ return
+
+ try:
+ porcelain.remove(self.repo, [os.path.join(file_local_path, file_name)])
+ porcelain.commit(self.repo, f"robusta audit {datetime.now()} - delete")
+ except Exception as e:
+ logging.error(f"Commit file failed {self.repo_local_path} {file_path} {file_name}", traceback.print_exc())
+ raise e
+
+ def delete_push(self, file_path : str, file_name):
+ with self.repo_lock:
+ self.delete(file_path, file_name)
+ self.push()
diff --git a/src/robusta/integrations/git/git_repo_manager.py b/src/robusta/integrations/git/git_repo_manager.py
new file mode 100644
index 000000000..2caa354c6
--- /dev/null
+++ b/src/robusta/integrations/git/git_repo_manager.py
@@ -0,0 +1,20 @@
+import threading
+from collections import defaultdict
+
+from .git_repo import GitRepo
+
+
+class GitRepoManager:
+
+ manager_lock = threading.Lock()
+ repo_map = defaultdict(None)
+
+ @staticmethod
+ def get_git_repo(https_repo_url : str, username : str, password: str):
+ with GitRepoManager.manager_lock:
+ repo = GitRepoManager.repo_map.get(https_repo_url)
+ if repo is not None:
+ return repo
+ repo = GitRepo(https_repo_url, username, password)
+ GitRepoManager.repo_map[https_repo_url] = repo
+ return repo
diff --git a/src/robusta/integrations/grafana.py b/src/robusta/integrations/grafana.py
new file mode 100644
index 000000000..de2ef759e
--- /dev/null
+++ b/src/robusta/integrations/grafana.py
@@ -0,0 +1,58 @@
+from datetime import datetime
+import logging
+from typing import List
+
+from grafana_api.grafana_face import GrafanaFace
+
+from ..utils.service_discovery import find_service_url
+
+
+class Grafana:
+
+ def __init__(self, api_key, grafana_url=None):
+ """
+ Create a new connection to Grafana.
+ :param grafana_url: if None, then attempt to discover the address of an in-cluster Grafana service
+ """
+ if grafana_url is None:
+ grafana_url = find_service_url("app.kubernetes.io/name=grafana")
+ protocol_host = grafana_url.split("://")
+ logging.debug(f'Grafana params: protocol - {protocol_host[0]} host - {protocol_host[1]}')
+ self.grafana = GrafanaFace(auth=api_key, protocol=protocol_host[0], host=protocol_host[1])
+
+ def add_line_to_dashboard(self, dashboard_uid: str, text: str, time:datetime=None, tags: List[str] = [], panel_substring: str = None):
+ if time is None:
+ time = datetime.now()
+ self.__add_annotation(dashboard_uid, text, start_time=time, tags=tags, panel_substring=panel_substring)
+
+ def add_range_to_dashboard(self, dashboard_uid: str, text: str, start_time:datetime, end_time:datetime, tags:List[str]=[], panel_substring:str=None):
+ self.__add_annotation(dashboard_uid, text, start_time=start_time, end_time=end_time, tags=tags, panel_substring=panel_substring)
+
+ def __add_annotation(self, dashboard_uid, text, start_time, end_time=None, tags=[], panel_substring=None):
+ dashboard = self.grafana.dashboard.get_dashboard(dashboard_uid)["dashboard"]
+ dashboard_id = dashboard["id"]
+
+ # grafana wants the timestamp as an int with millisecond resolution
+ start_time = int(start_time.timestamp()) * 1000
+ if end_time is not None:
+ end_time = int(end_time.timestamp()) * 1000
+
+ # add an annotation for the entire dashboard
+ if panel_substring is None:
+ resp = self.grafana.annotations.add_annotation(dashboard_id=dashboard_id, text=text, tags=tags, time_from=start_time, time_to=end_time)
+ logging.debug(f'grafana dashboard annotation response {resp}')
+ # add an annotation to specific panels only
+ else:
+ panel_ids = self.__get_panels_with_substring(dashboard, panel_substring)
+ for panel_id in panel_ids:
+ resp = self.grafana.annotations.add_annotation(dashboard_id=dashboard_id, panel_id=panel_id, text=text, tags=tags, time_from=start_time, time_to=end_time)
+ logging.debug(f'grafana panel annotation response {resp}')
+
+ def __get_panels_with_substring(self, dashboard, panel_substring):
+ panel_ids = []
+ for row in dashboard["rows"]:
+ for panel in row["panels"]:
+ if panel_substring.lower() in panel["title"].lower():
+ panel_ids.append(panel["id"])
+ return panel_ids
+
diff --git a/src/robusta/integrations/kubernetes/__init__.py b/src/robusta/integrations/kubernetes/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/kubernetes/api_client_utils.py b/src/robusta/integrations/kubernetes/api_client_utils.py
new file mode 100644
index 000000000..6737be9c9
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/api_client_utils.py
@@ -0,0 +1,159 @@
+import logging
+import os
+import re
+import time
+import traceback
+from typing import List
+
+from kubernetes import config
+from kubernetes.client.api import core_v1_api
+from kubernetes.client.rest import ApiException
+from kubernetes.stream import stream
+
+from hikaru.model import *
+
+RUNNING_STATE = "Running"
+
+if os.getenv('KUBERNETES_SERVICE_HOST'):
+ config.load_incluster_config()
+else:
+ config.load_kube_config()
+
+core_v1 = core_v1_api.CoreV1Api()
+
+default_exec_command = [
+ '/bin/sh',
+ '-c']
+
+
+def wait_until(read_function, predicate_function, timeout_sec: float, backoff_wait_sec: float):
+ """
+ repeatedly calls predicate_function(read_function()) until predicate_function returns True or we time out
+ return the last result of read_function() on success and raises an exception on timeout
+ between attempts, we wait backoff_wait_sec seconds
+ """
+ start_time_sec = time.time()
+
+ while start_time_sec + timeout_sec > time.time():
+ try:
+ resp = read_function()
+ if predicate_function(resp):
+ return resp
+ except ApiException as e:
+ logging.error(f'failed calling read_function {traceback.format_exc()}')
+
+ time.sleep(backoff_wait_sec)
+
+ raise Exception("Failed to reach wait condition")
+
+
+def wait_until_job_complete(job: Job, timeout):
+ """
+ wait until a kubernetes Job object either succeeds or fails at least once
+ """
+ def is_job_complete(j: Job) -> bool:
+ return j.status.completionTime is not None or j.status.failed is not None
+
+ return wait_until(lambda: Job.readNamespacedJob(job.metadata.name, job.metadata.namespace).obj,
+ is_job_complete, timeout, 5)
+
+
+# TODO: refactor to use wait_until function
+def wait_for_pod_status(name, namespace, status: str, timeout_sec: float, backoff_wait_sec: float) -> str:
+ pod_details = f'pod status: {name} {namespace} {status} {timeout_sec}'
+ logging.debug(f'waiting for {pod_details}')
+
+ start_time_sec = time.time()
+ while start_time_sec + timeout_sec > time.time():
+ try:
+ resp = core_v1.read_namespaced_pod_status(name, namespace)
+
+ if resp.status.phase == status:
+ logging.debug(f'reached {pod_details}')
+ return status
+
+ except ApiException as e:
+ logging.error(f'failed to get pod status {name} {namespace} {traceback.format_exc()}')
+
+ time.sleep(backoff_wait_sec)
+
+ logging.debug(f'failed to reach {pod_details}')
+ return "FAIL"
+
+
+def exec_shell_command(name, shell_command: str, namespace="default", container=""):
+ commands = default_exec_command.copy()
+ commands.append(shell_command)
+ return exec_commands(name, commands, namespace, container)
+
+
+def get_pod_logs(name, namespace="default", container="", previous=None, tail_lines=None, since_seconds=None):
+ resp = None
+ try:
+ resp = core_v1.read_namespaced_pod_log(
+ name,
+ namespace,
+ container=container,
+ previous=previous,
+ tail_lines=tail_lines,
+ since_seconds=since_seconds)
+
+ except ApiException as e:
+ if e.status != 404:
+ logging.exception(f'failed to get pod logs {name} {namespace} {container}')
+ resp = 'error getting logs'
+
+ logging.debug(f'get logs {resp}')
+ return resp
+
+
+def prepare_pod_command(cmd) -> List[str]:
+ if type(cmd) == list:
+ return cmd
+ elif type(cmd) == str:
+ return cmd.split(" ") # cmd needs to be a list of strings
+ elif cmd is None:
+ return None
+ else:
+ logging.exception(f"cmd {cmd} has unknown type {type(cmd)}")
+ return cmd
+
+
+def exec_commands(name, exec_command: List[str], namespace="default", container=""):
+ logging.debug(
+ f'Executing command name: {name} command: {exec_command} namespace: {namespace} container: {container}')
+ resp = None
+
+ # verify pod state before connecting
+ pod_status = wait_for_pod_status(name, namespace, RUNNING_STATE, 30, 0.2) # TODO config
+ if pod_status != RUNNING_STATE:
+ msg = f'Not running exec commands. Pod {name} {namespace} is not in running state'
+ logging.error(msg)
+ return msg
+
+ try:
+ resp = stream(core_v1.connect_get_namespaced_pod_exec,
+ name,
+ namespace,
+ container=container,
+ command=exec_command,
+ stderr=True, stdin=False,
+ stdout=True, tty=False)
+
+ except ApiException as e:
+ if e.status != 404:
+ logging.exception(f'exec command {exec_command} resulted with error')
+ resp = 'error executing commands'
+
+ logging.debug(f'exec command response {resp}')
+ return resp
+
+
+def to_kubernetes_name(name, prefix=""):
+ """
+ returns a valid and unique kubernetes name based on prefix and name, replacing characters in name as necessary
+ see https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
+ """
+ unique_id = str(time.time()).replace('.', '-')
+ safe_name = re.sub("[^0-9a-zA-Z\\-]+", "-", name)
+ return f"{prefix}{safe_name}-{unique_id}"[:63]
diff --git a/src/robusta/integrations/kubernetes/autogenerated/__init__.py b/src/robusta/integrations/kubernetes/autogenerated/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/kubernetes/autogenerated/events.py b/src/robusta/integrations/kubernetes/autogenerated/events.py
new file mode 100644
index 000000000..a2d6209b9
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/events.py
@@ -0,0 +1,95 @@
+# This file was autogenerated. Do not edit.
+
+from dataclasses import dataclass
+from typing import Union
+from ..base_event import K8sBaseEvent
+from ..custom_models import RobustaPod,RobustaDeployment
+from hikaru.model.rel_1_16.v1 import Pod as v1Pod
+from hikaru.model.rel_1_16.v1 import ReplicaSet as v1ReplicaSet
+from hikaru.model.rel_1_16.v1 import DaemonSet as v1DaemonSet
+from hikaru.model.rel_1_16.v1 import Deployment as v1Deployment
+from hikaru.model.rel_1_16.v1 import Service as v1Service
+from hikaru.model.rel_1_16.v1 import ConfigMap as v1ConfigMap
+from hikaru.model.rel_1_16.v1 import Event as v1Event
+from hikaru.model.rel_1_16.v1 import HorizontalPodAutoscaler as v1HorizontalPodAutoscaler
+from hikaru.model.rel_1_16.v2beta1 import Pod as v2beta1Pod
+from hikaru.model.rel_1_16.v2beta1 import ReplicaSet as v2beta1ReplicaSet
+from hikaru.model.rel_1_16.v2beta1 import DaemonSet as v2beta1DaemonSet
+from hikaru.model.rel_1_16.v2beta1 import Deployment as v2beta1Deployment
+from hikaru.model.rel_1_16.v2beta1 import Service as v2beta1Service
+from hikaru.model.rel_1_16.v2beta1 import ConfigMap as v2beta1ConfigMap
+from hikaru.model.rel_1_16.v2beta1 import Event as v2beta1Event
+from hikaru.model.rel_1_16.v2beta1 import HorizontalPodAutoscaler as v2beta1HorizontalPodAutoscaler
+from hikaru.model.rel_1_16.v2beta2 import Pod as v2beta2Pod
+from hikaru.model.rel_1_16.v2beta2 import ReplicaSet as v2beta2ReplicaSet
+from hikaru.model.rel_1_16.v2beta2 import DaemonSet as v2beta2DaemonSet
+from hikaru.model.rel_1_16.v2beta2 import Deployment as v2beta2Deployment
+from hikaru.model.rel_1_16.v2beta2 import Service as v2beta2Service
+from hikaru.model.rel_1_16.v2beta2 import ConfigMap as v2beta2ConfigMap
+from hikaru.model.rel_1_16.v2beta2 import Event as v2beta2Event
+from hikaru.model.rel_1_16.v2beta2 import HorizontalPodAutoscaler as v2beta2HorizontalPodAutoscaler
+
+@dataclass
+class PodEvent (K8sBaseEvent):
+ obj: RobustaPod
+ old_obj: RobustaPod
+
+
+@dataclass
+class ReplicaSetEvent (K8sBaseEvent):
+ obj: Union[v1ReplicaSet,v2beta1ReplicaSet,v2beta2ReplicaSet]
+ old_obj: Union[v1ReplicaSet,v2beta1ReplicaSet,v2beta2ReplicaSet]
+
+
+@dataclass
+class DaemonSetEvent (K8sBaseEvent):
+ obj: Union[v1DaemonSet,v2beta1DaemonSet,v2beta2DaemonSet]
+ old_obj: Union[v1DaemonSet,v2beta1DaemonSet,v2beta2DaemonSet]
+
+
+@dataclass
+class DeploymentEvent (K8sBaseEvent):
+ obj: RobustaDeployment
+ old_obj: RobustaDeployment
+
+
+@dataclass
+class ServiceEvent (K8sBaseEvent):
+ obj: Union[v1Service,v2beta1Service,v2beta2Service]
+ old_obj: Union[v1Service,v2beta1Service,v2beta2Service]
+
+
+@dataclass
+class ConfigMapEvent (K8sBaseEvent):
+ obj: Union[v1ConfigMap,v2beta1ConfigMap,v2beta2ConfigMap]
+ old_obj: Union[v1ConfigMap,v2beta1ConfigMap,v2beta2ConfigMap]
+
+
+@dataclass
+class EventEvent (K8sBaseEvent):
+ obj: Union[v1Event,v2beta1Event,v2beta2Event]
+ old_obj: Union[v1Event,v2beta1Event,v2beta2Event]
+
+
+@dataclass
+class HorizontalPodAutoscalerEvent (K8sBaseEvent):
+ obj: Union[v1HorizontalPodAutoscaler,v2beta1HorizontalPodAutoscaler,v2beta2HorizontalPodAutoscaler]
+ old_obj: Union[v1HorizontalPodAutoscaler,v2beta1HorizontalPodAutoscaler,v2beta2HorizontalPodAutoscaler]
+
+
+@dataclass
+class KubernetesAnyEvent (K8sBaseEvent):
+ obj: Union[v1ConfigMap,v1ReplicaSet,v2beta1ReplicaSet,RobustaPod,v1DaemonSet,v2beta2Service,v1Service,v2beta2ConfigMap,v2beta1ConfigMap,v2beta2Event,v2beta1HorizontalPodAutoscaler,RobustaDeployment,v1HorizontalPodAutoscaler,v2beta1DaemonSet,v1Event,v2beta2ReplicaSet,v2beta2HorizontalPodAutoscaler,v2beta2DaemonSet,v2beta1Service,v2beta1Event]
+ old_obj: Union[v1ConfigMap,v1ReplicaSet,v2beta1ReplicaSet,RobustaPod,v1DaemonSet,v2beta2Service,v1Service,v2beta2ConfigMap,v2beta1ConfigMap,v2beta2Event,v2beta1HorizontalPodAutoscaler,RobustaDeployment,v1HorizontalPodAutoscaler,v2beta1DaemonSet,v1Event,v2beta2ReplicaSet,v2beta2HorizontalPodAutoscaler,v2beta2DaemonSet,v2beta1Service,v2beta1Event]
+
+
+KIND_TO_EVENT_CLASS = {
+ 'Pod': PodEvent,
+ 'ReplicaSet': ReplicaSetEvent,
+ 'DaemonSet': DaemonSetEvent,
+ 'Deployment': DeploymentEvent,
+ 'Service': ServiceEvent,
+ 'ConfigMap': ConfigMapEvent,
+ 'Event': EventEvent,
+ 'HorizontalPodAutoscaler': HorizontalPodAutoscalerEvent
+}
diff --git a/src/robusta/integrations/kubernetes/autogenerated/models.py b/src/robusta/integrations/kubernetes/autogenerated/models.py
new file mode 100644
index 000000000..7462dd0a3
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/models.py
@@ -0,0 +1,16 @@
+# This file was autogenerated. Do not edit.
+
+from .v1.models import KIND_TO_MODEL_CLASS as v1
+from .v2beta1.models import KIND_TO_MODEL_CLASS as v2beta1
+from .v2beta2.models import KIND_TO_MODEL_CLASS as v2beta2
+VERSION_KIND_TO_MODEL_CLASS = {
+ 'v1': v1,
+ 'v2beta1': v2beta1,
+ 'v2beta2': v2beta2
+}
+
+
+def get_api_version(apiVersion: str):
+ if "/" in apiVersion:
+ apiVersion = apiVersion.split("/")[1]
+ return VERSION_KIND_TO_MODEL_CLASS.get(apiVersion)
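+
+# For example, get_api_version("autoscaling/v2beta2") strips the group prefix and returns the
+# v2beta2 KIND_TO_MODEL_CLASS mapping, while get_api_version("v1") returns the v1 mapping.
+# Unknown versions return None.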
diff --git a/src/robusta/integrations/kubernetes/autogenerated/triggers.py b/src/robusta/integrations/kubernetes/autogenerated/triggers.py
new file mode 100644
index 000000000..09cf9cc84
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/triggers.py
@@ -0,0 +1,196 @@
+# This file was autogenerated. Do not edit.
+
+from ....utils.decorators import doublewrap
+from ..base_triggers import register_k8s_playbook, register_k8s_any_playbook
+from ..base_event import K8sOperationType
+
+
+# Pod Triggers
+@doublewrap
+def on_pod_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Pod', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_pod_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Pod', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_pod_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Pod', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_pod_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Pod', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# ReplicaSet Triggers
+@doublewrap
+def on_replicaset_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ReplicaSet', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_replicaset_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ReplicaSet', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_replicaset_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ReplicaSet', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_replicaset_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ReplicaSet', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# DaemonSet Triggers
+@doublewrap
+def on_daemonset_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'DaemonSet', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_daemonset_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'DaemonSet', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_daemonset_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'DaemonSet', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_daemonset_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'DaemonSet', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# Deployment Triggers
+@doublewrap
+def on_deployment_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Deployment', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_deployment_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Deployment', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_deployment_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Deployment', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_deployment_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Deployment', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# Service Triggers
+@doublewrap
+def on_service_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Service', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_service_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Service', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_service_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Service', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_service_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Service', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# ConfigMap Triggers
+@doublewrap
+def on_configmap_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ConfigMap', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_configmap_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ConfigMap', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_configmap_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ConfigMap', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_configmap_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'ConfigMap', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# Event Triggers
+@doublewrap
+def on_event_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Event', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_event_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Event', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_event_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Event', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_event_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'Event', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# HorizontalPodAutoscaler Triggers
+@doublewrap
+def on_horizontalpodautoscaler_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'HorizontalPodAutoscaler', K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_horizontalpodautoscaler_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'HorizontalPodAutoscaler', K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_horizontalpodautoscaler_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'HorizontalPodAutoscaler', K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_horizontalpodautoscaler_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_playbook(func, 'HorizontalPodAutoscaler', None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+# Kubernetes Any Triggers
+@doublewrap
+def on_kubernetes_any_create(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_any_playbook(func, K8sOperationType.CREATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_kubernetes_any_update(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_any_playbook(func, K8sOperationType.UPDATE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_kubernetes_any_delete(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_any_playbook(func, K8sOperationType.DELETE, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
+@doublewrap
+def on_kubernetes_any_all_changes(func, name_prefix='', namespace_prefix=''):
+ return register_k8s_any_playbook(func, None, name_prefix=name_prefix, namespace_prefix=namespace_prefix)
+
+
diff --git a/src/robusta/integrations/kubernetes/autogenerated/v1/models.py b/src/robusta/integrations/kubernetes/autogenerated/v1/models.py
new file mode 100644
index 000000000..b5074719c
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/v1/models.py
@@ -0,0 +1,16 @@
+# This file was autogenerated. Do not edit.
+
+from hikaru.model.rel_1_16.v1 import *
+from ...custom_models import RobustaPod,RobustaDeployment
+
+
+KIND_TO_MODEL_CLASS = {
+ 'Pod': RobustaPod,
+ 'ReplicaSet': ReplicaSet,
+ 'DaemonSet': DaemonSet,
+ 'Deployment': RobustaDeployment,
+ 'Service': Service,
+ 'ConfigMap': ConfigMap,
+ 'Event': Event,
+ 'HorizontalPodAutoscaler': HorizontalPodAutoscaler
+}
diff --git a/src/robusta/integrations/kubernetes/autogenerated/v2beta1/models.py b/src/robusta/integrations/kubernetes/autogenerated/v2beta1/models.py
new file mode 100644
index 000000000..9e97bb550
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/v2beta1/models.py
@@ -0,0 +1,16 @@
+# This file was autogenerated. Do not edit.
+
+from hikaru.model.rel_1_16.v2beta1 import *
+from ...custom_models import RobustaPod,RobustaDeployment
+
+
+KIND_TO_MODEL_CLASS = {
+ 'Pod': RobustaPod,
+ 'ReplicaSet': ReplicaSet,
+ 'DaemonSet': DaemonSet,
+ 'Deployment': RobustaDeployment,
+ 'Service': Service,
+ 'ConfigMap': ConfigMap,
+ 'Event': Event,
+ 'HorizontalPodAutoscaler': HorizontalPodAutoscaler
+}
diff --git a/src/robusta/integrations/kubernetes/autogenerated/v2beta2/models.py b/src/robusta/integrations/kubernetes/autogenerated/v2beta2/models.py
new file mode 100644
index 000000000..8f7929f88
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/autogenerated/v2beta2/models.py
@@ -0,0 +1,16 @@
+# This file was autogenerated. Do not edit.
+
+from hikaru.model.rel_1_16.v2beta2 import *
+from ...custom_models import RobustaPod,RobustaDeployment
+
+
+KIND_TO_MODEL_CLASS = {
+ 'Pod': RobustaPod,
+ 'ReplicaSet': ReplicaSet,
+ 'DaemonSet': DaemonSet,
+ 'Deployment': RobustaDeployment,
+ 'Service': Service,
+ 'ConfigMap': ConfigMap,
+ 'Event': Event,
+ 'HorizontalPodAutoscaler': HorizontalPodAutoscaler
+}
diff --git a/src/robusta/integrations/kubernetes/base_event.py b/src/robusta/integrations/kubernetes/base_event.py
new file mode 100644
index 000000000..9c1c8b615
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/base_event.py
@@ -0,0 +1,20 @@
+from enum import Enum
+
+from dataclasses import dataclass
+from hikaru.meta import HikaruDocumentBase
+
+from ...core.model.events import BaseEvent, EventType
+
+
+class K8sOperationType (Enum):
+ CREATE = "create"
+ UPDATE = "update"
+ DELETE = "delete"
+
+
+@dataclass
+class K8sBaseEvent (BaseEvent):
+ description: str = ""
+ operation: K8sOperationType = None # because this dataclass needs to have defaults :(
+ obj: HikaruDocumentBase = None # marked as optional because this dataclass needs to have defaults :(
+    old_obj: HikaruDocumentBase = None  # optional for the same reason as above
diff --git a/src/robusta/integrations/kubernetes/base_triggers.py b/src/robusta/integrations/kubernetes/base_triggers.py
new file mode 100644
index 000000000..0e51d8d02
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/base_triggers.py
@@ -0,0 +1,166 @@
+import logging
+from functools import wraps
+
+import hikaru
+from ...core.model.cloud_event import *
+from ...core.model.playbook_hash import playbook_hash
+from ...core.model.runner_config import *
+from ...core.active_playbooks import register_playbook, activate_playbook
+from pydantic import BaseModel
+
+from ...core.model.events import EventType
+from ...core.triggers import TriggerParams, CloudEvent
+from .autogenerated.events import KIND_TO_EVENT_CLASS, KubernetesAnyEvent
+from .autogenerated.models import get_api_version
+from .base_event import K8sBaseEvent, K8sOperationType
+
+
+class IncomingK8sEventPayload(BaseModel):
+ """
+ The format of incoming CloudEvent payloads containing kubernetes events. This is mostly used for deserialization.
+ """
+ operation: str
+ kind: str
+ apiVersion: str = ""
+ clusterUid: str
+ description: str
+ obj: Dict[Any, Any]
+ oldObj: Optional[Dict[Any, Any]]
+
+
+def parse_incoming_kubernetes_event(k8s_payload: IncomingK8sEventPayload) -> Optional[K8sBaseEvent]:
+ event_class = KIND_TO_EVENT_CLASS.get(k8s_payload.kind)
+    version_models = get_api_version(k8s_payload.apiVersion)
+    model_class = version_models.get(k8s_payload.kind) if version_models else None
+ if event_class is None or model_class is None:
+ logging.error(
+ f'classes for kind {k8s_payload.kind} cannot be found. skipping. description {k8s_payload.description}')
+ return None
+
+ obj = hikaru.from_dict(k8s_payload.obj, cls=model_class)
+ old_obj = None
+ if k8s_payload.oldObj is not None:
+ old_obj = hikaru.from_dict(k8s_payload.oldObj, cls=model_class)
+
+ operation_type = K8sOperationType(k8s_payload.operation)
+ return event_class(operation=operation_type, description=k8s_payload.description.replace("\n", ""), obj=obj, old_obj=old_obj)
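+
+# A minimal (hypothetical) payload accepted by this parser would look like:
+#   {"operation": "update", "kind": "Pod", "apiVersion": "v1", "clusterUid": "...",
+#    "description": "...", "obj": {...}, "oldObj": {...}}
+# obj/oldObj are deserialized into the hikaru model class selected by the kind/apiVersion lookup above.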
+
+
+def parse_incoming_kubernetes_any_event(k8s_payload: IncomingK8sEventPayload) -> Optional[K8sBaseEvent]:
+    version_models = get_api_version(k8s_payload.apiVersion)
+    model_class = version_models.get(k8s_payload.kind) if version_models else None
+ if model_class is None:
+ logging.error(
+ f'classes for kind {k8s_payload.kind} cannot be found. skipping. description {k8s_payload.description}')
+ return None
+
+ obj = hikaru.from_dict(k8s_payload.obj, cls=model_class)
+ old_obj = None
+ if k8s_payload.oldObj is not None:
+ old_obj = hikaru.from_dict(k8s_payload.oldObj, cls=model_class)
+
+ operation_type = K8sOperationType(k8s_payload.operation)
+ return KubernetesAnyEvent(operation=operation_type, description=k8s_payload.description.replace("\n", ""), obj=obj, old_obj=old_obj)
+
+def register_k8s_playbook(func, kind, operation: Optional[K8sOperationType], name_prefix="", namespace_prefix=""):
+ register_playbook(func, deploy_on_k8s_topology, TriggerParams(
+ name_prefix=name_prefix,
+ namespace_prefix=namespace_prefix,
+ kind=kind,
+ operation=operation))
+ return func
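+
+# Illustrative usage (a sketch, not from the original code): the autogenerated trigger decorators
+# delegate to this function, so a playbook author would typically write something like:
+#
+#   @on_pod_update(namespace_prefix="prod")
+#   def my_pod_playbook(event):
+#       logging.info(f"pod {event.obj.metadata.name} changed")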
+
+
+def register_k8s_any_playbook(func, operation: Optional[K8sOperationType], name_prefix="", namespace_prefix=""):
+ register_playbook(func, deploy_on_any_k8s_topology, TriggerParams(
+ name_prefix=name_prefix,
+ namespace_prefix=namespace_prefix,
+ operation=operation))
+ return func
+
+
+def prefix_match(prefix, field_value) -> bool:
+ if prefix == "":
+ return True
+ if field_value is None: # we have a prefix requirement, but field doesn't exist. no match
+ return False
+ return field_value.startswith(prefix)
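+
+# For example: prefix_match("", anything) is True (no requirement), prefix_match("kube-", "kube-dns")
+# is True, and prefix_match("kube-", None) is False because the field is missing.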
+
+
+def deploy_on_k8s_topology(func, trigger_params: TriggerParams, action_params=None):
+ @wraps(func)
+ def wrapper(cloud_event: CloudEvent):
+ logging.debug(f'checking if we should run {func} on k8s event')
+ k8s_payload = IncomingK8sEventPayload(**cloud_event.data)
+
+ if not k8s_topology_event_matched(k8s_payload, trigger_params):
+ return "SKIP"
+
+ concrete_event = parse_incoming_kubernetes_event(k8s_payload)
+ if concrete_event is None:
+ return "SKIP"
+
+ logging.info(f"running kubernetes playbook {func.__name__}; action_params={action_params}; event={concrete_event.description}")
+ if action_params is None:
+ result = func(concrete_event)
+ else:
+ result = func(concrete_event, action_params)
+
+ if result is not None:
+ return result
+ return "OK"
+
+ playbook_id = playbook_hash(func, trigger_params, action_params)
+ activate_playbook(EventType.KUBERNETES_TOPOLOGY_CHANGE, wrapper, func, playbook_id)
+ return wrapper
+
+
+def deploy_on_any_k8s_topology(func, trigger_params: TriggerParams, action_params=None):
+ @wraps(func)
+ def wrapper(cloud_event: CloudEvent):
+ logging.debug(f'checking if we should run {func} on k8s event')
+ k8s_payload = IncomingK8sEventPayload(**cloud_event.data)
+
+ if not k8s_topology_event_matched(k8s_payload, trigger_params):
+ return "SKIP"
+
+ concrete_event = parse_incoming_kubernetes_any_event(k8s_payload)
+ if concrete_event is None:
+ return "SKIP"
+
+ logging.info(f"running kubernetes playbook {func.__name__}; action_params={action_params}; event={concrete_event.description}")
+ if action_params is None:
+ result = func(concrete_event)
+ else:
+ result = func(concrete_event, action_params)
+
+ if result is not None:
+ return result
+ return "OK"
+
+ playbook_id = playbook_hash(func, trigger_params, action_params)
+ activate_playbook(EventType.KUBERNETES_TOPOLOGY_CHANGE, wrapper, func, playbook_id)
+ return wrapper
+
+
+def k8s_topology_event_matched(event: IncomingK8sEventPayload, trigger_params: TriggerParams):
+ if trigger_params.operation is not None and trigger_params.operation.value != event.operation:
+ logging.debug(f"operation {event.operation} != {trigger_params.operation}")
+ return False
+
+ if trigger_params.kind is not None and trigger_params.kind != event.kind:
+ logging.debug(f"kind {event.kind} != {trigger_params.kind}")
+ return False
+
+ metadata = event.obj.get('metadata', {})
+ if trigger_params.name_prefix != "":
+ obj_name = metadata.get('name', '')
+ if not obj_name.startswith(trigger_params.name_prefix):
+ logging.debug("name doesn't match")
+ return False
+
+ if trigger_params.namespace_prefix != "":
+ obj_namespace = metadata.get('namespace', '')
+ if not obj_namespace.startswith(trigger_params.namespace_prefix):
+ logging.debug("namespace doesn't match")
+ return False
+
+ return True
diff --git a/src/robusta/integrations/kubernetes/custom_models.py b/src/robusta/integrations/kubernetes/custom_models.py
new file mode 100644
index 000000000..b63bdfcfa
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/custom_models.py
@@ -0,0 +1,176 @@
+import time
+from typing import Type, TypeVar, List, Dict
+
+import hikaru
+import json
+import yaml
+from hikaru.model import *
+from pydantic import BaseModel
+
+from .api_client_utils import *
+from .templates import get_deployment_yaml
+
+S = TypeVar("S")
+T = TypeVar("T")
+PYTHON_DEBUGGER_IMAGE = "us-central1-docker.pkg.dev/arabica-300319/devel/python-tools:latest"
+
+
+# TODO: import these from the lookup_pid project
+class Process(BaseModel):
+ pid: int
+ exe: str
+ cmdline: List[str]
+
+
+class ProcessList(BaseModel):
+ processes: List[Process]
+
+
+def get_images(containers: List[Container]) -> Dict[str, str]:
+ """
+ Takes a list of containers and returns a dict mapping image name to image tag.
+ """
+ name_to_version = {}
+ for container in containers:
+ if ":" in container.image:
+ image_name, image_tag = container.image.split(":", maxsplit=1)
+ name_to_version[image_name] = image_tag
+ else:
+ name_to_version[container.image] = ""
+ return name_to_version
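+
+# For example, containers with images ["nginx:1.19", "busybox"] map to {"nginx": "1.19", "busybox": ""};
+# images without an explicit tag get an empty string.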
+
+
+class RobustaPod(Pod):
+
+ def exec(self, shell_command: str) -> str:
+ """Execute a command inside the pod"""
+ return exec_shell_command(self.metadata.name, shell_command, self.metadata.namespace)
+
+ def get_logs(self, container_name=None, previous=None, tail_lines=None) -> str:
+ """
+ Fetch pod logs
+ """
+ if container_name is None:
+ container_name = self.spec.containers[0].name
+ return get_pod_logs(self.metadata.name, self.metadata.namespace, container_name, previous, tail_lines)
+
+ def create_debugger_pod(self, debug_image=PYTHON_DEBUGGER_IMAGE, debug_cmd=None) -> 'RobustaPod':
+ """
+ Creates a debugging pod with high privileges
+ """
+ debugger = RobustaPod(apiVersion="v1", kind="Pod",
+ metadata=ObjectMeta(name=to_kubernetes_name(self.metadata.name, "debug-"),
+ namespace="robusta"),
+ spec=PodSpec(hostPID=True,
+ nodeName=self.spec.nodeName,
+ containers=[Container(name="debugger",
+ image=debug_image,
+ imagePullPolicy="Always",
+ command=prepare_pod_command(debug_cmd),
+ securityContext=SecurityContext(
+ capabilities=Capabilities(
+ add=["SYS_PTRACE", "SYS_ADMIN"]
+ ),
+ privileged=True
+ ))]))
+ # TODO: check the result code
+ debugger = debugger.createNamespacedPod(debugger.metadata.namespace).obj
+ return debugger
+
+ def exec_in_debugger_pod(self, cmd, debug_image=PYTHON_DEBUGGER_IMAGE) -> str:
+ debugger = self.create_debugger_pod(debug_image)
+ try:
+ return debugger.exec(cmd)
+ finally:
+ RobustaPod.deleteNamespacedPod(debugger.metadata.name, debugger.metadata.namespace)
+
+ def get_processes(self) -> List[Process]:
+ output = self.exec_in_debugger_pod(f"/lookup_pid.py {self.metadata.uid}")
+ # somehow when doing the exec command the quotes in the json output are converted from " to '
+ # we fix this so that we can deserialize the json properly...
+ # we should eventually figure out why this is happening
+ output = output.replace("'", '"')
+ processes = ProcessList(**json.loads(output))
+ return processes.processes
+
+ def get_images(self) -> Dict[str, str]:
+ return get_images(self.spec.containers)
+
+ @staticmethod
+ def find_pod(name_prefix, namespace) -> 'RobustaPod':
+ pods: PodList = PodList.listNamespacedPod(namespace).obj
+ for pod in pods.items:
+ if pod.metadata.name.startswith(name_prefix):
+ # we serialize and then deserialize to work around https://github.com/haxsaw/hikaru/issues/15
+ return hikaru.from_dict(pod.to_dict(), cls=RobustaPod)
+ raise Exception(f"No pod exists in namespace '{namespace}' with name prefix '{name_prefix}'")
+
+ @staticmethod
+ def read(name: str, namespace: str) -> 'RobustaPod':
+ """Read pod definition from the API server"""
+ return Pod.readNamespacedPod(name, namespace).obj
+
+
+class RobustaDeployment(Deployment):
+
+ @classmethod
+ def from_image(cls: Type[T], name, image="busybox", cmd=None) -> T:
+ obj: RobustaDeployment = hikaru.from_dict(yaml.safe_load(get_deployment_yaml(name, image)), RobustaDeployment)
+ obj.spec.template.spec.containers[0].command = prepare_pod_command(cmd)
+ return obj
+
+ def get_images(self) -> Dict[str, str]:
+ return get_images(self.spec.template.spec.containers)
+
+
+class RobustaJob(Job):
+
+ def get_pods(self) -> List[RobustaPod]:
+ """
+ gets the pods associated with a job
+ """
+ pods: PodList = PodList.listNamespacedPod(self.metadata.namespace,
+ label_selector=f"job-name = {self.metadata.name}").obj
+ # we serialize and then deserialize to work around https://github.com/haxsaw/hikaru/issues/15
+ return [hikaru.from_dict(pod.to_dict(), cls=RobustaPod) for pod in pods.items]
+
+ def get_single_pod(self) -> RobustaPod:
+ """
+ like get_pods() but verifies that only one pod is associated with the job and returns that pod
+ """
+ pods = self.get_pods()
+ if len(pods) != 1:
+ raise Exception(f"got more pods than expected for job: {pods}")
+ return pods[0]
+
+ @classmethod
+ def run_simple_job_spec(cls, spec, name, timeout) -> str:
+ job = RobustaJob(metadata=ObjectMeta(namespace="robusta", name=to_kubernetes_name(name)),
+ spec=JobSpec(backoffLimit=0,
+ template=PodTemplateSpec(
+ spec=spec,
+ )))
+ try:
+ job = job.createNamespacedJob(job.metadata.namespace).obj
+ job = hikaru.from_dict(job.to_dict(), cls=RobustaJob) # temporary workaround for hikaru bug #15
+ job: RobustaJob = wait_until_job_complete(job, timeout)
+ job = hikaru.from_dict(job.to_dict(), cls=RobustaJob) # temporary workaround for hikaru bug #15
+ pod = job.get_single_pod()
+ return pod.get_logs()
+ finally:
+ job.deleteNamespacedJob(job.metadata.name, job.metadata.namespace, propagation_policy="Foreground")
+
+ @classmethod
+ def run_simple_job(cls, image, command, timeout) -> str:
+ spec = PodSpec(
+ containers=[Container(name=to_kubernetes_name(image),
+ image=image,
+ command=prepare_pod_command(command))],
+ restartPolicy="Never"
+ )
+ return cls.run_simple_job_spec(spec, name=image, timeout=timeout)
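+
+    # Illustrative usage (a sketch with hypothetical values):
+    #   logs = RobustaJob.run_simple_job("busybox", "ls /etc", timeout=60)
+    # creates a one-off Job in the "robusta" namespace, waits for it to complete (assuming
+    # wait_until_job_complete behaves as its name suggests), returns the pod logs, and deletes
+    # the Job in the finally block of run_simple_job_spec.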
+
+
+hikaru.register_version_kind_class(RobustaPod, Pod.apiVersion, Pod.kind)
+hikaru.register_version_kind_class(RobustaDeployment, Deployment.apiVersion, Deployment.kind)
+hikaru.register_version_kind_class(RobustaJob, Job.apiVersion, Job.kind)
diff --git a/src/robusta/integrations/kubernetes/templates.py b/src/robusta/integrations/kubernetes/templates.py
new file mode 100644
index 000000000..461aa7741
--- /dev/null
+++ b/src/robusta/integrations/kubernetes/templates.py
@@ -0,0 +1,26 @@
+import textwrap
+
+def get_deployment_yaml(name, image="busybox"):
+ return textwrap.dedent(f"""\
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: {name}
+ namespace: robusta
+ labels:
+ app: {name}
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {name}
+ template:
+ metadata:
+ labels:
+ app: {name}
+ spec:
+ containers:
+ - name: runner
+ image: {image}
+ imagePullPolicy: Always
+ """)
diff --git a/src/robusta/integrations/manual/__init__.py b/src/robusta/integrations/manual/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/manual/incoming_handler.py b/src/robusta/integrations/manual/incoming_handler.py
new file mode 100644
index 000000000..1cc4f7d7b
--- /dev/null
+++ b/src/robusta/integrations/manual/incoming_handler.py
@@ -0,0 +1,25 @@
+import uuid
+from datetime import datetime
+
+import flask
+from ...core.model.cloud_event import CloudEvent
+from ...core.model.events import EventType
+
+
+def parse_incoming_manual_trigger(request: flask.Request) -> CloudEvent:
+ trigger_name = request.form.get("trigger_name")
+ if trigger_name is None:
+ raise Exception(f"manual trigger is missing trigger_name. request={request}")
+
+ event_data = request.form.to_dict()
+ event_data['description'] = f'manual trigger for playbook {trigger_name}'
+
+ return CloudEvent(specversion='1.0',
+ type=EventType.MANUAL_TRIGGER.name,
+ source=EventType.MANUAL_TRIGGER.name,
+ subject=trigger_name,
+ id=str(uuid.uuid4()),
+ time=datetime.now(),
+ datacontenttype='application/json',
+ data=event_data,
+ )
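+
+# A manual trigger can be fired with a plain form-encoded POST, e.g. (hypothetical host; the
+# /api/trigger route is wired to this parser in runner/main.py):
+#   curl -X POST -d "trigger_name=my_playbook" http://localhost:5000/api/trigger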
diff --git a/src/robusta/integrations/manual/triggers.py b/src/robusta/integrations/manual/triggers.py
new file mode 100644
index 000000000..8f4739f71
--- /dev/null
+++ b/src/robusta/integrations/manual/triggers.py
@@ -0,0 +1,47 @@
+import logging
+from functools import wraps
+from dataclasses import dataclass, field
+import pydantic
+
+from ...core.active_playbooks import activate_playbook, register_playbook
+from ...core.model.playbook_hash import playbook_hash
+from ...utils.decorators import doublewrap
+from ...core.model.trigger_params import TriggerParams
+from ...core.model.cloud_event import CloudEvent
+from ...core.model.events import EventType, BaseEvent
+
+
+@dataclass
+class ManualTriggerEvent (BaseEvent):
+ trigger_name: str = ""
+ data: dict = field(default_factory=dict)
+
+
+@doublewrap
+def on_manual_trigger(func):
+ return register_playbook(func, deploy_manual_trigger, TriggerParams(trigger_name=func.__name__))
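+
+# Illustrative usage (a sketch, not from the original code):
+#
+#   @on_manual_trigger
+#   def my_playbook(event: ManualTriggerEvent):
+#       logging.info(f"manually triggered with data {event.data}")
+#
+# The playbook is matched by trigger_name, which defaults to the decorated function's name.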
+
+
+def deploy_manual_trigger(func, trigger_params: TriggerParams, action_params=None):
+ @wraps(func)
+ def wrapper(cloud_event: CloudEvent):
+ trigger_event = ManualTriggerEvent(trigger_name=cloud_event.subject, data=cloud_event.data)
+ logging.debug(
+ f'checking if we should run manually triggered playbook {func}. trigger_name in request is {trigger_event.trigger_name} and playbook trigger_name is {trigger_params.trigger_name}')
+
+ if trigger_event.trigger_name != trigger_params.trigger_name:
+ logging.debug("not running")
+ return
+
+ logging.info(f"running manual playbook {func.__name__}; action_params={action_params}")
+ try:
+ if action_params is None:
+ func(trigger_event)
+ else:
+ func(trigger_event, action_params)
+        except pydantic.error_wrappers.ValidationError as e:
+            logging.error(f"invalid action params for manually triggered playbook {func.__name__}: {e}")
+
+ playbook_id = playbook_hash(func, trigger_params, action_params)
+ activate_playbook(EventType.MANUAL_TRIGGER, wrapper, func, playbook_id)
+ return wrapper
diff --git a/src/robusta/integrations/prometheus/__init__.py b/src/robusta/integrations/prometheus/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/prometheus/incoming_handler.py b/src/robusta/integrations/prometheus/incoming_handler.py
new file mode 100644
index 000000000..9d2cfc641
--- /dev/null
+++ b/src/robusta/integrations/prometheus/incoming_handler.py
@@ -0,0 +1,27 @@
+import datetime
+import uuid
+
+import flask
+
+from ...core.model.cloud_event import CloudEvent
+from ...core.model.events import EventType
+
+
+def parse_incoming_prometheus_alerts(request: flask.Request) -> CloudEvent:
+ event_data = request.get_json()
+ event_data['description'] = 'prometheus alerts group'
+ return prometheus_cloud_event(event_data)
+
+
+def prometheus_cloud_event(event_data):
+ cloud_event = CloudEvent(**{
+ 'specversion': '1.0',
+ 'type': EventType.PROMETHEUS.name,
+ 'source': EventType.PROMETHEUS.name,
+ 'subject': '',
+ 'id': str(uuid.uuid4()),
+ 'datacontenttype': 'application/json',
+ 'time': datetime.datetime.now(),
+ 'data': event_data
+ })
+ return cloud_event
diff --git a/src/robusta/integrations/prometheus/models.py b/src/robusta/integrations/prometheus/models.py
new file mode 100644
index 000000000..ae22c7239
--- /dev/null
+++ b/src/robusta/integrations/prometheus/models.py
@@ -0,0 +1,38 @@
+from dataclasses import dataclass
+from datetime import datetime
+from typing import List, Optional, Dict, Any, Union
+from pydantic import BaseModel
+
+from ...core.model.events import BaseEvent
+from ..kubernetes.custom_models import RobustaPod, Node, RobustaDeployment
+
+
+# for parsing incoming data
+class PrometheusAlert(BaseModel):
+ endsAt: datetime
+ generatorURL: str
+ startsAt: datetime
+ fingerprint: Optional[str] = ""
+ status: str
+ labels: Dict[Any, Any]
+ annotations: Dict[Any, Any]
+
+
+# for parsing incoming data
+class PrometheusEvent(BaseModel):
+ alerts: List[PrometheusAlert] = []
+ description: str
+ externalURL: str
+ groupKey: str
+ version: str
+ commonAnnotations: Optional[Dict[Any, Any]] = None
+ commonLabels: Optional[Dict[Any, Any]] = None
+ groupLabels: Optional[Dict[Any, Any]] = None
+ receiver: str
+ status: str
+
+
+@dataclass
+class PrometheusKubernetesAlert (BaseEvent):
+ alert: PrometheusAlert = None
+ obj: Union[RobustaPod, Node, RobustaDeployment, None] = None
diff --git a/src/robusta/integrations/prometheus/triggers.py b/src/robusta/integrations/prometheus/triggers.py
new file mode 100644
index 000000000..89faa7f4c
--- /dev/null
+++ b/src/robusta/integrations/prometheus/triggers.py
@@ -0,0 +1,107 @@
+import logging
+from functools import wraps
+from hikaru.model.rel_1_16 import *
+
+from .models import PrometheusEvent, PrometheusKubernetesAlert
+import traceback
+from ..kubernetes.custom_models import RobustaPod, Node, RobustaDeployment
+from ...core.model.playbook_hash import playbook_hash
+from ...integrations.kubernetes.base_triggers import prefix_match
+from ...core.active_playbooks import register_playbook, activate_playbook
+from ...core.model.trigger_params import TriggerParams
+from ...core.model.cloud_event import CloudEvent
+from ...core.model.events import EventType
+from ...utils.decorators import doublewrap
+
+
+def find_node_by_ip(ip) -> Node:
+ nodes: NodeList = NodeList.listNode().obj
+ for node in nodes.items:
+ addresses = [a.address for a in node.status.addresses]
+ logging.info(f"node {node.metadata.name} has addresses {addresses}")
+ if ip in addresses:
+ return node
+ raise Exception(f"No node exists with IP '{ip}'")
+
+
+@doublewrap
+def on_pod_prometheus_alert(func, alert_name="", pod_name_prefix="", namespace_prefix="", instance_name_prefix="",
+ status=""):
+ register_playbook(func, deploy_on_pod_prometheus_alert, TriggerParams(
+ alert_name=alert_name,
+ pod_name_prefix=pod_name_prefix,
+ namespace_prefix=namespace_prefix,
+ instance_name_prefix=instance_name_prefix,
+ status=status))
+ return func
+
+
+def deploy_on_pod_prometheus_alert(func, trigger_params: TriggerParams, action_params=None):
+ @wraps(func)
+ def wrapper(cloud_event: CloudEvent):
+ logging.debug(f'checking if we should run {func} on prometheus event {trigger_params.alert_name}')
+ prometheus_event = PrometheusEvent(**cloud_event.data)
+ results = []
+ for alert in prometheus_event.alerts:
+ try:
+ alert_name = alert.labels['alertname']
+ if trigger_params.alert_name and alert_name != trigger_params.alert_name:
+ continue
+ if trigger_params.status != "" and trigger_params.status != alert.status:
+ continue
+ if not prefix_match(trigger_params.pod_name_prefix, alert.labels.get('pod')):
+ continue
+ if not prefix_match(trigger_params.namespace_prefix, alert.labels.get('namespace')):
+ continue
+ if not prefix_match(trigger_params.instance_name_prefix, alert.labels.get('instance')):
+ continue
+
+ kubernetes_obj = None
+ pod_name = alert.labels.get('pod', None)
+ node_name = alert.labels.get('instance', None)
+ deployment_name = alert.labels.get('deployment', None)
+ try:
+ if pod_name is not None: # pod alert
+ pod_namespace = alert.labels.get('namespace', 'default')
+ kubernetes_obj = RobustaPod.read(pod_name, pod_namespace)
+ if kubernetes_obj is None:
+ logging.info(f'pod {pod_name} namespace {pod_namespace} not found. Skipping alert {alert_name}')
+ continue
+ elif deployment_name:
+ namespace = alert.labels.get('namespace', 'default')
+ kubernetes_obj = RobustaDeployment.readNamespacedDeployment(deployment_name, namespace).obj
+ if kubernetes_obj is None:
+ logging.info(f'deployment {deployment_name} namespace {namespace} not found. Skipping alert {alert_name}')
+ continue
+ elif alert.labels.get('job_name', None): # jobs alert not implemented yet
+ continue
+ elif node_name is not None: # node alert
+ # sometimes we get an IP:PORT instead of the node name. handle that case
+ if ":" in node_name:
+ kubernetes_obj = find_node_by_ip(node_name.split(":")[0])
+ else:
+ kubernetes_obj = Node.readNode(node_name).obj
+ if kubernetes_obj is None:
+ logging.info(f'node {node_name} not found. Skipping alert {alert_name}')
+ continue
+ else: # other alert, not implemented yet
+                        logging.warning(f'alert {alert_name} does not contain a pod/instance identifier. Not loading a kubernetes object')
+ except Exception as e:
+ logging.info(f"Error loading alert kubernetes object {alert}. error: {e}")
+
+ kubernetes_alert = PrometheusKubernetesAlert(alert=alert, obj=kubernetes_obj)
+
+ logging.info(f"running prometheus playbook {func.__name__}; action_params={action_params}")
+ if action_params is None:
+ result = func(kubernetes_alert)
+ else:
+ result = func(kubernetes_alert, action_params)
+
+ if result is not None:
+                    results.append(str(result))  # append in place; str() keeps the join below safe
+ except Exception:
+ logging.error(f"Failed to process alert {alert} {traceback.format_exc()}")
+ return ",".join(results)
+
+ playbook_id = playbook_hash(func, trigger_params, action_params)
+ activate_playbook(EventType.PROMETHEUS, wrapper, func, playbook_id)
+ return wrapper
diff --git a/src/robusta/integrations/prometheus/utils.py b/src/robusta/integrations/prometheus/utils.py
new file mode 100644
index 000000000..a8ca5cb63
--- /dev/null
+++ b/src/robusta/integrations/prometheus/utils.py
@@ -0,0 +1,8 @@
+from ...utils.service_discovery import find_service_url
+
+
+def find_prometheus_url():
+ """
+    Try to autodiscover the url of an in-cluster Prometheus service
+ """
+ return find_service_url("app=kube-prometheus-stack-prometheus")
diff --git a/src/robusta/integrations/scheduled/__init__.py b/src/robusta/integrations/scheduled/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/integrations/scheduled/models.py b/src/robusta/integrations/scheduled/models.py
new file mode 100644
index 000000000..f45fb7bdb
--- /dev/null
+++ b/src/robusta/integrations/scheduled/models.py
@@ -0,0 +1,7 @@
+from pydantic import BaseModel
+
+
+class SchedulerEvent(BaseModel):
+ playbook_id: str
+ recurrence: int
+ description: str
diff --git a/src/robusta/integrations/scheduled/triggers.py b/src/robusta/integrations/scheduled/triggers.py
new file mode 100644
index 000000000..bc852aa18
--- /dev/null
+++ b/src/robusta/integrations/scheduled/triggers.py
@@ -0,0 +1,48 @@
+import logging
+from functools import wraps
+from dataclasses import dataclass
+
+from ...core.model.cloud_event import *
+from ...core.model.events import *
+from ...core.model.trigger_params import TriggerParams
+from ...core.model.playbook_hash import playbook_hash
+from ...core.schedule.scheduler import schedule_trigger
+from ...integrations.scheduled.models import SchedulerEvent
+from ...utils.decorators import doublewrap
+from ...core.active_playbooks import register_playbook, activate_playbook
+
+
+@dataclass
+class RecurringTriggerEvent (BaseEvent):
+ recurrence: int = 0
+
+
+@doublewrap
+def on_recurring_trigger(func, repeat=1, seconds_delay=None):
+ register_playbook(func, deploy_on_scheduler_event, TriggerParams(repeat=repeat, seconds_delay=seconds_delay))
+ return func
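+
+# Illustrative usage (a sketch, not from the original code):
+#
+#   @on_recurring_trigger(repeat=5, seconds_delay=60)
+#   def periodic_check(event: RecurringTriggerEvent):
+#       logging.info(f"recurrence #{event.recurrence}")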
+
+
+def deploy_on_scheduler_event(func, trigger_params: TriggerParams, action_params=None):
+ playbook_id = playbook_hash(func, trigger_params, action_params)
+ @wraps(func)
+ def wrapper(cloud_event: CloudEvent):
+
+ logging.debug(f'checking if we should run {func} on scheduler event {playbook_id}')
+ scheduler_event = SchedulerEvent(**cloud_event.data)
+
+ if scheduler_event.playbook_id == playbook_id:
+ trigger_event = RecurringTriggerEvent(recurrence=scheduler_event.recurrence)
+ logging.info(f"running scheduled playbook {func.__name__}; action_params={action_params}")
+ if action_params is None:
+ result = func(trigger_event)
+ else:
+ result = func(trigger_event, action_params)
+
+ if result is not None:
+ return result
+ return "OK"
+
+ activate_playbook(EventType.SCHEDULED_TRIGGER, wrapper, func, playbook_id)
+ schedule_trigger(playbook_id, trigger_params)
+ return wrapper
diff --git a/src/robusta/integrations/slack/__init__.py b/src/robusta/integrations/slack/__init__.py
new file mode 100644
index 000000000..8738108cc
--- /dev/null
+++ b/src/robusta/integrations/slack/__init__.py
@@ -0,0 +1,3 @@
+from .sender import *
+from .sender import slack_app
+from .receiver import start_slack_receiver
diff --git a/src/robusta/integrations/slack/receiver.py b/src/robusta/integrations/slack/receiver.py
new file mode 100644
index 000000000..6aa9a2c60
--- /dev/null
+++ b/src/robusta/integrations/slack/receiver.py
@@ -0,0 +1,75 @@
+import uuid
+import websocket
+import json
+import os
+import logging
+import time
+from threading import Thread
+
+from ...core.reporting.callbacks import *
+
+SLACK_WEBSOCKET_RELAY_ADDRESS = os.environ.get('SLACK_WEBSOCKET_RELAY_ADDRESS', "")
+SLACK_RECEIVER_ENABLED = os.environ.get('SLACK_RECEIVER_ENABLED', "True")
+SLACK_ENABLE_WEBSOCKET_TRACING = os.environ.get('SLACK_ENABLE_WEBSOCKET_TRACING', "False").lower() == "true"
+SLACK_WEBSOCKET_RECONNECT_DELAY_SEC = int(os.environ.get('SLACK_WEBSOCKET_RECONNECT_DELAY_SEC', 3))
+TARGET_ID = str(uuid.uuid4())
+
+
+def run_report_callback(action, body):
+ callback_request = PlaybookCallbackRequest.parse_raw(action['value'])
+ func = callback_registry.lookup_callback(callback_request)
+ event = ReportCallbackEvent(source_channel_id=body['channel']['id'],
+ source_channel_name=body['channel']['name'],
+ source_user_id=body['user']['id'],
+ source_message=body['message']['text'],
+ source_context=callback_request.context)
+ logging.info(f"got callback `{func}`")
+ if func is None:
+ logging.error(f"no callback found for action_id={action['action_id']} with value={action['value']}")
+ return
+ func(event)
+
+
+def start_slack_receiver():
+ if SLACK_RECEIVER_ENABLED != "True":
+ logging.info("Slack outgoing messages only mode. Slack receiver not initialized")
+ return
+
+ if SLACK_WEBSOCKET_RELAY_ADDRESS == "":
+ logging.warn("Slack relay adress empty. Not initializing slack relay")
+ return
+
+ websocket.enableTrace(SLACK_ENABLE_WEBSOCKET_TRACING)
+ receiver_thread = Thread(target=run_forever)
+ receiver_thread.start()
+
+
+def run_forever():
+ logging.info('starting slack relay receiver')
+ while True:
+ ws = websocket.WebSocketApp(SLACK_WEBSOCKET_RELAY_ADDRESS,
+ on_open=on_open,
+ on_message=on_message,
+ on_error=on_error)
+ ws.run_forever()
+ logging.info('slack relay websocket closed')
+ time.sleep(SLACK_WEBSOCKET_RECONNECT_DELAY_SEC)
+
+
+def on_message(ws, message):
+ # TODO: use typed pydantic classes here?
+ logging.debug(f'received slack message {message}')
+ slack_event = json.loads(message)
+ actions = slack_event['actions']
+ for action in actions:
+ run_report_callback(action, slack_event)
+
+
+def on_error(ws, error):
+ logging.info(f'slack relay websocket error: {error}')
+
+def on_open(ws):
+ logging.info(f'connecting to server as {TARGET_ID}')
+ ws.send(json.dumps({'action': 'auth', 'key': 'dummy key', 'target_id': TARGET_ID}))
+
+
diff --git a/src/robusta/integrations/slack/sender.py b/src/robusta/integrations/slack/sender.py
new file mode 100644
index 000000000..5a093a3de
--- /dev/null
+++ b/src/robusta/integrations/slack/sender.py
@@ -0,0 +1,152 @@
+import json
+import logging
+import os
+import tempfile
+
+import slack_bolt
+
+from ...core.model.events import *
+from ...core.reporting.blocks import *
+from ...core.reporting.utils import add_pngs_for_all_svgs
+from ...core.reporting.callbacks import PlaybookCallbackRequest, callback_registry
+from .receiver import TARGET_ID
+
+SLACK_TOKEN = os.getenv("SLACK_TOKEN")
+ACTION_TRIGGER_PLAYBOOK = "trigger_playbook"
+
+# TODO: we need to make this modular so you can plug n' play different report receivers (slack, msteams, etc)
+# a first step in that direction would be to move all the functions here to a class like SlackReceiver
+# which inherits from an abstract base class ReportReceiver
+try:
+ slack_app = slack_bolt.App(token=SLACK_TOKEN)
+except Exception as e:
+ # we still create a slack_app so that stuff like @slack_app.action wont throw exceptions
+ logging.exception(f"error setting up slack API. cannot send messages. exception={e}")
+ slack_app = slack_bolt.App(token="dummy_token", signing_secret="dummy_signing_secret",
+ token_verification_enabled=False)
+
+
+def get_action_block_for_choices(choices: Dict[str, Callable] = None, context=""):
+ if choices is None:
+ return []
+
+ buttons = []
+ for (i, (text, callback)) in enumerate(choices.items()):
+ if callback is None:
+ raise Exception(
+ f"The callback for choice {text} is None. Did you accidentally pass `foo()` as a callback and not `foo`?")
+ if not callback_registry.is_callback_in_registry(callback):
+ raise Exception(f"{callback} is not a function that was decorated with @slack_callback or it somehow has the"
+ "wrong version (e.g. multiple functions with the same name were decorated with @slack_callback)")
+ buttons.append({
+ "type": "button",
+ "text": {
+ "type": "plain_text",
+ "text": text,
+ },
+ "style": "primary",
+ "action_id": f"{ACTION_TRIGGER_PLAYBOOK}_{i}",
+ "value": PlaybookCallbackRequest.create_for_func(callback, context).json(),
+ })
+
+ return [{
+ "type": "actions",
+ "elements": buttons
+ }]
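+
+# For example (hypothetical callback, registered as required by the registry check above),
+# choices={"Profile Pod": profile_pod_callback} yields a single "actions" block with one primary
+# button whose value serializes the callback reference and context.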
+
+
+SlackBlock = Dict[str, Any]
+def to_slack(block: BaseBlock) -> List[SlackBlock]:
+ if isinstance(block, MarkdownBlock):
+ return [{
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": block.text
+ }
+ }]
+ elif isinstance(block, DividerBlock):
+ return [{"type": "divider"}]
+ elif isinstance(block, FileBlock):
+ raise AssertionError("to_slack() should never be called on a FileBlock")
+ elif isinstance(block, HeaderBlock):
+ return [{
+ "type": "header",
+ "text": {
+ "type": "plain_text",
+ "text": block.text,
+ },
+ }]
+ elif isinstance(block, ListBlock) or isinstance(block, TableBlock):
+ return to_slack(block.to_markdown())
+ elif isinstance(block, CallbackBlock):
+ context = block.context.copy()
+ context['target_id'] = TARGET_ID
+ return get_action_block_for_choices(block.choices, json.dumps(context))
+ else:
+ logging.error(f"cannot convert block of type {type(block)} to slack format")
+ return [] # no reason to crash the entire report
+
+
+def upload_file_to_slack(block: FileBlock) -> str:
+ """Upload a file to slack and return a link to it"""
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(block.contents)
+ f.flush()
+ result = slack_app.client.files_upload(title=block.filename, file=f.name, filename=block.filename)
+ return result["file"]["permalink"]
+
+
+def prepare_slack_text(message: str, mentions: List[str] = [], files: List[FileBlock] = []):
+ """Adds mentions and truncates text if it is too long."""
+ max_message_length = 3000
+ truncator = "..."
+ mention_prefix = " ".join([f"<@{user_id}>" for user_id in mentions])
+ if mention_prefix != "":
+ message = f"{mention_prefix} {message}"
+ if files:
+        # Slack seems to require that files be referenced in `title` and not only in `blocks` in order to
+        # actually be shared; when the file links were added to a separate block instead, the links were
+        # present but broken and the files were not shared.
+ uploaded_files = []
+ for file_block in files:
+ permalink = upload_file_to_slack(file_block)
+ uploaded_files.append(f"* <{permalink} | {file_block.filename}>")
+
+ file_references = "\n".join(uploaded_files)
+ message = f"{message}\n{file_references}"
+
+ if len(message) <= max_message_length:
+ return message
+
+ return message[:max_message_length - len(truncator)] + truncator
+
+
+def send_to_slack(event: BaseEvent):
+ file_blocks = add_pngs_for_all_svgs([b for b in event.report_blocks if isinstance(b, FileBlock)])
+ other_blocks = [b for b in event.report_blocks if not isinstance(b, FileBlock)]
+
+ message = prepare_slack_text(event.report_title, event.slack_mentions, file_blocks)
+
+ output_blocks = []
+ if not event.report_title_hidden:
+ output_blocks.extend(to_slack(HeaderBlock(event.report_title)))
+ for block in other_blocks:
+ output_blocks.extend(to_slack(block))
+ attachment_blocks = []
+ for block in event.report_attachment_blocks:
+ attachment_blocks.extend(to_slack(block))
+
+ logging.debug(f"--sending to slack--\n"
+ f"title:{event.report_title}\n"
+ f"blocks: {output_blocks}\n"
+ f"attachment_blocks: {event.report_attachment_blocks}\n"
+ f"message:{message}")
+
+ if attachment_blocks:
+ slack_app.client.chat_postMessage(channel=event.slack_channel, text=message, blocks=output_blocks,
+ display_as_bot=True, attachments=[{"blocks": attachment_blocks}])
+ else:
+ slack_app.client.chat_postMessage(channel=event.slack_channel, text=message, blocks=output_blocks,
+ display_as_bot=True)
diff --git a/src/robusta/runner/__init__.py b/src/robusta/runner/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/runner/config_handler.py b/src/robusta/runner/config_handler.py
new file mode 100644
index 000000000..e41d1215b
--- /dev/null
+++ b/src/robusta/runner/config_handler.py
@@ -0,0 +1,88 @@
+import glob
+import importlib.util
+import logging
+import os
+import subprocess
+import sys
+
+import yaml
+
+from ..core.active_playbooks import clear_playbook_inventory, get_playbook_inventory
+from ..core.triggers import deploy_playbook_config, RunnerConfig
+from ..utils.directory_watcher import DirWatcher
+
+
+class ConfigHandler:
+
+ def __init__(self):
+ custom_playbooks_root = os.environ.get('CUSTOM_PLAYBOOKS_ROOT')
+ # we add this to sys.path so that playbooks can import from one another with:
+ # from other_playbook import shared_function
+ sys.path.append(custom_playbooks_root)
+ reload_runner_configuration(custom_playbooks_root)
+ self.watcher = DirWatcher(custom_playbooks_root, reload_runner_configuration)
+
+ def close(self):
+ self.watcher.stop_watcher()
+
+
+def reload_runner_configuration(custom_playbooks_root):
+ try:
+ logging.info(f'reloading custom playbooks from {custom_playbooks_root}')
+ if not os.path.exists(custom_playbooks_root):
+ logging.error(f'playbooks configuration not found {custom_playbooks_root}')
+ return
+ reload_scripts(custom_playbooks_root)
+ reload_deploy_config(custom_playbooks_root)
+ except Exception as e:
+ logging.exception(f"unknown error reloading playbooks. will try again when they next change. exception={e}")
+
+
+def reload_scripts(path):
+ install_requirements(os.path.join(path, 'requirements.txt'))
+
+ python_files = glob.glob(f'{path}/*.py')
+ clear_playbook_inventory()
+
+ if len(python_files) == 0:
+ logging.warning('no playbook scripts to load')
+ return
+
+ for script in python_files:
+ try:
+ logging.info(f'loading playbooks from file {script}')
+ filename = os.path.basename(script)
+ (module_name, ext) = os.path.splitext(filename)
+ spec = importlib.util.spec_from_file_location(module_name, script)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ except Exception as e:
+ logging.error(f"error loading playbooks from file {script}. exception={e}")
+
+    logging.info(f'loaded playbooks from {len(python_files)} custom playbook files')
+
+
+def reload_deploy_config(path):
+ # TODO: there is a race condition here where we can lose events if they arrive while we are reloading
+ # even if the script that should handle those events was active in both versions
+ active_playbooks_file_name = os.path.join(path, 'active_playbooks.yaml')
+ if not os.path.exists(active_playbooks_file_name):
+ logging.warning(f'no playbook definition file found at {active_playbooks_file_name}. not configuring any playbooks.')
+ return
+
+ logging.info(f'loading active playbooks config {active_playbooks_file_name}')
+ with open(active_playbooks_file_name) as file:
+ yaml_content = yaml.safe_load(file)
+ runner_config = RunnerConfig(**yaml_content)
+
+ deploy_playbook_config(runner_config)
+
+
+def install_requirements(requirements_file_name):
+ if os.path.exists(requirements_file_name):
+ logging.info(f'installing custom requirements file {requirements_file_name}')
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", requirements_file_name])
+ logging.info('custom requirements installed')
+ else:
+ logging.warning(f'requirements file {requirements_file_name} not found')
+
diff --git a/src/robusta/runner/main.py b/src/robusta/runner/main.py
new file mode 100644
index 000000000..5c8534b85
--- /dev/null
+++ b/src/robusta/runner/main.py
@@ -0,0 +1,70 @@
+import logging
+import os
+import os.path
+from inspect import getmembers
+
+import colorlog
+import manhole
+from flask import Flask, request
+
+from .. import api as robusta_api
+from ..core.triggers import CloudEvent
+from ..core.active_playbooks import run_playbooks
+from ..integrations.prometheus.incoming_handler import parse_incoming_prometheus_alerts
+from ..integrations.manual.incoming_handler import parse_incoming_manual_trigger
+from ..integrations.slack.receiver import start_slack_receiver
+from .config_handler import ConfigHandler
+
+app = Flask(__name__)
+
+LOGGING_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
+LOGGING_FORMAT = '%(log_color)s%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s'
+LOGGING_DATEFMT = '%Y-%m-%d %H:%M:%S'
+
+if os.environ.get("ENABLE_COLORED_LOGS", "false").lower() == "true":
+ print("setting up colored logging")
+ colorlog.basicConfig(format=LOGGING_FORMAT, level=LOGGING_LEVEL, datefmt=LOGGING_DATEFMT)
+else:
+ print("setting up regular logging")
+ logging.basicConfig(format=LOGGING_FORMAT, level=LOGGING_LEVEL, datefmt=LOGGING_DATEFMT)
+
+logging.getLogger().setLevel(LOGGING_LEVEL)
+log = logging.getLogger('werkzeug')
+log.setLevel(logging.ERROR)
+logging.info(f'logger initialized using {LOGGING_LEVEL} log level')
+
+
+def main():
+ config_handler = ConfigHandler()
+ if os.environ.get("ENABLE_MANHOLE", "false").lower() == "true":
+ manhole.install(locals=dict(getmembers(robusta_api)))
+ start_slack_receiver()
+ app.run(host="0.0.0.0", use_reloader=False)
+ config_handler.close()
+
+
+# TODO: in each of the below handlers we block until the playbook finishes running
+# this is probably wrong especially if a playbook runs for some time
+@app.route('/api/alerts', methods=['POST'])
+def handle_alert_event():
+ cloud_event = parse_incoming_prometheus_alerts(request)
+ run_playbooks(cloud_event)
+ return "OK"
+
+
+@app.route('/api/handle', methods=['POST'])
+def handle_cloud_event():
+ cloud_event = CloudEvent(**request.get_json())
+ run_playbooks(cloud_event)
+ return "OK"
+
+
+@app.route('/api/trigger', methods=['POST'])
+def handle_manual_trigger():
+ cloud_event = parse_incoming_manual_trigger(request)
+ run_playbooks(cloud_event)
+ return "OK"
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/robusta/runner/not_found_exception.py b/src/robusta/runner/not_found_exception.py
new file mode 100644
index 000000000..f2db42956
--- /dev/null
+++ b/src/robusta/runner/not_found_exception.py
@@ -0,0 +1,5 @@
+from requests import RequestException
+
+
+class NotFoundException(RequestException):
+ """The resource was not found, and the operation could not be completed"""
\ No newline at end of file
diff --git a/src/robusta/runner/object_updater.py b/src/robusta/runner/object_updater.py
new file mode 100644
index 000000000..a21bf683d
--- /dev/null
+++ b/src/robusta/runner/object_updater.py
@@ -0,0 +1,14 @@
+import regex
+from hikaru import HikaruBase
+
+
+def update_item_attr(obj: HikaruBase, attr_key: str, attr_value):
+    path_parts = regex.split('\\[|\\].|\\]|\\.', attr_key)
+    parent_item = obj.object_at_path(path_parts[:-1])
+    last_part = path_parts[-1]
+    if isinstance(parent_item, dict):
+        parent_item[last_part] = attr_value
+    elif isinstance(parent_item, list):
+        parent_item[int(last_part)] = attr_value
+    else:
+        setattr(parent_item, last_part, attr_value)
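+
+# Illustrative usage (hypothetical object and path): given a Deployment d,
+#   update_item_attr(d, "spec.template.spec.containers[0].image", "nginx:1.21")
+# is intended to navigate to the first container and set its image attribute in place.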
diff --git a/src/robusta/utils/__init__.py b/src/robusta/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/robusta/utils/decorators.py b/src/robusta/utils/decorators.py
new file mode 100644
index 000000000..d8115856d
--- /dev/null
+++ b/src/robusta/utils/decorators.py
@@ -0,0 +1,21 @@
+from functools import wraps
+
+# from https://stackoverflow.com/a/14412901/495995
+def doublewrap(f):
+ """
+ a decorator decorator, allowing the decorator to be used as:
+ @decorator(with, arguments, and=kwargs)
+ or
+ @decorator
+ """
+
+ @wraps(f)
+ def new_dec(*args, **kwargs):
+ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+ # actual decorated function
+ return f(args[0])
+ else:
+ # decorator arguments
+ return lambda realf: f(realf, *args, **kwargs)
+
+ return new_dec
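+
+
+# A minimal usage sketch (illustrative, not used by the runner): a decorator wrapped
+# with @doublewrap can be applied either bare or with keyword arguments.
+@doublewrap
+def _logged(func, prefix="call"):
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        print(f"{prefix}: {func.__name__}")
+        return func(*args, **kwargs)
+    return wrapper
+
+
+@_logged
+def _foo():
+    pass
+
+
+@_logged(prefix="invoking")
+def _bar():
+    pass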
diff --git a/src/robusta/utils/directory_watcher.py b/src/robusta/utils/directory_watcher.py
new file mode 100644
index 000000000..92c594e35
--- /dev/null
+++ b/src/robusta/utils/directory_watcher.py
@@ -0,0 +1,70 @@
+import logging
+import threading
+import time
+
+from watchdog.events import FileSystemEventHandler
+from watchdog.observers import Observer
+
+WAIT_SEC = 2
+
+class FsChangeHandler(FileSystemEventHandler):
+ """reload playbooks on change."""
+
+ def __init__(self, mark_change):
+ super().__init__()
+ self.mark_change = mark_change
+
+ def on_moved(self, event):
+ self.mark_change()
+
+ def on_created(self, event):
+ self.mark_change()
+
+ def on_deleted(self, event):
+ self.mark_change()
+
+ def on_modified(self, event):
+ self.mark_change()
+
+
+class DirWatcher:
+
+ def __init__(self, path_to_watch, reload_configuration):
+ self.active = True
+ self.change_detected = False
+ self.path_to_watch = path_to_watch
+ self.reload_configuration = reload_configuration
+
+ self.watch_thread = threading.Thread(target=self.watch, name="config-watcher")
+ self.watch_thread.start()
+
+        logging.info(f'watching dir {path_to_watch} for custom playbook changes')
+
+ def watch(self):
+ observer = Observer()
+ fs_change_handler = FsChangeHandler(self.mark_change)
+ observer.schedule(fs_change_handler, self.path_to_watch)
+ observer.start()
+ try:
+ while True:
+ if not self.active:
+ break
+ time.sleep(WAIT_SEC)
+
+ if self.change_detected:
+                    time.sleep(WAIT_SEC)  # after detecting a change, wait a grace period so all changes in the same burst are handled together
+ self.change_detected = False
+ try:
+ self.reload_configuration(self.path_to_watch)
+                    except Exception:  # if a reload attempt fails, keep the watch thread alive
+ logging.exception("failed to reload configuration")
+ finally:
+ observer.stop()
+ observer.join()
+
+ def stop_watcher(self):
+ self.active = False
+
+ def mark_change(self):
+ self.change_detected = True
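+
+
+# A minimal usage sketch (illustrative only): watch a playbooks directory and reload on
+# any filesystem change. The path and the reload callback below are hypothetical.
+def _example_watch_playbooks():
+    def reload_configuration(path: str):
+        logging.info(f"reloading playbooks from {path}")
+
+    watcher = DirWatcher("/etc/robusta/playbooks", reload_configuration)
+    try:
+        time.sleep(60)  # let the watcher run; changes trigger reload_configuration
+    finally:
+        watcher.stop_watcher()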
diff --git a/src/robusta/utils/function_hashes.py b/src/robusta/utils/function_hashes.py
new file mode 100644
index 000000000..cc2b9e4d0
--- /dev/null
+++ b/src/robusta/utils/function_hashes.py
@@ -0,0 +1,10 @@
+import inspect
+import hashlib
+
+def get_function_hash(func):
+    """
+    Return a hash that changes whenever the function's definition changes.
+
+    This is useful for warning users when a playbook callback is invoked with parameters that
+    were generated by a previous version of the playbook: in that case you can ask the user
+    whether to run the playbook anyway, even though the code the callback refers to has changed.
+    """
+    plaintext = str(inspect.getfullargspec(func)).encode() + func.__code__.co_code
+    return hashlib.sha256(plaintext).hexdigest()
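+
+
+# A minimal usage sketch (illustrative only): hashes differ once either the signature
+# or the bytecode of a function changes.
+def _example_compare_hashes():
+    def add(a, b):
+        return a + b
+
+    def add_v2(a, b, scale=1):
+        return (a + b) * scale
+
+    assert get_function_hash(add) != get_function_hash(add_v2)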
diff --git a/src/robusta/utils/optional_params.py b/src/robusta/utils/optional_params.py
new file mode 100644
index 000000000..dc780d946
--- /dev/null
+++ b/src/robusta/utils/optional_params.py
@@ -0,0 +1,16 @@
+import inspect
+import logging
+from typing import Callable, Dict
+
+
+# TODO: could be extracted to a small library - see e.g. https://stackoverflow.com/questions/16576553/python-only-pass-arguments-if-the-variable-exists
+def call_with_optional_params(func: Callable, available_args: Dict[str, object]):
+    expected_args = inspect.getfullargspec(func)
+    args_to_use = []  # positional arguments must stay in declaration order
+    kwargs_to_use = {}  # keyword-only arguments must be passed by name
+    for arg_name in expected_args.args + expected_args.kwonlyargs:
+        if arg_name not in available_args:
+            raise Exception(f"function requires an argument we don't recognize by name: {arg_name}")
+        if arg_name in expected_args.kwonlyargs:
+            kwargs_to_use[arg_name] = available_args[arg_name]
+        else:
+            args_to_use.append(available_args[arg_name])
+    logging.info(f"available_args={available_args} expected_args={expected_args} "
+                 f"args_to_use={args_to_use} kwargs_to_use={kwargs_to_use}")
+    return func(*args_to_use, **kwargs_to_use)
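+
+
+# A minimal usage sketch (illustrative only): the callee receives just the arguments it
+# declares, looked up by name from the available set. The names below are hypothetical.
+def _example_call_handler():
+    def handler(alert_name, namespace):
+        return f"{alert_name} fired in {namespace}"
+
+    available = {"alert_name": "HighCPU", "namespace": "default", "severity": "warning"}
+    return call_with_optional_params(handler, available)  # -> "HighCPU fired in default"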
diff --git a/src/robusta/utils/rate_limiter.py b/src/robusta/utils/rate_limiter.py
new file mode 100644
index 000000000..ae9a50fcc
--- /dev/null
+++ b/src/robusta/utils/rate_limiter.py
@@ -0,0 +1,25 @@
+import threading
+from datetime import datetime
+
+
+class RateLimiter:
+
+    limiter_lock = threading.Lock()
+    # maps "operation + id" -> timestamp (in seconds) of the last permitted run
+    limiter_map = {}
+
+ @staticmethod
+ def mark_and_test(operation: str, id: str, period_seconds: int) -> bool:
+ with RateLimiter.limiter_lock:
+ limiter_key = operation + id
+ last_run = RateLimiter.limiter_map.get(limiter_key)
+ curr_seconds = datetime.utcnow().timestamp()
+ if last_run:
+ if curr_seconds - last_run > period_seconds:
+ RateLimiter.limiter_map[limiter_key] = curr_seconds
+ return True
+ else:
+ return False
+ else:
+ RateLimiter.limiter_map[limiter_key] = curr_seconds
+ return True
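+
+
+# A minimal usage sketch (illustrative only): report a pod restart at most once per hour
+# per pod. The operation name and id below are hypothetical.
+def _example_report_restart(pod_name: str):
+    if RateLimiter.mark_and_test("report_restart", pod_name, period_seconds=3600):
+        print(f"reporting restart of {pod_name}")
+    # a second call within the hour for the same pod returns False and is skipped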
diff --git a/src/robusta/utils/service_discovery.py b/src/robusta/utils/service_discovery.py
new file mode 100644
index 000000000..03bdbc9a0
--- /dev/null
+++ b/src/robusta/utils/service_discovery.py
@@ -0,0 +1,18 @@
+import logging
+from kubernetes import client
+from kubernetes.client.models.v1_service import V1Service
+
+
+def find_service_url(label_selector):
+ """
+ Get the url of an in-cluster service with a specific label
+ """
+ # we do it this way because there is a weird issue with hikaru's ServiceList.listServiceForAllNamespaces()
+ v1 = client.CoreV1Api()
+ svc: V1Service = v1.list_service_for_all_namespaces(label_selector=label_selector).items[0]
+ name = svc.metadata.name
+ namespace = svc.metadata.namespace
+ port = svc.spec.ports[0].port
+ url = f"http://{name}.{namespace}.svc:{port}"
+ logging.debug(f"discovered service with label-selector: `{label_selector}` at url: `{url}`")
+ return url
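+
+
+# A minimal usage sketch (illustrative only): discover an in-cluster Prometheus by label.
+# The label selector is hypothetical, and the kubernetes client must already be configured
+# (for example via kubernetes.config.load_incluster_config()).
+def _example_discover_prometheus():
+    return find_service_url("app=kube-prometheus-stack-prometheus")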