apiVersion: v1
kind: ConfigMap
metadata:
  name: agent-configmap
  namespace: {{ .Release.Namespace }}
data:
  config.river: |
    discovery.kubernetes "pods" {
      role = "pod"

      namespaces {
        own_namespace = true
        names = [ {{ include "agent.namespaces" . }} ]
      }
    }

    // Copy Kubernetes meta labels onto namespace/pod labels and build the job and cluster labels.
    discovery.relabel "rename_meta_labels" {
      targets = discovery.kubernetes.pods.targets

      rule {
        source_labels = ["__meta_kubernetes_namespace"]
        target_label  = "namespace"
      }
      rule {
        source_labels = ["__meta_kubernetes_pod_name"]
        target_label  = "pod"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__meta_kubernetes_pod_label_app_kubernetes_io_component"]
        separator     = "/"
        regex         = "(.*)/(.*)/(.*)"
        replacement   = "${1}/${2}-${3}"
        target_label  = "job"
      }
      rule {
        target_label = "cluster"
        replacement  = "{{- .Values.clusterName -}}"
      }
    }

    {{- if or .Values.local.logs.enabled .Values.cloud.logs.enabled }}
    // Logs
    remote.kubernetes.secret "logs_credentials" {
      namespace = "{{- .Values.metaMonitoringNamespace -}}"
      name      = "logs"
    }

    loki.source.kubernetes "pods" {
      clustering {
        enabled = true
      }

      targets    = discovery.relabel.rename_meta_labels.output
      forward_to = [ {{ include "agent.loki_process_targets" . }} ]
    }

    {{- if or (not (empty .Values.logs.retain)) (not (empty .Values.logs.piiRegexes)) }}
    loki.process "filter" {
      forward_to = [ {{ include "agent.loki_write_targets" . }} ]

      {{- if not (empty .Values.logs.retain) }}
      stage.match {
        selector = "{cluster=\"{{- .Values.clusterName -}}\", namespace=~\"{{- .Values.lokiNamespace -}}|{{- .Values.metaMonitoringNamespace -}}\", pod=~\"loki.*\"} !~ \"{{ join "|" .Values.logs.retain }}\""
        action   = "drop"
      }
      {{- end }}

      {{- if not (empty .Values.logs.piiRegexes) }}
      {{- range .Values.logs.piiRegexes }}
      stage.replace {
        expression = "{{ .expression }}"
        source     = "{{ .source }}"
        replace    = "{{ .replace }}"
      }
      {{- end }}
      {{- end }}
    }
    {{- end }}
    {{- end }}

    {{- if or .Values.local.metrics.enabled .Values.cloud.metrics.enabled }}
    // Metrics
    remote.kubernetes.secret "metrics_credentials" {
      namespace = "{{- .Values.metaMonitoringNamespace -}}"
      name      = "metrics"
    }

    discovery.kubernetes "metric_pods" {
      role = "pod"

      namespaces {
        own_namespace = true
        names = [ {{ include "agent.namespaces" . }} ]
      }
    }

    discovery.relabel "only_http_metrics" {
      targets = discovery.kubernetes.metric_pods.targets

      rule {
        source_labels = ["__meta_kubernetes_namespace"]
        target_label  = "namespace"
      }
      rule {
        source_labels = ["__meta_kubernetes_pod_name"]
        target_label  = "pod"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__meta_kubernetes_pod_label_app_kubernetes_io_component"]
        separator     = "/"
        regex         = "(.*)/(.*)/(.*)"
        replacement   = "${1}/${2}-${3}"
        target_label  = "job"
      }
      rule {
        target_label = "cluster"
        replacement  = "{{- .Values.clusterName -}}"
      }
      // Drop the per-port targets for 9095 (gRPC) so that only HTTP metrics endpoints are scraped.
      rule {
        source_labels = ["__meta_kubernetes_pod_container_port_number"]
        action        = "drop"
        regex         = "9095"
      }
    }

    prometheus.scrape "pods" {
      clustering {
        enabled = true
      }

      targets    = discovery.relabel.only_http_metrics.output
      forward_to = [ {{ include "agent.prometheus_write_targets" . }} ]
    }

    {{- if .Values.kubeStateMetrics.enabled }}
    prometheus.scrape "kubeStateMetrics" {
      clustering {
        enabled = true
      }

      targets    = [ { "__address__" = "{{ .Values.kubeStateMetrics.endpoint }}" } ]
      forward_to = [ {{ include "agent.prometheus_write_targets" . }} ]
    }
    {{- end }}
    // cAdvisor and Kubelet metrics
    // Based on https://github.com/Chewie/loutretelecom-manifests/blob/main/manifests/addons/monitoring/config.river
    discovery.kubernetes "all_nodes" {
      role = "node"
    }

    discovery.relabel "all_nodes" {
      targets = discovery.kubernetes.all_nodes.targets

      rule {
        source_labels = ["__meta_kubernetes_node_name"]
        target_label  = "node"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace"]
        target_label  = "namespace"
      }
      rule {
        source_labels = ["__meta_kubernetes_pod_name"]
        target_label  = "pod"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__meta_kubernetes_pod_label_app_kubernetes_io_component"]
        separator     = "/"
        regex         = "(.*)/(.*)/(.*)"
        replacement   = "${1}/${2}-${3}"
        target_label  = "job"
      }
      rule {
        target_label = "cluster"
        replacement  = "{{- .Values.clusterName -}}"
      }
    }

    prometheus.scrape "cadvisor" {
      clustering {
        enabled = true
      }

      targets    = discovery.relabel.all_nodes.output
      forward_to = [ {{ include "agent.prometheus_write_targets" . }} ]

      metrics_path      = "/metrics/cadvisor"
      scheme            = "https"
      bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"

      tls_config {
        ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
      }
    }

    prometheus.scrape "kubelet" {
      clustering {
        enabled = true
      }

      targets    = discovery.relabel.all_nodes.output
      forward_to = [ {{ include "agent.prometheus_write_targets" . }} ]

      metrics_path      = "/metrics"
      scheme            = "https"
      bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"

      tls_config {
        ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
      }
    }

    prometheus.exporter.unix "promexporter" {}

    prometheus.scrape "node_exporter" {
      clustering {
        enabled = true
      }

      targets    = prometheus.exporter.unix.promexporter.targets
      forward_to = [prometheus.relabel.node_exporter.receiver]

      job_name = "node-exporter"
    }

    prometheus.relabel "node_exporter" {
      forward_to = [ {{ include "agent.prometheus_write_targets" . }} ]

      rule {
        replacement  = env("HOSTNAME")
        target_label = "nodename"
      }
      rule {
        replacement  = "node-exporter"
        target_label = "job"
      }
      rule {
        source_labels = ["__meta_kubernetes_node_name"]
        target_label  = "node"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace"]
        target_label  = "namespace"
      }
      rule {
        source_labels = ["__meta_kubernetes_pod_name"]
        target_label  = "pod"
      }
      rule {
        source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_label_app_kubernetes_io_name", "__meta_kubernetes_pod_label_app_kubernetes_io_component"]
        separator     = "/"
        regex         = "(.*)/(.*)/(.*)"
        replacement   = "${1}/${2}-${3}"
        target_label  = "job"
      }
      rule {
        target_label = "cluster"
        replacement  = "{{- .Values.clusterName -}}"
      }
    }
    {{- end }}

    {{- if or .Values.local.traces.enabled .Values.cloud.traces.enabled }}
    // Traces
    remote.kubernetes.secret "traces_credentials" {
      namespace = "{{- .Values.metaMonitoringNamespace -}}"
      name      = "traces"
    }

    // Shamelessly copied from https://github.com/grafana/intro-to-mlt/blob/main/agent/config.river
    otelcol.receiver.otlp "otlp_receiver" {
      // We don't technically need this, but it shows how to change listen address and incoming port.
      // In this case, the Agent is listening on all available bindable addresses on port 4317 (which is the
      // default OTLP gRPC port) for the OTLP protocol.
      grpc {
        endpoint = "0.0.0.0:4317"
      }

      // We define where to send the output of all ingested traces. In this case, to the OpenTelemetry batch processor
      // named 'default'.
      output {
        traces = [otelcol.processor.batch.default.input]
      }
    }

    // The OpenTelemetry batch processor collects trace spans until a batch size or timeout is met, before sending those
    // spans onto another target. This processor is labeled 'default'.
    otelcol.processor.batch "default" {
      // Wait until we've batched 16384 spans.
      send_batch_size = 16384
      // Or until 2 seconds have elapsed.
      timeout = "2s"

      // When the Agent has enough batched data, send it to the OpenTelemetry exporter named 'local'.
      output {
        traces = [ {{ include "agent.tempo_write_targets" . }} ]
      }
    }
    {{- end }}

    {{- if .Values.local.logs.enabled }}
    loki.write "local" {
      endpoint {
        url = "http://loki-gateway.{{- .Release.Namespace -}}.svc.cluster.local:80/loki/api/v1/push"
      }
    }
    {{- end }}

    {{- if .Values.local.metrics.enabled }}
    prometheus.remote_write "local" {
      endpoint {
        url = "http://{{- .Release.Name -}}-mimir-nginx.{{- .Release.Namespace -}}.svc:80/api/v1/push"
      }
    }
    {{- end }}

    {{- if or .Values.local.traces.enabled .Values.cloud.traces.enabled }}
    // The OpenTelemetry exporter exports processed trace spans to another target that is listening for OTLP format traces.
    // A unique label, 'local', is added to uniquely identify this exporter.
    otelcol.exporter.otlp "local" {
      // Define the client for exporting.
      client {
        // Send to the locally running Tempo instance, on port 4317 (OTLP gRPC).
        endpoint = "meta-tempo-distributor:4317"

        // Configure TLS settings for communicating with the endpoint.
        tls {
          // The connection is insecure.
          insecure = true
          // Do not verify TLS certificates when connecting.
          insecure_skip_verify = true
        }
      }
    }
    {{- end }}

    {{- if .Values.cloud.logs.enabled }}
    loki.write "cloud" {
      endpoint {
        url = nonsensitive(remote.kubernetes.secret.logs_credentials.data["endpoint"])

        basic_auth {
          username = nonsensitive(remote.kubernetes.secret.logs_credentials.data["username"])
          password = remote.kubernetes.secret.logs_credentials.data["password"]
        }
      }
    }
    {{- end }}

    {{- if .Values.cloud.metrics.enabled }}
    prometheus.remote_write "cloud" {
      endpoint {
        url = nonsensitive(remote.kubernetes.secret.metrics_credentials.data["endpoint"])

        basic_auth {
          username = nonsensitive(remote.kubernetes.secret.metrics_credentials.data["username"])
          password = remote.kubernetes.secret.metrics_credentials.data["password"]
        }
      }
    }
    {{- end }}

    {{- if .Values.cloud.traces.enabled }}
    otelcol.exporter.otlp "cloud" {
      client {
        endpoint = nonsensitive(remote.kubernetes.secret.traces_credentials.data["endpoint"])
        auth     = otelcol.auth.basic.creds.handler
      }
    }

    otelcol.auth.basic "creds" {
      username = nonsensitive(remote.kubernetes.secret.traces_credentials.data["username"])
      password = remote.kubernetes.secret.traces_credentials.data["password"]
    }
    {{- end }}