namespacesToMonitor:
- loki
- mimir
- tempo
clusterName: "meta-monitoring" # TODO check if this can be derived
local:
  logs:
    enabled: false
  metrics:
    enabled: false
  traces:
    enabled: false
  minio:
    enabled: false # This should be set to true if any of the previous is enabled
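# Example (a sketch, not a default): to keep metrics in-cluster instead of
# shipping them to a hosted stack, enable the local metrics backend together
# with minio (which provides the object storage it writes to) and disable the
# matching cloud section:
#
#   local:
#     metrics:
#       enabled: true
#     minio:
#       enabled: true
#   cloud:
#     metrics:
#       enabled: false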
cloud:
  logs:
    enabled: true
    endpoint:
    username:
    password:
  metrics:
    enabled: true
    endpoint:
    username:
    password:
  traces:
    enabled: true
    endpoint:
    username:
    password:
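# Example with purely hypothetical values; the real endpoint, username and
# password come from your hosted stack. For Grafana Cloud the username is
# typically the numeric instance ID and the password an API token:
#
#   cloud:
#     logs:
#       enabled: true
#       endpoint: "https://logs-prod-000.grafana.net"
#       username: "123456"
#       password: "<api token>"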
global:
  minio:
    rootUser: "rootuser"
    rootPassword: "rootpassword"
kubeStateMetrics:
  # Scrape https://github.com/kubernetes/kube-state-metrics by default
  enabled: true
  # This endpoint is created when the helm chart from
  # https://artifacthub.io/packages/helm/prometheus-community/kube-state-metrics/
  # is used. Change this if kube-state-metrics is installed somewhere else.
  endpoint: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
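# Example (hypothetical names): the endpoint follows the usual Kubernetes
# service DNS pattern <service>.<namespace>.svc.cluster.local:<port>. If
# kube-state-metrics were instead installed as release "ksm" in namespace
# "monitoring", the chart would name the service "ksm-kube-state-metrics"
# and the setting would become:
#
#   kubeStateMetrics:
#     endpoint: ksm-kube-state-metrics.monitoring.svc.cluster.local:8080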
# The following sections configure the chart's dependencies.
# These should not be changed.
loki:
  loki:
    auth_enabled: false
    storage:
      type: "s3"
      s3:
        endpoint: "meta-minio.meta.svc:9000"
        access_key_id: rootuser
        secret_access_key: rootpassword
        insecure: true
      bucketNames:
        chunks: loki-chunks
        ruler: loki-ruler
  monitoring:
    dashboards:
      enabled: false
    rules:
      enabled: false
    serviceMonitor:
      enabled: false
    selfMonitoring:
      enabled: false
      grafanaAgent:
        installOperator: false
    lokiCanary:
      enabled: false
  test:
    enabled: false
grafana-agent:
  agent:
    configMap:
      create: false
      name: "agent-configmap"
      key: 'config.river'
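# With create: false the grafana-agent subchart renders no configuration of
# its own; it expects an existing ConfigMap named "agent-configmap" whose
# "config.river" key holds the Agent's River configuration (presumably
# templated elsewhere in this chart, e.g. the scrape job for the
# kube-state-metrics endpoint configured above).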
mimir-distributed:
  minio:
    enabled: false
  mimir:
    structuredConfig:
      alertmanager_storage:
        s3:
          bucket_name: mimir-ruler
          access_key_id: "{{ .Values.global.minio.rootUser }}"
          endpoint: "{{ .Release.Name }}-minio.{{ .Release.Namespace }}.svc:9000"
          secret_access_key: "{{ .Values.global.minio.rootPassword }}"
          insecure: true
      blocks_storage:
        backend: s3
        s3:
          bucket_name: mimir-tsdb
          access_key_id: "{{ .Values.global.minio.rootUser }}"
          endpoint: "{{ .Release.Name }}-minio.{{ .Release.Namespace }}.svc:9000"
          secret_access_key: "{{ .Values.global.minio.rootPassword }}"
          insecure: true
      ruler_storage:
        s3:
          bucket_name: mimir-ruler
          access_key_id: "{{ .Values.global.minio.rootUser }}"
          endpoint: "{{ .Release.Name }}-minio.{{ .Release.Namespace }}.svc:9000"
          secret_access_key: "{{ .Values.global.minio.rootPassword }}"
          insecure: true
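# How the templated values above render (an illustration, assuming the chart
# is installed as release "meta" in namespace "meta", which the literal Loki
# endpoint further up suggests): the endpoint becomes "meta-minio.meta.svc:9000"
# and the access keys resolve to the global.minio rootUser/rootPassword values.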
tempo-distributed:
  tempo:
    structuredConfig:
      storage:
        trace:
          backend: s3
          s3:
            bucket: tempo
            endpoint: "{{ .Release.Name }}-minio.{{ .Release.Namespace }}.svc:9000"
            access_key: "{{ .Values.global.minio.rootUser }}"
            secret_key: "{{ .Values.global.minio.rootPassword }}"
            insecure: true
  traces:
    otlp:
      http:
        enabled: true
      grpc:
        enabled: true
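# Both OTLP receivers are enabled, so in-cluster clients can push traces over
# gRPC (standard OTLP port 4317) or HTTP (4318). A sketch of an OpenTelemetry
# Collector exporter pointing at the Tempo distributor (the service name is a
# hypothetical example; it depends on the release name):
#
#   exporters:
#     otlp:
#       endpoint: meta-tempo-distributor.meta.svc:4317
#       tls:
#         insecure: true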
minio:
  rootUser: rootuser
  rootPassword: rootpassword
  buckets:
    - name: loki-chunks
      policy: none
      purge: false
    - name: loki-ruler
      policy: none
      purge: false
    - name: tempo
      policy: none
      purge: false
    - name: mimir-ruler
      policy: none
      purge: false
    - name: mimir-tsdb
      policy: none
      purge: false
  mode: standalone
  persistence:
    size: 5Gi
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
  # The mc config path is '/tmp' rather than the default '/etc' because '/etc'
  # is only writable by root, which OpenShift does not permit.
  configPathmc: "/tmp/minio/mc/"
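# The five buckets created here correspond one-to-one to the bucket names
# referenced in the loki, mimir-distributed and tempo-distributed sections
# above; renaming a bucket requires updating the matching storage config.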