forked from RemoteSync/grafana-meta-monitoring-chart
Compare commits: use_secret...remove_rul (35 commits)
| SHA1 |
| --- |
| 232777d71a |
| d9a4d4a964 |
| 57adbf43e2 |
| add43ae974 |
| 52ec526718 |
| 8a5ed559a2 |
| 188cd7e56f |
| 9e4dbcd44a |
| 28daa27fca |
| 2de595baf4 |
| 95257b66d3 |
| e9b0e57ef0 |
| 03609ebb35 |
| 7e38d19814 |
| 32272298d7 |
| 3879207e05 |
| cd42da2197 |
| 56cab04af8 |
| c6d0444dfa |
| b99140d3f4 |
| 749e271455 |
| d938dbbfe5 |
| e9125d1a9c |
| 076685ef06 |
| b0451d626e |
| 90e949e89a |
| 06e176e720 |
| d4c886ba9d |
| 643e73f5f1 |
| 7e65f3d9c9 |
| de91b4dac7 |
| 9f6e52d7a1 |
| 26e0ad0b85 |
| 025bb5b0c3 |
| 0b31eae425 |
.github/configs/updatecli.d/grafana.yaml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
name: Bump grafana version specified in the values.yaml
sources:
  latestGrafanaRelease:
    name: Get latest grafana release on Github
    kind: githubrelease
    spec:
      owner: grafana
      repository: grafana
      token: '{{ requiredEnv "UPDATECLI_GITHUB_TOKEN" }}'
      versionfilter:
        kind: latest
    transformers:
      - trimprefix: "v"
conditions:
  grafanaImagePublished:
    name: Ensure the latest Grafana is published on DockerHub
    kind: dockerimage
    source-id: latestGrafanaRelease
    spec:
      image: "grafana/grafana"
targets:
  grafana:
    name: Update Grafana version in values.yaml
    kind: helmchart
    spec:
      file: values.yaml
      key: $.grafana.version
      name: charts/meta-monitoring
      versionincrement: none
    sourceid: latestGrafanaRelease
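As a sanity check, a manifest like this one can be exercised locally before the workflow runs it. The sketch below is an assumption about local usage, not part of the change: it presumes the updatecli CLI is installed and that a GitHub token is exported as UPDATECLI_GITHUB_TOKEN.

```
# Hypothetical local dry run: `diff` only reports what would change,
# while `apply` would bump $.grafana.version in charts/meta-monitoring/values.yaml.
export UPDATECLI_GITHUB_TOKEN="<github token>"
updatecli diff --config .github/configs/updatecli.d/grafana.yaml
updatecli apply --config .github/configs/updatecli.d/grafana.yaml
```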
.github/workflows/check-for-dependency-updates.yaml (vendored, 150 changed lines)
@@ -16,8 +16,8 @@ env:
UPDATECLI_GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

jobs:
updateLoki:
name: Update the Loki subchart
updateVersions:
name: Update the subcharts
runs-on: "ubuntu-latest"
steps:
- name: Checkout
@@ -26,7 +26,7 @@ jobs:
- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
- name: Run Updatecli for Loki
id: update-loki
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/loki.yaml
@@ -34,31 +34,7 @@
echo "changed=true" >> "${GITHUB_OUTPUT}"
fi

- name: Create pull request
if: steps.update-loki.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Loki subchart"
body: "Updates the Loki subchart"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update loki
labels: dependencies
branch: chore/update-loki
delete-branch: true

updateGrafanaAlloy:
name: Update the Grafana Alloy subchart
runs-on: "ubuntu-latest"
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
- name: Run Updatecli for Alloy
id: update-grafana-alloy
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/alloy.yaml
@@ -66,31 +42,7 @@
echo "changed=true" >> "${GITHUB_OUTPUT}"
fi

- name: Create pull request
if: steps.update-grafana-alloy.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Grafana Alloy subchart"
body: "Updates the Grafana Alloy subchart"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update Grafana Alloy
labels: dependencies
branch: chore/update-grafana-alloy
delete-branch: true

updateMimirDistributed:
name: Update the Mimir Distributed subchart
runs-on: "ubuntu-latest"
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
- name: Run Updatecli for Mimir
id: update-mimir-distributed
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/mimir-distributed.yaml
@@ -98,31 +50,7 @@
echo "changed=true" >> "${GITHUB_OUTPUT}"
fi

- name: Create pull request
if: steps.update-mimir-distributed.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Mimir Distributed subchart"
body: "Updates the Mimir Distributed subchart"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update Mimir Distributed
labels: dependencies
branch: chore/update-mimir-distributed
delete-branch: true

updateTempoDistributed:
name: Update the Tempo Distributed subchart
runs-on: "ubuntu-latest"
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
- name: Run Updatecli for Tempo
id: update-tempo-distributed
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/tempo-distributed.yaml
@@ -130,31 +58,7 @@
echo "changed=true" >> "${GITHUB_OUTPUT}"
fi

- name: Create pull request
if: steps.update-tempo-distributed.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Tempo Distributed subchart"
body: "Updates the tempo Distributed subchart"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update Tempo Distributed
labels: dependencies
branch: chore/update-tempo-distributed
delete-branch: true

updateMinio:
name: Update the Minio subchart
runs-on: "ubuntu-latest"
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
- name: Run Updatecli for Minio
id: update-minio
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/minio.yaml
@@ -163,15 +67,47 @@
fi

- name: Create pull request
if: steps.update-minio.outputs.changed == 'true'
if: steps.update-loki.outputs.changed == 'true' || steps.update-grafana-alloy.outputs.changed == 'true' || steps.update-mimir-distributed.outputs.changed == 'true' || steps.update-tempo-distributed.outputs.changed == 'true' || steps.update-minio.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Minio subchart"
body: "Updates the Minio subchart"
title: "[dependency] Update the subcharts"
body: "Updates the subcharts"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update minio
commit-message: Update dependencies
labels: dependencies
branch: chore/update-dependencies
delete-branch: true

updateGrafana:
name: Update the Grafana version
runs-on: "ubuntu-latest"
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Install Updatecli
uses: updatecli/updatecli-action@v2

- name: Run Updatecli
id: update-grafana
run: |
updatecli apply --config ${UPDATECLI_CONFIG_DIR}/grafana.yaml
if ! git diff --exit-code > /dev/null; then
echo "changed=true" >> "${GITHUB_OUTPUT}"
fi

- name: Create pull request
if: steps.update-grafana.outputs.changed == 'true'
uses: peter-evans/create-pull-request@v5
with:
title: "[dependency] Update the Grafana version"
body: "Updates the Grafana version"
base: main
author: "${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>"
committer: "GitHub <noreply@github.com>"
commit-message: Update Grafana version
labels: dependencies
branch: chore/update-minio
delete-branch: true
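For reference, every "Run Updatecli for ..." step above relies on the same change-detection pattern, and the single create-pull-request step fires when any of the step outputs is 'true'. A rough local equivalent of one detection step, assuming updatecli and git are installed and UPDATECLI_CONFIG_DIR points at .github/configs/updatecli.d, might look like:

```
# Hypothetical local replay of one detection step
# (in the workflow, "changed=true" is appended to "${GITHUB_OUTPUT}" instead of echoed).
export UPDATECLI_CONFIG_DIR=.github/configs/updatecli.d
updatecli apply --config "${UPDATECLI_CONFIG_DIR}/loki.yaml"
if ! git diff --exit-code > /dev/null; then
  echo "changed=true"
fi
```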
charts/meta-monitoring/Chart.lock
@@ -1,7 +1,7 @@
dependencies:
- name: loki
repository: https://grafana.github.io/helm-charts
version: 6.4.2
version: 6.5.0
- name: alloy
repository: https://grafana.github.io/helm-charts
version: 0.1.1
@@ -10,9 +10,9 @@ dependencies:
version: 5.3.0
- name: tempo-distributed
repository: https://grafana.github.io/helm-charts
version: 1.9.5
version: 1.9.9
- name: minio
repository: https://charts.min.io
version: 5.2.0
digest: sha256:1b5cc2c89ce11b6f0f1c02d7459ac9202577ac4e56bae9cc7e54e253a27df265
generated: "2024-05-03T07:02:27.762464734Z"
digest: sha256:5328702b5f6b0487aba8f7bc77d6abfcd5e094569e9205cd725971e3e31255dd
generated: "2024-05-08T07:03:21.797461955Z"
charts/meta-monitoring/Chart.yaml
@@ -22,7 +22,7 @@ appVersion: "0.0.1"
dependencies:
- name: loki
repository: https://grafana.github.io/helm-charts
version: 6.4.2
version: 6.5.0
condition: local.logs.enabled
- name: alloy
repository: https://grafana.github.io/helm-charts
@@ -33,7 +33,7 @@ dependencies:
condition: local.metrics.enabled
- name: tempo-distributed
repository: https://grafana.github.io/helm-charts
version: 1.9.5
version: 1.9.9
condition: local.traces.enabled
- name: minio
repository: https://charts.min.io
BIN charts/meta-monitoring/charts/loki-6.5.0.tgz (new file, binary not shown)
BIN charts/meta-monitoring/charts/tempo-distributed-1.9.9.tgz (new file, binary not shown)
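The refreshed Chart.lock digest and the new packaged subcharts above are the kind of output `helm dependency update` produces. A sketch of regenerating them locally (the repo add lines may be unnecessary on newer Helm versions, and the chart path is taken from the repository layout):

```
# Re-vendors the subcharts declared in Chart.yaml and rewrites Chart.lock
helm repo add grafana https://grafana.github.io/helm-charts
helm repo add minio https://charts.min.io
helm dependency update charts/meta-monitoring
```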
@@ -1,555 +0,0 @@
|
||||
groups:
|
||||
- name: "mimir_api_1"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, job) / sum(rate(cortex_request_duration_seconds_count[5m]))
|
||||
by (cluster, job)"
|
||||
record: "cluster_job:cortex_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_bucket[5m])) by (le, cluster, job)"
|
||||
record: "cluster_job:cortex_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_api_2"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, route))"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, route))"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, job, route)
|
||||
/ sum(rate(cortex_request_duration_seconds_count[5m])) by (cluster, job, route)"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_bucket[5m])) by (le, cluster, job,
|
||||
route)"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, job, route)"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_count[5m])) by (cluster, job, route)"
|
||||
record: "cluster_job_route:cortex_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_api_3"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, namespace, job, route))"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, namespace, job, route))"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, namespace,
|
||||
job, route) / sum(rate(cortex_request_duration_seconds_count[5m])) by (cluster,
|
||||
namespace, job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_bucket[5m])) by (le, cluster, namespace,
|
||||
job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_sum[5m])) by (cluster, namespace,
|
||||
job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_request_duration_seconds_count[5m])) by (cluster, namespace,
|
||||
job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_querier_api"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job) / sum(rate(cortex_querier_request_duration_seconds_count[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_count[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_querier_request_duration_seconds_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, route))"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, route))"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job, route) / sum(rate(cortex_querier_request_duration_seconds_count[5m])) by
|
||||
(cluster, job, route)"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job, route)"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job, route)"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_count[5m])) by (cluster,
|
||||
job, route)"
|
||||
record: "cluster_job_route:cortex_querier_request_duration_seconds_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, namespace, job, route))"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_querier_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, namespace, job, route))"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
namespace, job, route) / sum(rate(cortex_querier_request_duration_seconds_count[5m]))
|
||||
by (cluster, namespace, job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
namespace, job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_sum[5m])) by (cluster,
|
||||
namespace, job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_querier_request_duration_seconds_count[5m])) by (cluster,
|
||||
namespace, job, route)"
|
||||
record: "cluster_namespace_job_route:cortex_querier_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_cache"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_memcache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, method))"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_memcache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, method))"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_memcache_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job, method) / sum(rate(cortex_memcache_request_duration_seconds_count[5m]))
|
||||
by (cluster, job, method)"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_memcache_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_memcache_request_duration_seconds_sum[5m])) by (cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_memcache_request_duration_seconds_count[5m])) by (cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_memcache_request_duration_seconds_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_cache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_cache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_sum[5m])) by (cluster, job)
|
||||
/ sum(rate(cortex_cache_request_duration_seconds_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_count[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_cache_request_duration_seconds_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_cache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, method))"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_cache_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job, method))"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_sum[5m])) by (cluster, job,
|
||||
method) / sum(rate(cortex_cache_request_duration_seconds_count[5m])) by (cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_sum[5m])) by (cluster, job,
|
||||
method)"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_cache_request_duration_seconds_count[5m])) by (cluster,
|
||||
job, method)"
|
||||
record: "cluster_job_method:cortex_cache_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_storage"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_kv_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_kv_request_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_kv_request_duration_seconds_sum[5m])) by (cluster, job)
|
||||
/ sum(rate(cortex_kv_request_duration_seconds_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_kv_request_duration_seconds_bucket[5m])) by (le, cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_kv_request_duration_seconds_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_kv_request_duration_seconds_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_kv_request_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_queries"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_query_frontend_retries_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_query_frontend_retries:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_query_frontend_retries_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_query_frontend_retries:50quantile"
|
||||
- expr: "sum(rate(cortex_query_frontend_retries_sum[5m])) by (cluster, job) / sum(rate(cortex_query_frontend_retries_count[5m]))
|
||||
by (cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_retries:avg"
|
||||
- expr: "sum(rate(cortex_query_frontend_retries_bucket[5m])) by (le, cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_retries_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_query_frontend_retries_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_retries_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_query_frontend_retries_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_retries_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_query_frontend_queue_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_query_frontend_queue_duration_seconds_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds:50quantile"
|
||||
- expr: "sum(rate(cortex_query_frontend_queue_duration_seconds_sum[5m])) by (cluster,
|
||||
job) / sum(rate(cortex_query_frontend_queue_duration_seconds_count[5m])) by
|
||||
(cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds:avg"
|
||||
- expr: "sum(rate(cortex_query_frontend_queue_duration_seconds_bucket[5m])) by (le,
|
||||
cluster, job)"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_query_frontend_queue_duration_seconds_sum[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_query_frontend_queue_duration_seconds_count[5m])) by (cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_query_frontend_queue_duration_seconds_count:sum_rate"
|
||||
- name: "mimir_ingester_queries"
|
||||
rules:
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_ingester_queried_series_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_series:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_ingester_queried_series_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_series:50quantile"
|
||||
- expr: "sum(rate(cortex_ingester_queried_series_sum[5m])) by (cluster, job) / sum(rate(cortex_ingester_queried_series_count[5m]))
|
||||
by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_series:avg"
|
||||
- expr: "sum(rate(cortex_ingester_queried_series_bucket[5m])) by (le, cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_series_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_series_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_series_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_series_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_series_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_ingester_queried_samples_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_samples:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_ingester_queried_samples_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_samples:50quantile"
|
||||
- expr: "sum(rate(cortex_ingester_queried_samples_sum[5m])) by (cluster, job) / sum(rate(cortex_ingester_queried_samples_count[5m]))
|
||||
by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_samples:avg"
|
||||
- expr: "sum(rate(cortex_ingester_queried_samples_bucket[5m])) by (le, cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_samples_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_samples_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_samples_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_samples_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_samples_count:sum_rate"
|
||||
- expr: "histogram_quantile(0.99, sum(rate(cortex_ingester_queried_exemplars_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars:99quantile"
|
||||
- expr: "histogram_quantile(0.50, sum(rate(cortex_ingester_queried_exemplars_bucket[5m]))
|
||||
by (le, cluster, job))"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars:50quantile"
|
||||
- expr: "sum(rate(cortex_ingester_queried_exemplars_sum[5m])) by (cluster, job) /
|
||||
sum(rate(cortex_ingester_queried_exemplars_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars:avg"
|
||||
- expr: "sum(rate(cortex_ingester_queried_exemplars_bucket[5m])) by (le, cluster,
|
||||
job)"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars_bucket:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_exemplars_sum[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars_sum:sum_rate"
|
||||
- expr: "sum(rate(cortex_ingester_queried_exemplars_count[5m])) by (cluster, job)"
|
||||
record: "cluster_job:cortex_ingester_queried_exemplars_count:sum_rate"
|
||||
- name: "mimir_received_samples"
|
||||
rules:
|
||||
- expr: "sum by (cluster, namespace, job) (rate(cortex_distributor_received_samples_total[5m]))"
|
||||
record: "cluster_namespace_job:cortex_distributor_received_samples:rate5m"
|
||||
- name: "mimir_exemplars_in"
|
||||
rules:
|
||||
- expr: "sum by (cluster, namespace, job) (rate(cortex_distributor_exemplars_in_total[5m]))"
|
||||
record: "cluster_namespace_job:cortex_distributor_exemplars_in:rate5m"
|
||||
- name: "mimir_received_exemplars"
|
||||
rules:
|
||||
- expr: "sum by (cluster, namespace, job) (rate(cortex_distributor_received_exemplars_total[5m]))"
|
||||
record: "cluster_namespace_job:cortex_distributor_received_exemplars:rate5m"
|
||||
- name: "mimir_exemplars_ingested"
|
||||
rules:
|
||||
- expr: "sum by (cluster, namespace, job) (rate(cortex_ingester_ingested_exemplars_total[5m]))"
|
||||
record: "cluster_namespace_job:cortex_ingester_ingested_exemplars:rate5m"
|
||||
- name: "mimir_exemplars_appended"
|
||||
rules:
|
||||
- expr: "sum by (cluster, namespace, job) (rate(cortex_ingester_tsdb_exemplar_exemplars_appended_total[5m]))"
|
||||
record: "cluster_namespace_job:cortex_ingester_tsdb_exemplar_exemplars_appended:rate5m"
|
||||
- name: "mimir_scaling_rules"
|
||||
rules:
|
||||
- expr: |
|
||||
# Convenience rule to get the number of replicas for both a deployment and a statefulset.
|
||||
# Multi-zone deployments are grouped together removing the "zone-X" suffix.
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
kube_deployment_spec_replicas,
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
or
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(kube_statefulset_replicas, "deployment", "$1", "statefulset", "(.*?)(?:-zone-[a-z])?")
|
||||
)
|
||||
record: "cluster_namespace_deployment:actual_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
quantile_over_time(0.99,
|
||||
sum by (cluster, namespace) (
|
||||
cluster_namespace_job:cortex_distributor_received_samples:rate5m
|
||||
)[24h:]
|
||||
)
|
||||
/ 240000
|
||||
)
|
||||
labels:
|
||||
deployment: "distributor"
|
||||
reason: "sample_rate"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
sum by (cluster, namespace) (cortex_limits_overrides{limit_name="ingestion_rate"})
|
||||
* 0.59999999999999998 / 240000
|
||||
)
|
||||
labels:
|
||||
deployment: "distributor"
|
||||
reason: "sample_rate_limits"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
quantile_over_time(0.99,
|
||||
sum by (cluster, namespace) (
|
||||
cluster_namespace_job:cortex_distributor_received_samples:rate5m
|
||||
)[24h:]
|
||||
)
|
||||
* 3 / 80000
|
||||
)
|
||||
labels:
|
||||
deployment: "ingester"
|
||||
reason: "sample_rate"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
quantile_over_time(0.99,
|
||||
sum by(cluster, namespace) (
|
||||
cortex_ingester_memory_series
|
||||
)[24h:]
|
||||
)
|
||||
/ 1500000
|
||||
)
|
||||
labels:
|
||||
deployment: "ingester"
|
||||
reason: "active_series"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
sum by (cluster, namespace) (cortex_limits_overrides{limit_name="max_global_series_per_user"})
|
||||
* 3 * 0.59999999999999998 / 1500000
|
||||
)
|
||||
labels:
|
||||
deployment: "ingester"
|
||||
reason: "active_series_limits"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
sum by (cluster, namespace) (cortex_limits_overrides{limit_name="ingestion_rate"})
|
||||
* 0.59999999999999998 / 80000
|
||||
)
|
||||
labels:
|
||||
deployment: "ingester"
|
||||
reason: "sample_rate_limits"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
ceil(
|
||||
(sum by (cluster, namespace) (
|
||||
cortex_ingester_tsdb_storage_blocks_bytes{job=~".+/ingester.*"}
|
||||
) / 4)
|
||||
/
|
||||
avg by (cluster, namespace) (
|
||||
memcached_limit_bytes{job=~".+/memcached"}
|
||||
)
|
||||
)
|
||||
labels:
|
||||
deployment: "memcached"
|
||||
reason: "active_series"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
sum by (cluster, namespace, pod)(rate(container_cpu_usage_seconds_total[5m])),
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
record: "cluster_namespace_deployment:container_cpu_usage_seconds_total:sum_rate"
|
||||
- expr: |
|
||||
# Convenience rule to get the CPU request for both a deployment and a statefulset.
|
||||
# Multi-zone deployments are grouped together removing the "zone-X" suffix.
|
||||
# This recording rule is made compatible with the breaking changes introduced in kube-state-metrics v2
|
||||
# that remove resource metrics, ref:
|
||||
# - https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
|
||||
# - https://github.com/kubernetes/kube-state-metrics/pull/1004
|
||||
#
|
||||
# This is the old expression, compatible with kube-state-metrics < v2.0.0,
|
||||
# where kube_pod_container_resource_requests_cpu_cores was removed:
|
||||
(
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
kube_pod_container_resource_requests_cpu_cores,
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
)
|
||||
or
|
||||
# This expression is compatible with kube-state-metrics >= v1.4.0,
|
||||
# where kube_pod_container_resource_requests was introduced.
|
||||
(
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
kube_pod_container_resource_requests{resource="cpu"},
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
)
|
||||
record: "cluster_namespace_deployment:kube_pod_container_resource_requests_cpu_cores:sum"
|
||||
- expr: |
|
||||
# Jobs should be sized to their CPU usage.
|
||||
# We do this by comparing 99th percentile usage over the last 24hrs to
|
||||
# their current provisioned #replicas and resource requests.
|
||||
ceil(
|
||||
cluster_namespace_deployment:actual_replicas:count
|
||||
*
|
||||
quantile_over_time(0.99, cluster_namespace_deployment:container_cpu_usage_seconds_total:sum_rate[24h])
|
||||
/
|
||||
cluster_namespace_deployment:kube_pod_container_resource_requests_cpu_cores:sum
|
||||
)
|
||||
labels:
|
||||
reason: "cpu_usage"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- expr: |
|
||||
# Convenience rule to get the Memory utilization for both a deployment and a statefulset.
|
||||
# Multi-zone deployments are grouped together removing the "zone-X" suffix.
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
container_memory_usage_bytes{image!=""},
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
record: "cluster_namespace_deployment:container_memory_usage_bytes:sum"
|
||||
- expr: |
|
||||
# Convenience rule to get the Memory request for both a deployment and a statefulset.
|
||||
# Multi-zone deployments are grouped together removing the "zone-X" suffix.
|
||||
# This recording rule is made compatible with the breaking changes introduced in kube-state-metrics v2
|
||||
# that remove resource metrics, ref:
|
||||
# - https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md#v200-alpha--2020-09-16
|
||||
# - https://github.com/kubernetes/kube-state-metrics/pull/1004
|
||||
#
|
||||
# This is the old expression, compatible with kube-state-metrics < v2.0.0,
|
||||
# where kube_pod_container_resource_requests_memory_bytes was removed:
|
||||
(
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
kube_pod_container_resource_requests_memory_bytes,
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
)
|
||||
or
|
||||
# This expression is compatible with kube-state-metrics >= v1.4.0,
|
||||
# where kube_pod_container_resource_requests was introduced.
|
||||
(
|
||||
sum by (cluster, namespace, deployment) (
|
||||
label_replace(
|
||||
label_replace(
|
||||
kube_pod_container_resource_requests{resource="memory"},
|
||||
"deployment", "$1", "pod", "(.*)-(?:([0-9]+)|([a-z0-9]+)-([a-z0-9]+))"
|
||||
),
|
||||
# The question mark in "(.*?)" is used to make it non-greedy, otherwise it
|
||||
# always matches everything and the (optional) zone is not removed.
|
||||
"deployment", "$1", "deployment", "(.*?)(?:-zone-[a-z])?"
|
||||
)
|
||||
)
|
||||
)
|
||||
record: "cluster_namespace_deployment:kube_pod_container_resource_requests_memory_bytes:sum"
|
||||
- expr: |
|
||||
# Jobs should be sized to their Memory usage.
|
||||
# We do this by comparing 99th percentile usage over the last 24hrs to
|
||||
# their current provisioned #replicas and resource requests.
|
||||
ceil(
|
||||
cluster_namespace_deployment:actual_replicas:count
|
||||
*
|
||||
quantile_over_time(0.99, cluster_namespace_deployment:container_memory_usage_bytes:sum[24h])
|
||||
/
|
||||
cluster_namespace_deployment:kube_pod_container_resource_requests_memory_bytes:sum
|
||||
)
|
||||
labels:
|
||||
reason: "memory_usage"
|
||||
record: "cluster_namespace_deployment_reason:required_replicas:count"
|
||||
- name: "mimir_alertmanager_rules"
|
||||
rules:
|
||||
- expr: "sum by (cluster, job, pod) (cortex_alertmanager_alerts)"
|
||||
record: "cluster_job_pod:cortex_alertmanager_alerts:sum"
|
||||
- expr: "sum by (cluster, job, pod) (cortex_alertmanager_silences)"
|
||||
record: "cluster_job_pod:cortex_alertmanager_silences:sum"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_alerts_received_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_alerts_received_total:rate5m"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_alerts_invalid_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_alerts_invalid_total:rate5m"
|
||||
- expr: "sum by (cluster, job, integration) (rate(cortex_alertmanager_notifications_total[5m]))"
|
||||
record: "cluster_job_integration:cortex_alertmanager_notifications_total:rate5m"
|
||||
- expr: "sum by (cluster, job, integration) (rate(cortex_alertmanager_notifications_failed_total[5m]))"
|
||||
record: "cluster_job_integration:cortex_alertmanager_notifications_failed_total:rate5m"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_state_replication_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_state_replication_total:rate5m"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_state_replication_failed_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_state_replication_failed_total:rate5m"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_partial_state_merges_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_partial_state_merges_total:rate5m"
|
||||
- expr: "sum by (cluster, job) (rate(cortex_alertmanager_partial_state_merges_failed_total[5m]))"
|
||||
record: "cluster_job:cortex_alertmanager_partial_state_merges_failed_total:rate5m"
|
||||
- name: "mimir_ingester_rules"
|
||||
rules:
|
||||
- expr: "sum by(cluster, namespace, pod) (rate(cortex_ingester_ingested_samples_total[5m]))"
|
||||
record: "cluster_namespace_pod:cortex_ingester_ingested_samples_total:rate1m"
|
charts/meta-monitoring/src/rules/tempo-rules.yaml (deleted file)
@@ -1,15 +0,0 @@
groups:
  - name: "tempo_rules"
    rules:
      - expr: "histogram_quantile(0.99, sum(rate(tempo_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route))"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds:99quantile"
      - expr: "histogram_quantile(0.50, sum(rate(tempo_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route))"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds:50quantile"
      - expr: "sum(rate(tempo_request_duration_seconds_sum[5m])) by (cluster, namespace, job, route) / sum(rate(tempo_request_duration_seconds_count[5m])) by (cluster, namespace, job, route)"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds:avg"
      - expr: "sum(rate(tempo_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route)"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds_bucket:sum_rate"
      - expr: "sum(rate(tempo_request_duration_seconds_sum[5m])) by (cluster, namespace, job, route)"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds_sum:sum_rate"
      - expr: "sum(rate(tempo_request_duration_seconds_count[5m])) by (cluster, namespace, job, route)"
        record: "cluster_namespace_job_route:tempo_request_duration_seconds_count:sum_rate"
@@ -48,7 +48,7 @@
{{- define "agent.tempo_write_targets" -}}
{{- $list := list }}
{{- if .Values.local.traces.enabled }}
{{- $list = append $list ("otelcol.exporter.otlp.local.input") }}
{{- $list = append $list ("otelcol.exporter.otlphttp.local.input") }}
{{- end }}
{{- if .Values.cloud.traces.enabled }}
{{- $list = append $list ("otelcol.exporter.otlphttp.cloud.input") }}
@@ -294,9 +294,7 @@ data:
// We don't technically need this, but it shows how to change listen address and incoming port.
// In this case, the Agent is listening on all available bindable addresses on port 4317 (which is the
// default OTLP gRPC port) for the OTLP protocol.
grpc {
endpoint = "0.0.0.0:4317"
}
grpc {}

// We define where to send the output of all ingested traces. In this case, to the OpenTelemetry batch processor
// named 'default'.
@@ -345,6 +343,14 @@ data:
}
{{- end }}

{{- if .Values.local.traces.enabled }}
otelcol.exporter.otlphttp "local" {
client {
endpoint = "http://{{- .Release.Name -}}-tempo-distributor.svc:4318"
}
}
{{- end }}

{{- if .Values.cloud.logs.enabled }}
loki.write "cloud" {
endpoint {
@@ -1,4 +1,4 @@
{{- if and .Values.local.grafana.enabled (or .Values.dashboards.logs.enabled .Values.dashboards.metrics.enabled .Values.dashboards.traces.enabled) }}
{{- if and .Values.local.grafana.enabled .Values.dashboards.logs.enabled }}
---
apiVersion: v1
kind: ConfigMap

@@ -1,4 +1,4 @@
{{- if and .Values.local.grafana.enabled (or .Values.dashboards.logs.enabled .Values.dashboards.metrics.enabled .Values.dashboards.traces.enabled) }}
{{- if and .Values.local.grafana.enabled .Values.dashboards.logs.enabled }}
---
apiVersion: v1
kind: ConfigMap
@@ -1,16 +1,4 @@
{{- if .Values.local.grafana.enabled }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -32,7 +20,7 @@ spec:
- 0
containers:
- name: grafana
image: grafana/grafana:10.0.0
image: grafana/grafana:{{- .Values.grafana.version }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
@@ -65,7 +53,7 @@ spec:
name: grafana-pv
- mountPath: /etc/grafana/provisioning/datasources
name: datasources-provisioning
{{- if or (or .Values.dashboards.logs.enabled .Values.dashboards.metrics.enabled) .Values.dashboards.traces.enabled }}
{{- if .Values.dashboards.logs.enabled }}
- mountPath: /etc/grafana/provisioning/dashboards
name: dashboards-provisioning
{{- end }}
@@ -98,19 +86,4 @@ spec:
- name: agent-dashboards-1
configMap:
name: agent-dashboards-1

---
apiVersion: v1
kind: Service
metadata:
name: grafana
spec:
ports:
- port: 3000
protocol: TCP
targetPort: http-grafana
selector:
app: grafana
sessionAffinity: None
type: ClusterIP # Make this configurable
{{- end }}
charts/meta-monitoring/templates/grafana/grafana-pvc.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.local.grafana.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
{{- end }}
@@ -0,0 +1,15 @@
{{- if .Values.local.grafana.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: grafana
spec:
  ports:
    - port: 3000
      protocol: TCP
      targetPort: http-grafana
  selector:
    app: grafana
  sessionAffinity: None
  type: ClusterIP # Make this configurable
{{- end }}
@@ -1,5 +1,5 @@
{{- if .Values.local.grafana.enabled }}
{{- if and .Values.local.grafana.enabled (or .Values.dashboards.logs.enabled .Values.dashboards.metrics.enabled .Values.dashboards.traces.enabled) }}
{{- if and .Values.local.grafana.enabled .Values.dashboards.logs.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -51,7 +51,7 @@ spec:
protocol: TCP
envFrom:
- secretRef:
name: mmc-minio
name: minio
readinessProbe:
failureThreshold: 3
httpGet:
@@ -1,5 +1,5 @@
{{- if .Values.local.metrics.enabled }}
{{- if and .Values.local.grafana.enabled (or .Values.dashboards.logs.enabled .Values.dashboards.metrics.enabled .Values.dashboards.traces.enabled) }}
{{- if and .Values.local.grafana.enabled .Values.dashboards.logs.enabled }}
---
apiVersion: v1
kind: ConfigMap
@@ -10,11 +10,5 @@ data:
{{- if .Values.dashboards.logs.enabled }}
{{ ($.Files.Glob "src/rules/loki-rules.yaml").AsConfig | indent 2 }}
{{- end }}
{{- if .Values.dashboards.metrics.enabled }}
{{ ($.Files.Glob "src/rules/mimir-rules.yaml").AsConfig | indent 2 }}
{{- end }}
{{- if .Values.dashboards.traces.enabled }}
{{ ($.Files.Glob "src/rules/tempo-rules.yaml").AsConfig | indent 2 }}
{{- end }}
{{- end }}
{{- end }}
charts/meta-monitoring/values.yaml
@@ -3,7 +3,6 @@ namespacesToMonitor:
- loki
# The name of the cluster where this will be installed
clusterLabelValue: "meta-monitoring"

# Set to true to write logs, metrics or traces to Grafana Cloud
# The secrets have to be created first
cloud:
@@ -16,7 +15,6 @@ cloud:
traces:
enabled: true
secret: "traces"

# Set to true for a local version of logs, metrics or traces
local:
grafana:
@@ -28,9 +26,9 @@ local:
traces:
enabled: false
minio:
enabled: false # This should be set to true if any of the previous is enabled

enabled: false # This should be set to true if any of the previous is enabled
grafana:
version: 10.4.2
# Gateway ingress configuration
ingress:
# -- Specifies whether an ingress for the gateway should be created
@@ -38,9 +36,9 @@ grafana:
# -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18
ingressClassName: ""
# -- Annotations for the gateway ingress
annotations: { }
annotations: {}
# -- Labels for the gateway ingress
labels: { }
labels: {}
# -- Hosts configuration for the gateway ingress, passed through the `tpl` function to allow templating
hosts:
- host: monitoring.example.com
@@ -53,18 +51,14 @@ grafana:
# - secretName: grafana-tls
# hosts:
# - monitoring.example.com


logs:
# Adding regexes here will add a stage.replace block for logs. For more information see
# https://grafana.com/docs/agent/latest/flow/reference/components/loki.process/#stagereplace-block
piiRegexes:
# This example replaces the word after password with *****
# - expression: "password (\\\\S+)"
# source: "" # Empty uses the log message
# replace: "*****""

# The lines matching these will be kept in Loki
piiRegexes: null # This example replaces the word after password with *****
# - expression: "password (\\\\S+)"
# source: "" # Empty uses the log message
# replace: "*****""
# The lines matching these will be kept in Loki
retain:
# This shows the queries
- caller=metrics.go
@@ -78,7 +72,6 @@ logs:
# - caller=push.go
# Additional log lines to retain
extraLogs: []

metrics:
# The list of metrics to retain for logging dashboards
retain:
@@ -179,21 +172,10 @@ metrics:
- promtail_custom_bad_words_total
# Additional metrics to retain
extraMetrics: []

# Set enabled = true to add the default logs/metrics/traces dashboards to the local Grafana
# Set enabled = true to add the default logs dashboards to the local Grafana
dashboards:
logs:
enabled: true
metrics:
enabled: true
traces:
enabled: true

global:
minio:
rootUser: "rootuser"
rootPassword: "rootpassword"

kubeStateMetrics:
# Scrape https://github.com/kubernetes/kube-state-metrics by default
enabled: true
@@ -201,10 +183,8 @@ kubeStateMetrics:
# https://artifacthub.io/packages/helm/prometheus-community/kube-state-metrics/
# is used. Change this if kube-state-metrics is installed somewhere else.
endpoint: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080

# The following are configuration for the dependencies.
# These should usually not be changed.

loki:
loki:
auth_enabled: false
@@ -259,20 +239,19 @@ loki:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
read:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
backend:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"

name: "minio"
alloy:
alloy:
clustering:
@@ -304,14 +283,13 @@ alloy:
maxReplicas: 30
targetMemoryUtilizationPercentage: 90
targetCPUUtilizationPercentage: 90

mimir-distributed:
minio:
enabled: false
global:
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
mimir:
structuredConfig:
alertmanager_storage:
@@ -335,7 +313,6 @@ mimir-distributed:
insecure: true
limits:
compactor_blocks_retention_period: 30d

tempo-distributed:
tempo:
structuredConfig:
@@ -353,38 +330,37 @@ tempo-distributed:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
ingester:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
compactor:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
querier:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
queryFrontend:
extraArgs:
- "-config.expand-env=true"
extraEnvFrom:
- secretRef:
name: "mmc-minio"
name: "minio"
traces:
otlp:
http:
enabled: true
grpc:
enabled: true

minio:
existingSecret: "minio"
buckets:
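The new grafana.version value feeds the image tag in the Grafana deployment template shown earlier (image: grafana/grafana:{{- .Values.grafana.version }}), so the Grafana version can also be overridden at install time. A minimal sketch, assuming the release name and that the chart is installed from this repository checkout:

```
helm upgrade --install meta-monitoring charts/meta-monitoring \
  --namespace meta \
  --set grafana.version=10.4.2
```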
@@ -67,7 +67,7 @@
kubectl create namespace meta
```

1. Create a secret with the user and password for the local Minio:
1. Create a secret named `minio` with the user and password for the local Minio:

```
kubectl create secret generic minio -n meta \