diff --git a/modules/kubernetes/metrics/jobs/README.md b/modules/kubernetes/metrics/jobs/README.md
index 09f6547..73180c6 100644
--- a/modules/kubernetes/metrics/jobs/README.md
+++ b/modules/kubernetes/metrics/jobs/README.md
@@ -36,7 +36,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./agent.river#L170) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -56,7 +56,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `drop_metrics` | `false` | `""` | A regex of metrics to drop |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -76,7 +76,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `scrape_port_named_metrics` | `false` | Whether or not to automatically scrape endpoints that have a port with 'metrics' in the name |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -91,7 +91,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./agent.river#L115) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -109,7 +109,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./agent.river#L128) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -127,7 +127,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./agent.river#L128) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -146,7 +146,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `drop_les` | `false` | [see module](./kube-apiserver.river#163) | Regex of metric les label values to drop |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -161,7 +161,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./kube-probes.river#L117) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -179,7 +179,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./kube-proxy.river#L124) | A regex of metrics to drop |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -194,7 +194,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./kube-resource.river#L117) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -212,7 +212,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./kube-state-metrics.river#L127) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -227,7 +227,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./kubelet.river#L116) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -245,7 +245,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./loki.river#L186) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -263,7 +263,7 @@ The following jobs are completely isolated and have no dependencies on other mod
 | `keep_metrics` | `false` | [see module](./memcached.river#L170) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ___
@@ -281,7 +281,7 @@ ___
 | `keep_metrics` | `false` | [see module](./mimir.river#L186) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -299,7 +299,7 @@ ___
 | `keep_metrics` | `false` | [see module](./node-exporter.river#L141) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -317,7 +317,7 @@ ___
 | `keep_metrics` | `false` | [see module](./opencost.river#L123) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |

 ---
@@ -349,5 +349,5 @@ ___
 | `keep_metrics` | `false` | [see module](./tempo.river#L183) | A regex of metrics to keep |
 | `scrape_interval` | `false` | `60s` | How often to scrape metrics from the targets |
 | `scrape_timeout` | `false` | `10s` | How long before a scrape times out |
-| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient |
+| `max_cache_size` | `false` | `100000` | The maximum number of elements to hold in the relabeling cache. This should be at least 2x-5x your largest scrape target or samples appended rate. |
 | `clustering` | `false` | `false` | Whether or not clustering should be enabled |
diff --git a/modules/kubernetes/metrics/jobs/agent.river b/modules/kubernetes/metrics/jobs/agent.river
index 684fb0e..6638fb0 100644
--- a/modules/kubernetes/metrics/jobs/agent.river
+++ b/modules/kubernetes/metrics/jobs/agent.river
@@ -53,7 +53,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/annotations-probe.river b/modules/kubernetes/metrics/jobs/annotations-probe.river
index 792ed76..1f744a2 100644
--- a/modules/kubernetes/metrics/jobs/annotations-probe.river
+++ b/modules/kubernetes/metrics/jobs/annotations-probe.river
@@ -119,7 +119,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/annotations-scrape.river b/modules/kubernetes/metrics/jobs/annotations-scrape.river
index e94d10e..ceba188 100644
--- a/modules/kubernetes/metrics/jobs/annotations-scrape.river
+++ b/modules/kubernetes/metrics/jobs/annotations-scrape.river
@@ -146,7 +146,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/cadvisor.river b/modules/kubernetes/metrics/jobs/cadvisor.river
index c08c2b5..443e99c 100644
--- a/modules/kubernetes/metrics/jobs/cadvisor.river
+++ b/modules/kubernetes/metrics/jobs/cadvisor.river
@@ -37,7 +37,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/gitlab-exporter.river b/modules/kubernetes/metrics/jobs/gitlab-exporter.river
index dde929f..e7f0f87 100644
--- a/modules/kubernetes/metrics/jobs/gitlab-exporter.river
+++ b/modules/kubernetes/metrics/jobs/gitlab-exporter.river
@@ -57,7 +57,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/grafana.river b/modules/kubernetes/metrics/jobs/grafana.river
index 387c151..c421a13 100644
--- a/modules/kubernetes/metrics/jobs/grafana.river
+++ b/modules/kubernetes/metrics/jobs/grafana.river
@@ -53,7 +53,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kube-apiserver.river b/modules/kubernetes/metrics/jobs/kube-apiserver.river
index 31f3bcc..585b0fc 100644
--- a/modules/kubernetes/metrics/jobs/kube-apiserver.river
+++ b/modules/kubernetes/metrics/jobs/kube-apiserver.river
@@ -60,7 +60,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kube-probes.river b/modules/kubernetes/metrics/jobs/kube-probes.river
index c2e2ff6..a077b71 100644
--- a/modules/kubernetes/metrics/jobs/kube-probes.river
+++ b/modules/kubernetes/metrics/jobs/kube-probes.river
@@ -38,7 +38,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kube-proxy.river b/modules/kubernetes/metrics/jobs/kube-proxy.river
index f3216f7..0b6acf1 100644
--- a/modules/kubernetes/metrics/jobs/kube-proxy.river
+++ b/modules/kubernetes/metrics/jobs/kube-proxy.river
@@ -54,7 +54,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kube-resource.river b/modules/kubernetes/metrics/jobs/kube-resource.river
index d117d7c..71d850e 100644
--- a/modules/kubernetes/metrics/jobs/kube-resource.river
+++ b/modules/kubernetes/metrics/jobs/kube-resource.river
@@ -38,7 +38,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kube-state-metrics.river b/modules/kubernetes/metrics/jobs/kube-state-metrics.river
index 529d7c6..b32a4b3 100644
--- a/modules/kubernetes/metrics/jobs/kube-state-metrics.river
+++ b/modules/kubernetes/metrics/jobs/kube-state-metrics.river
@@ -57,7 +57,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/kubelet.river b/modules/kubernetes/metrics/jobs/kubelet.river
index bb21df8..d4423b8 100644
--- a/modules/kubernetes/metrics/jobs/kubelet.river
+++ b/modules/kubernetes/metrics/jobs/kubelet.river
@@ -38,7 +38,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/loki.river b/modules/kubernetes/metrics/jobs/loki.river
index d4feb20..6b2956f 100644
--- a/modules/kubernetes/metrics/jobs/loki.river
+++ b/modules/kubernetes/metrics/jobs/loki.river
@@ -53,7 +53,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/memcached.river b/modules/kubernetes/metrics/jobs/memcached.river
index f8d5c8e..da58f74 100644
--- a/modules/kubernetes/metrics/jobs/memcached.river
+++ b/modules/kubernetes/metrics/jobs/memcached.river
@@ -53,7 +53,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/mimir.river b/modules/kubernetes/metrics/jobs/mimir.river
index ff4b0cc..6adbde7 100644
--- a/modules/kubernetes/metrics/jobs/mimir.river
+++ b/modules/kubernetes/metrics/jobs/mimir.river
@@ -53,7 +53,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/node-exporter.river b/modules/kubernetes/metrics/jobs/node-exporter.river
index ea11cc2..86e430a 100644
--- a/modules/kubernetes/metrics/jobs/node-exporter.river
+++ b/modules/kubernetes/metrics/jobs/node-exporter.river
@@ -58,7 +58,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }

diff --git a/modules/kubernetes/metrics/jobs/opencost.river b/modules/kubernetes/metrics/jobs/opencost.river
index 651f811..47b0682 100644
--- a/modules/kubernetes/metrics/jobs/opencost.river
+++ b/modules/kubernetes/metrics/jobs/opencost.river
@@ -56,7 +56,7 @@ argument "scrape_timeout" {
 }

 argument "max_cache_size" {
-  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). Only increase if the amount of metrics returned is extremely large, the default will almost always be sufficient"
+  comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). This should be at least 2x-5x your largest scrape target or samples appended rate."
   optional = true
 }
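
For reference, a minimal River sketch of how the `max_cache_size` argument documented above typically feeds a `prometheus.relabel` component. The `argument` block mirrors the modules in this patch; the remote_write endpoint, component names, and the keep rule are illustrative placeholders rather than code taken from these modules.

argument "max_cache_size" {
  comment  = "The maximum number of elements to hold in the relabeling cache (default: 100000)."
  optional = true
}

prometheus.remote_write "default" {
  endpoint {
    // Placeholder URL for the sketch; point this at a real Prometheus-compatible endpoint.
    url = "http://mimir.example.svc:8080/api/v1/push"
  }
}

prometheus.relabel "example" {
  // Fall back to the documented default when the argument is unset; size the cache
  // at roughly 2x-5x the series count of the largest target flowing through it.
  max_cache_size = coalesce(argument.max_cache_size.value, 100000)

  // Illustrative keep rule; the real modules build their rules from keep_metrics/drop_metrics.
  rule {
    source_labels = ["__name__"]
    regex         = "up|agent_build_info"
    action        = "keep"
  }

  forward_to = [prometheus.remote_write.default.receiver]
}

The relabel cache is keyed per incoming series, so undersizing it causes cache churn and extra relabel work rather than data loss; the 2x-5x guidance in the new comments leaves headroom for series churn between scrapes.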