diff --git a/image_sources_versions.jsonnet b/image_sources_versions.jsonnet
index 9778d12..209019b 100644
--- a/image_sources_versions.jsonnet
+++ b/image_sources_versions.jsonnet
@@ -3,7 +3,7 @@
     versions+:: {
       prometheus: 'v2.16.0',
       alertmanager: 'v0.20.0',
-      kubeStateMetrics: 'v1.7.2',
+      kubeStateMetrics: '1.7.2',
       kubeRbacProxy: 'v0.4.1',
       addonResizer: 'v1.8.4',
       nodeExporter: 'v0.18.1',
diff --git a/jsonnetfile.lock.json b/jsonnetfile.lock.json
index b3d1071..25af17a 100644
--- a/jsonnetfile.lock.json
+++ b/jsonnetfile.lock.json
@@ -30,7 +30,7 @@
                     "subdir": "grafana-builder"
                 }
             },
-            "version": "c19a92e586a6752f11745b47f309b13f02ef7147",
+            "version": "03da9ea0fc25e621d195fbb218a6bf8593152721",
             "sum": "slxrtftVDiTlQK22ertdfrg4Epnq97gdrLI63ftUfaE="
         },
         {
@@ -63,8 +63,8 @@
                     "subdir": "jsonnet/kube-prometheus"
                 }
             },
-            "version": "502f81b235a84484b55493af5cf96623ae37ef80",
-            "sum": "weorIzfuzEqgRWW5mtt/p8cXMRhmilW20ppYruOpSZs="
+            "version": "285624d8fbef01923f7b9772fe2da21c5698a666",
+            "sum": "npFy3VLHkSDiwUK2DPC9Up4ETD0bMiTpGTXqetH0r4A="
         },
         {
             "name": "kube-state-metrics",
@@ -74,7 +74,7 @@
                     "subdir": "jsonnet/kube-state-metrics"
                 }
             },
-            "version": "fdd2ef120e5d9b56a29e7c3eeeda153acfb446ce",
+            "version": "c485728b2e585bd1079e12e462cd7c6fef25f155",
             "sum": "cJjGZaLBjcIGrLHZLjRPU9c3KL+ep9rZTb9dbALSKqA="
         },
         {
@@ -85,7 +85,7 @@
                     "subdir": "jsonnet/kube-state-metrics-mixin"
                 }
             },
-            "version": "fdd2ef120e5d9b56a29e7c3eeeda153acfb446ce",
+            "version": "c485728b2e585bd1079e12e462cd7c6fef25f155",
             "sum": "E1GGavnf9PCWBm4WVrxWnc0FIj72UcbcweqGioWrOdU="
         },
         {
@@ -96,8 +96,8 @@
                     "subdir": ""
                 }
             },
-            "version": "16ff3841fea16a0f2151479ab67d8d34893759f3",
-            "sum": "UdI7A4jYc5PxmUHZBIGymx9Hk3eStqYSzXuUHot4oTQ="
+            "version": "ea905d25c01ff4364937a2faed248e5f2f3fdb35",
+            "sum": "ww9kAgxtpzRC6y7SiVWH7fWJcptBg3Hde50eXmHzN7U="
         },
         {
             "name": "node-mixin",
@@ -140,7 +140,7 @@
                     "subdir": "lib/promgrafonnet"
                 }
             },
-            "version": "16ff3841fea16a0f2151479ab67d8d34893759f3",
+            "version": "ea905d25c01ff4364937a2faed248e5f2f3fdb35",
             "sum": "VhgBM39yv0f4bKv8VfGg4FXkg573evGDRalip9ypKbc="
         },
         {
diff --git a/k3s-overrides.jsonnet b/k3s-overrides.jsonnet
index 4edfac6..c4d4dc0 100644
--- a/k3s-overrides.jsonnet
+++ b/k3s-overrides.jsonnet
@@ -10,27 +10,4 @@ local vars = import 'vars.jsonnet';
     kubeSchedulerPrometheusDiscoveryEndpoints: utils.newEndpoint('kube-scheduler-prometheus-discovery', 'kube-system', vars.k3s.master_ip, 'http-metrics', 10251),
   },
-
-  // Temporary workaround until merge of https://github.com/coreos/kube-prometheus/pull/456
-  kubeStateMetrics+:: {
-    deployment+: {
-      spec+: {
-        template+: {
-          spec+: {
-            containers:
-              std.map(
-                function(c)
-                  if std.startsWith(c.name, 'kube-state-metrics') then
-                    c {
-                      image: $._config.imageRepos.kubeStateMetrics + ':' + $._config.versions.kubeStateMetrics,
-                    }
-                  else
-                    c,
-                super.containers,
-              ),
-          },
-        },
-      },
-    },
-  },
 }
diff --git a/manifests/grafana-dashboardDefinitions.yaml b/manifests/grafana-dashboardDefinitions.yaml
index 6a86eb2..b6f2681 100644
--- a/manifests/grafana-dashboardDefinitions.yaml
+++ b/manifests/grafana-dashboardDefinitions.yaml
@@ -15130,7 +15130,7 @@ items:
                     "steppedLine": false,
                     "targets": [
                         {
-                            "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__interval])\n* on (namespace,pod) \ngroup_left(workload,workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n",
+                            "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n",
                             "format": "time_series",
                             "intervalFactor": 2,
                             "legendFormat": "{{pod}}",
@@ -15306,7 +15306,7 @@
                             "value": ""
                         },
                         "datasource": "$datasource",
-                        "hide": 2,
+                        "hide": 0,
                         "includeAll": false,
                         "label": null,
                         "multi": false,
diff --git a/manifests/kube-state-metrics-clusterRole.yaml b/manifests/kube-state-metrics-clusterRole.yaml
index a365fee..0c7e28d 100644
--- a/manifests/kube-state-metrics-clusterRole.yaml
+++ b/manifests/kube-state-metrics-clusterRole.yaml
@@ -3,7 +3,7 @@ kind: ClusterRole
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
 rules:
 - apiGroups:
diff --git a/manifests/kube-state-metrics-clusterRoleBinding.yaml b/manifests/kube-state-metrics-clusterRoleBinding.yaml
index ddf6bc6..cc144a2 100644
--- a/manifests/kube-state-metrics-clusterRoleBinding.yaml
+++ b/manifests/kube-state-metrics-clusterRoleBinding.yaml
@@ -3,7 +3,7 @@ kind: ClusterRoleBinding
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
 roleRef:
   apiGroup: rbac.authorization.k8s.io
diff --git a/manifests/kube-state-metrics-deployment.yaml b/manifests/kube-state-metrics-deployment.yaml
index 71f2aed..44556a2 100644
--- a/manifests/kube-state-metrics-deployment.yaml
+++ b/manifests/kube-state-metrics-deployment.yaml
@@ -3,7 +3,7 @@ kind: Deployment
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
   namespace: monitoring
 spec:
@@ -15,7 +15,7 @@ spec:
     metadata:
       labels:
         app.kubernetes.io/name: kube-state-metrics
-        app.kubernetes.io/version: 1.9.5
+        app.kubernetes.io/version: 1.7.2
     spec:
       containers:
       - args:
@@ -23,7 +23,7 @@
         - --port=8081
         - --telemetry-host=127.0.0.1
         - --telemetry-port=8082
-        image: quay.io/coreos/kube-state-metrics:v1.9.5
+        image: carlosedp/kube-state-metrics:v1.7.2
         name: kube-state-metrics
         securityContext:
           runAsUser: 65534
@@ -37,6 +37,8 @@
         ports:
         - containerPort: 8443
           name: https-main
+        securityContext:
+          runAsUser: 65534
       - args:
         - --logtostderr
         - --secure-listen-address=:9443
@@ -47,6 +49,8 @@
         ports:
         - containerPort: 9443
           name: https-self
+        securityContext:
+          runAsUser: 65534
       nodeSelector:
         kubernetes.io/os: linux
       serviceAccountName: kube-state-metrics
diff --git a/manifests/kube-state-metrics-service.yaml b/manifests/kube-state-metrics-service.yaml
index 59881ce..4b99bd4 100644
--- a/manifests/kube-state-metrics-service.yaml
+++ b/manifests/kube-state-metrics-service.yaml
@@ -3,7 +3,7 @@ kind: Service
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
   namespace: monitoring
 spec:
diff --git a/manifests/kube-state-metrics-serviceAccount.yaml b/manifests/kube-state-metrics-serviceAccount.yaml
index 5dd13bb..564a3f6 100644
--- a/manifests/kube-state-metrics-serviceAccount.yaml
+++ b/manifests/kube-state-metrics-serviceAccount.yaml
@@ -3,6 +3,6 @@ kind: ServiceAccount
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
   namespace: monitoring
diff --git a/manifests/kube-state-metrics-serviceMonitor.yaml b/manifests/kube-state-metrics-serviceMonitor.yaml
index f84324e..ad5880c 100644
--- a/manifests/kube-state-metrics-serviceMonitor.yaml
+++ b/manifests/kube-state-metrics-serviceMonitor.yaml
@@ -3,7 +3,7 @@ kind: ServiceMonitor
 metadata:
   labels:
     app.kubernetes.io/name: kube-state-metrics
-    app.kubernetes.io/version: 1.9.5
+    app.kubernetes.io/version: 1.7.2
   name: kube-state-metrics
   namespace: monitoring
 spec:
diff --git a/manifests/prometheus-rules.yaml b/manifests/prometheus-rules.yaml
index 5ecfa15..2395c01 100644
--- a/manifests/prometheus-rules.yaml
+++ b/manifests/prometheus-rules.yaml
@@ -1137,7 +1137,7 @@ spec:
           }} of its Pod capacity.
         runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods
       expr: |
-        max(max(kubelet_running_pod_count{job="kubelet", metrics_path="/metrics"}) by(instance) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) by(node) / max(kube_node_status_capacity_pods{job="kube-state-metrics"}) by(node) > 0.95
+        max(max(kubelet_running_pod_count{job="kubelet", metrics_path="/metrics"}) by(instance) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) by(node) / max(kube_node_status_capacity_pods{job="kube-state-metrics"} != 1) by(node) > 0.95
       for: 15m
       labels:
         severity: warning