mirror of https://github.com/carlosedp/cluster-monitoring.git
synced 2024-11-20 19:07:17 +01:00

Update readme and regenerate manifests for default Kubernetes

commit aad5dd8d93 (parent 8ef44ef1ce)

Makefile (11 changed lines)
@@ -27,10 +27,6 @@ deploy: manifests
 	sleep 40
 	kubectl apply -f ./manifests/
 
-ingressip:
-	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: alertmanager.${IP}.nip.io/g' manifests/ingress-alertmanager-main.yaml manifests/ingress-prometheus-k8s.yaml manifests/ingress-grafana.yaml
-	@echo "Ingress IPs changed to [service].${IP}.nip.io"
-
 teardown:
 	kubectl delete -f ./manifests/
 
@@ -50,3 +46,10 @@ ifeq (, $(shell which jsonnet))
 	@go get github.com/google/go-jsonnet/cmd/jsonnet
 	@go get github.com/brancz/gojsontoyaml
 endif
+
+change_suffix:
+	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: alertmanager.${IP}.nip.io/g' manifests/ingress-alertmanager-main.yaml manifests/ingress-prometheus-k8s.yaml manifests/ingress-grafana.yaml
+	@echo "Ingress IPs changed to [service].${IP}.nip.io"
+	${K3S} kubectl apply -f manifests/ingress-alertmanager-main.yaml
+	${K3S} kubectl apply -f manifests/ingress-grafana.yaml
+	${K3S} kubectl apply -f manifests/ingress-prometheus-k8s.yaml
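The new change_suffix target supersedes the removed ingressip target and additionally re-applies the three ingress manifests; ${K3S} is an optional command prefix that stays empty on a plain-kubectl setup. A usage sketch with a placeholder address:

    make change_suffix IP="192.168.15.15"            # plain Kubernetes
    make change_suffix IP="192.168.15.15" K3S=k3s    # K3s: runs `k3s kubectl apply ...`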
README.md

@@ -69,13 +69,18 @@ Now to deploy the monitoring stack on your K3s cluster, there are three paramete
 
 After changing these values, run `make` to build the manifests and `k3s kubectl apply -f manifests/` to apply the stack to your cluster. In case of errors on some resources, re-run the command.
 
-If you already have the manifests and don't want to rebuild from Jsonnet, just run `make ingressip IP="[IP-ADDRESS]"` to change the IP for the ingress routes for Grafana, Prometheus and Alertmanager. Re-apply the ingress manifests after this.
-Now you can open the applications:
+Now you can open the applications:
 
 * Grafana on [https://grafana.[your_node_ip].nip.io](https://grafana.[your_node_ip].nip.io),
 * Prometheus on [https://prometheus.[your_node_ip].nip.io](https://prometheus.[your_node_ip].nip.io)
 * Alertmanager on [https://alertmanager.[your_node_ip].nip.io](https://alertmanager.[your_node_ip].nip.io)
 
 There are some dashboards that show no values due to some cadvisor metrics not having the complete metadata. Check the open issues for more information.
 
+## Updating the ingress suffixes
+
+To avoid rebuilding all manifests, there is a make target to update the Ingress URL suffix to a different suffix (using nip.io) to match your host IP. Run `make change_suffix IP="[IP-ADDRESS]"` to change the ingress route IP for Grafana, Prometheus and Alertmanager and reapply the manifests. If you have a K3s cluster, run `make change_suffix IP="[IP-ADDRESS]" K3S=k3s`.
+
 ## Customizing
 
 The content of this project consists of a set of jsonnet files making up a library to be consumed.
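The deploy flow the README describes, as a plain-shell sketch (substitute `k3s kubectl` on K3s):

    make                           # build the manifests from the jsonnet sources
    kubectl apply -f manifests/    # apply the stack; re-run if some resources error out on the first pass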
(File diff suppressed because it is too large)
@@ -96,9 +96,6 @@ spec:
         - mountPath: /grafana-dashboard-definitions/0/statefulset
           name: grafana-dashboard-statefulset
           readOnly: false
-        - mountPath: /grafana-dashboard-definitions/0/traefik-dashboard
-          name: grafana-dashboard-traefik-dashboard
-          readOnly: false
         - mountPath: /etc/grafana
           name: grafana-config
           readOnly: false
@@ -171,9 +168,6 @@ spec:
       - configMap:
           name: grafana-dashboard-statefulset
         name: grafana-dashboard-statefulset
-      - configMap:
-          name: grafana-dashboard-traefik-dashboard
-        name: grafana-dashboard-traefik-dashboard
       - name: grafana-config
         secret:
           secretName: grafana-config
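These two Grafana hunks travel together: dropping the Traefik dashboard means deleting both the volumeMount in the container and the ConfigMap-backed volume that feeds it, since a mount referencing a missing volume fails pod validation. To list the dashboard ConfigMaps that remain after applying, something like:

    kubectl -n monitoring get configmaps | grep grafana-dashboard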
@@ -17,15 +17,46 @@ spec:
     spec:
       containers:
       - args:
-        - --port=8080
-        - --telemetry-port=8081
+        - --logtostderr
+        - --secure-listen-address=:8443
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:8081/
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy-main
+        ports:
+        - containerPort: 8443
+          name: https-main
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+      - args:
+        - --logtostderr
+        - --secure-listen-address=:9443
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:8082/
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy-self
+        ports:
+        - containerPort: 9443
+          name: https-self
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+      - args:
+        - --host=127.0.0.1
+        - --port=8081
+        - --telemetry-host=127.0.0.1
+        - --telemetry-port=8082
         image: carlosedp/kube-state-metrics:v1.7.2
         name: kube-state-metrics
-        ports:
-        - containerPort: 8080
-          name: http-main
-        - containerPort: 8081
-          name: http-self
         resources:
           limits:
             cpu: 100m
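With this change kube-state-metrics itself binds only to 127.0.0.1 (ports 8081/8082) and is reachable from outside the pod solely through the two kube-rbac-proxy sidecars on 8443 (https-main) and 9443 (https-self). A manual scrape check from an in-cluster pod, as a sketch — the Service DNS name kube-state-metrics.monitoring.svc is an assumption based on the manifests' naming:

    # run inside a pod whose ServiceAccount is authorized by kube-rbac-proxy's RBAC
    TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
    curl -sk -H "Authorization: Bearer ${TOKEN}" \
      https://kube-state-metrics.monitoring.svc:8443/metrics | head

A request without an authorized token is answered by the proxy with 401 instead of reaching kube-state-metrics.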
@@ -8,11 +8,11 @@ metadata:
 spec:
   clusterIP: None
   ports:
-  - name: http-main
-    port: 8080
-    targetPort: http-main
-  - name: http-self
-    port: 8081
-    targetPort: http-self
+  - name: https-main
+    port: 8443
+    targetPort: https-main
+  - name: https-self
+    port: 9443
+    targetPort: https-self
   selector:
     app: kube-state-metrics
@@ -7,16 +7,18 @@ metadata:
   namespace: monitoring
 spec:
   endpoints:
-  - honorLabels: true
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    honorLabels: true
     interval: 30s
-    port: http-main
-    scheme: http
+    port: https-main
+    scheme: https
     scrapeTimeout: 30s
+    tlsConfig:
+      insecureSkipVerify: true
-  - interval: 30s
-    port: http-self
-    scheme: http
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    interval: 30s
+    port: https-self
+    scheme: https
+    tlsConfig:
+      insecureSkipVerify: true
   jobLabel: k8s-app
@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
       - args:
-        - --web.listen-address=:9100
+        - --web.listen-address=127.0.0.1:9100
         - --path.procfs=/host/proc
        - --path.sysfs=/host/sys
        - --path.rootfs=/host/root

@@ -24,9 +24,6 @@ spec:
         - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
         image: prom/node-exporter:v0.18.1
         name: node-exporter
-        ports:
-        - containerPort: 9100
-          name: http
         resources:
           limits:
             cpu: 250m

@@ -45,6 +42,29 @@ spec:
           mountPropagation: HostToContainer
           name: root
           readOnly: true
+      - args:
+        - --logtostderr
+        - --secure-listen-address=$(IP):9100
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:9100/
+        env:
+        - name: IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 9100
+          hostPort: 9100
+          name: https
+        resources:
+          limits:
+            cpu: 20m
+            memory: 60Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
       hostNetwork: true
       hostPID: true
       nodeSelector:
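node_exporter now listens only on 127.0.0.1:9100, while the kube-rbac-proxy sidecar binds the pod IP (injected as $(IP) from status.podIP via the downward API) on the same port — which works because the DaemonSet runs with hostNetwork: true. A hedged spot check against one node (placeholder address, in-cluster token path assumed):

    TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
    curl -sk -H "Authorization: Bearer ${TOKEN}" https://192.168.15.21:9100/metrics | head

An anonymous request should be rejected by the proxy rather than reach node_exporter.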
@@ -8,8 +8,8 @@ metadata:
 spec:
   clusterIP: None
   ports:
-  - name: http
+  - name: https
     port: 9100
-    targetPort: http
+    targetPort: https
   selector:
     app: node-exporter
@@ -7,8 +7,9 @@ metadata:
   namespace: monitoring
 spec:
   endpoints:
-  - interval: 30s
-    port: http
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    interval: 30s
+    port: https
     relabelings:
     - action: replace
       regex: (.*)

@@ -16,7 +17,9 @@
       sourceLabels:
       - __meta_kubernetes_pod_node_name
       targetLabel: instance
-    scheme: http
+    scheme: https
+    tlsConfig:
+      insecureSkipVerify: true
   jobLabel: k8s-app
   selector:
     matchLabels:
@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Endpoints
-metadata:
-  labels:
-    k8s-app: kube-controller-manager
-  name: kube-controller-manager-prometheus-discovery
-  namespace: kube-system
-subsets:
-- addresses:
-  - ip: 192.168.99.100
-  ports:
-  - name: http-metrics
-    port: 10252
-    protocol: TCP

@@ -12,4 +12,4 @@ spec:
     port: 10252
     targetPort: 10252
   selector:
-    k8s-app: kube-controller-manager
+    component: kube-controller-manager
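Previously the control-plane scrape targets were hand-written Endpoints objects pinned to the master IP (192.168.99.100, the k3s master_ip from vars.jsonnet); on default Kubernetes the discovery Service can instead select the static control-plane pods by their component label, so the static Endpoints file is deleted. A sketch of how to confirm the new selector resolves:

    kubectl -n kube-system get pods -l component=kube-controller-manager
    kubectl -n kube-system get endpoints kube-controller-manager-prometheus-discovery

The kube-scheduler manifests below get the identical treatment on port 10251.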
@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Endpoints
-metadata:
-  labels:
-    k8s-app: kube-scheduler
-  name: kube-scheduler-prometheus-discovery
-  namespace: kube-system
-subsets:
-- addresses:
-  - ip: 192.168.99.100
-  ports:
-  - name: http-metrics
-    port: 10251
-    protocol: TCP

@@ -12,4 +12,4 @@ spec:
     port: 10251
     targetPort: 10251
   selector:
-    k8s-app: kube-scheduler
+    component: kube-scheduler
@@ -8,25 +8,15 @@ metadata:
 spec:
   endpoints:
-  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
-    honorLabels: true
+  - honorLabels: true
     interval: 30s
-    port: https-metrics
-    scheme: https
-    tlsConfig:
-      insecureSkipVerify: true
+    port: http-metrics
+    scheme: http
-  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
-    honorLabels: true
+  - honorLabels: true
     interval: 30s
     metricRelabelings:
     - action: drop
       regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
       sourceLabels:
       - __name__
     path: /metrics/cadvisor
-    port: https-metrics
-    scheme: https
-    tlsConfig:
-      insecureSkipVerify: true
+    port: http-metrics
+    scheme: http
   jobLabel: k8s-app
   namespaceSelector:
     matchNames:
@@ -1,19 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    k8s-app: traefik-ingress-lb
-  name: traefik-ingress-lb
-  namespace: monitoring
-spec:
-  endpoints:
-  - interval: 30s
-    port: admin
-    scheme: http
-  jobLabel: k8s-app
-  namespaceSelector:
-    matchNames:
-    - kube-system
-  selector:
-    matchLabels:
-      k8s-app: traefik-ingress-lb
@@ -18,7 +18,7 @@
   },
   {
     name: 'traefikExporter',
-    enabled: true,
+    enabled: false,
     file: import 'traefik.jsonnet',
   },
   {

@@ -29,9 +29,8 @@
   ],
 
   k3s: {
-    enabled: true,
+    enabled: false,
     master_ip: '192.168.99.100'
-
   },
 
   // Domain suffix for the ingresses
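These two vars.jsonnet toggles are what make the regenerated manifests Kubernetes-flavored: the Traefik exporter scrape and the K3s master discovery are now off by default. To target a K3s cluster again, a sketch of the fields to flip back before running make (only the relevant fields shown; the IP is a placeholder):

    {
      name: 'traefikExporter',
      enabled: true,               // re-enable the Traefik ServiceMonitor
      file: import 'traefik.jsonnet',
    },

    k3s: {
      enabled: true,               // generate K3s-style discovery again
      master_ip: '192.168.99.100', // your K3s master node IP (placeholder)
    },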