Update readme and regenerate manifests for default Kubernetes

Carlos de Paula 2019-08-21 19:13:13 -03:00
parent 8ef44ef1ce
commit aad5dd8d93
17 changed files with 106 additions and 1888 deletions

View File

@@ -27,10 +27,6 @@ deploy: manifests
 	sleep 40
 	kubectl apply -f ./manifests/
 
-ingressip:
-	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: alertmanager.${IP}.nip.io/g' manifests/ingress-alertmanager-main.yaml manifests/ingress-prometheus-k8s.yaml manifests/ingress-grafana.yaml
-	@echo "Ingress IPs changed to [service].${IP}.nip.io"
-
 teardown:
 	kubectl delete -f ./manifests/
 
@@ -50,3 +46,10 @@ ifeq (, $(shell which jsonnet))
 	@go get github.com/google/go-jsonnet/cmd/jsonnet
 	@go get github.com/brancz/gojsontoyaml
 endif
+
+change_suffix:
+	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: alertmanager.${IP}.nip.io/g' manifests/ingress-alertmanager-main.yaml manifests/ingress-prometheus-k8s.yaml manifests/ingress-grafana.yaml
+	@echo "Ingress IPs changed to [service].${IP}.nip.io"
+	${K3S} kubectl apply -f manifests/ingress-alertmanager-main.yaml
+	${K3S} kubectl apply -f manifests/ingress-grafana.yaml
+	${K3S} kubectl apply -f manifests/ingress-prometheus-k8s.yaml
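The new change_suffix target replaces the removed ingressip target: it patches the host lines in the three ingress manifests and re-applies them, with K3S as an optional variable prefixed to kubectl. A usage sketch (the IP address is illustrative):

    # Rewrite the ingress hosts to [service].<IP>.nip.io and re-apply them
    make change_suffix IP="192.168.1.50"

    # On a K3s cluster, pass K3S=k3s so kubectl runs through the k3s binary
    make change_suffix IP="192.168.1.50" K3S=k3s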

View File

@@ -69,13 +69,18 @@ Now to deploy the monitoring stack on your K3s cluster, there are three parameters
 After changing these values, run `make` to build the manifests and `k3s kubectl apply -f manifests/` to apply the stack to your cluster. In case of errors on some resources, re-run the command.
 
-If you already have the manifests and don't want to rebuilt from Jsonnet, just run `make ingressip IP="[IP-ADDRESS]"` to change the IP for the ingress routes for Grafana, Prometheus and Alertmanager. Re-apply the ingress manifests after this.
 
 Now you can open the applications:
 
 * Grafana on [https://grafana.[your_node_ip].nip.io](https://grafana.[your_node_ip].nip.io),
 * Prometheus on [https://prometheus.[your_node_ip].nip.io](https://prometheus.[your_node_ip].nip.io)
 * Alertmanager on [https://alertmanager.[your_node_ip].nip.io](https://alertmanager.[your_node_ip].nip.io)
 
+Some dashboards show no values because some cadvisor metrics do not carry complete metadata. Check the open issues for more information.
+
+## Updating the ingress suffixes
+
+To avoid rebuilding all manifests, there is a make target to update the Ingress URL suffix to a different suffix (using nip.io) to match your host IP. Run `make change_suffix IP="[IP-ADDRESS]"` to change the ingress route IP for Grafana, Prometheus and Alertmanager and reapply the manifests. If you have a K3s cluster, run `make change_suffix IP="[IP-ADDRESS]" K3S=k3s`.
+
 ## Customizing
 
 The content of this project consists of a set of jsonnet files making up a library to be consumed.
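Taken together, the updated README describes this deploy flow; a minimal sketch, assuming a single-node K3s cluster reachable at 192.168.1.50 (the address is illustrative):

    # Build the manifests from the jsonnet sources
    make

    # Apply the stack; re-run if some resources error out on the first pass
    k3s kubectl apply -f manifests/

    # Later, re-point the ingress routes at another node IP without rebuilding
    make change_suffix IP="192.168.1.50" K3S=k3s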

File diff suppressed because it is too large

View File

@@ -96,9 +96,6 @@ spec:
         - mountPath: /grafana-dashboard-definitions/0/statefulset
           name: grafana-dashboard-statefulset
           readOnly: false
-        - mountPath: /grafana-dashboard-definitions/0/traefik-dashboard
-          name: grafana-dashboard-traefik-dashboard
-          readOnly: false
         - mountPath: /etc/grafana
           name: grafana-config
           readOnly: false
@@ -171,9 +168,6 @@ spec:
       - configMap:
           name: grafana-dashboard-statefulset
         name: grafana-dashboard-statefulset
-      - configMap:
-          name: grafana-dashboard-traefik-dashboard
-        name: grafana-dashboard-traefik-dashboard
       - name: grafana-config
         secret:
           secretName: grafana-config

View File

@@ -17,15 +17,46 @@ spec:
     spec:
       containers:
       - args:
-        - --port=8080
-        - --telemetry-port=8081
+        - --logtostderr
+        - --secure-listen-address=:8443
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:8081/
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy-main
+        ports:
+        - containerPort: 8443
+          name: https-main
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+      - args:
+        - --logtostderr
+        - --secure-listen-address=:9443
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:8082/
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy-self
+        ports:
+        - containerPort: 9443
+          name: https-self
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+      - args:
+        - --host=127.0.0.1
+        - --port=8081
+        - --telemetry-host=127.0.0.1
+        - --telemetry-port=8082
         image: carlosedp/kube-state-metrics:v1.7.2
         name: kube-state-metrics
-        ports:
-        - containerPort: 8080
-          name: http-main
-        - containerPort: 8081
-          name: http-self
         resources:
           limits:
             cpu: 100m

View File

@@ -8,11 +8,11 @@ metadata:
 spec:
   clusterIP: None
   ports:
-  - name: http-main
-    port: 8080
-    targetPort: http-main
-  - name: http-self
-    port: 8081
-    targetPort: http-self
+  - name: https-main
+    port: 8443
+    targetPort: https-main
+  - name: https-self
+    port: 9443
+    targetPort: https-self
   selector:
     app: kube-state-metrics

View File

@@ -7,16 +7,18 @@ metadata:
   namespace: monitoring
 spec:
   endpoints:
-  - honorLabels: true
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    honorLabels: true
     interval: 30s
-    port: http-main
-    scheme: http
+    port: https-main
+    scheme: https
     scrapeTimeout: 30s
     tlsConfig:
       insecureSkipVerify: true
-  - interval: 30s
-    port: http-self
-    scheme: http
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    interval: 30s
+    port: https-self
+    scheme: https
     tlsConfig:
       insecureSkipVerify: true
   jobLabel: k8s-app

View File

@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
       - args:
-        - --web.listen-address=:9100
+        - --web.listen-address=127.0.0.1:9100
         - --path.procfs=/host/proc
         - --path.sysfs=/host/sys
         - --path.rootfs=/host/root
@@ -24,9 +24,6 @@ spec:
         - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
         image: prom/node-exporter:v0.18.1
         name: node-exporter
-        ports:
-        - containerPort: 9100
-          name: http
         resources:
           limits:
             cpu: 250m
@@ -45,6 +42,29 @@ spec:
           mountPropagation: HostToContainer
           name: root
           readOnly: true
+      - args:
+        - --logtostderr
+        - --secure-listen-address=$(IP):9100
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
+        - --upstream=http://127.0.0.1:9100/
+        env:
+        - name: IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        image: carlosedp/kube-rbac-proxy:v0.4.1
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 9100
+          hostPort: 9100
+          name: https
+        resources:
+          limits:
+            cpu: 20m
+            memory: 60Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
       hostNetwork: true
       hostPID: true
       nodeSelector:

View File

@@ -8,8 +8,8 @@ metadata:
 spec:
   clusterIP: None
   ports:
-  - name: http
+  - name: https
     port: 9100
-    targetPort: http
+    targetPort: https
   selector:
     app: node-exporter

View File

@@ -7,8 +7,9 @@ metadata:
   namespace: monitoring
 spec:
   endpoints:
-  - interval: 30s
-    port: http
+  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    interval: 30s
+    port: https
     relabelings:
     - action: replace
       regex: (.*)
@@ -16,7 +17,9 @@ spec:
       sourceLabels:
       - __meta_kubernetes_pod_node_name
       targetLabel: instance
-    scheme: http
+    scheme: https
+    tlsConfig:
+      insecureSkipVerify: true
   jobLabel: k8s-app
   selector:
     matchLabels:

View File

@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Endpoints
-metadata:
-  labels:
-    k8s-app: kube-controller-manager
-  name: kube-controller-manager-prometheus-discovery
-  namespace: kube-system
-subsets:
-- addresses:
-  - ip: 192.168.99.100
-  ports:
-  - name: http-metrics
-    port: 10252
-    protocol: TCP

View File

@@ -12,4 +12,4 @@ spec:
     port: 10252
     targetPort: 10252
   selector:
-    k8s-app: kube-controller-manager
+    component: kube-controller-manager

View File

@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Endpoints
-metadata:
-  labels:
-    k8s-app: kube-scheduler
-  name: kube-scheduler-prometheus-discovery
-  namespace: kube-system
-subsets:
-- addresses:
-  - ip: 192.168.99.100
-  ports:
-  - name: http-metrics
-    port: 10251
-    protocol: TCP

View File

@@ -12,4 +12,4 @@ spec:
     port: 10251
     targetPort: 10251
   selector:
-    k8s-app: kube-scheduler
+    component: kube-scheduler

View File

@@ -8,25 +8,15 @@ metadata:
 spec:
   endpoints:
   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
-    honorLabels: true
     interval: 30s
-    port: https-metrics
-    scheme: https
-    tlsConfig:
-      insecureSkipVerify: true
+    port: http-metrics
+    scheme: http
   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
     honorLabels: true
     interval: 30s
-    metricRelabelings:
-    - action: drop
-      regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
-      sourceLabels:
-      - __name__
     path: /metrics/cadvisor
-    port: https-metrics
-    scheme: https
-    tlsConfig:
-      insecureSkipVerify: true
+    port: http-metrics
+    scheme: http
   jobLabel: k8s-app
   namespaceSelector:
     matchNames:

View File

@@ -1,19 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    k8s-app: traefik-ingress-lb
-  name: traefik-ingress-lb
-  namespace: monitoring
-spec:
-  endpoints:
-  - interval: 30s
-    port: admin
-    scheme: http
-  jobLabel: k8s-app
-  namespaceSelector:
-    matchNames:
-    - kube-system
-  selector:
-    matchLabels:
-      k8s-app: traefik-ingress-lb

View File

@@ -18,7 +18,7 @@
   },
   {
     name: 'traefikExporter',
-    enabled: true,
+    enabled: false,
     file: import 'traefik.jsonnet',
   },
   {
@@ -29,9 +29,8 @@
   ],
 
   k3s: {
-    enabled: true,
-    master_ip: '192.168.99.100'
+    enabled: false,
   },
 
   // Domain suffix for the ingresses