Update readme and manifest generation for K3s

Carlos de Paula 2019-08-20 21:46:29 -03:00
parent 5c19ad2ab2
commit 19bd000f3e
14 changed files with 1979 additions and 11 deletions

View File

@@ -27,6 +27,12 @@ deploy: manifests
	sleep 40
	kubectl apply -f ./manifests/
ingressip:
	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: alertmanager.${IP}.nip.io/g' manifests/ingress-alertmanager-main.yaml
	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: prometheus.${IP}.nip.io/g' manifests/ingress-prometheus-k8s.yaml
	@perl -p -i -e 's/^(\s*)\-\ host:.*/\1- host: grafana.${IP}.nip.io/g' manifests/ingress-grafana.yaml
	@echo "Ingress IPs changed to [service].${IP}.nip.io"
teardown:
	kubectl delete -f ./manifests/

View File

@@ -55,6 +55,27 @@ $ until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""
$ kubectl apply -f manifests/ # This command may sometimes need to be run twice (to work around a race condition).
```
If applying the manifests returns errors, run `make deploy` or `kubectl apply -f manifests/` again. Sometimes the CRDs are not registered yet, so the resources that depend on them fail on the first pass.
## Customizing for K3s
To run the monitoring stack on a [K3s](https://github.com/rancher/k3s) cluster, first deploy K3s with `curl -sfL https://get.k3s.io | sh -`.
Then configure three parameters in `vars.jsonnet` (see the snippet after this list):
1. Set `k3s.enabled` to `true`.
2. Set `k3s.master_ip` to your K3s master node IP (your VM or host IP).
3. Edit `suffixDomain` to be your node IP with the `.nip.io` suffix. This will be your ingress URL suffix.
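
For reference, the relevant `vars.jsonnet` entries would look like this (a minimal sketch using the example IP `192.168.99.100`; substitute your own node IP):

```jsonnet
{
  // ... other settings ...
  k3s: {
    enabled: true,
    master_ip: '192.168.99.100',  // K3s master node IP (VM or host IP)
  },
  // Domain suffix for the ingresses
  suffixDomain: '192.168.99.100.nip.io',
}
```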
After changing these values, run `make` to rebuild the manifests, then `k3s kubectl apply -f manifests/` to apply the stack to your cluster. If some resources fail with errors, re-run the command.
If you already have the manifests and don't want to rebuild them from Jsonnet, run `make ingressip IP="[IP-ADDRESS]"` to change the IP in the ingress routes for Grafana, Prometheus, and Alertmanager, then re-apply the ingress manifests.
Now you can open the applications:
* Grafana on [https://grafana.[your_node_ip].nip.io](https://grafana.[your_node_ip].nip.io)
* Prometheus on [https://prometheus.[your_node_ip].nip.io](https://prometheus.[your_node_ip].nip.io)
* Alertmanager on [https://alertmanager.[your_node_ip].nip.io](https://alertmanager.[your_node_ip].nip.io)
## Customizing
The content of this project consists of a set of jsonnet files making up a library to be consumed.

View File

@@ -1,7 +1,102 @@
local k = import 'ksonnet/ksonnet.beta.4/k.libsonnet';
local vars = import 'vars.jsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
{
  prometheus+:: {
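    // K3s runs the whole control plane in a single process, so there are no
    // kube-controller-manager or kube-scheduler pods for Prometheus to discover.
    // Build the discovery Service/Endpoints pairs by hand, pointing at the
    // master node IP from vars.jsonnet.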
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),

    kubeControllerManagerPrometheusDiscoveryEndpoints:
      local endpoints = k.core.v1.endpoints;
      local endpointSubset = endpoints.subsetsType;
      local endpointPort = endpointSubset.portsType;
      local Port = endpointPort.new() +
        endpointPort.withName('http-metrics') +
        endpointPort.withPort(10252) +
        endpointPort.withProtocol('TCP');
      local subset = endpointSubset.new() +
        endpointSubset.withAddresses([{ ip: vars.k3s.master_ip }]) +
        endpointSubset.withPorts(Port);
      endpoints.new() +
      endpoints.mixin.metadata.withName('kube-controller-manager-prometheus-discovery') +
      endpoints.mixin.metadata.withNamespace('kube-system') +
      endpoints.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      endpoints.withSubsets(subset),

    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { 'k8s-app': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),

    kubeSchedulerPrometheusDiscoveryEndpoints:
      local endpoints = k.core.v1.endpoints;
      local endpointSubset = endpoints.subsetsType;
      local endpointPort = endpointSubset.portsType;
      local Port = endpointPort.new() +
        endpointPort.withName('http-metrics') +
        endpointPort.withPort(10251) +
        endpointPort.withProtocol('TCP');
      local subset = endpointSubset.new() +
        endpointSubset.withAddresses([{ ip: vars.k3s.master_ip }]) +
        endpointSubset.withPorts(Port);
      endpoints.new() +
      endpoints.mixin.metadata.withName('kube-scheduler-prometheus-discovery') +
      endpoints.mixin.metadata.withNamespace('kube-system') +
      endpoints.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      endpoints.withSubsets(subset),
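
    // Scrape the kubelet over HTTPS using the pod's service-account token;
    // the kubelet's certificate is not trusted by default, so TLS
    // verification is skipped (insecureSkipVerify).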
    serviceMonitorKubelet+:
      {
        spec+: {
          endpoints: [
            {
              port: 'https-metrics',
              scheme: 'https',
              interval: '30s',
              honorLabels: true,
              tlsConfig: {
                insecureSkipVerify: true,
              },
              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
            },
            {
              port: 'https-metrics',
              scheme: 'https',
              path: '/metrics/cadvisor',
              interval: '30s',
              honorLabels: true,
              tlsConfig: {
                insecureSkipVerify: true,
              },
              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
              metricRelabelings: [
                // Drop a bunch of metrics which are disabled but still sent, see
                // https://github.com/google/cadvisor/issues/1925.
                {
                  sourceLabels: ['__name__'],
                  regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)',
                  action: 'drop',
                },
              ],
            },
          ],
        },
      },
  },
  nodeExporter+:: {
    daemonset+: {
      spec+: {
@@ -24,6 +119,12 @@ local vars = import 'vars.jsonnet';
              '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)',
              '--collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$',
            ],
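            // Expose node-exporter's metrics port (9100) on the container
            // under the name 'http' so other objects can reference it by name.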
            ports: [
              {
                containerPort: 9100,
                name: 'http',
              },
            ],
          }
        else
          c,
@@ -135,7 +236,7 @@ local vars = import 'vars.jsonnet';
            },
            {
              port: 'http-self',
              scheme: 'https',
              scheme: 'http',
              interval: '30s',
              tlsConfig: {
                insecureSkipVerify: true,

View File

@@ -14,12 +14,12 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet')
  + (import 'kube-prometheus/kube-prometheus-kops-coredns.libsonnet')
  + (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet')
  // Use http Kubelet targets. Comment to revert to https
  + join_objects([m for m in [import 'kube-prometheus/kube-prometheus-insecure-kubelet.libsonnet'] if vars.k3s == false])
  + (import 'kube-prometheus/kube-prometheus-insecure-kubelet.libsonnet')
  + (import 'smtp_server.jsonnet')
  // Additional modules are loaded dynamically from vars.jsonnet
  + join_objects([module.file for module in vars.modules if module.enabled])
  // Load K3s customized modules
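  // (join_objects folds the listed objects into a single object; when
  // vars.k3s.enabled is false the list is empty and the line below adds nothing)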
  + join_objects([m for m in [import 'k3s-overrides.jsonnet'] if vars.k3s])
  + join_objects([m for m in [import 'k3s-overrides.jsonnet'] if vars.k3s.enabled])
  // Base stack is loaded at the end to override previous definitions
  + (import 'base_operator_stack.jsonnet')
  // Load image versions last to override default from modules

File diff suppressed because it is too large

View File

@@ -105,6 +105,9 @@ spec:
        - mountPath: /grafana-dashboard-definitions/0/statefulset
          name: grafana-dashboard-statefulset
          readOnly: false
        - mountPath: /grafana-dashboard-definitions/0/traefik-dashboard
          name: grafana-dashboard-traefik-dashboard
          readOnly: false
        - mountPath: /etc/grafana
          name: grafana-config
          readOnly: false
@@ -186,6 +189,9 @@ spec:
      - configMap:
          name: grafana-dashboard-statefulset
        name: grafana-dashboard-statefulset
      - configMap:
          name: grafana-dashboard-traefik-dashboard
        name: grafana-dashboard-traefik-dashboard
      - name: grafana-config
        secret:
          secretName: grafana-config

View File

@@ -16,7 +16,7 @@ spec:
      insecureSkipVerify: true
  - interval: 30s
    port: http-self
    scheme: https
    scheme: http
    tlsConfig:
      insecureSkipVerify: true
  jobLabel: k8s-app

View File

@@ -24,6 +24,9 @@ spec:
        - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
        image: prom/node-exporter:v0.18.1
        name: node-exporter
        ports:
        - containerPort: 9100
          name: http
        resources:
          limits:
            cpu: 250m

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Endpoints
metadata:
  labels:
    k8s-app: kube-controller-manager
  name: kube-controller-manager-prometheus-discovery
  namespace: kube-system
subsets:
- addresses:
  - ip: 192.168.99.100
  ports:
  - name: http-metrics
    port: 10252
    protocol: TCP

View File

@@ -12,4 +12,4 @@ spec:
    port: 10252
    targetPort: 10252
  selector:
    component: kube-controller-manager
    k8s-app: kube-controller-manager

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Endpoints
metadata:
  labels:
    k8s-app: kube-scheduler
  name: kube-scheduler-prometheus-discovery
  namespace: kube-system
subsets:
- addresses:
  - ip: 192.168.99.100
  ports:
  - name: http-metrics
    port: 10251
    protocol: TCP

View File

@@ -12,4 +12,4 @@ spec:
    port: 10251
    targetPort: 10251
  selector:
    component: kube-scheduler
    k8s-app: kube-scheduler

View File

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: traefik-ingress-lb
  name: traefik-ingress-lb
  namespace: monitoring
spec:
  endpoints:
  - interval: 30s
    port: admin
    scheme: http
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - kube-system
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb

View File

@@ -18,7 +18,7 @@
    },
    {
      name: 'traefikExporter',
      enabled: false,
      enabled: true,
      file: import 'traefik.jsonnet',
    },
    {
@@ -28,7 +28,14 @@
    },
  ],
  k3s: true,
  k3s: {
    enabled: true,
    master_ip: '192.168.99.100',
  },
  // Domain suffix for the ingresses
  suffixDomain: '192.168.99.100.nip.io',
  // Setting these to false, defaults to emptyDirs
  enablePersistence: {
@@ -36,9 +43,6 @@
    grafana: false,
  },
  // Domain suffix for the ingresses
  suffixDomain: '192.168.99.100.nip.io',
  // Grafana "from" email
  grafana: {
    from_address: 'myemail@gmail.com',