---
# my global config
#
# Prometheus server configuration, rendered as an Ansible/Jinja2 template.
# Templated values ({{ ... }}) are quoted so that empty or special-character
# expansions cannot change the YAML type of the field.
global:
  scrape_interval: 120s  # Scrape targets every 120 seconds (Prometheus default is 15s).
  evaluation_interval: 120s  # Evaluate rules every 120 seconds (Prometheus default is 15s).
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: "{{ grafana_prometheus_project }}"

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
# - "alert.rules"
# - "first.rules"
# - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries
  # scraped from this config.
  - job_name: "{{ grafana_prometheus_job_name }}"

    # Override the global default scrape interval for this job.
    scrape_interval: "{{ grafana_prometheus_scrape_interval }}"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets:
          - 'localhost:9090'
{% for target in grafana_prometheus_targets %}
          - "{{ target }}"
{% endfor %}