dashboard: 'Environment metrics'
priority: 1
panel_groups:
- group: System metrics (Kubernetes)
  panels:
  - title: "Memory Usage (Total)"
    type: "area-chart"
    y_label: "Total Memory Used (GB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_total
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) /1024/1024/1024 OR avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) /1024/1024/1024'
      label: Total (GB)
      unit: GB
  - title: "Core Usage (Total)"
    type: "area-chart"
    y_label: "Total Cores"
    metrics:
    - id: system_metrics_kubernetes_container_cores_total
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) OR avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job)'
      label: Total (cores)
      unit: "cores"
  - title: "Memory Usage (Pod average)"
    type: "line-chart"
    y_label: "Memory Used per Pod (MB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_average
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024 OR avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024'
      label: Pod average (MB)
      unit: MB
  - title: "Canary: Memory Usage (Pod Average)"
    type: "line-chart"
    y_label: "Memory Used per Pod (MB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_average_canary
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024 OR avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024'
      label: Pod average (MB)
      unit: MB
      track: canary
  - title: "Core Usage (Pod Average)"
    type: "line-chart"
    y_label: "Cores per Pod"
    metrics:
    - id: system_metrics_kubernetes_container_core_usage
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod)) OR avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod_name))'
      label: Pod average (cores)
      unit: "cores"
  - title: "Canary: Core Usage (Pod Average)"
    type: "line-chart"
    y_label: "Cores per Pod"
    metrics:
    - id: system_metrics_kubernetes_container_core_usage_canary
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod)) OR avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod_name))'
      label: Pod average (cores)
      unit: "cores"
      track: canary
  - title: "Knative function invocations"
    type: "area-chart"
    y_label: "Invocations"
    metrics:
    - id: system_metrics_knative_function_invocation_count
      query_range: 'sum(ceil(rate(istio_requests_total{destination_service_namespace="{{kube_namespace}}", destination_service=~"{{function_name}}.*"}[1m])*60))'
      label: invocations / minute
      unit: requests
# NGINX Ingress metrics for pre-0.16.0 versions
- group: Response metrics (NGINX Ingress VTS)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_ingress_throughput_status_code
      query_range: 'sum(rate(nginx_upstream_responses_total{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) by (status_code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    y_axis:
      format: milliseconds
    metrics:
    - id: response_metrics_nginx_ingress_latency_pod_average
      query_range: 'avg(nginx_upstream_response_msecs_avg{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"})'
      label: Pod average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    y_axis:
      format: percentHundred
    metrics:
    - id: response_metrics_nginx_ingress_http_error_rate
      query_range: 'sum(rate(nginx_upstream_responses_total{status_code="5xx", upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) / sum(rate(nginx_upstream_responses_total{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"
# NGINX Ingress metrics for post-0.16.0 versions
- group: Response metrics (NGINX Ingress)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_ingress_16_throughput_status_code
      query_range: 'sum(label_replace(rate(nginx_ingress_controller_requests{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m]), "status_code", "${1}xx", "status", "(.)..")) by (status_code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_nginx_ingress_16_latency_pod_average
      query_range: 'sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) / sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) * 1000'
      label: Pod average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    metrics:
    - id: response_metrics_nginx_ingress_16_http_error_rate
      query_range: 'sum(rate(nginx_ingress_controller_requests{status=~"5.*",namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) / sum(rate(nginx_ingress_controller_requests{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"
- group: Response metrics (HA Proxy)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_ha_proxy_throughput_status_code
      query_range: 'sum(rate(haproxy_frontend_http_requests_total{ {{environment_filter}} }[2m])) by (code)'
      unit: req / sec
      label: Status Code
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "Error Rate (%)"
    metrics:
    - id: response_metrics_ha_proxy_http_error_rate
      query_range: 'sum(rate(haproxy_frontend_http_responses_total{code="5xx",{{environment_filter}} }[2m])) / sum(rate(haproxy_frontend_http_responses_total{ {{environment_filter}} }[2m]))'
      label: HTTP Errors (%)
      unit: "%"
- group: Response metrics (AWS ELB)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_aws_elb_throughput_requests
      query_range: 'sum(aws_elb_request_count_sum{ {{environment_filter}} }) / 60'
      label: Total (req/sec)
      unit: req / sec
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_aws_elb_latency_average
      query_range: 'avg(aws_elb_latency_average{ {{environment_filter}} }) * 1000'
      label: Average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "Error Rate (%)"
    metrics:
    - id: response_metrics_aws_elb_http_error_rate
      query_range: 'sum(aws_elb_httpcode_backend_5_xx_sum{ {{environment_filter}} }) / sum(aws_elb_request_count_sum{ {{environment_filter}} })'
      label: HTTP Errors (%)
      unit: "%"
- group: Response metrics (NGINX)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_throughput_status_code
      query_range: 'sum(rate(nginx_server_requests{server_zone!="*", server_zone!="_", {{environment_filter}} }[2m])) by (code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_nginx_latency
      query_range: 'avg(nginx_server_requestMsec{ {{environment_filter}} })'
      label: Upstream (ms)
      unit: ms
  - title: "HTTP Error Rate (Errors / Sec)"
    type: "area-chart"
    y_label: "HTTP 500 Errors / Sec"
    y_axis:
      precision: 0
    metrics:
    - id: response_metrics_nginx_http_error_rate
      query_range: 'sum(rate(nginx_server_requests{code="5xx", {{environment_filter}} }[2m]))'
      label: HTTP Errors
      unit: "errors / sec"
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    metrics:
    - id: response_metrics_nginx_http_error_percentage
      query_range: 'sum(rate(nginx_server_requests{code=~"5.*", host="*", {{environment_filter}} }[2m])) / sum(rate(nginx_server_requests{code="total", host="*", {{environment_filter}} }[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"
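# Reference note (descriptive only): the `{{...}}` placeholders used throughout the
# queries above are GitLab metrics-dashboard templating variables, substituted before
# each query is sent to Prometheus. As a rough summary (see the GitLab Prometheus
# integration documentation for the authoritative list):
#   {{ci_environment_slug}}  - slug of the environment being monitored
#   {{kube_namespace}}       - Kubernetes namespace the environment is deployed into
#   {{environment_filter}}   - shorthand label filter, approximately
#                              container_name!="POD",environment="ENVIRONMENT_SLUG"
#   {{function_name}}        - name of the serverless (Knative) function, used only
#                              by the "Knative function invocations" panel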