Expand Namespaces:: and refactor YAML parsing out of MetricGroup class

Pawel Chojnacki 2017-06-06 16:07:33 +02:00
parent c7a1da800f
commit ccf89acc71
16 changed files with 281 additions and 238 deletions
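
At a glance: YAML parsing moves out of MetricGroup into a new Gitlab::Prometheus::AdditionalMetricsParser module, the Prometheus query classes get explicit nested namespaces, and AdditionalMetricsQuery is replaced by AdditionalMetricsEnvironmentQuery plus a shared QueryAdditionalMetrics mixin. A rough sketch of the resulting call flow (illustrative only, e.g. in a Rails console at this revision; the example values are made up and not part of the diff):

# Illustrative sketch -- assumes a GitLab Rails console running this revision.
groups = Gitlab::Prometheus::MetricGroup.all
# MetricGroup.all now delegates to AdditionalMetricsParser.load_groups_from_yaml,
# which reads config/additional_metrics.yml and builds MetricGroup/Metric objects.

# MetricGroup switches to keyword arguments in this commit:
group = Gitlab::Prometheus::MetricGroup.new(name: 'example', priority: 1)
group.metrics = []

# Environment#additional_metrics now schedules the renamed query class:
Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery.name
# => "Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery"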

@@ -159,7 +159,7 @@ class Environment < ActiveRecord::Base
   def additional_metrics
     if has_additional_metrics?
-      project.monitoring_service.reactive_query(Gitlab::Prometheus::Queries::AdditionalMetricsQuery.name, self.id, &:itself)
+      project.monitoring_service.reactive_query(Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery.name, self.id, &:itself)
     end
   end

@@ -0,0 +1,36 @@
+module Gitlab
+  module Prometheus
+    module AdditionalMetricsParser
+      extend self
+
+      def load_groups_from_yaml
+        additional_metrics_raw.map(&method(:group_from_entry))
+      end
+
+      private
+
+      def metrics_from_list(list)
+        list.map { |entry| metric_from_entry(entry) }
+      end
+
+      def metric_from_entry(entry)
+        missing_fields = [:title, :required_metrics, :weight, :queries].select { |key| !entry.has_key?(key) }
+        raise ParsingError.new("entry missing required fields #{missing_fields}") unless missing_fields.empty?
+
+        Metric.new(entry[:title], entry[:required_metrics], entry[:weight], entry[:y_label], entry[:queries])
+      end
+
+      def group_from_entry(entry)
+        missing_fields = [:group, :priority, :metrics].select { |key| !entry.has_key?(key) }
+        raise ParsingError.new("entry missing required fields #{missing_fields}") unless missing_fields.empty?
+
+        group = MetricGroup.new(name: entry[:group], priority: entry[:priority])
+        group.tap { |g| g.metrics = metrics_from_list(entry[:metrics]) }
+      end
+
+      def additional_metrics_raw
+        @additional_metrics_raw ||= YAML.load_file(Rails.root.join('config/additional_metrics.yml'))&.map(&:deep_symbolize_keys).freeze
+      end
+    end
+  end
+end
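
For context, the parser above expects each deep-symbolized entry from config/additional_metrics.yml to look roughly like the following; the metric names and PromQL strings here are placeholders, not values from the real config file:

# Hypothetical entry shape validated by AdditionalMetricsParser (placeholder values).
entry = {
  group: 'Example group',                          # checked by group_from_entry
  priority: 1,                                     # checked by group_from_entry
  metrics: [                                       # checked by group_from_entry
    {
      title: 'Example metric',                     # checked by metric_from_entry
      required_metrics: %w(some_exported_series),  # checked by metric_from_entry
      weight: 1,                                    # checked by metric_from_entry
      y_label: 'Values',                            # optional; defaults to 'Values'
      queries: [                                    # checked by metric_from_entry
        # QueryAdditionalMetrics#process_query (further down) uses :query_range when
        # present, otherwise :query, interpolating %{environment_filter} and friends.
        { query_range: 'avg(some_exported_series{%{environment_filter}})' }
      ]
    }
  ]
}
# Any missing required key raises Gitlab::Prometheus::ParsingError,
# e.g. 'entry missing required fields [:priority]'.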

@@ -1,24 +1,15 @@
-module Gitlab::Prometheus
-  class Metric
-    attr_reader :group, :title, :required_metrics, :weight, :y_label, :queries
+module Gitlab
+  module Prometheus
+    class Metric
+      attr_reader :group, :title, :required_metrics, :weight, :y_label, :queries
 
-    def initialize(title, required_metrics, weight, y_label, queries = [])
-      @title = title
-      @required_metrics = required_metrics
-      @weight = weight
-      @y_label = y_label || 'Values'
-      @queries = queries
-    end
-
-    def self.metric_from_entry(entry)
-      missing_fields = [:title, :required_metrics, :weight, :queries].select { |key| !entry.has_key?(key) }
-      raise ParsingError.new("entry missing required fields #{missing_fields}") unless missing_fields.empty?
-
-      Metric.new(entry[:title], entry[:required_metrics], entry[:weight], entry[:y_label], entry[:queries])
-    end
-
-    def self.metrics_from_list(list)
-      list.map { |entry| metric_from_entry(entry) }
+      def initialize(title, required_metrics, weight, y_label, queries = [])
+        @title = title
+        @required_metrics = required_metrics
+        @weight = weight
+        @y_label = y_label || 'Values'
+        @queries = queries
+      end
     end
   end
 end

@@ -1,33 +1,18 @@
-module Gitlab::Prometheus
-  class MetricGroup
-    attr_reader :priority, :name
-    attr_accessor :metrics
+module Gitlab
+  module Prometheus
+    class MetricGroup
+      attr_reader :priority, :name
+      attr_accessor :metrics
 
-    def initialize(name, priority, metrics = [])
-      @name = name
-      @priority = priority
-      @metrics = metrics
-    end
+      def initialize(name:, priority:, metrics: [])
+        @name = name
+        @priority = priority
+        @metrics = metrics
+      end
 
-    def self.all
-      load_groups_from_yaml
-    end
-
-    def self.load_groups_from_yaml
-      additional_metrics_raw.map(&method(:group_from_entry))
-    end
-
-    def self.group_from_entry(entry)
-      missing_fields = [:group, :priority, :metrics].select { |key| !entry.has_key?(key) }
-      raise ParsingError.new("entry missing required fields #{missing_fields}") unless missing_fields.empty?
-
-      group = MetricGroup.new(entry[:group], entry[:priority])
-      group.metrics = Metric.metrics_from_list(entry[:metrics])
-      group
-    end
-
-    def self.additional_metrics_raw
-      @additional_metrics_raw ||= YAML.load_file(Rails.root.join('config/additional_metrics.yml'))&.map(&:deep_symbolize_keys).freeze
+      def self.all
+        AdditionalMetricsParser.load_groups_from_yaml
+      end
     end
   end
 end

@@ -1,3 +1,5 @@
-module Gitlab::Prometheus
-  ParsingError = Class.new(StandardError)
+module Gitlab
+  module Prometheus
+    ParsingError = Class.new(StandardError)
+  end
 end

@@ -1,15 +1,21 @@
-module Gitlab::Prometheus::Queries
-  class AdditionalMetricsDeploymentQuery < AdditionalMetricsQuery
-    def query(deployment_id)
-      deployment = Deployment.find_by(id: deployment_id)
-      query_context = {
-        environment_slug: deployment.environment.slug,
-        environment_filter: %{container_name!="POD",environment="#{deployment.environment.slug}"},
-        timeframe_start: (deployment.created_at - 30.minutes).to_f,
-        timeframe_end: (deployment.created_at + 30.minutes).to_f
-      }
-
-      query_metrics(query_context)
+module Gitlab
+  module Prometheus
+    module Queries
+      class AdditionalMetricsDeploymentQuery < BaseQuery
+        include QueryAdditionalMetrics
+
+        def query(deployment_id)
+          deployment = Deployment.find_by(id: deployment_id)
+          query_context = {
+            environment_slug: deployment.environment.slug,
+            environment_filter: %{container_name!="POD",environment="#{deployment.environment.slug}"},
+            timeframe_start: (deployment.created_at - 30.minutes).to_f,
+            timeframe_end: (deployment.created_at + 30.minutes).to_f
+          }
+
+          query_metrics(query_context)
+        end
+      end
     end
   end
 end

@@ -0,0 +1,21 @@
+module Gitlab
+  module Prometheus
+    module Queries
+      class AdditionalMetricsEnvironmentQuery < BaseQuery
+        include QueryAdditionalMetrics
+
+        def query(environment_id)
+          environment = Environment.find_by(id: environment_id)
+          query_context = {
+            environment_slug: environment.slug,
+            environment_filter: %{container_name!="POD",environment="#{environment.slug}"},
+            timeframe_start: 8.hours.ago.to_f,
+            timeframe_end: Time.now.to_f
+          }
+
+          query_metrics(query_context)
+        end
+      end
+    end
+  end
+end

@@ -1,82 +0,0 @@
-module Gitlab::Prometheus::Queries
-  class AdditionalMetricsQuery < BaseQuery
-    def query(environment_id)
-      environment = Environment.find_by(id: environment_id)
-      query_context = {
-        environment_slug: environment.slug,
-        environment_filter: %{container_name!="POD",environment="#{environment.slug}"},
-        timeframe_start: 8.hours.ago.to_f,
-        timeframe_end: Time.now.to_f
-      }
-
-      query_metrics(query_context)
-    end
-
-    protected
-
-    def query_metrics(query_context)
-      query_processor = method(:process_query).curry[query_context]
-
-      groups = matched_metrics.map do |group|
-        metrics = group.metrics.map do |metric|
-          {
-            title: metric.title,
-            weight: metric.weight,
-            y_label: metric.y_label,
-            queries: metric.queries.map(&query_processor).select(&method(:query_with_result))
-          }
-        end
-
-        {
-          group: group.name,
-          priority: group.priority,
-          metrics: metrics.select(&method(:metric_with_any_queries))
-        }
-      end
-
-      groups.select(&method(:group_with_any_metrics))
-    end
-
-    private
-
-    def metric_with_any_queries(metric)
-      metric[:queries]&.count&.> 0
-    end
-
-    def group_with_any_metrics(group)
-      group[:metrics]&.count&.> 0
-    end
-
-    def query_with_result(query)
-      query[:result]&.any? do |item|
-        item&.[](:values)&.any? || item&.[](:value)&.any?
-      end
-    end
-
-    def process_query(context, query)
-      query_with_result = query.dup
-      query_with_result[:result] =
-        if query.has_key?(:query_range)
-          client_query_range(query[:query_range] % context, start: context[:timeframe_start], stop: context[:timeframe_end])
-        else
-          client_query(query[:query] % context, time: context[:timeframe_end])
-        end
-      query_with_result
-    end
-
-    def available_metrics
-      @available_metrics ||= client_label_values || []
-    end
-
-    def matched_metrics
-      result = Gitlab::Prometheus::MetricGroup.all.map do |group|
-        group.metrics.select! do |metric|
-          metric.required_metrics.all?(&available_metrics.method(:include?))
-        end
-        group
-      end
-
-      result.select { |group| group.metrics.any? }
-    end
-  end
-end

@@ -1,26 +1,30 @@
-module Gitlab::Prometheus::Queries
-  class DeploymentQuery < BaseQuery
-    def query(deployment_id)
-      deployment = Deployment.find_by(id: deployment_id)
-      environment_slug = deployment.environment.slug
+module Gitlab
+  module Prometheus
+    module Queries
+      class DeploymentQuery < BaseQuery
+        def query(deployment_id)
+          deployment = Deployment.find_by(id: deployment_id)
+          environment_slug = deployment.environment.slug
 
-      memory_query = raw_memory_usage_query(environment_slug)
-      memory_avg_query = %{avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="#{environment_slug}"}[30m]))}
-      cpu_query = raw_cpu_usage_query(environment_slug)
-      cpu_avg_query = %{avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="#{environment_slug}"}[30m])) * 100}
+          memory_query = raw_memory_usage_query(environment_slug)
+          memory_avg_query = %{avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="#{environment_slug}"}[30m]))}
+          cpu_query = raw_cpu_usage_query(environment_slug)
+          cpu_avg_query = %{avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="#{environment_slug}"}[30m])) * 100}
 
-      timeframe_start = (deployment.created_at - 30.minutes).to_f
-      timeframe_end = (deployment.created_at + 30.minutes).to_f
+          timeframe_start = (deployment.created_at - 30.minutes).to_f
+          timeframe_end = (deployment.created_at + 30.minutes).to_f
 
-      {
-        memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
-        memory_before: client_query(memory_avg_query, time: deployment.created_at.to_f),
-        memory_after: client_query(memory_avg_query, time: timeframe_end),
+          {
+            memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
+            memory_before: client_query(memory_avg_query, time: deployment.created_at.to_f),
+            memory_after: client_query(memory_avg_query, time: timeframe_end),
 
-        cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
-        cpu_before: client_query(cpu_avg_query, time: deployment.created_at.to_f),
-        cpu_after: client_query(cpu_avg_query, time: timeframe_end)
-      }
+            cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
+            cpu_before: client_query(cpu_avg_query, time: deployment.created_at.to_f),
+            cpu_after: client_query(cpu_avg_query, time: timeframe_end)
+          }
+        end
+      end
     end
   end
 end

@@ -1,20 +1,24 @@
-module Gitlab::Prometheus::Queries
-  class EnvironmentQuery < BaseQuery
-    def query(environment_id)
-      environment = Environment.find_by(id: environment_id)
-      environment_slug = environment.slug
-      timeframe_start = 8.hours.ago.to_f
-      timeframe_end = Time.now.to_f
+module Gitlab
+  module Prometheus
+    module Queries
+      class EnvironmentQuery < BaseQuery
+        def query(environment_id)
+          environment = Environment.find_by(id: environment_id)
+          environment_slug = environment.slug
+          timeframe_start = 8.hours.ago.to_f
+          timeframe_end = Time.now.to_f
 
-      memory_query = raw_memory_usage_query(environment_slug)
-      cpu_query = raw_cpu_usage_query(environment_slug)
+          memory_query = raw_memory_usage_query(environment_slug)
+          cpu_query = raw_cpu_usage_query(environment_slug)
 
-      {
-        memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
-        memory_current: client_query(memory_query, time: timeframe_end),
-        cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
-        cpu_current: client_query(cpu_query, time: timeframe_end)
-      }
+          {
+            memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
+            memory_current: client_query(memory_query, time: timeframe_end),
+            cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
+            cpu_current: client_query(cpu_query, time: timeframe_end)
+          }
+        end
+      end
     end
   end
 end

@@ -1,74 +1,78 @@
-module Gitlab::Prometheus::Queries
-  class MatchedMetricsQuery < BaseQuery
-    MAX_QUERY_ITEMS = 40.freeze
+module Gitlab
+  module Prometheus
+    module Queries
+      class MatchedMetricsQuery < BaseQuery
+        MAX_QUERY_ITEMS = 40.freeze
 
-    def query
-      groups_data.map do |group, data|
-        {
-          group: group.name,
-          priority: group.priority,
-          active_metrics: data[:active_metrics],
-          metrics_missing_requirements: data[:metrics_missing_requirements]
-        }
-      end
-    end
+        def query
+          groups_data.map do |group, data|
+            {
+              group: group.name,
+              priority: group.priority,
+              active_metrics: data[:active_metrics],
+              metrics_missing_requirements: data[:metrics_missing_requirements]
+            }
+          end
+        end
 
-    private
+        private
 
-    def groups_data
-      metrics_groups = groups_with_active_metrics(Gitlab::Prometheus::MetricGroup.all)
-      lookup = active_series_lookup(metrics_groups)
+        def groups_data
+          metrics_groups = groups_with_active_metrics(Gitlab::Prometheus::MetricGroup.all)
+          lookup = active_series_lookup(metrics_groups)
 
-      groups = {}
+          groups = {}
 
-      metrics_groups.each do |group|
-        groups[group] ||= { active_metrics: 0, metrics_missing_requirements: 0 }
-        active_metrics = group.metrics.count { |metric| metric.required_metrics.all?(&lookup.method(:has_key?)) }
+          metrics_groups.each do |group|
+            groups[group] ||= { active_metrics: 0, metrics_missing_requirements: 0 }
+            active_metrics = group.metrics.count { |metric| metric.required_metrics.all?(&lookup.method(:has_key?)) }
 
-        groups[group][:active_metrics] += active_metrics
-        groups[group][:metrics_missing_requirements] += group.metrics.count - active_metrics
-      end
+            groups[group][:active_metrics] += active_metrics
+            groups[group][:metrics_missing_requirements] += group.metrics.count - active_metrics
+          end
 
-      groups
-    end
+          groups
+        end
 
-    def active_series_lookup(metric_groups)
-      timeframe_start = 8.hours.ago
-      timeframe_end = Time.now
+        def active_series_lookup(metric_groups)
+          timeframe_start = 8.hours.ago
+          timeframe_end = Time.now
 
-      series = metric_groups.flat_map(&:metrics).flat_map(&:required_metrics).uniq
+          series = metric_groups.flat_map(&:metrics).flat_map(&:required_metrics).uniq
 
-      lookup = series.each_slice(MAX_QUERY_ITEMS).flat_map do |batched_series|
-        client_series(*batched_series, start: timeframe_start, stop: timeframe_end)
-          .select(&method(:has_matching_label))
-          .map { |series_info| [series_info['__name__'], true] }
-      end
-      lookup.to_h
-    end
+          lookup = series.each_slice(MAX_QUERY_ITEMS).flat_map do |batched_series|
+            client_series(*batched_series, start: timeframe_start, stop: timeframe_end)
+              .select(&method(:has_matching_label))
+              .map { |series_info| [series_info['__name__'], true] }
+          end
+          lookup.to_h
+        end
 
-    def has_matching_label(series_info)
-      series_info.key?('environment')
-    end
+        def has_matching_label(series_info)
+          series_info.key?('environment')
+        end
 
-    def available_metrics
-      @available_metrics ||= client_label_values || []
-    end
+        def available_metrics
+          @available_metrics ||= client_label_values || []
+        end
 
-    def filter_active_metrics(metric_group)
-      metric_group.metrics.select! do |metric|
-        metric.required_metrics.all?(&available_metrics.method(:include?))
-      end
-
-      metric_group
-    end
+        def filter_active_metrics(metric_group)
+          metric_group.metrics.select! do |metric|
+            metric.required_metrics.all?(&available_metrics.method(:include?))
+          end
+
+          metric_group
+        end
 
-    def groups_with_active_metrics(metric_groups)
-      metric_groups.map(&method(:filter_active_metrics)).select { |group| group.metrics.any? }
-    end
+        def groups_with_active_metrics(metric_groups)
+          metric_groups.map(&method(:filter_active_metrics)).select { |group| group.metrics.any? }
+        end
 
-    def metrics_with_required_series(metric_groups)
-      metric_groups.flat_map do |group|
-        group.metrics.select do |metric|
-          metric.required_metrics.all?(&available_metrics.method(:include?))
+        def metrics_with_required_series(metric_groups)
+          metric_groups.flat_map do |group|
+            group.metrics.select do |metric|
+              metric.required_metrics.all?(&available_metrics.method(:include?))
+            end
+          end
         end
       end
     end
   end
 end

@@ -0,0 +1,72 @@
+module Gitlab
+  module Prometheus
+    module Queries
+      module QueryAdditionalMetrics
+        def query_metrics(query_context)
+          query_processor = method(:process_query).curry[query_context]
+
+          groups = matched_metrics.map do |group|
+            metrics = group.metrics.map do |metric|
+              {
+                title: metric.title,
+                weight: metric.weight,
+                y_label: metric.y_label,
+                queries: metric.queries.map(&query_processor).select(&method(:query_with_result))
+              }
+            end
+
+            {
+              group: group.name,
+              priority: group.priority,
+              metrics: metrics.select(&method(:metric_with_any_queries))
+            }
+          end
+
+          groups.select(&method(:group_with_any_metrics))
+        end
+
+        private
+
+        def metric_with_any_queries(metric)
+          metric[:queries]&.count&.> 0
+        end
+
+        def group_with_any_metrics(group)
+          group[:metrics]&.count&.> 0
+        end
+
+        def query_with_result(query)
+          query[:result]&.any? do |item|
+            item&.[](:values)&.any? || item&.[](:value)&.any?
+          end
+        end
+
+        def process_query(context, query)
+          query_with_result = query.dup
+          query_with_result[:result] =
+            if query.has_key?(:query_range)
+              client_query_range(query[:query_range] % context, start: context[:timeframe_start], stop: context[:timeframe_end])
+            else
+              client_query(query[:query] % context, time: context[:timeframe_end])
+            end
+          query_with_result
+        end
+
+        def available_metrics
+          @available_metrics ||= client_label_values || []
+        end
+
+        def matched_metrics
+          result = Gitlab::Prometheus::MetricGroup.all.map do |group|
+            group.metrics.select! do |metric|
+              metric.required_metrics.all?(&available_metrics.method(:include?))
+            end
+            group
+          end
+
+          result.select { |group| group.metrics.any? }
+        end
+      end
+    end
+  end
+end

@@ -57,4 +57,4 @@ describe Projects::PrometheusController do
   def project_params(opts = {})
     opts.reverse_merge(namespace_id: project.namespace, project_id: project)
   end
-end
+end

@@ -1,6 +1,6 @@
 require 'spec_helper'
 
-describe Gitlab::Prometheus::Queries::AdditionalMetricsQuery, lib: true do
+describe Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery, lib: true do
   include Prometheus::MetricBuilders
 
   let(:client) { double('prometheus_client') }

@@ -453,7 +453,7 @@ describe Environment, models: true do
     it 'returns the additional metrics from the deployment service' do
       expect(project.monitoring_service).to receive(:reactive_query)
-        .with(Gitlab::Prometheus::Queries::AdditionalMetricsQuery.name, environment.id)
+        .with(Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery.name, environment.id)
         .and_return(:fake_metrics)
 
       is_expected.to eq(:fake_metrics)

@@ -21,7 +21,7 @@ module Prometheus
     end
 
     def simple_metric_group(name: 'name', metrics: simple_metrics)
-      Gitlab::Prometheus::MetricGroup.new(name, 1, metrics)
+      Gitlab::Prometheus::MetricGroup.new(name: name, priority: 1, metrics: metrics)
     end
   end
 end