# frozen_string_literal: true

module Gitlab
  class UsageData
    class Topology
      include Gitlab::Utils::UsageData

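      # Maps Prometheus `job` label values to the service names reported in the usage ping.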
      JOB_TO_SERVICE_NAME = {
        'gitlab-rails' => 'web',
        'gitlab-sidekiq' => 'sidekiq',
        'gitlab-workhorse' => 'workhorse',
        'redis' => 'redis',
        'postgres' => 'postgres',
        'gitaly' => 'gitaly',
        'prometheus' => 'prometheus',
        'node' => 'node-exporter',
        'registry' => 'registry'
      }.freeze

      # If these errors occur, all subsequent queries are likely to fail for the same error
      TIMEOUT_ERRORS = [Errno::ETIMEDOUT, Net::OpenTimeout, Net::ReadTimeout].freeze

      CollectionFailure = Struct.new(:query, :error) do
        def to_h
          { query => error }
        end
      end

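      # Entry point: collects topology data and annotates it with the collection
      # duration and any per-query failures recorded along the way.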
      def topology_usage_data
        @failures = []
        @instances = Set[]
        topology_data, duration = measure_duration { topology_fetch_all_data }
        {
          topology: topology_data
            .merge(duration_s: duration)
            .merge(failures: @failures.map(&:to_h))
        }
      end

      private

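      # Runs all topology queries against the instance's Prometheus server. Any
      # unexpected error is recorded as an 'other' failure and an empty hash is
      # returned, so the remaining usage ping data is unaffected.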
      def topology_fetch_all_data
        with_prometheus_client(fallback: {}, verify: false) do |client|
          {
            application_requests_per_hour: topology_app_requests_per_hour(client),
            query_apdex_weekly_average: topology_query_apdex_weekly_average(client),
            nodes: topology_node_data(client)
          }.compact
        end
      rescue StandardError => e
        @failures << CollectionFailure.new('other', e.class.to_s)

        {}
      end

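      # Approximates hourly request volume from the recorded per-second rate,
      # averaged over the past week.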
      def topology_app_requests_per_hour(client)
        result = query_safely('gitlab_usage_ping:ops:rate5m', 'app_requests', fallback: nil) do |query|
          client.query(aggregate_one_week(query)).first
        end

        return unless result

        # the metric is recorded as a per-second rate
        (result['value'].last.to_f * 1.hour).to_i
      end

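      # Average SQL apdex score over the past week, as a ratio between 0 and 1.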
      def topology_query_apdex_weekly_average(client)
        result = query_safely('gitlab_usage_ping:sql_duration_apdex:ratio_rate5m', 'query_apdex', fallback: nil) do |query|
          client.query(aggregate_one_week(query)).first
        end

        return unless result

        result['value'].last.to_f
      end

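      # Builds one hash per monitored node, combining node-level resource data
      # with the per-service breakdown of what runs on that node.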
      def topology_node_data(client)
        # node-level data
        by_instance_mem = topology_node_memory(client)
        by_instance_mem_utilization = topology_node_memory_utilization(client)
        by_instance_cpus = topology_node_cpus(client)
        by_instance_cpu_utilization = topology_node_cpu_utilization(client)
        by_instance_uname_info = topology_node_uname_info(client)

        # service-level data
        by_instance_by_job_by_type_memory = topology_all_service_memory(client)
        by_instance_by_job_process_count = topology_all_service_process_count(client)
        by_instance_by_job_server_types = topology_all_service_server_types(client)

        @instances.map do |instance|
          {
            node_memory_total_bytes: by_instance_mem[instance],
            node_memory_utilization: by_instance_mem_utilization[instance],
            node_cpus: by_instance_cpus[instance],
            node_cpu_utilization: by_instance_cpu_utilization[instance],
            node_uname_info: by_instance_uname_info[instance],
            node_services:
              topology_node_services(
                instance, by_instance_by_job_process_count, by_instance_by_job_by_type_memory, by_instance_by_job_server_types
              )
          }.compact
        end
      end

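      # The node_* helpers below all follow the same pattern: run a recording
      # rule through query_safely, aggregate it over one week, and key the
      # result by normalized instance name.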
      def topology_node_memory(client)
        query_safely('gitlab_usage_ping:node_memory_total_bytes:max', 'node_memory', fallback: {}) do |query|
          aggregate_by_instance(client, aggregate_one_week(query, aggregation: :max))
        end
      end

      def topology_node_memory_utilization(client)
        query_safely('gitlab_usage_ping:node_memory_utilization:avg', 'node_memory_utilization', fallback: {}) do |query|
          aggregate_by_instance(client, aggregate_one_week(query), transform_value: :to_f)
        end
      end

      def topology_node_cpus(client)
        query_safely('gitlab_usage_ping:node_cpus:count', 'node_cpus', fallback: {}) do |query|
          aggregate_by_instance(client, aggregate_one_week(query, aggregation: :max))
        end
      end

      def topology_node_cpu_utilization(client)
        query_safely('gitlab_usage_ping:node_cpu_utilization:avg', 'node_cpu_utilization', fallback: {}) do |query|
          aggregate_by_instance(client, aggregate_one_week(query), transform_value: :to_f)
        end
      end

      def topology_node_uname_info(client)
        node_uname_info = query_safely('node_uname_info', 'node_uname_info', fallback: []) do |query|
          client.query(query)
        end

        map_instance_labels(node_uname_info, %w(machine sysname release))
      end

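      # Collects per-service memory as resident (rss), unique (uss) and
      # proportional (pss) set sizes.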
      def topology_all_service_memory(client)
        {
          rss: topology_service_memory_rss(client),
          uss: topology_service_memory_uss(client),
          pss: topology_service_memory_pss(client)
        }
      end

      def topology_service_memory_rss(client)
        query_safely(
          'gitlab_usage_ping:node_service_process_resident_memory_bytes:avg', 'service_rss', fallback: {}
        ) { |query| aggregate_by_labels(client, aggregate_one_week(query)) }
      end

      def topology_service_memory_uss(client)
        query_safely(
          'gitlab_usage_ping:node_service_process_unique_memory_bytes:avg', 'service_uss', fallback: {}
        ) { |query| aggregate_by_labels(client, aggregate_one_week(query)) }
      end

      def topology_service_memory_pss(client)
        query_safely(
          'gitlab_usage_ping:node_service_process_proportional_memory_bytes:avg', 'service_pss', fallback: {}
        ) { |query| aggregate_by_labels(client, aggregate_one_week(query)) }
      end

      def topology_all_service_process_count(client)
        query_safely(
          'gitlab_usage_ping:node_service_process:count', 'service_process_count', fallback: {}
        ) { |query| aggregate_by_labels(client, aggregate_one_week(query)) }
      end

      def topology_all_service_server_types(client)
        query_safely(
          'gitlab_usage_ping:node_service_app_server_workers:sum', 'service_workers', fallback: {}
        ) { |query| aggregate_by_labels(client, query) }
      end

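      # Wraps a single Prometheus query: skips it once a timeout has already
      # been seen, records empty results and raised errors as failures, and
      # returns the given fallback in every failure case.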
      def query_safely(query, query_name, fallback:)
        if timeout_error_exists?
          @failures << CollectionFailure.new(query_name, 'timeout_cancellation')
          return fallback
        end

        result = yield query

        return result if result.present?

        @failures << CollectionFailure.new(query_name, 'empty_result')
        fallback
      rescue StandardError => e
        @failures << CollectionFailure.new(query_name, e.class.to_s)
        fallback
      end

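      # True if any recorded failure was one of the TIMEOUT_ERRORS, in which
      # case subsequent queries are cancelled rather than attempted.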
      def timeout_error_exists?
        timeout_error_names = TIMEOUT_ERRORS.map(&:to_s).to_set

        @failures.any? do |failure|
          timeout_error_names.include?(failure.error)
        end
      end

      def topology_node_services(instance, all_process_counts, all_process_memory, all_server_types)
        # returns all node service data grouped by service name as the key
        instance_service_data =
          topology_instance_service_process_count(instance, all_process_counts)
            .deep_merge(topology_instance_service_memory(instance, all_process_memory))
            .deep_merge(topology_instance_service_server_types(instance, all_server_types))

        # map to a list of hashes where service names become values instead, and skip
        # unknown services, since they might not be ours
        instance_service_data.each_with_object([]) do |entry, list|
          service, service_metrics = entry
          service_name = service.to_s.strip

          if gitlab_service = JOB_TO_SERVICE_NAME[service_name]
            list << { name: gitlab_service }.merge(service_metrics)
          else
            @failures << CollectionFailure.new('service_unknown', service_name)
          end
        end
      end

      def topology_instance_service_process_count(instance, all_instance_data)
        topology_data_for_instance(instance, all_instance_data).to_h do |metric, count|
          [metric['job'], { process_count: count }]
        end
      end

      # Given a hash mapping memory set types to Prometheus response data, returns a
      # hash mapping each service on the given instance to its memory use in bytes,
      # keyed by memory set type (e.g. :process_memory_rss)
      def topology_instance_service_memory(instance, instance_data_by_type)
        result = {}
        instance_data_by_type.each do |memory_type, instance_data|
          topology_data_for_instance(instance, instance_data).each do |metric, memory_bytes|
            job = metric['job']
            key = "process_memory_#{memory_type}".to_sym

            result[job] ||= {}
            result[job][key] ||= memory_bytes
          end
        end

        result
      end

      def topology_instance_service_server_types(instance, all_instance_data)
        topology_data_for_instance(instance, all_instance_data).to_h do |metric, _value|
          [metric['job'], { server: metric['server'] }]
        end
      end

      def topology_data_for_instance(instance, all_instance_data)
        all_instance_data.filter { |metric, _value| metric['instance'] == instance }
      end

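      # Instance labels arrive as `host:port` pairs; strip the port and collapse
      # loopback addresses so the same node is not counted more than once.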
      def normalize_instance_label(instance)
        normalize_localhost_address(drop_port_number(instance))
      end

      def normalize_localhost_address(instance)
        ip_addr = IPAddr.new(instance)
        is_local_ip = ip_addr.loopback? || ip_addr.to_i == 0

        is_local_ip ? 'localhost' : instance
      rescue IPAddr::InvalidAddressError
        # This most likely means it was a host name, not an IP address
        instance
      end

      def drop_port_number(instance)
        instance.gsub(/:\d+$/, '')
      end

      def normalize_and_track_instance(instance)
        normalize_instance_label(instance).tap do |normalized_instance|
          @instances << normalized_instance
        end
      end

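      # Wraps a query in a PromQL range aggregation over one week, e.g.
      # `avg_over_time (metric[1w])`.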
      def aggregate_one_week(query, aggregation: :avg)
        "#{aggregation}_over_time (#{query}[1w])"
      end

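      # Aggregates query results into a hash keyed by normalized instance name.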
      def aggregate_by_instance(client, query, transform_value: :to_i)
        client.aggregate(query, transform_value: transform_value) { |metric| normalize_and_track_instance(metric['instance']) }
      end

      # Retains the full metric label set as a composite key that values are
      # mapped to, normalizing the instance label along the way
      def aggregate_by_labels(client, query, transform_value: :to_i)
        client.aggregate(query, transform_value: transform_value) do |metric|
          metric['instance'] = normalize_and_track_instance(metric['instance'])
          metric
        end
      end

      # Given a query result vector, maps each instance to a hash of the target
      # labels' key/value pairs.
      # @return [Hash] instance => { label => value }, or an empty hash if the
      #   input vector is empty
      def map_instance_labels(query_result_vector, target_labels)
        query_result_vector.to_h do |result|
          key = normalize_and_track_instance(result['metric']['instance'])
          value = result['metric'].slice(*target_labels).symbolize_keys
          [key, value]
        end
      end
    end
  end
end