Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-06-30 21:07:26 +00:00
parent 533f020a0b
commit c9920bcd99
29 changed files with 827 additions and 49 deletions

View File

@ -0,0 +1,101 @@
/**
* @module finite_state_machine
*/
/**
* The states to be used with state machine definitions
* @typedef {Object} FiniteStateMachineStates
* @property {!Object} ANY_KEY - Any key that maps to a known state
* @property {!Object} ANY_KEY.on - A dictionary of transition events for the ANY_KEY state that map to a different state
* @property {!String} ANY_KEY.on.ANY_EVENT - The resulting state that the machine should end at
*/
/**
* An object whose minimal definition, described here, can be used to guard UI state transitions
* @typedef {Object} StatelessFiniteStateMachineDefinition
* @property {FiniteStateMachineStates} states
*/
/**
* An object whose minimal definition, described here, can be used to create a live finite state machine
* @typedef {Object} LiveFiniteStateMachineDefinition
* @property {String} initial - The initial state for this machine
* @property {FiniteStateMachineStates} states
*/
/**
* An object that allows interacting with a stateful, live finite state machine
* @typedef {Object} LiveStateMachine
* @property {String} value - The current state of this machine
* @property {Object} states - The states from when the machine definition was constructed
* @property {Function} is - {@link module:finite_state_machine~is LiveStateMachine.is}
* @property {Function} send - {@link module:finite_state_machine~send LiveStateMachine.send}
*/
// This is not user-facing functionality
/* eslint-disable @gitlab/require-i18n-strings */
function hasKeys(object, keys) {
return keys.every((key) => Object.keys(object).includes(key));
}
/**
* Get an updated state given a machine definition, a starting state, and a transition event
* @param {StatelessFiniteStateMachineDefinition} definition
* @param {String} current - The current known state
* @param {String} event - A transition event
* @returns {String} A state value
*/
export function transition(definition, current, event) {
return definition?.states?.[current]?.on[event] || current;
}
function startMachine({ states, initial } = {}) {
let current = initial;
return {
/**
* A convenience function to test arbitrary input against the machine's current state
* @param {String} testState - The value to test against the machine's current state
*/
is(testState) {
return current === testState;
},
/**
* A function to transition the live state machine using an arbitrary event
* @param {String} event - The event to send to the machine
* @returns {String} A string representing the current state. Note this may not have changed if the current state and transition event combination is not valid.
*/
send(event) {
current = transition({ states }, current, event);
return current;
},
get value() {
return current;
},
set value(forcedState) {
current = forcedState;
},
states,
};
}
/**
* Create a live state machine
* @param {LiveFiniteStateMachineDefinition} definition
* @returns {LiveStateMachine} A live state machine
*/
export function machine(definition) {
if (!hasKeys(definition, ['initial', 'states'])) {
throw new Error(
'A state machine must have an initial state (`.initial`) and a dictionary of possible states (`.states`)',
);
} else if (!hasKeys(definition.states, [definition.initial])) {
throw new Error(
`Cannot initialize the state machine to state '${definition.initial}'. Is that one of the machine's defined states?`,
);
} else {
return startMachine(definition);
}
}
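
A minimal usage sketch of the module above; the state and event names are illustrative, while `machine`, `transition`, `send`, `is`, and `value` are the exported API shown in this file.

import { machine, transition } from '~/lib/utils/finite_state_machine';

// Illustrative definition; only the API shape comes from the module above.
const uploader = machine({
  initial: 'idle',
  states: {
    idle: { on: { START: 'uploading' } },
    uploading: { on: { DONE: 'idle', FAIL: 'errored' } },
    errored: { on: { RETRY: 'uploading' } },
  },
});

uploader.send('START');         // => 'uploading'
uploader.is('uploading');       // => true
uploader.send('UNKNOWN_EVENT'); // => 'uploading' (unknown transitions are ignored)

// The stateless helper can guard a transition without a live machine:
transition({ states: { idle: { on: { START: 'uploading' } } } }, 'idle', 'START'); // => 'uploading'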

View File

@ -40,18 +40,23 @@ export default {
data() {
return {
isLoading: false,
isSharedRunnerEnabled: false,
isSharedRunnerEnabled: this.isEnabled,
errorMessage: null,
isCcValidationRequired: false,
successfulValidation: false,
};
},
created() {
this.isSharedRunnerEnabled = this.isEnabled;
this.isCcValidationRequired = this.isCreditCardValidationRequired;
computed: {
showCreditCardValidation() {
return (
this.isCreditCardValidationRequired &&
!this.isSharedRunnerEnabled &&
!this.successfulValidation
);
},
},
methods: {
creditCardValidated() {
this.isCcValidationRequired = false;
this.successfulValidation = true;
},
toggleSharedRunners() {
this.isLoading = true;
@ -62,7 +67,6 @@ export default {
.then(() => {
this.isLoading = false;
this.isSharedRunnerEnabled = !this.isSharedRunnerEnabled;
this.isCcValidationRequired = this.isCreditCardValidationRequired;
})
.catch((error) => {
this.isLoading = false;
@ -81,7 +85,7 @@ export default {
</gl-alert>
<cc-validation-required-alert
v-if="isCcValidationRequired && !isSharedRunnerEnabled"
v-if="showCreditCardValidation"
class="gl-pb-5"
:custom-message="$options.i18n.REQUIRES_VALIDATION_TEXT"
@verifiedCreditCard="creditCardValidated"

View File

@ -225,6 +225,10 @@ module AlertManagement
open_statuses.include?(status)
end
def open?
self.class.open_status?(status_name)
end
def status_event_for(status)
self.class.state_machines[:status].events.transitions_for(self, to: status.to_s.to_sym).first&.event
end

View File

@ -37,7 +37,6 @@ module AlertManagement
private
attr_reader :alert, :current_user, :params, :param_errors, :status
delegate :resolved?, to: :alert
def allowed?
current_user&.can?(:update_alert_management_alert, alert)
@ -129,7 +128,7 @@ module AlertManagement
def handle_status_change
add_status_change_system_note
resolve_todos if resolved?
resolve_todos if alert.resolved?
end
def add_status_change_system_note
@ -177,3 +176,5 @@ module AlertManagement
end
end
end
AlertManagement::Alerts::UpdateService.prepend_mod
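
The `prepend_mod` call above is what allows the EE codebase to extend this service; a hypothetical override might look like the following (the module body and method names are illustrative, not taken from this commit).

module EE
  module AlertManagement
    module Alerts
      module UpdateService
        private

        # Illustrative override made possible by `prepend_mod`; the real EE
        # module, if any, may differ.
        def handle_status_change
          super
          trigger_ee_side_effects # hypothetical EE-only hook
        end

        def trigger_ee_side_effects
          # EE-specific behaviour, e.g. escalation handling, would live here.
        end
      end
    end
  end
end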

View File

@ -1,6 +1,6 @@
- navbar_links = links.sort_by(&:title)
- all_paths = navbar_links.map(&:path)
- analytics_link = navbar_links.find { |link| link.title == _('Value Stream') } || navbar_links.first
- analytics_link = navbar_links.find { |link| link.title == _('Value stream') } || navbar_links.first
- if navbar_links.any?
= nav_link(path: all_paths) do

View File

@ -1121,6 +1121,7 @@ production: &base
## Backup settings
backup:
path: "tmp/backups" # Relative paths are relative to Rails.root (default: tmp/backups/)
# gitaly_backup_path: # Path of the gitaly-backup binary (default: searches $PATH)
# archive_permissions: 0640 # Permissions for the resulting backup.tar file (default: 0600)
# keep_time: 604800 # default: 0 (forever) (in seconds)
# pg_schema: public # default: nil, it means that all schemas will be backed up
@ -1436,6 +1437,7 @@ test:
secret_file: tmp/gitlab_workhorse_test_secret
backup:
path: tmp/tests/backups
gitaly_backup_path: tmp/tests/gitaly/_build/bin/gitaly-backup
pseudonymizer:
manifest: config/pseudonymizer.yml
upload:

View File

@ -644,6 +644,9 @@ Gitlab.ee do
Settings.cron_jobs['incident_management_persist_oncall_rotation_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['incident_management_persist_oncall_rotation_worker']['cron'] ||= '*/5 * * * *'
Settings.cron_jobs['incident_management_persist_oncall_rotation_worker']['job_class'] = 'IncidentManagement::OncallRotations::PersistAllRotationsShiftsJob'
Settings.cron_jobs['incident_management_schedule_escalation_check_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['incident_management_schedule_escalation_check_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['incident_management_schedule_escalation_check_worker']['job_class'] = 'IncidentManagement::PendingEscalations::ScheduleCheckCronWorker'
Settings.cron_jobs['import_software_licenses_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['import_software_licenses_worker']['cron'] ||= '0 3 * * 0'
Settings.cron_jobs['import_software_licenses_worker']['job_class'] = 'ImportSoftwareLicensesWorker'
@ -793,6 +796,7 @@ Settings.backup['upload']['multipart_chunk_size'] ||= 104857600
Settings.backup['upload']['encryption'] ||= nil
Settings.backup['upload']['encryption_key'] ||= ENV['GITLAB_BACKUP_ENCRYPTION_KEY']
Settings.backup['upload']['storage_class'] ||= nil
Settings.backup['gitaly_backup_path'] ||= Gitlab::Utils.which('gitaly-backup')
#
# Pseudonymizer

View File

@ -6,6 +6,10 @@
Gitlab::Database::Partitioning::PartitionCreator.register(AuditEvent)
Gitlab::Database::Partitioning::PartitionCreator.register(WebHookLog)
if Gitlab.ee?
Gitlab::Database::Partitioning::PartitionCreator.register(IncidentManagement::PendingEscalations::Alert)
end
begin
Gitlab::Database::Partitioning::PartitionCreator.new.create_partitions unless ENV['DISABLE_POSTGRES_PARTITION_CREATION_ON_STARTUP']
rescue ActiveRecord::ActiveRecordError, PG::Error

View File

@ -149,7 +149,15 @@ if Gitlab::Metrics.enabled? && !Rails.env.test? && !(Rails.env.development? && d
require_dependency 'gitlab/metrics/subscribers/rails_cache'
Gitlab::Application.configure do |config|
config.middleware.use(Gitlab::Metrics::RackMiddleware)
# We want to track certain metrics during the Load Balancing host resolving process.
# Because of that, we need to have metrics code available earlier for Load Balancing.
if Gitlab::Database::LoadBalancing.enable?
config.middleware.insert_before Gitlab::Database::LoadBalancing::RackMiddleware,
Gitlab::Metrics::RackMiddleware
else
config.middleware.use(Gitlab::Metrics::RackMiddleware)
end
config.middleware.use(Gitlab::Middleware::RailsQueueDuration)
config.middleware.use(Gitlab::Metrics::ElasticsearchRackMiddleware)
end

View File

@ -184,6 +184,10 @@
- 1
- - incident_management_oncall_rotations_persist_shifts_job
- 1
- - incident_management_pending_escalations_alert_check
- 1
- - incident_management_pending_escalations_alert_create
- 1
- - invalid_gpg_signature_update
- 2
- - irker

View File

@ -0,0 +1,48 @@
# frozen_string_literal: true
class CreateIncidentManagementPendingAlertEscalations < ActiveRecord::Migration[6.1]
include Gitlab::Database::MigrationHelpers
def up
with_lock_retries do
execute(<<~SQL)
CREATE TABLE incident_management_pending_alert_escalations (
id bigserial NOT NULL,
rule_id bigint,
alert_id bigint NOT NULL,
schedule_id bigint NOT NULL,
process_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
status smallint NOT NULL,
PRIMARY KEY (id, process_at)
) PARTITION BY RANGE (process_at);
CREATE INDEX index_incident_management_pending_alert_escalations_on_alert_id
ON incident_management_pending_alert_escalations USING btree (alert_id);
CREATE INDEX index_incident_management_pending_alert_escalations_on_rule_id
ON incident_management_pending_alert_escalations USING btree (rule_id);
CREATE INDEX index_incident_management_pending_alert_escalations_on_schedule_id
ON incident_management_pending_alert_escalations USING btree (schedule_id);
ALTER TABLE incident_management_pending_alert_escalations ADD CONSTRAINT fk_rails_fcbfd9338b
FOREIGN KEY (schedule_id) REFERENCES incident_management_oncall_schedules(id) ON DELETE CASCADE;
ALTER TABLE incident_management_pending_alert_escalations ADD CONSTRAINT fk_rails_057c1e3d87
FOREIGN KEY (rule_id) REFERENCES incident_management_escalation_rules(id) ON DELETE SET NULL;
ALTER TABLE incident_management_pending_alert_escalations ADD CONSTRAINT fk_rails_8d8de95da9
FOREIGN KEY (alert_id) REFERENCES alert_management_alerts(id) ON DELETE CASCADE;
SQL
end
end
def down
with_lock_retries do
drop_table :incident_management_pending_alert_escalations
end
end
end

View File

@ -0,0 +1 @@
fa4f1ec80e7039e59d283dc6effd6904ca33c637d27c687d990822eb2f6198e5

View File

@ -190,6 +190,18 @@ CREATE TABLE audit_events (
)
PARTITION BY RANGE (created_at);
CREATE TABLE incident_management_pending_alert_escalations (
id bigint NOT NULL,
rule_id bigint,
alert_id bigint NOT NULL,
schedule_id bigint NOT NULL,
process_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
status smallint NOT NULL
)
PARTITION BY RANGE (process_at);
CREATE TABLE web_hook_logs (
id bigint NOT NULL,
web_hook_id integer NOT NULL,
@ -13943,6 +13955,15 @@ CREATE SEQUENCE incident_management_oncall_shifts_id_seq
ALTER SEQUENCE incident_management_oncall_shifts_id_seq OWNED BY incident_management_oncall_shifts.id;
CREATE SEQUENCE incident_management_pending_alert_escalations_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE incident_management_pending_alert_escalations_id_seq OWNED BY incident_management_pending_alert_escalations.id;
CREATE TABLE index_statuses (
id integer NOT NULL,
project_id integer NOT NULL,
@ -20054,6 +20075,8 @@ ALTER TABLE ONLY incident_management_oncall_schedules ALTER COLUMN id SET DEFAUL
ALTER TABLE ONLY incident_management_oncall_shifts ALTER COLUMN id SET DEFAULT nextval('incident_management_oncall_shifts_id_seq'::regclass);
ALTER TABLE ONLY incident_management_pending_alert_escalations ALTER COLUMN id SET DEFAULT nextval('incident_management_pending_alert_escalations_id_seq'::regclass);
ALTER TABLE ONLY index_statuses ALTER COLUMN id SET DEFAULT nextval('index_statuses_id_seq'::regclass);
ALTER TABLE ONLY insights ALTER COLUMN id SET DEFAULT nextval('insights_id_seq'::regclass);
@ -21438,6 +21461,9 @@ ALTER TABLE ONLY incident_management_oncall_schedules
ALTER TABLE ONLY incident_management_oncall_shifts
ADD CONSTRAINT incident_management_oncall_shifts_pkey PRIMARY KEY (id);
ALTER TABLE ONLY incident_management_pending_alert_escalations
ADD CONSTRAINT incident_management_pending_alert_escalations_pkey PRIMARY KEY (id, process_at);
ALTER TABLE ONLY index_statuses
ADD CONSTRAINT index_statuses_pkey PRIMARY KEY (id);
@ -23652,6 +23678,12 @@ CREATE INDEX index_incident_management_oncall_schedules_on_project_id ON inciden
CREATE INDEX index_incident_management_oncall_shifts_on_participant_id ON incident_management_oncall_shifts USING btree (participant_id);
CREATE INDEX index_incident_management_pending_alert_escalations_on_alert_id ON ONLY incident_management_pending_alert_escalations USING btree (alert_id);
CREATE INDEX index_incident_management_pending_alert_escalations_on_rule_id ON ONLY incident_management_pending_alert_escalations USING btree (rule_id);
CREATE INDEX index_incident_management_pending_alert_escalations_on_schedule ON ONLY incident_management_pending_alert_escalations USING btree (schedule_id);
CREATE UNIQUE INDEX index_index_statuses_on_project_id ON index_statuses USING btree (project_id);
CREATE INDEX index_insights_on_namespace_id ON insights USING btree (namespace_id);
@ -26374,6 +26406,9 @@ ALTER TABLE ONLY terraform_state_versions
ALTER TABLE ONLY ci_build_report_results
ADD CONSTRAINT fk_rails_056d298d48 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE incident_management_pending_alert_escalations
ADD CONSTRAINT fk_rails_057c1e3d87 FOREIGN KEY (rule_id) REFERENCES incident_management_escalation_rules(id) ON DELETE SET NULL;
ALTER TABLE ONLY ci_daily_build_group_report_results
ADD CONSTRAINT fk_rails_0667f7608c FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -27217,6 +27252,9 @@ ALTER TABLE ONLY vulnerability_feedback
ALTER TABLE ONLY ci_pipeline_messages
ADD CONSTRAINT fk_rails_8d3b04e3e1 FOREIGN KEY (pipeline_id) REFERENCES ci_pipelines(id) ON DELETE CASCADE;
ALTER TABLE incident_management_pending_alert_escalations
ADD CONSTRAINT fk_rails_8d8de95da9 FOREIGN KEY (alert_id) REFERENCES alert_management_alerts(id) ON DELETE CASCADE;
ALTER TABLE ONLY approval_merge_request_rules_approved_approvers
ADD CONSTRAINT fk_rails_8dc94cff4d FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
@ -27877,6 +27915,9 @@ ALTER TABLE ONLY ci_job_variables
ALTER TABLE ONLY packages_nuget_metadata
ADD CONSTRAINT fk_rails_fc0c19f5b4 FOREIGN KEY (package_id) REFERENCES packages_packages(id) ON DELETE CASCADE;
ALTER TABLE incident_management_pending_alert_escalations
ADD CONSTRAINT fk_rails_fcbfd9338b FOREIGN KEY (schedule_id) REFERENCES incident_management_oncall_schedules(id) ON DELETE CASCADE;
ALTER TABLE ONLY external_approval_rules
ADD CONSTRAINT fk_rails_fd4f9ac573 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;

View File

@ -264,10 +264,11 @@ configuration option in `gitlab.yml`. These metrics are served from the
The following metrics are available:
| Metric | Type | Since | Description | Labels |
|:--------------------------------- |:--------- |:------------------------------------------------------------- |:-------------------------------------- |:--------------------------------------------------------- |
| `db_load_balancing_hosts` | Gauge | [12.3](https://gitlab.com/gitlab-org/gitlab/-/issues/13630) | Current number of load balancing hosts | |
| `sidekiq_load_balancing_count` | Counter | 13.11 | Sidekiq jobs using load balancing with data consistency set to :sticky or :delayed | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency`, `data_consistency`, `load_balancing_strategy` |
| Metric | Type | Since | Description | Labels |
|:-------------------------------------------------------- |:--------- |:------------------------------------------------------------- |:---------------------------------------------------------------------------------- |:---------------------------------------------------------------------------------------------------------------------------------------- |
| `db_load_balancing_hosts` | Gauge | [12.3](https://gitlab.com/gitlab-org/gitlab/-/issues/13630) | Current number of load balancing hosts | |
| `sidekiq_load_balancing_count` | Counter | 13.11 | Sidekiq jobs using load balancing with data consistency set to :sticky or :delayed | `queue`, `boundary`, `external_dependencies`, `feature_category`, `job_status`, `urgency`, `data_consistency`, `load_balancing_strategy` |
| `gitlab_transaction_caught_up_replica_pick_count_total` | Counter | 14.1 | Number of search attempts for caught up replica | `result` |
## Database partitioning metrics **(PREMIUM SELF)**

View File

@ -67,7 +67,7 @@ module Backup
end
def bin_path
File.absolute_path(File.join(Gitlab.config.gitaly.client_path, 'gitaly-backup'))
File.absolute_path(Gitlab.config.backup.gitaly_backup_path)
end
end
end
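
Read together with the `1_settings.rb` change earlier in this commit, the lookup order for the binary is: an explicit `backup.gitaly_backup_path` in `gitlab.yml`, otherwise the first `gitaly-backup` found on `$PATH`. A condensed sketch of that chain (both lines are quoted from files in this commit):

# config/initializers/1_settings.rb — fall back to a $PATH search when unset
Settings.backup['gitaly_backup_path'] ||= Gitlab::Utils.which('gitaly-backup')

# Backup code above — expand whatever was resolved into an absolute path
def bin_path
  File.absolute_path(Gitlab.config.backup.gitaly_backup_path)
end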

View File

@ -39,6 +39,8 @@ module Gitlab
result = @app.call(env)
ActiveSupport::Notifications.instrument('web_transaction_completed.load_balancing')
stick_if_necessary(env)
result

View File

@ -34,6 +34,8 @@ module Gitlab
return true unless location
load_balancer.all_caught_up?(location).tap do |caught_up|
ActiveSupport::Notifications.instrument('caught_up_replica_pick.load_balancing', { result: caught_up } )
unstick(namespace, id) if caught_up
end
end

View File

@ -29,6 +29,7 @@ module Gitlab
instrument_rack_attack(payload)
instrument_cpu(payload)
instrument_thread_memory_allocations(payload)
instrument_load_balancing(payload)
end
def instrument_gitaly(payload)
@ -104,6 +105,12 @@ module Gitlab
payload.merge!(counters) if counters
end
def instrument_load_balancing(payload)
load_balancing_payload = ::Gitlab::Metrics::Subscribers::LoadBalancing.load_balancing_payload
payload.merge!(load_balancing_payload)
end
# Returns the queuing duration for a Sidekiq job in seconds, as a float, if the
# `enqueued_at` field or `created_at` field is available.
#

View File

@ -0,0 +1,70 @@
# frozen_string_literal: true
module Gitlab
module Metrics
module Subscribers
class LoadBalancing < ActiveSupport::Subscriber
attach_to :load_balancing
PROMETHEUS_COUNTER = :gitlab_transaction_caught_up_replica_pick_count_total
LOG_COUNTERS = { true => :caught_up_replica_pick_ok, false => :caught_up_replica_pick_fail }.freeze
def caught_up_replica_pick(event)
return unless Gitlab::SafeRequestStore.active? && ::Gitlab::Database::LoadBalancing.enable?
result = event.payload[:result]
counter_name = counter(result)
increment(counter_name)
end
# We want to update the Prometheus counter after the controller/action are set
def web_transaction_completed(_event)
return unless Gitlab::SafeRequestStore.active? && ::Gitlab::Database::LoadBalancing.enable?
LOG_COUNTERS.keys.each { |result| increment_prometheus_for_result_label(result) }
end
def self.load_balancing_payload
return {} unless Gitlab::SafeRequestStore.active? && ::Gitlab::Database::LoadBalancing.enable?
{}.tap do |payload|
LOG_COUNTERS.values.each do |counter|
value = Gitlab::SafeRequestStore[counter]
payload[counter] = value.to_i if value
end
end
end
private
def increment(counter)
Gitlab::SafeRequestStore[counter] = Gitlab::SafeRequestStore[counter].to_i + 1
end
def increment_prometheus_for_result_label(label_value)
counter_name = counter(label_value)
return unless (counter_value = Gitlab::SafeRequestStore[counter_name])
increment_prometheus(labels: { result: label_value }, value: counter_value.to_i)
end
def increment_prometheus(labels:, value:)
current_transaction&.increment(PROMETHEUS_COUNTER, value, labels) do
docstring 'Caught up replica pick result'
label_keys labels.keys
end
end
def counter(result)
LOG_COUNTERS[result]
end
def current_transaction
::Gitlab::Metrics::WebTransaction.current
end
end
end
end
end
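
A sketch of how the pieces added in this commit fit together; the event names, request-store keys, and counter name are taken from the files above, everything else is illustrative.

# 1. Sticking#all_caught_up? publishes the result of every replica pick:
ActiveSupport::Notifications.instrument('caught_up_replica_pick.load_balancing', { result: true })
#    => #caught_up_replica_pick buffers it in Gitlab::SafeRequestStore[:caught_up_replica_pick_ok]

# 2. The load balancing Rack middleware marks the end of the request:
ActiveSupport::Notifications.instrument('web_transaction_completed.load_balancing')
#    => #web_transaction_completed flushes the buffered counts into the
#       :gitlab_transaction_caught_up_replica_pick_count_total counter, labelled by result

# 3. Structured logging reads the same counters via the instrumentation helper:
Gitlab::Metrics::Subscribers::LoadBalancing.load_balancing_payload
#    => { caught_up_replica_pick_ok: 1 } (keys appear only when a counter was incremented)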

View File

@ -79,7 +79,7 @@ module Sidebars
end
::Sidebars::MenuItem.new(
title: _('Value Stream'),
title: _('Value stream'),
link: project_cycle_analytics_path(context.project),
container_html_options: { class: 'shortcuts-project-cycle-analytics' },
active_routes: { path: 'cycle_analytics#show' },

View File

@ -7741,6 +7741,9 @@ msgstr ""
msgid "Code owners"
msgstr ""
msgid "Code review"
msgstr ""
msgid "Code snippet copied. Insert it in the correct location in the YAML file."
msgstr ""
@ -11069,6 +11072,9 @@ msgstr ""
msgid "DevOps Report"
msgstr ""
msgid "DevOps adoption"
msgstr ""
msgid "DevopsAdoption|Add Group"
msgstr ""
@ -35673,9 +35679,6 @@ msgstr ""
msgid "Value"
msgstr ""
msgid "Value Stream"
msgstr ""
msgid "Value Stream Analytics"
msgstr ""
@ -35685,6 +35688,9 @@ msgstr ""
msgid "Value Stream Analytics gives an overview of how much time it takes to go from idea to production in your project."
msgstr ""
msgid "Value stream"
msgstr ""
msgid "ValueStreamAnalyticsStage|We don't have enough data to show this stage."
msgstr ""

View File

@ -138,9 +138,9 @@ RSpec.describe 'Project active tab' do
visit project_cycle_analytics_path(project)
end
context 'on project Analytics/Value Stream Analytics' do
context 'on project Analytics/Value stream Analytics' do
it_behaves_like 'page has active tab', _('Analytics')
it_behaves_like 'page has active sub tab', _('Value Stream')
it_behaves_like 'page has active sub tab', _('Value stream')
end
context 'on project Analytics/"CI/CD"' do

View File

@ -0,0 +1,293 @@
import { machine, transition } from '~/lib/utils/finite_state_machine';
describe('Finite State Machine', () => {
const STATE_IDLE = 'idle';
const STATE_LOADING = 'loading';
const STATE_ERRORED = 'errored';
const TRANSITION_START_LOAD = 'START_LOAD';
const TRANSITION_LOAD_ERROR = 'LOAD_ERROR';
const TRANSITION_LOAD_SUCCESS = 'LOAD_SUCCESS';
const TRANSITION_ACKNOWLEDGE_ERROR = 'ACKNOWLEDGE_ERROR';
const definition = {
initial: STATE_IDLE,
states: {
[STATE_IDLE]: {
on: {
[TRANSITION_START_LOAD]: STATE_LOADING,
},
},
[STATE_LOADING]: {
on: {
[TRANSITION_LOAD_ERROR]: STATE_ERRORED,
[TRANSITION_LOAD_SUCCESS]: STATE_IDLE,
},
},
[STATE_ERRORED]: {
on: {
[TRANSITION_ACKNOWLEDGE_ERROR]: STATE_IDLE,
[TRANSITION_START_LOAD]: STATE_LOADING,
},
},
},
};
describe('machine', () => {
const STATE_IMPOSSIBLE = 'impossible';
const badDefinition = {
init: definition.initial,
badKeyShouldBeStates: definition.states,
};
const unstartableDefinition = {
initial: STATE_IMPOSSIBLE,
states: definition.states,
};
let liveMachine;
beforeEach(() => {
liveMachine = machine(definition);
});
it('throws an error if the machine definition is invalid', () => {
expect(() => machine(badDefinition)).toThrowError(
'A state machine must have an initial state (`.initial`) and a dictionary of possible states (`.states`)',
);
});
it('throws an error if the initial state is invalid', () => {
expect(() => machine(unstartableDefinition)).toThrowError(
`Cannot initialize the state machine to state '${STATE_IMPOSSIBLE}'. Is that one of the machine's defined states?`,
);
});
it.each`
partOfMachine | equals | description | eqDescription
${'keys'} | ${['is', 'send', 'value', 'states']} | ${'keys'} | ${'the correct array'}
${'is'} | ${expect.any(Function)} | ${'`is` property'} | ${'a function'}
${'send'} | ${expect.any(Function)} | ${'`send` property'} | ${'a function'}
${'value'} | ${definition.initial} | ${'`value` property'} | ${'the same as the `initial` value of the machine definition'}
${'states'} | ${definition.states} | ${'`states` property'} | ${'the same as the `states` value of the machine definition'}
`("The machine's $description should be $eqDescription", ({ partOfMachine, equals }) => {
const test = partOfMachine === 'keys' ? Object.keys(liveMachine) : liveMachine[partOfMachine];
expect(test).toEqual(equals);
});
it.each`
initialState | transitionEvent | expectedState
${definition.initial} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
${STATE_LOADING} | ${TRANSITION_LOAD_ERROR} | ${STATE_ERRORED}
${STATE_ERRORED} | ${TRANSITION_ACKNOWLEDGE_ERROR} | ${STATE_IDLE}
${STATE_IDLE} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
${STATE_LOADING} | ${TRANSITION_LOAD_SUCCESS} | ${STATE_IDLE}
`(
'properly steps from $initialState to $expectedState when the event "$transitionEvent" is sent',
({ initialState, transitionEvent, expectedState }) => {
liveMachine.value = initialState;
liveMachine.send(transitionEvent);
expect(liveMachine.is(expectedState)).toBe(true);
expect(liveMachine.value).toBe(expectedState);
},
);
it.each`
initialState | transitionEvent
${STATE_IDLE} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_IDLE} | ${TRANSITION_LOAD_SUCCESS}
${STATE_IDLE} | ${TRANSITION_LOAD_ERROR}
${STATE_IDLE} | ${'RANDOM_FOO'}
${STATE_LOADING} | ${TRANSITION_START_LOAD}
${STATE_LOADING} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_LOADING} | ${'RANDOM_FOO'}
${STATE_ERRORED} | ${TRANSITION_LOAD_ERROR}
${STATE_ERRORED} | ${TRANSITION_LOAD_SUCCESS}
${STATE_ERRORED} | ${'RANDOM_FOO'}
`(
`does not perform any transition if the machine can't move from "$initialState" using the "$transitionEvent" event`,
({ initialState, transitionEvent }) => {
liveMachine.value = initialState;
liveMachine.send(transitionEvent);
expect(liveMachine.is(initialState)).toBe(true);
expect(liveMachine.value).toBe(initialState);
},
);
describe('send', () => {
it.each`
startState | transitionEvent | result
${STATE_IDLE} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
${STATE_LOADING} | ${TRANSITION_LOAD_SUCCESS} | ${STATE_IDLE}
${STATE_LOADING} | ${TRANSITION_LOAD_ERROR} | ${STATE_ERRORED}
${STATE_ERRORED} | ${TRANSITION_ACKNOWLEDGE_ERROR} | ${STATE_IDLE}
${STATE_ERRORED} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
`(
'successfully transitions to $result from $startState when the transition $transitionEvent is received',
({ startState, transitionEvent, result }) => {
liveMachine.value = startState;
expect(liveMachine.send(transitionEvent)).toEqual(result);
},
);
it.each`
startState | transitionEvent
${STATE_IDLE} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_IDLE} | ${TRANSITION_LOAD_SUCCESS}
${STATE_IDLE} | ${TRANSITION_LOAD_ERROR}
${STATE_IDLE} | ${'RANDOM_FOO'}
${STATE_LOADING} | ${TRANSITION_START_LOAD}
${STATE_LOADING} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_LOADING} | ${'RANDOM_FOO'}
${STATE_ERRORED} | ${TRANSITION_LOAD_ERROR}
${STATE_ERRORED} | ${TRANSITION_LOAD_SUCCESS}
${STATE_ERRORED} | ${'RANDOM_FOO'}
`(
'remains as $startState if an undefined transition ($transitionEvent) is received',
({ startState, transitionEvent }) => {
liveMachine.value = startState;
expect(liveMachine.send(transitionEvent)).toEqual(startState);
},
);
describe('detached', () => {
it.each`
startState | transitionEvent | result
${STATE_IDLE} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
${STATE_LOADING} | ${TRANSITION_LOAD_SUCCESS} | ${STATE_IDLE}
${STATE_LOADING} | ${TRANSITION_LOAD_ERROR} | ${STATE_ERRORED}
${STATE_ERRORED} | ${TRANSITION_ACKNOWLEDGE_ERROR} | ${STATE_IDLE}
${STATE_ERRORED} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
`(
'successfully transitions to $result from $startState when the transition $transitionEvent is received outside the context of the machine',
({ startState, transitionEvent, result }) => {
const liveSend = machine({
...definition,
initial: startState,
}).send;
expect(liveSend(transitionEvent)).toEqual(result);
},
);
it.each`
startState | transitionEvent
${STATE_IDLE} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_IDLE} | ${TRANSITION_LOAD_SUCCESS}
${STATE_IDLE} | ${TRANSITION_LOAD_ERROR}
${STATE_IDLE} | ${'RANDOM_FOO'}
${STATE_LOADING} | ${TRANSITION_START_LOAD}
${STATE_LOADING} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_LOADING} | ${'RANDOM_FOO'}
${STATE_ERRORED} | ${TRANSITION_LOAD_ERROR}
${STATE_ERRORED} | ${TRANSITION_LOAD_SUCCESS}
${STATE_ERRORED} | ${'RANDOM_FOO'}
`(
'remains as $startState if an undefined transition ($transitionEvent) is received',
({ startState, transitionEvent }) => {
const liveSend = machine({
...definition,
initial: startState,
}).send;
expect(liveSend(transitionEvent)).toEqual(startState);
},
);
});
});
describe('is', () => {
it.each`
bool | test | actual
${true} | ${STATE_IDLE} | ${STATE_IDLE}
${false} | ${STATE_LOADING} | ${STATE_IDLE}
${false} | ${STATE_ERRORED} | ${STATE_IDLE}
${true} | ${STATE_LOADING} | ${STATE_LOADING}
${false} | ${STATE_IDLE} | ${STATE_LOADING}
${false} | ${STATE_ERRORED} | ${STATE_LOADING}
${true} | ${STATE_ERRORED} | ${STATE_ERRORED}
${false} | ${STATE_IDLE} | ${STATE_ERRORED}
${false} | ${STATE_LOADING} | ${STATE_ERRORED}
`(
'returns "$bool" for "$test" when the current state is "$actual"',
({ bool, test, actual }) => {
liveMachine = machine({
...definition,
initial: actual,
});
expect(liveMachine.is(test)).toEqual(bool);
},
);
describe('detached', () => {
it.each`
bool | test | actual
${true} | ${STATE_IDLE} | ${STATE_IDLE}
${false} | ${STATE_LOADING} | ${STATE_IDLE}
${false} | ${STATE_ERRORED} | ${STATE_IDLE}
${true} | ${STATE_LOADING} | ${STATE_LOADING}
${false} | ${STATE_IDLE} | ${STATE_LOADING}
${false} | ${STATE_ERRORED} | ${STATE_LOADING}
${true} | ${STATE_ERRORED} | ${STATE_ERRORED}
${false} | ${STATE_IDLE} | ${STATE_ERRORED}
${false} | ${STATE_LOADING} | ${STATE_ERRORED}
`(
'returns "$bool" for "$test" when the current state is "$actual"',
({ bool, test, actual }) => {
const liveIs = machine({
...definition,
initial: actual,
}).is;
expect(liveIs(test)).toEqual(bool);
},
);
});
});
});
describe('transition', () => {
it.each`
startState | transitionEvent | result
${STATE_IDLE} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
${STATE_LOADING} | ${TRANSITION_LOAD_SUCCESS} | ${STATE_IDLE}
${STATE_LOADING} | ${TRANSITION_LOAD_ERROR} | ${STATE_ERRORED}
${STATE_ERRORED} | ${TRANSITION_ACKNOWLEDGE_ERROR} | ${STATE_IDLE}
${STATE_ERRORED} | ${TRANSITION_START_LOAD} | ${STATE_LOADING}
`(
'successfully transitions to $result from $startState when the transition $transitionEvent is received',
({ startState, transitionEvent, result }) => {
expect(transition(definition, startState, transitionEvent)).toEqual(result);
},
);
it.each`
startState | transitionEvent
${STATE_IDLE} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_IDLE} | ${TRANSITION_LOAD_SUCCESS}
${STATE_IDLE} | ${TRANSITION_LOAD_ERROR}
${STATE_IDLE} | ${'RANDOM_FOO'}
${STATE_LOADING} | ${TRANSITION_START_LOAD}
${STATE_LOADING} | ${TRANSITION_ACKNOWLEDGE_ERROR}
${STATE_LOADING} | ${'RANDOM_FOO'}
${STATE_ERRORED} | ${TRANSITION_LOAD_ERROR}
${STATE_ERRORED} | ${TRANSITION_LOAD_SUCCESS}
${STATE_ERRORED} | ${'RANDOM_FOO'}
`(
'remains as $startState if an undefined transition ($transitionEvent) is received',
({ startState, transitionEvent }) => {
expect(transition(definition, startState, transitionEvent)).toEqual(startState);
},
);
it('remains as the provided starting state if it is an unrecognized state', () => {
expect(transition(definition, 'RANDOM_FOO', TRANSITION_START_LOAD)).toEqual('RANDOM_FOO');
});
});
});

View File

@ -71,6 +71,11 @@ RSpec.describe Gitlab::Database::LoadBalancing::RackMiddleware, :redis do
expect(app).to receive(:call).with(env).and_return(10)
expect(ActiveSupport::Notifications)
.to receive(:instrument)
.with('web_transaction_completed.load_balancing')
.and_call_original
expect(middleware.call(env)).to eq(10)
end
end

View File

@ -46,41 +46,68 @@ RSpec.describe Gitlab::Database::LoadBalancing::Sticking, :redis do
describe '.all_caught_up?' do
let(:lb) { double(:lb) }
let(:last_write_location) { 'foo' }
before do
allow(described_class).to receive(:load_balancer).and_return(lb)
end
it 'returns true if no write location could be found' do
allow(described_class).to receive(:last_write_location_for)
.with(:user, 42)
.and_return(nil)
expect(lb).not_to receive(:all_caught_up?)
expect(described_class.all_caught_up?(:user, 42)).to eq(true)
.and_return(last_write_location)
end
it 'returns true, and unsticks if all secondaries have caught up' do
allow(described_class).to receive(:last_write_location_for)
.with(:user, 42)
.and_return('foo')
context 'when no write location could be found' do
let(:last_write_location) { nil }
allow(lb).to receive(:all_caught_up?).with('foo').and_return(true)
it 'returns true' do
allow(described_class).to receive(:last_write_location_for)
.with(:user, 42)
.and_return(nil)
expect(described_class).to receive(:unstick).with(:user, 42)
expect(lb).not_to receive(:all_caught_up?)
expect(described_class.all_caught_up?(:user, 42)).to eq(true)
expect(described_class.all_caught_up?(:user, 42)).to eq(true)
end
end
it 'return false if the secondaries have not yet caught up' do
allow(described_class).to receive(:last_write_location_for)
.with(:user, 42)
.and_return('foo')
context 'when all secondaries have caught up' do
before do
allow(lb).to receive(:all_caught_up?).with('foo').and_return(true)
end
allow(lb).to receive(:all_caught_up?).with('foo').and_return(false)
it 'returns true, and unsticks' do
expect(described_class).to receive(:unstick).with(:user, 42)
expect(described_class.all_caught_up?(:user, 42)).to eq(false)
expect(described_class.all_caught_up?(:user, 42)).to eq(true)
end
it 'notifies with the proper event payload' do
expect(ActiveSupport::Notifications)
.to receive(:instrument)
.with('caught_up_replica_pick.load_balancing', { result: true })
.and_call_original
described_class.all_caught_up?(:user, 42)
end
end
context 'when the secondaries have not yet caught up' do
before do
allow(lb).to receive(:all_caught_up?).with('foo').and_return(false)
end
it 'returns false' do
expect(described_class.all_caught_up?(:user, 42)).to eq(false)
end
it 'notifies with the proper event payload' do
expect(ActiveSupport::Notifications)
.to receive(:instrument)
.with('caught_up_replica_pick.load_balancing', { result: false })
.and_call_original
described_class.all_caught_up?(:user, 42)
end
end
end

View File

@ -137,6 +137,34 @@ RSpec.describe Gitlab::InstrumentationHelper do
db_primary_wal_cached_count: 0,
db_replica_wal_cached_count: 0)
end
context 'when replica caught up search was made' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = 2
Gitlab::SafeRequestStore[:caught_up_replica_pick_fail] = 1
end
it 'includes related metrics' do
subject
expect(payload).to include(caught_up_replica_pick_ok: 2)
expect(payload).to include(caught_up_replica_pick_fail: 1)
end
end
context 'when only a single counter was updated' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = 1
Gitlab::SafeRequestStore[:caught_up_replica_pick_fail] = nil
end
it 'includes only that counter in the logging payload' do
subject
expect(payload).to include(caught_up_replica_pick_ok: 1)
expect(payload).not_to include(:caught_up_replica_pick_fail)
end
end
end
context 'when load balancing is disabled' do

View File

@ -0,0 +1,115 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Gitlab::Metrics::Subscribers::LoadBalancing, :request_store do
let(:subscriber) { described_class.new }
before do
allow(Gitlab::Database::LoadBalancing).to receive(:enable?).and_return(true)
end
describe '#caught_up_replica_pick' do
shared_examples 'having payload result value' do |result, counter_name|
subject { subscriber.caught_up_replica_pick(event) }
let(:payload) { { result: result } }
let(:event) do
double(
:event,
name: 'load_balancing.caught_up_replica_pick',
payload: payload
)
end
it 'stores per-request caught up replica search result' do
subject
expect(Gitlab::SafeRequestStore[counter_name]).to eq(1)
end
end
it_behaves_like 'having payload result value', true, :caught_up_replica_pick_ok
it_behaves_like 'having payload result value', false, :caught_up_replica_pick_fail
end
describe "#web_transaction_completed" do
subject { subscriber.web_transaction_completed(event) }
let(:event) do
double(
:event,
name: 'load_balancing.web_transaction_completed',
payload: {}
)
end
let(:web_transaction) { double('Gitlab::Metrics::WebTransaction') }
before do
allow(::Gitlab::Metrics::WebTransaction).to receive(:current)
.and_return(web_transaction)
end
context 'when no data in request store' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick] = nil
end
it 'does not change the counters' do
expect(web_transaction).not_to receive(:increment)
end
end
context 'when request store was updated' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = 2
Gitlab::SafeRequestStore[:caught_up_replica_pick_fail] = 1
end
it 'increments :caught_up_replica_pick count with proper label' do
expect(web_transaction).to receive(:increment).with(:gitlab_transaction_caught_up_replica_pick_count_total, 2, { result: true })
expect(web_transaction).to receive(:increment).with(:gitlab_transaction_caught_up_replica_pick_count_total, 1, { result: false })
subject
end
end
end
describe '.load_balancing_payload' do
subject { described_class.load_balancing_payload }
context 'when no data in request store' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = nil
Gitlab::SafeRequestStore[:caught_up_replica_pick_fail] = nil
end
it 'returns empty hash' do
expect(subject).to eq({})
end
end
context 'when request store was updated for a single counter' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = 2
end
it 'returns proper payload with only that counter' do
expect(subject).to eq({ caught_up_replica_pick_ok: 2 })
end
end
context 'when both counters were updated' do
before do
Gitlab::SafeRequestStore[:caught_up_replica_pick_ok] = 2
Gitlab::SafeRequestStore[:caught_up_replica_pick_fail] = 1
end
it 'returns the proper payload' do
expect(subject).to eq({ caught_up_replica_pick_ok: 2, caught_up_replica_pick_fail: 1 })
end
end
end
end

View File

@ -95,10 +95,10 @@ RSpec.shared_context 'project navbar structure' do
nav_item: _('Analytics'),
nav_sub_items: [
_('CI/CD'),
(_('Code Review') if Gitlab.ee?),
(_('Merge Request') if Gitlab.ee?),
(_('Code review') if Gitlab.ee?),
(_('Merge request') if Gitlab.ee?),
_('Repository'),
_('Value Stream')
_('Value stream')
]
},
{

View File

@ -695,11 +695,11 @@ RSpec.describe 'layouts/nav/sidebar/_project' do
end
end
describe 'Value Stream' do
describe 'Value stream' do
it 'has a link to the value stream page' do
render
expect(rendered).to have_link('Value Stream', href: project_cycle_analytics_path(project))
expect(rendered).to have_link('Value stream', href: project_cycle_analytics_path(project))
end
context 'when user does not have access' do
@ -708,7 +708,7 @@ RSpec.describe 'layouts/nav/sidebar/_project' do
it 'does not have a link to the value stream page' do
render
expect(rendered).not_to have_link('Value Stream', href: project_cycle_analytics_path(project))
expect(rendered).not_to have_link('Value stream', href: project_cycle_analytics_path(project))
end
end
end