Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-03-17 21:08:35 +00:00
parent 1038f06b86
commit 93003eb115
85 changed files with 1832 additions and 314 deletions

View file

@ -1247,7 +1247,7 @@
"oneOf": [
{
"type": "object",
"description": "Trigger a multi-project pipeline. Read more: https://docs.gitlab.com/ee/ci/yaml/README.html#simple-trigger-syntax-for-multi-project-pipelines",
"description": "Trigger a multi-project pipeline. Read more: https://docs.gitlab.com/ee/ci/pipelines/multi_project_pipelines.html#specify-a-downstream-pipeline-branch",
"additionalProperties": false,
"properties": {
"project": {
@ -1263,6 +1263,23 @@
"description": "You can mirror the pipeline status from the triggered pipeline to the source bridge job by using strategy: depend",
"type": "string",
"enum": ["depend"]
},
"forward": {
"description": "Specify what to forward to the downstream pipeline.",
"type": "object",
"additionalProperties": false,
"properties": {
"yaml_variables": {
"type": "boolean",
"description": "Variables defined in the trigger job are passed to downstream pipelines.",
"default": true
},
"pipeline_variables": {
"type": "boolean",
"description": "Variables added for manual pipeline runs are passed to downstream pipelines.",
"default": false
}
}
}
},
"required": ["project"],
@ -1272,7 +1289,7 @@
},
{
"type": "object",
"description": "Trigger a child pipeline. Read more: https://docs.gitlab.com/ee/ci/yaml/README.html#trigger-syntax-for-child-pipeline",
"description": "Trigger a child pipeline. Read more: https://docs.gitlab.com/ee/ci/pipelines/parent_child_pipelines.html",
"additionalProperties": false,
"properties": {
"include": {
@ -1362,11 +1379,28 @@
"description": "You can mirror the pipeline status from the triggered pipeline to the source bridge job by using strategy: depend",
"type": "string",
"enum": ["depend"]
},
"forward": {
"description": "Specify what to forward to the downstream pipeline.",
"type": "object",
"additionalProperties": false,
"properties": {
"yaml_variables": {
"type": "boolean",
"description": "Variables defined in the trigger job are passed to downstream pipelines.",
"default": true
},
"pipeline_variables": {
"type": "boolean",
"description": "Variables added for manual pipeline runs are passed to downstream pipelines.",
"default": false
}
}
}
}
},
{
"description": "Path to the project, e.g. `group/project`, or `group/sub-group/project`.",
"description": "Path to the project, e.g. `group/project`, or `group/sub-group/project`. Read more: https://docs.gitlab.com/ee/ci/pipelines/multi_project_pipelines.html#define-multi-project-pipelines-in-your-gitlab-ciyml-file",
"type": "string",
"pattern": "\\S/\\S"
}

View file

@ -42,6 +42,8 @@ import {
ISSUE_REFERENCE,
MAX_LIST_SIZE,
PAGE_SIZE,
PARAM_PAGE_AFTER,
PARAM_PAGE_BEFORE,
PARAM_STATE,
RELATIVE_POSITION_ASC,
TOKEN_TYPE_ASSIGNEE,
@ -135,6 +137,8 @@ export default {
},
},
data() {
const pageAfter = getParameterByName(PARAM_PAGE_AFTER);
const pageBefore = getParameterByName(PARAM_PAGE_BEFORE);
const state = getParameterByName(PARAM_STATE);
const defaultSortKey = state === IssuableStates.Closed ? UPDATED_DESC : CREATED_DESC;
const dashboardSortKey = getSortKey(this.initialSort);
@ -166,7 +170,7 @@ export default {
issuesCounts: {},
issuesError: null,
pageInfo: {},
pageParams: getInitialPageParams(sortKey),
pageParams: getInitialPageParams(sortKey, pageAfter, pageBefore),
showBulkEditSidebar: false,
sortKey,
state: state || IssuableStates.Opened,
@ -237,7 +241,12 @@ export default {
return this.isProject ? ITEM_TYPE.PROJECT : ITEM_TYPE.GROUP;
},
hasSearch() {
return this.searchQuery || Object.keys(this.urlFilterParams).length;
return (
this.searchQuery ||
Object.keys(this.urlFilterParams).length ||
this.pageParams.afterCursor ||
this.pageParams.beforeCursor
);
},
isBulkEditButtonDisabled() {
return this.showBulkEditSidebar || !this.issues.length;
@ -394,6 +403,8 @@ export default {
},
urlParams() {
return {
page_after: this.pageParams.afterCursor,
page_before: this.pageParams.beforeCursor,
search: this.searchQuery,
sort: urlSortParams[this.sortKey],
state: this.state,

View file

@ -56,17 +56,11 @@ export const ISSUE_REFERENCE = /^#\d+$/;
export const MAX_LIST_SIZE = 10;
export const PAGE_SIZE = 20;
export const PAGE_SIZE_MANUAL = 100;
export const PARAM_PAGE_AFTER = 'page_after';
export const PARAM_PAGE_BEFORE = 'page_before';
export const PARAM_STATE = 'state';
export const RELATIVE_POSITION = 'relative_position';
export const defaultPageSizeParams = {
firstPageSize: PAGE_SIZE,
};
export const largePageSizeParams = {
firstPageSize: PAGE_SIZE_MANUAL,
};
export const BLOCKING_ISSUES_ASC = 'BLOCKING_ISSUES_ASC';
export const BLOCKING_ISSUES_DESC = 'BLOCKING_ISSUES_DESC';
export const CREATED_ASC = 'CREATED_ASC';

View file

@ -10,16 +10,16 @@ import {
BLOCKING_ISSUES_DESC,
CREATED_ASC,
CREATED_DESC,
defaultPageSizeParams,
DUE_DATE_ASC,
DUE_DATE_DESC,
filters,
LABEL_PRIORITY_ASC,
LABEL_PRIORITY_DESC,
largePageSizeParams,
MILESTONE_DUE_ASC,
MILESTONE_DUE_DESC,
NORMAL_FILTER,
PAGE_SIZE,
PAGE_SIZE_MANUAL,
POPULARITY_ASC,
POPULARITY_DESC,
PRIORITY_ASC,
@ -43,8 +43,11 @@ import {
WEIGHT_DESC,
} from './constants';
export const getInitialPageParams = (sortKey) =>
sortKey === RELATIVE_POSITION_ASC ? largePageSizeParams : defaultPageSizeParams;
export const getInitialPageParams = (sortKey, afterCursor, beforeCursor) => ({
firstPageSize: sortKey === RELATIVE_POSITION_ASC ? PAGE_SIZE_MANUAL : PAGE_SIZE,
afterCursor,
beforeCursor,
});
export const getSortKey = (sort) =>
Object.keys(urlSortParams).find((key) => urlSortParams[key] === sort);

View file

@ -17,7 +17,7 @@ module IssuableCollectionsAction
respond_to do |format|
format.html
format.atom { render layout: 'xml.atom' }
format.atom { render layout: 'xml' }
end
end

View file

@ -23,7 +23,7 @@ class Dashboard::ProjectsController < Dashboard::ApplicationController
end
format.atom do
load_events
render layout: 'xml.atom'
render layout: 'xml'
end
format.json do
render json: {

View file

@ -235,7 +235,7 @@ class GroupsController < Groups::ApplicationController
def render_details_view_atom
load_events
render layout: 'xml.atom', template: 'groups/show'
render layout: 'xml', template: 'groups/show'
end
# rubocop: disable CodeReuse/ActiveRecord

View file

@ -30,7 +30,7 @@ class Projects::CommitsController < Projects::ApplicationController
respond_to do |format|
format.html
format.atom { render layout: 'xml.atom' }
format.atom { render layout: 'xml' }
format.json do
pager_json(

View file

@ -81,7 +81,7 @@ class Projects::IssuesController < Projects::ApplicationController
respond_to do |format|
format.html
format.atom { render layout: 'xml.atom' }
format.atom { render layout: 'xml' }
format.json do
render json: {
html: view_to_html_string("projects/issues/_issues"),

View file

@ -93,7 +93,7 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
respond_to do |format|
format.html
format.atom { render layout: 'xml.atom' }
format.atom { render layout: 'xml' }
format.json do
render json: {
html: view_to_html_string("projects/merge_requests/_merge_requests")

View file

@ -42,7 +42,7 @@ class Projects::TagsController < Projects::ApplicationController
status = @tags_loading_error ? :service_unavailable : :ok
format.html { render status: status }
format.atom { render layout: 'xml.atom', status: status }
format.atom { render layout: 'xml', status: status }
end
end
# rubocop: enable CodeReuse/ActiveRecord

View file

@ -173,7 +173,7 @@ class ProjectsController < Projects::ApplicationController
format.atom do
load_events
@events = @events.select { |event| event.visible_to_user?(current_user) }
render layout: 'xml.atom'
render layout: 'xml'
end
end
end

View file

@ -35,7 +35,7 @@ class UsersController < ApplicationController
format.atom do
load_events
render layout: 'xml.atom'
render layout: 'xml'
end
format.json do

View file

@ -11,6 +11,11 @@ module Ci
InvalidBridgeTypeError = Class.new(StandardError)
InvalidTransitionError = Class.new(StandardError)
FORWARD_DEFAULTS = {
yaml_variables: true,
pipeline_variables: false
}.freeze
belongs_to :project
belongs_to :trigger_request
has_many :sourced_pipelines, class_name: "::Ci::Sources::Pipeline",
@ -199,12 +204,13 @@ module Ci
end
def downstream_variables
variables = scoped_variables.concat(pipeline.persisted_variables)
variables.to_runner_variables.yield_self do |all_variables|
yaml_variables.to_a.map do |hash|
{ key: hash[:key], value: ::ExpandVariables.expand(hash[:value], all_variables) }
end
if ::Feature.enabled?(:ci_trigger_forward_variables, project, default_enabled: :yaml)
calculate_downstream_variables
.reverse # variables priority
.uniq { |var| var[:key] } # only one variable key to pass
.reverse
else
legacy_downstream_variables
end
end
@ -250,6 +256,58 @@ module Ci
}
}
end
# Pre-`ci_trigger_forward_variables` behaviour: always forward the trigger
# job's yaml variables, each value expanded against the bridge's full
# variable scope (scoped variables + the pipeline's persisted variables).
def legacy_downstream_variables
  variables = scoped_variables.concat(pipeline.persisted_variables)

  variables.to_runner_variables.yield_self do |all_variables|
    yaml_variables.to_a.map do |hash|
      { key: hash[:key], value: ::ExpandVariables.expand(hash[:value], all_variables) }
    end
  end
end
# Builds the variables forwarded to the downstream pipeline when the
# `ci_trigger_forward_variables` flag is on. The caller de-duplicates by
# key keeping the *last* occurrence, so list order encodes priority:
# pipeline variables (appended last) win over yaml variables.
def calculate_downstream_variables
  expand_variables = scoped_variables
    .concat(pipeline.persisted_variables)
    .to_runner_variables

  # The order of this list refers to the priority of the variables
  downstream_yaml_variables(expand_variables) +
    downstream_pipeline_variables(expand_variables)
end
# Variables defined on the trigger job itself, with values expanded using
# +expand_variables+. Empty when `trigger:forward:yaml_variables` is false.
def downstream_yaml_variables(expand_variables)
  return [] unless forward_yaml_variables?

  yaml_variables.to_a.map do |hash|
    { key: hash[:key], value: ::ExpandVariables.expand(hash[:value], expand_variables) }
  end
end
# Variables attached to the upstream pipeline (e.g. added for manual runs),
# with values expanded using +expand_variables+. Empty unless
# `trigger:forward:pipeline_variables` is enabled (off by default).
def downstream_pipeline_variables(expand_variables)
  return [] unless forward_pipeline_variables?

  pipeline.variables.to_a.map do |variable|
    { key: variable.key, value: ::ExpandVariables.expand(variable.value, expand_variables) }
  end
end
# Whether `trigger:forward:yaml_variables` is set in the job options;
# falls back to FORWARD_DEFAULTS (true) when the key is absent. Memoized
# because a `false` option value must be distinguished from "unset".
def forward_yaml_variables?
  strong_memoize(:forward_yaml_variables) do
    result = options&.dig(:trigger, :forward, :yaml_variables)

    result.nil? ? FORWARD_DEFAULTS[:yaml_variables] : result
  end
end
# Whether `trigger:forward:pipeline_variables` is set in the job options;
# falls back to FORWARD_DEFAULTS (false) when the key is absent. Memoized
# because a `false` option value must be distinguished from "unset".
def forward_pipeline_variables?
  strong_memoize(:forward_pipeline_variables) do
    result = options&.dig(:trigger, :forward, :pipeline_variables)

    result.nil? ? FORWARD_DEFAULTS[:pipeline_variables] : result
  end
end
end
end

View file

@ -16,7 +16,7 @@ module Ci
scope :with_needs, -> (names = nil) do
needs = Ci::BuildNeed.scoped_build.select(1)
needs = needs.where(name: names) if names
where('EXISTS (?)', needs).preload(:needs)
where('EXISTS (?)', needs)
end
scope :without_needs, -> (names = nil) do

View file

@ -102,9 +102,7 @@ module CounterAttribute
run_after_commit_or_now do
if counter_attribute_enabled?(attribute)
redis_state do |redis|
redis.incrby(counter_key(attribute), increment)
end
increment_counter(attribute, increment)
FlushCounterIncrementsWorker.perform_in(WORKER_DELAY, self.class.name, self.id, attribute)
else
@ -115,6 +113,28 @@ module CounterAttribute
true
end
# Immediately applies +increment+ to the attribute's buffered Redis
# counter (INCRBY). No-op when counter buffering is not enabled for
# +attribute+ on this model.
def increment_counter(attribute, increment)
  if counter_attribute_enabled?(attribute)
    redis_state do |redis|
      redis.incrby(counter_key(attribute), increment)
    end
  end
end
# Deletes the buffered Redis counter for +attribute+; any pending,
# not-yet-flushed increments are discarded. No-op when buffering is
# not enabled for the attribute.
def clear_counter!(attribute)
  if counter_attribute_enabled?(attribute)
    redis_state { |redis| redis.del(counter_key(attribute)) }
  end
end
# Current buffered Redis value for +attribute+ as an Integer (GET then
# #to_i, so an unset key reads as 0). Returns nil when buffering is not
# enabled for the attribute.
def get_counter_value(attribute)
  if counter_attribute_enabled?(attribute)
    redis_state do |redis|
      redis.get(counter_key(attribute)).to_i
    end
  end
end
# Redis key for this record's buffered counter. The braces around
# project_id look like a Redis Cluster hash tag (grouping a project's
# counters into one slot) — NOTE(review): confirm cluster usage.
def counter_key(attribute)
  "project:{#{project_id}}:counters:#{self.class}:#{id}:#{attribute}"
end

View file

@ -0,0 +1,91 @@
# frozen_string_literal: true

module Projects
  # Work-queue record for recalculating a project's total build artifacts
  # size. One row per project; workers claim a row, sum the project's job
  # artifact sizes in batches and destroy the row when finished
  # (see Projects::RefreshBuildArtifactsSizeStatisticsService).
  class BuildArtifactsSizeRefresh < ApplicationRecord
    include BulkInsertSafe

    # A :running refresh not updated for this long is considered stuck and
    # becomes claimable again via the `stale` scope.
    STALE_WINDOW = 3.days

    self.table_name = 'project_build_artifacts_size_refreshes'

    belongs_to :project
    validates :project, presence: true

    STATES = {
      created: 1,
      running: 2,
      pending: 3
    }.freeze

    state_machine :state, initial: :created do
      # created -> running <-> pending
      state :created, value: STATES[:created]
      state :running, value: STATES[:running]
      state :pending, value: STATES[:pending]

      event :process do
        transition [:created, :pending, :running] => :running
      end

      event :requeue do
        transition running: :pending
      end

      # set it only the first time we execute the refresh
      before_transition created: :running do |refresh|
        refresh.reset_project_statistics!
        refresh.refresh_started_at = Time.zone.now
      end

      # Touch updated_at on every transition out of :running so the
      # STALE_WINDOW check reflects recent activity.
      before_transition running: any do |refresh, transition|
        refresh.updated_at = Time.zone.now
      end

      # `requeue!` is called with the last processed job artifact id as the
      # event argument; store it so the next batch resumes after it.
      before_transition running: :pending do |refresh, transition|
        refresh.last_job_artifact_id = transition.args.first
      end
    end

    scope :stale, -> { with_state(:running).where('updated_at < ?', STALE_WINDOW.ago) }
    scope :remaining, -> { with_state(:created, :pending).or(stale) }

    # Bulk-enqueues refresh rows for one project or a collection of projects.
    # Duplicates are skipped (the table has a unique index on project_id).
    def self.enqueue_refresh(projects)
      now = Time.zone.now

      records = Array(projects).map do |project|
        new(project: project, state: STATES[:created], created_at: now, updated_at: now)
      end

      bulk_insert!(records, skip_duplicates: true)
    end

    # Claims the next remaining refresh using FOR UPDATE SKIP LOCKED so
    # concurrent workers never pick the same row, and fires `process!` to
    # move it to :running. Returns the claimed record, or nil when there is
    # no work left.
    def self.process_next_refresh!
      next_refresh = nil

      transaction do
        next_refresh = remaining
          .order(:state, :updated_at)
          .lock('FOR UPDATE SKIP LOCKED')
          .take

        next_refresh&.process!
      end

      next_refresh
    end

    # Zeroes the stored build_artifacts_size and clears any buffered Redis
    # increments so the recalculation starts from a clean slate.
    def reset_project_statistics!
      statistics = project.statistics
      statistics.update!(build_artifacts_size: 0)
      statistics.clear_counter!(:build_artifacts_size)
    end

    # Next page of job artifacts to sum: artifacts created no later than the
    # refresh start, with ids strictly after the last one processed
    # (`last_job_artifact_id.to_i` turns a nil cursor into 0).
    def next_batch(limit:)
      project.job_artifacts.select(:id, :size)
        .where('created_at <= ? AND id > ?', refresh_started_at, last_job_artifact_id.to_i)
        .order(:created_at)
        .limit(limit)
    end
  end
end

View file

@ -22,9 +22,15 @@ module Ci
end
def dependent_jobs
stage_dependent_jobs
.or(needs_dependent_jobs.except(:preload))
dependent_jobs = stage_dependent_jobs
.or(needs_dependent_jobs)
.ordered_by_stage
if ::Feature.enabled?(:ci_fix_order_of_subsequent_jobs, @processable.pipeline.project, default_enabled: :yaml)
dependent_jobs = ordered_by_dag(dependent_jobs)
end
dependent_jobs
end
def process(job)
@ -44,5 +50,23 @@ module Ci
def skipped_jobs
@skipped_jobs ||= @processable.pipeline.processables.skipped
end
# rubocop: disable CodeReuse/ActiveRecord
# Re-orders jobs within each stage to follow their DAG (`needs`) order, so
# a job is processed after the jobs it depends on. Stage grouping itself
# is preserved; only ordering inside a stage changes.
def ordered_by_dag(jobs)
  # Map of job name => topological position produced by sort_jobs.
  sorted_job_names = sort_jobs(jobs).each_with_index.to_h

  jobs.preload(:needs).group_by(&:stage_idx).flat_map do |_, stage_jobs|
    stage_jobs.sort_by { |job| sorted_job_names.fetch(job.name) }
  end
end
# Topologically sorts the given jobs by their `needs` edges using the
# YamlProcessor DAG helper; returns job names in dependency order.
def sort_jobs(jobs)
  Gitlab::Ci::YamlProcessor::Dag.order(
    jobs.to_h do |job|
      [job.name, job.needs.map(&:name)]
    end
  )
end
# rubocop: enable CodeReuse/ActiveRecord
end
end

View file

@ -0,0 +1,32 @@
# frozen_string_literal: true

module Projects
  # Processes a single batch of a project's build-artifacts-size refresh.
  # Claims the next Projects::BuildArtifactsSizeRefresh row, sums one page
  # of artifact sizes into the project statistics, and destroys the row
  # once every artifact has been counted.
  class RefreshBuildArtifactsSizeStatisticsService
    BATCH_SIZE = 1000

    # Returns the refresh that was worked on, or nil when no refresh was
    # available to claim.
    def execute
      refresh = Projects::BuildArtifactsSizeRefresh.process_next_refresh!
      return unless refresh

      artifacts = refresh.next_batch(limit: BATCH_SIZE).to_a

      if artifacts.empty?
        # Remove the refresh job from the table if there are no more
        # remaining job artifacts to calculate for the given project.
        refresh.destroy!
      else
        # We are doing the sum in ruby because the query takes too long when done in SQL
        batch_total = artifacts.sum(&:size)

        Projects::BuildArtifactsSizeRefresh.transaction do
          # Mark the refresh ready for another worker to pick up and process the next batch
          refresh.requeue!(artifacts.last.id)

          refresh.project.statistics.delayed_increment_counter(:build_artifacts_size, batch_total)
        end
      end

      refresh
    end
  end
end

View file

@ -27,7 +27,7 @@
%p.mb-2= s_('%{service_ping_link_start}What information is shared with GitLab Inc.?%{service_ping_link_end}').html_safe % { service_ping_link_start: service_ping_link_start, service_ping_link_end: '</a>'.html_safe }
%button.gl-button.btn.btn-default.js-payload-preview-trigger{ type: 'button', data: { payload_selector: ".#{payload_class}" } }
.gl-spinner.js-spinner.gl-display-none.gl-mr-2
= gl_loading_icon(css_class: 'js-spinner gl-display-none gl-mr-2')
.js-text.gl-display-inline= _('Preview payload')
%pre.service-data-payload-container.js-syntax-highlight.code.highlight.gl-mt-2.gl-display-none{ class: payload_class, data: { endpoint: usage_data_admin_application_settings_path(format: :html) } }
- else

View file

@ -65,8 +65,7 @@
= gl_redirect_listbox_tag admin_users_sort_options(filter: params[:filter], search_query: params[:search_query]), @sort, data: { right: true }
#js-admin-users-app{ data: admin_users_data_attributes(@users) }
.gl-spinner-container.gl-my-7
%span.gl-vertical-align-bottom.gl-spinner.gl-spinner-dark.gl-spinner-lg{ aria: { label: _('Loading') } }
= gl_loading_icon(size: 'lg', css_class: 'gl-my-7')
= paginate_collection @users

View file

@ -37,8 +37,9 @@
.panel-footer
= f.submit _('Mirror repository'), class: 'gl-button btn btn-confirm js-mirror-submit qa-mirror-repository-button', name: :update_remote_mirror
- else
.gl-alert.gl-alert-info{ role: 'alert' }
= sprite_icon('information-o', css_class: 'gl-icon gl-alert-icon gl-alert-icon-no-title')
= render 'shared/global_alert',
dismissible: false,
variant: :info do
.gl-alert-body
= _('Mirror settings are only available to GitLab administrators.')

View file

@ -1,8 +1,7 @@
.gl-alert.gl-alert-danger.gl-mb-5.gl-mt-5
.gl-alert-container
= sprite_icon('error', size: 16, css_class: 'gl-icon gl-alert-icon gl-alert-icon-no-title')
.gl-alert-content
.gl-alert-title
= reason
.gl-alert-body
= s_('The git server, Gitaly, is not available at this time. Please contact your administrator.')
= render 'shared/global_alert',
alert_class: 'gl-my-5',
variant: :danger,
dismissible: false,
title: reason do
.gl-alert-body
= s_('The git server, Gitaly, is not available at this time. Please contact your administrator.')

View file

@ -561,6 +561,15 @@
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:projects_schedule_refresh_build_artifacts_size_statistics
:worker_name: Projects::ScheduleRefreshBuildArtifactsSizeStatisticsWorker
:feature_category: :build_artifacts
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:prune_old_events
:worker_name: PruneOldEventsWorker
:feature_category: :users
@ -2803,6 +2812,15 @@
:weight: 1
:idempotent: true
:tags: []
- :name: projects_refresh_build_artifacts_size_statistics
:worker_name: Projects::RefreshBuildArtifactsSizeStatisticsWorker
:feature_category: :build_artifacts
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: projects_schedule_bulk_repository_shard_moves
:worker_name: Projects::ScheduleBulkRepositoryShardMovesWorker
:feature_category: :gitaly

View file

@ -0,0 +1,51 @@
# frozen_string_literal: true

module Projects
  # LimitedCapacity worker: each invocation processes one batch of one
  # project's build-artifacts-size refresh via
  # Projects::RefreshBuildArtifactsSizeStatisticsService.
  class RefreshBuildArtifactsSizeStatisticsWorker
    include ApplicationWorker
    include LimitedCapacity::Worker

    # Concurrency ceilings selected by the feature flags in
    # max_running_jobs below.
    MAX_RUNNING_LOW = 2
    MAX_RUNNING_MEDIUM = 20
    MAX_RUNNING_HIGH = 50

    data_consistency :always
    feature_category :build_artifacts
    idempotent!

    # Runs one batch; logs progress metadata when a refresh was claimed.
    # `refresh.destroyed?` is true only after the final batch.
    def perform_work(*args)
      refresh = Projects::RefreshBuildArtifactsSizeStatisticsService.new.execute
      return unless refresh

      log_extra_metadata_on_done(:project_id, refresh.project_id)
      log_extra_metadata_on_done(:last_job_artifact_id, refresh.last_job_artifact_id)
      log_extra_metadata_on_done(:last_batch, refresh.destroyed?)
      log_extra_metadata_on_done(:refresh_started_at, refresh.refresh_started_at)
    end

    def remaining_work_count(*args)
      # LimitedCapacity::Worker only needs to know if there is work left to do
      # so we can get by with an EXISTS query rather than a count.
      # https://gitlab.com/gitlab-org/gitlab/-/issues/356167
      if Projects::BuildArtifactsSizeRefresh.remaining.any?
        1
      else
        0
      end
    end

    # Ramp-up dial: the highest enabled flag wins; with all flags off the
    # worker is effectively disabled (0 concurrent jobs).
    def max_running_jobs
      if ::Feature.enabled?(:projects_build_artifacts_size_refresh_high)
        MAX_RUNNING_HIGH
      elsif ::Feature.enabled?(:projects_build_artifacts_size_refresh_medium)
        MAX_RUNNING_MEDIUM
      elsif ::Feature.enabled?(:projects_build_artifacts_size_refresh_low)
        MAX_RUNNING_LOW
      else
        0
      end
    end
  end
end

View file

@ -0,0 +1,18 @@
# frozen_string_literal: true

module Projects
  # Cron entry point (see Settings.cron_jobs) that asks the
  # LimitedCapacity refresh worker to spin up jobs up to its allowed
  # capacity.
  class ScheduleRefreshBuildArtifactsSizeStatisticsWorker
    include ApplicationWorker
    include CronjobQueue # rubocop:disable Scalability/CronWorkerContext

    data_consistency :always
    feature_category :build_artifacts
    idempotent!

    def perform
      Projects::RefreshBuildArtifactsSizeStatisticsWorker.perform_with_capacity
    end
  end
end

View file

@ -1,8 +1,8 @@
---
name: track_file_size_over_highlight_limit
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61273
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/330374
milestone: '13.12'
name: ci_fix_order_of_subsequent_jobs
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/74394
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/345587
milestone: '14.9'
type: development
group: group::code review
group: group::pipeline authoring
default_enabled: false

View file

@ -1,8 +1,8 @@
---
name: track_highlight_timeouts
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60956
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/329909
milestone: '13.12'
name: ci_trigger_forward_variables
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82676
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/355572
milestone: '14.9'
type: development
group: group::code review
group: group::pipeline authoring
default_enabled: false

View file

@ -0,0 +1,8 @@
---
name: projects_build_artifacts_size_refresh_high
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81306
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/356018
milestone: '14.9'
type: development
group: group::pipeline insights
default_enabled: false

View file

@ -0,0 +1,8 @@
---
name: projects_build_artifacts_size_refresh_low
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81306
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/356018
milestone: '14.9'
type: development
group: group::pipeline insights
default_enabled: false

View file

@ -0,0 +1,8 @@
---
name: projects_build_artifacts_size_refresh_medium
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81306
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/356018
milestone: '14.9'
type: development
group: group::pipeline insights
default_enabled: false

View file

@ -620,6 +620,9 @@ Settings.cron_jobs['issues_reschedule_stuck_issue_rebalances']['job_class'] = 'I
Settings.cron_jobs['clusters_integrations_check_prometheus_health_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['clusters_integrations_check_prometheus_health_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['clusters_integrations_check_prometheus_health_worker']['job_class'] = 'Clusters::Integrations::CheckPrometheusHealthWorker'
Settings.cron_jobs['projects_schedule_refresh_build_artifacts_size_statistics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['projects_schedule_refresh_build_artifacts_size_statistics_worker']['cron'] ||= '2/17 * * * *'
Settings.cron_jobs['projects_schedule_refresh_build_artifacts_size_statistics_worker']['job_class'] = 'Projects::ScheduleRefreshBuildArtifactsSizeStatisticsWorker'
Gitlab.ee do
Settings.cron_jobs['analytics_devops_adoption_create_all_snapshots_worker'] ||= Settingslogic.new({})

View file

@ -363,6 +363,8 @@
- 1
- - projects_process_sync_events
- 1
- - projects_refresh_build_artifacts_size_statistics
- 1
- - projects_schedule_bulk_repository_shard_moves
- 1
- - projects_update_repository_storage

View file

@ -0,0 +1,24 @@
# frozen_string_literal: true

# Creates the work-queue table backing Projects::BuildArtifactsSizeRefresh.
class CreateProjectBuildArtifactsSizeRefresh < Gitlab::Database::Migration[1.0]
  enable_lock_retries!

  # Matches Projects::BuildArtifactsSizeRefresh::STATES[:created].
  CREATED_STATE = 1

  def change
    create_table :project_build_artifacts_size_refreshes do |t|
      t.references :project, index: { unique: true }, foreign_key: { on_delete: :cascade }, null: false
      t.bigint :last_job_artifact_id, null: true
      t.integer :state, null: false, default: CREATED_STATE, limit: 1
      t.datetime_with_timezone :refresh_started_at, null: true
      t.timestamps_with_timezone null: false

      # We will use this index for 2 purposes:
      # - for finding rows with state = :created or :pending (the model's
      #   `remaining` scope)
      # - for finding rows with state = :running and updated_at < x.days.ago
      #   which we can use to find jobs that were not able to complete and
      #   considered stale so we can retry
      t.index [:state, :updated_at], name: 'idx_build_artifacts_size_refreshes_state_updated_at'
    end
  end
end

View file

@ -0,0 +1,7 @@
# frozen_string_literal: true

# Adds the DORA "time to restore service" daily metric column, exposed
# through the project-level DORA metrics API (`metric=time_to_restore_service`).
class AddTimeToRestoreServiceDoraMetric < Gitlab::Database::Migration[1.0]
  def change
    add_column :dora_daily_metrics, :time_to_restore_service_in_seconds, :integer
  end
end

View file

@ -0,0 +1,15 @@
# frozen_string_literal: true

# Partial index to speed up queries over closed incidents per project.
# NOTE(review): the WHERE clause assumes issue_type = 1 is "incident" and
# state_id = 2 is "closed" — confirm against the Issue model enums.
class AddIndexOnIssuesClosedIncidents < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  INDEX_NAME = 'index_on_issues_closed_incidents_by_project_id_and_closed_at'

  def up
    add_concurrent_index :issues, [:project_id, :closed_at], where: "issue_type = 1 AND state_id = 2", name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :issues, INDEX_NAME
  end
end

View file

@ -0,0 +1,15 @@
# frozen_string_literal: true

# Supports Projects::BuildArtifactsSizeRefresh#next_batch, which filters
# job artifacts by project_id and created_at and pages by id.
# NOTE(review): the index name lists the columns in a different order than
# the actual definition ([:project_id, :created_at, :id]) — confirm the
# name is intentional before referencing it elsewhere.
class AddIndexCiJobArtifactsProjectIdCreatedAt < Gitlab::Database::Migration[1.0]
  INDEX_NAME = 'index_ci_job_artifacts_on_id_project_id_and_created_at'

  disable_ddl_transaction!

  def up
    add_concurrent_index :ci_job_artifacts, [:project_id, :created_at, :id], name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :ci_job_artifacts, INDEX_NAME
  end
end

View file

@ -0,0 +1 @@
d0a8daf9fb9892fc92b03f13de4d7e470e5c54f03b09f887cdd45bc5eb9a7e37

View file

@ -0,0 +1 @@
3385dc0dc2a3d306e01a719b7a21197ea8468976d37abab932beade4780bb4ff

View file

@ -0,0 +1 @@
9e62675366f9c2f0fc159a9748409dbcaea240c813ab19ea26d24c966e5fd6c8

View file

@ -0,0 +1 @@
7992448797888fd69d1e5cd4f2602e5a2b49a57052c50b19522f37d711c9f2f2

View file

@ -14461,6 +14461,7 @@ CREATE TABLE dora_daily_metrics (
date date NOT NULL,
deployment_frequency integer,
lead_time_for_changes_in_seconds integer,
time_to_restore_service_in_seconds integer,
CONSTRAINT dora_daily_metrics_deployment_frequency_positive CHECK ((deployment_frequency >= 0)),
CONSTRAINT dora_daily_metrics_lead_time_for_changes_in_seconds_positive CHECK ((lead_time_for_changes_in_seconds >= 0))
);
@ -18973,6 +18974,25 @@ CREATE SEQUENCE project_auto_devops_id_seq
ALTER SEQUENCE project_auto_devops_id_seq OWNED BY project_auto_devops.id;
CREATE TABLE project_build_artifacts_size_refreshes (
id bigint NOT NULL,
project_id bigint NOT NULL,
last_job_artifact_id bigint,
state smallint DEFAULT 1 NOT NULL,
refresh_started_at timestamp with time zone,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL
);
CREATE SEQUENCE project_build_artifacts_size_refreshes_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE project_build_artifacts_size_refreshes_id_seq OWNED BY project_build_artifacts_size_refreshes.id;
CREATE TABLE project_ci_cd_settings (
id integer NOT NULL,
project_id integer NOT NULL,
@ -22869,6 +22889,8 @@ ALTER TABLE ONLY project_aliases ALTER COLUMN id SET DEFAULT nextval('project_al
ALTER TABLE ONLY project_auto_devops ALTER COLUMN id SET DEFAULT nextval('project_auto_devops_id_seq'::regclass);
ALTER TABLE ONLY project_build_artifacts_size_refreshes ALTER COLUMN id SET DEFAULT nextval('project_build_artifacts_size_refreshes_id_seq'::regclass);
ALTER TABLE ONLY project_ci_cd_settings ALTER COLUMN id SET DEFAULT nextval('project_ci_cd_settings_id_seq'::regclass);
ALTER TABLE ONLY project_ci_feature_usages ALTER COLUMN id SET DEFAULT nextval('project_ci_feature_usages_id_seq'::regclass);
@ -24939,6 +24961,9 @@ ALTER TABLE ONLY project_authorizations
ALTER TABLE ONLY project_auto_devops
ADD CONSTRAINT project_auto_devops_pkey PRIMARY KEY (id);
ALTER TABLE ONLY project_build_artifacts_size_refreshes
ADD CONSTRAINT project_build_artifacts_size_refreshes_pkey PRIMARY KEY (id);
ALTER TABLE ONLY project_ci_cd_settings
ADD CONSTRAINT project_ci_cd_settings_pkey PRIMARY KEY (id);
@ -26397,6 +26422,8 @@ CREATE INDEX idx_audit_events_part_on_entity_id_desc_author_id_created_at ON ONL
CREATE INDEX idx_award_emoji_on_user_emoji_name_awardable_type_awardable_id ON award_emoji USING btree (user_id, name, awardable_type, awardable_id);
CREATE INDEX idx_build_artifacts_size_refreshes_state_updated_at ON project_build_artifacts_size_refreshes USING btree (state, updated_at);
CREATE INDEX idx_ci_pipelines_artifacts_locked ON ci_pipelines USING btree (ci_ref_id, id) WHERE (locked = 1);
CREATE INDEX idx_container_exp_policies_on_project_id_next_run_at ON container_expiration_policies USING btree (project_id, next_run_at) WHERE (enabled = true);
@ -26919,6 +26946,8 @@ CREATE INDEX index_ci_job_artifacts_on_file_store ON ci_job_artifacts USING btre
CREATE INDEX index_ci_job_artifacts_on_file_type_for_devops_adoption ON ci_job_artifacts USING btree (file_type, project_id, created_at) WHERE (file_type = ANY (ARRAY[5, 6, 8, 23]));
CREATE INDEX index_ci_job_artifacts_on_id_project_id_and_created_at ON ci_job_artifacts USING btree (project_id, created_at, id);
CREATE INDEX index_ci_job_artifacts_on_id_project_id_and_file_type ON ci_job_artifacts USING btree (project_id, file_type, id);
CREATE UNIQUE INDEX index_ci_job_artifacts_on_job_id_and_file_type ON ci_job_artifacts USING btree (job_id, file_type);
@ -28291,6 +28320,8 @@ CREATE INDEX index_on_identities_lower_extern_uid_and_provider ON identities USI
CREATE UNIQUE INDEX index_on_instance_statistics_recorded_at_and_identifier ON analytics_usage_trends_measurements USING btree (identifier, recorded_at);
CREATE INDEX index_on_issues_closed_incidents_by_project_id_and_closed_at ON issues USING btree (project_id, closed_at) WHERE ((issue_type = 1) AND (state_id = 2));
CREATE INDEX index_on_label_links_all_columns ON label_links USING btree (target_id, label_id, target_type);
CREATE INDEX index_on_merge_request_assignees_state ON merge_request_assignees USING btree (state) WHERE (state = 2);
@ -28525,6 +28556,8 @@ CREATE INDEX index_project_aliases_on_project_id ON project_aliases USING btree
CREATE UNIQUE INDEX index_project_auto_devops_on_project_id ON project_auto_devops USING btree (project_id);
CREATE UNIQUE INDEX index_project_build_artifacts_size_refreshes_on_project_id ON project_build_artifacts_size_refreshes USING btree (project_id);
CREATE UNIQUE INDEX index_project_ci_cd_settings_on_project_id ON project_ci_cd_settings USING btree (project_id);
CREATE UNIQUE INDEX index_project_ci_feature_usages_unique_columns ON project_ci_feature_usages USING btree (project_id, feature, default_branch);
@ -32664,6 +32697,9 @@ ALTER TABLE ONLY list_user_preferences
ALTER TABLE ONLY merge_request_cleanup_schedules
ADD CONSTRAINT fk_rails_92dd0e705c FOREIGN KEY (merge_request_id) REFERENCES merge_requests(id) ON DELETE CASCADE;
ALTER TABLE ONLY project_build_artifacts_size_refreshes
ADD CONSTRAINT fk_rails_936db5fc44 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY board_labels
ADD CONSTRAINT fk_rails_9374a16edd FOREIGN KEY (board_id) REFERENCES boards(id) ON DELETE CASCADE;

View file

@ -9,6 +9,7 @@ type: reference, api
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/279039) in GitLab 13.10.
> - The legacy key/value pair `{ "<date>" => "<value>" }` was removed from the payload in GitLab 14.0.
> `time_to_restore_service` metric was introduced in GitLab 14.9.
All methods require at least the Reporter role.
@ -20,14 +21,14 @@ Get project-level DORA metrics.
GET /projects/:id/dora/metrics
```
| Attribute | Type | Required | Description |
|-------------- |-------- |----------|----------------------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](../index.md#namespaced-path-encoding) can be accessed by the authenticated user. |
| `metric` | string | yes | The [metric name](../../user/analytics/ci_cd_analytics.md#supported-metrics-in-gitlab). One of `deployment_frequency` or `lead_time_for_changes`. |
| `start_date` | string | no | Date range to start from. ISO 8601 Date format, for example `2021-03-01`. Default is 3 months ago. |
| `end_date` | string | no | Date range to end at. ISO 8601 Date format, for example `2021-03-01`. Default is the current date. |
| `interval` | string | no | The bucketing interval. One of `all`, `monthly` or `daily`. Default is `daily`. |
| `environment_tier` | string | no | The [tier of the environment](../../ci/environments/index.md#deployment-tier-of-environments). Default is `production`. |
| Attribute | Type | Required | Description |
|-------------- |-------- |----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](../index.md#namespaced-path-encoding) can be accessed by the authenticated user. |
| `metric` | string | yes | The [metric name](../../user/analytics/ci_cd_analytics.md#supported-metrics-in-gitlab). One of `deployment_frequency`, `lead_time_for_changes` or `time_to_restore_service`.|
| `start_date` | string | no | Date range to start from. ISO 8601 Date format, for example `2021-03-01`. Default is 3 months ago. |
| `end_date` | string | no | Date range to end at. ISO 8601 Date format, for example `2021-03-01`. Default is the current date. |
| `interval` | string | no | The bucketing interval. One of `all`, `monthly` or `daily`. Default is `daily`. |
| `environment_tier` | string | no | The [tier of the environment](../../ci/environments/index.md#deployment-tier-of-environments). Default is `production`. |
Example request:
@ -63,7 +64,7 @@ GET /groups/:id/dora/metrics
| Attribute | Type | Required | Description |
|-------------- |-------- |----------|----------------------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](../index.md#namespaced-path-encoding) can be accessed by the authenticated user. |
| `metric` | string | yes | The [metric name](../../user/analytics/ci_cd_analytics.md#supported-metrics-in-gitlab). One of `deployment_frequency` or `lead_time_for_changes`. |
| `metric` | string | yes | The [metric name](../../user/analytics/ci_cd_analytics.md#supported-metrics-in-gitlab). One of `deployment_frequency`, `lead_time_for_changes` or `time_to_restore_service`. |
| `start_date` | string | no | Date range to start from. ISO 8601 Date format, for example `2021-03-01`. Default is 3 months ago. |
| `end_date` | string | no | Date range to end at. ISO 8601 Date format, for example `2021-03-01`. Default is the current date. |
| `interval` | string | no | The bucketing interval. One of `all`, `monthly` or `daily`. Default is `daily`. |
@ -97,6 +98,7 @@ API response has a different meaning depending on the provided `metric` query
parameter:
| `metric` query parameter | Description of `value` in response |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| ------------------------ |--------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `deployment_frequency` | The number of successful deployments during the time period. |
| `lead_time_for_changes` | The median number of seconds between the merge of the merge request (MR) and the deployment of the MR's commits for all MRs deployed during the time period. |
| `time_to_restore_service` | The median number of seconds an incident was open during the time period. Available only for the production environment. |

View file

@ -18098,6 +18098,7 @@ All supported DORA metric types.
| ----- | ----------- |
| <a id="dorametrictypedeployment_frequency"></a>`DEPLOYMENT_FREQUENCY` | Deployment frequency. |
| <a id="dorametrictypelead_time_for_changes"></a>`LEAD_TIME_FOR_CHANGES` | Lead time for changes. |
| <a id="dorametrictypetime_to_restore_service"></a>`TIME_TO_RESTORE_SERVICE` | Time to restore service. |
### `EntryType`

View file

@ -6,10 +6,10 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Linked epics API **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/352493) in GitLab 14.9 [with a flag](../administration/feature_flags.md) named `related_epics_widget`. Disabled by default.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/352493) in GitLab 14.9 [with a flag](../administration/feature_flags.md) named `related_epics_widget`. Enabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available, ask an administrator to [enable the feature flag](../administration/feature_flags.md) named `related_epics_widget`. On GitLab.com, this feature is not available.
On self-managed GitLab, by default this feature is available. To hide the feature, ask an administrator to [disable the feature flag](../administration/feature_flags.md) named `related_epics_widget`. On GitLab.com, this feature is available.
If the Related Epics feature is not available in your GitLab plan, a `403` status code is returned.

View file

@ -3692,6 +3692,61 @@ trigger_job:
In this example, jobs from subsequent stages wait for the triggered pipeline to
successfully complete before starting.
#### `trigger:forward`
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/213729) in GitLab 14.9 [with a flag](../../administration/feature_flags.md) named `ci_trigger_forward_variables`. Disabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available,
ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `ci_trigger_forward_variables`.
The feature is not ready for production use.
Use `trigger:forward` to specify what to forward to the downstream pipeline. You can control
what is forwarded to both [parent-child pipelines](../pipelines/parent_child_pipelines.md)
and [multi-project pipelines](../pipelines/multi_project_pipelines.md).
**Possible inputs**:
- `yaml_variables`: `true` (default), or `false`. When `true`, variables defined
in the trigger job are passed to downstream pipelines.
- `pipeline_variables`: `true` or `false` (default). When `true`, [manual pipeline variables](../variables/index.md#override-a-defined-cicd-variable)
are passed to downstream pipelines.
**Example of `trigger:forward`**:
[Run this pipeline manually](../pipelines/index.md#run-a-pipeline-manually), with
the CI/CD variable `MYVAR = my value`:
```yaml
variables: # default variables for each job
VAR: value
# Default behavior:
# - VAR is passed to the child
# - MYVAR is not passed to the child
child1:
trigger:
include: .child-pipeline.yml
# Forward pipeline variables:
# - VAR is passed to the child
# - MYVAR is passed to the child
child2:
trigger:
include: .child-pipeline.yml
forward:
pipeline_variables: true
# Do not forward YAML variables:
# - VAR is not passed to the child
# - MYVAR is not passed to the child
child3:
trigger:
include: .child-pipeline.yml
forward:
yaml_variables: false
```
### `variables`
[CI/CD variables](../variables/index.md) are configurable values that are passed to jobs.

View file

@ -6,12 +6,12 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Linked epics **(ULTIMATE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/353473) in GitLab 14.9 [with a flag](../../../administration/feature_flags.md) named `related_epics_widget`. Disabled by default.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/353473) in GitLab 14.9 [with a flag](../../../administration/feature_flags.md) named `related_epics_widget`. Enabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available,
ask an administrator to [enable the feature flag](../../../administration/feature_flags.md)
named `related_epics_widget`. On GitLab.com, this feature is not available.
On self-managed GitLab, by default this feature is available. To hide the feature,
ask an administrator to [disable the feature flag](../../../administration/feature_flags.md)
named `related_epics_widget`. On GitLab.com, this feature is available.
Linked epics are a bi-directional relationship between any two epics and appear in a block below
the epic description. You can link epics in different groups.

View file

@ -5,12 +5,13 @@ module Gitlab
class Config
module Entry
##
# Entry that represents a cross-project downstream trigger.
# Entry that represents a parent-child or cross-project downstream trigger.
#
class Trigger < ::Gitlab::Config::Entry::Simplifiable
strategy :SimpleTrigger, if: -> (config) { config.is_a?(String) }
strategy :ComplexTrigger, if: -> (config) { config.is_a?(Hash) }
# cross-project
class SimpleTrigger < ::Gitlab::Config::Entry::Node
include ::Gitlab::Config::Entry::Validatable
@ -28,11 +29,13 @@ module Gitlab
config.key?(:include)
end
# cross-project
class CrossProjectTrigger < ::Gitlab::Config::Entry::Node
include ::Gitlab::Config::Entry::Validatable
include ::Gitlab::Config::Entry::Attributable
include ::Gitlab::Config::Entry::Configurable
ALLOWED_KEYS = %i[project branch strategy].freeze
ALLOWED_KEYS = %i[project branch strategy forward].freeze
attributes :project, :branch, :strategy
validations do
@ -42,15 +45,26 @@ module Gitlab
validates :branch, type: String, allow_nil: true
validates :strategy, type: String, inclusion: { in: %w[depend], message: 'should be depend' }, allow_nil: true
end
entry :forward, ::Gitlab::Ci::Config::Entry::Trigger::Forward,
description: 'List what to forward to downstream pipelines'
def value
{ project: project,
branch: branch,
strategy: strategy,
forward: forward_value }.compact
end
end
# parent-child
class SameProjectTrigger < ::Gitlab::Config::Entry::Node
include ::Gitlab::Config::Entry::Validatable
include ::Gitlab::Config::Entry::Attributable
include ::Gitlab::Config::Entry::Configurable
INCLUDE_MAX_SIZE = 3
ALLOWED_KEYS = %i[strategy include].freeze
ALLOWED_KEYS = %i[strategy include forward].freeze
attributes :strategy
validations do
@ -64,8 +78,13 @@ module Gitlab
reserved: true,
metadata: { max_size: INCLUDE_MAX_SIZE }
entry :forward, ::Gitlab::Ci::Config::Entry::Trigger::Forward,
description: 'List what to forward to downstream pipelines'
def value
@config
{ include: @config[:include],
strategy: strategy,
forward: forward_value }.compact
end
end

View file

@ -0,0 +1,32 @@
# frozen_string_literal: true
module Gitlab
  module Ci
    class Config
      module Entry
        ##
        # Entry that represents the configuration for passing attributes to the downstream pipeline
        #
        class Trigger
          # Models the `trigger:forward` keyword of a trigger job. Controls
          # which variables the upstream pipeline forwards to the downstream
          # (parent-child or multi-project) pipeline.
          class Forward < ::Gitlab::Config::Entry::Node
            include ::Gitlab::Config::Entry::Validatable
            include ::Gitlab::Config::Entry::Attributable
            # The only keys accepted under `trigger:forward` in `.gitlab-ci.yml`:
            # - yaml_variables: forward variables defined in the trigger job
            # - pipeline_variables: forward manually supplied pipeline variables
            ALLOWED_KEYS = %i[yaml_variables pipeline_variables].freeze
            attributes ALLOWED_KEYS
            validations do
              # Reject any key outside ALLOWED_KEYS with a "contains unknown keys" error.
              validates :config, allowed_keys: ALLOWED_KEYS
              # Both flags are optional booleans; `nil` means "fall back to the default".
              with_options allow_nil: true do
                validates :yaml_variables, boolean: true
                validates :pipeline_variables, boolean: true
              end
            end
          end
        end
      end
    end
  end
end

View file

@ -45,7 +45,7 @@ module Gitlab
validate_job!(name, job)
end
YamlProcessor::Dag.check_circular_dependencies!(@jobs)
check_circular_dependencies
end
def validate_job!(name, job)
@ -146,6 +146,17 @@ module Gitlab
end
end
def check_circular_dependencies
jobs = @jobs.values.to_h do |job|
name = job[:name].to_s
needs = job.dig(:needs, :job).to_a
[name, needs.map { |need| need[:name].to_s }]
end
Dag.check_circular_dependencies!(jobs)
end
def error!(message)
raise ValidationError, message
end

View file

@ -7,28 +7,22 @@ module Gitlab
class Dag
include TSort
MissingNodeError = Class.new(StandardError)
def initialize(nodes)
@nodes = nodes
end
def self.order(jobs)
new(jobs).tsort
end
def self.check_circular_dependencies!(jobs)
nodes = jobs.values.to_h do |job|
name = job[:name].to_s
needs = job.dig(:needs, :job).to_a
[name, needs.map { |need| need[:name].to_s }]
end
new(nodes).tsort
new(jobs).tsort
rescue TSort::Cyclic
raise ValidationError, 'The pipeline has circular dependencies'
rescue MissingNodeError
end
def tsort_each_child(node, &block)
raise MissingNodeError, "node #{node} is missing" unless @nodes[node]
return unless @nodes[node]
@nodes[node].each(&block)
end

View file

@ -401,6 +401,7 @@ project_alerting_settings: :gitlab_main
project_aliases: :gitlab_main
project_authorizations: :gitlab_main
project_auto_devops: :gitlab_main
project_build_artifacts_size_refreshes: :gitlab_main
project_ci_cd_settings: :gitlab_main
project_ci_feature_usages: :gitlab_main
project_compliance_framework_settings: :gitlab_main

View file

@ -17,7 +17,11 @@ module Gitlab
def observe(version:, name:, connection:, &block)
observation = Observation.new(version: version, name: name, success: false)
observers = observer_classes.map { |c| c.new(observation, @result_dir, connection) }
per_migration_result_dir = File.join(@result_dir, name)
FileUtils.mkdir_p(per_migration_result_dir)
observers = observer_classes.map { |c| c.new(observation, per_migration_result_dir, connection) }
on_each_observer(observers) { |observer| observer.before }

View file

@ -6,7 +6,7 @@ module Gitlab
module Observers
class QueryDetails < MigrationObserver
def before
file_path = File.join(output_dir, "#{observation.version}_#{observation.name}-query-details.json")
file_path = File.join(output_dir, "query-details.json")
@file = File.open(file_path, 'wb')
@writer = Oj::StreamWriter.new(@file, {})
@writer.push_array

View file

@ -7,7 +7,7 @@ module Gitlab
class QueryLog < MigrationObserver
def before
@logger_was = ActiveRecord::Base.logger
file_path = File.join(output_dir, "#{observation.version}_#{observation.name}.log")
file_path = File.join(output_dir, "migration.log")
@logger = Logger.new(file_path)
ActiveRecord::Base.logger = @logger
end

View file

@ -6,7 +6,7 @@ module Gitlab
module Observers
class TransactionDuration < MigrationObserver
def before
file_path = File.join(output_dir, "#{observation.version}_#{observation.name}-transaction-duration.json")
file_path = File.join(output_dir, "transaction-duration.json")
@file = File.open(file_path, 'wb')
@writer = Oj::StreamWriter.new(@file, {})
@writer.push_array

View file

@ -5,6 +5,8 @@ module Gitlab
module Migrations
class Runner
BASE_RESULT_DIR = Rails.root.join('tmp', 'migration-testing').freeze
METADATA_FILENAME = 'metadata.json'
SCHEMA_VERSION = 2 # Version of the output format produced by the runner
class << self
def up
@ -75,9 +77,11 @@ module Gitlab
end
ensure
if instrumentation
File.open(File.join(result_dir, Gitlab::Database::Migrations::Instrumentation::STATS_FILENAME), 'wb+') do |io|
io << instrumentation.observations.to_json
end
stats_filename = File.join(result_dir, Gitlab::Database::Migrations::Instrumentation::STATS_FILENAME)
File.write(stats_filename, instrumentation.observations.to_json)
metadata_filename = File.join(result_dir, METADATA_FILENAME)
File.write(metadata_filename, { version: SCHEMA_VERSION }.to_json)
end
# We clear the cache here to mirror the cache clearing that happens at the end of `db:migrate` tasks

View file

@ -11,11 +11,7 @@ module Gitlab
end
def self.too_large?(size)
return false unless size.to_i > self.file_size_limit
over_highlight_size_limit.increment(source: "file size: #{self.file_size_limit}") if Feature.enabled?(:track_file_size_over_highlight_limit)
true
size.to_i > self.file_size_limit
end
attr_reader :blob_name
@ -74,14 +70,10 @@ module Gitlab
end
def highlight_rich(text, continue: true)
add_highlight_attempt_metric
tag = lexer.tag
tokens = lexer.lex(text, continue: continue)
Timeout.timeout(timeout_time) { @formatter.format(tokens, **context, tag: tag).html_safe }
rescue Timeout::Error => e
add_highlight_timeout_metric
Gitlab::ErrorTracking.track_and_raise_for_dev_exception(e)
highlight_plain(text)
rescue StandardError
@ -95,38 +87,5 @@ module Gitlab
def link_dependencies(text, highlighted_text)
Gitlab::DependencyLinker.link(blob_name, text, highlighted_text)
end
def add_highlight_attempt_metric
return unless Feature.enabled?(:track_highlight_timeouts)
highlighting_attempt.increment(source: (@language || "undefined"))
end
def add_highlight_timeout_metric
return unless Feature.enabled?(:track_highlight_timeouts)
highlight_timeout.increment(source: Gitlab::Runtime.sidekiq? ? "background" : "foreground")
end
def highlighting_attempt
@highlight_attempt ||= Gitlab::Metrics.counter(
:file_highlighting_attempt,
'Counts the times highlighting has been attempted on a file'
)
end
def highlight_timeout
@highlight_timeout ||= Gitlab::Metrics.counter(
:highlight_timeout,
'Counts the times highlights have timed out'
)
end
def self.over_highlight_size_limit
@over_highlight_size_limit ||= Gitlab::Metrics.counter(
:over_highlight_size_limit,
'Count the times files have been over the highlight size limit'
)
end
end
end

View file

@ -0,0 +1,23 @@
# frozen_string_literal: true
namespace :gitlab do
  desc "GitLab | Refresh build artifacts size project statistics for given project IDs"
  # Batch size used when enqueueing refreshes, so we never load or schedule
  # an unbounded number of projects in a single pass.
  BUILD_ARTIFACTS_SIZE_REFRESH_ENQUEUE_BATCH_SIZE = 500
  # Accepts space-separated project IDs either via STDIN (piped input takes
  # precedence) or as the task argument string, and enqueues a build-artifacts
  # size refresh for each referenced project.
  task :refresh_project_statistics_build_artifacts_size, [:project_ids] => :environment do |_t, args|
    project_ids = []
    project_ids = $stdin.read.split unless $stdin.tty?
    project_ids = args.project_ids.to_s.split unless project_ids.any?
    if project_ids.any?
      # Pass `false` so the final group is not nil-padded; padding would leak
      # `nil` into the `where(id: ids)` query as a spurious `id IS NULL` clause.
      project_ids.in_groups_of(BUILD_ARTIFACTS_SIZE_REFRESH_ENQUEUE_BATCH_SIZE, false) do |ids|
        projects = Project.where(id: ids)
        Projects::BuildArtifactsSizeRefresh.enqueue_refresh(projects)
      end
      puts 'Done.'.green
    else
      puts 'Please provide a string of space-separated project IDs as the argument or through the STDIN'.red
    end
  end
end

View file

@ -1,7 +1,7 @@
# frozen_string_literal: true
module QA
RSpec.describe 'Manage', :requires_admin, :reliable do
RSpec.describe 'Manage', :reliable do
describe 'Add project member' do
it 'user adds project member', testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/347887' do
Flow::Login.sign_in

View file

@ -2,7 +2,7 @@
module QA
# Tagging with issue for a transient invite group modal search bug, but does not require quarantine at this time
RSpec.describe 'Manage', :requires_admin, :transient, issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/349379' do
RSpec.describe 'Manage', :transient, issue: 'https://gitlab.com/gitlab-org/gitlab/-/issues/349379' do
describe 'Invite group' do
shared_examples 'invites group to project' do
it 'verifies group is added and members can access project with correct access level' do
@ -16,6 +16,8 @@ module QA
Flow::Login.sign_in(as: @user)
Page::Dashboard::Projects.perform do |projects|
projects.filter_by_name(project.name)
expect(projects).to have_project_with_access_role(project.name, 'Developer')
end

View file

@ -1,7 +1,7 @@
# frozen_string_literal: true
module QA
RSpec.describe 'Plan', :orchestrated, :smtp, :requires_admin do
RSpec.describe 'Plan', :orchestrated, :smtp do
describe 'Email Notification' do
include Support::API

View file

@ -1590,14 +1590,18 @@ RSpec.describe ProjectsController do
get :show, format: :atom, params: { id: public_project, namespace_id: public_project.namespace }
expect(response).to render_template('xml.atom')
expect(response).to have_gitlab_http_status(:success)
expect(response).to render_template(:show)
expect(response).to render_template(layout: :xml)
expect(assigns(:events)).to eq([event])
end
it 'filters by calling event.visible_to_user?' do
get :show, format: :atom, params: { id: public_project, namespace_id: public_project.namespace }
expect(response).to render_template('xml.atom')
expect(response).to have_gitlab_http_status(:success)
expect(response).to render_template(:show)
expect(response).to render_template(layout: :xml)
expect(assigns(:events)).to eq([event])
end
end

View file

@ -67,6 +67,7 @@ RSpec.describe 'Database schema' do
oauth_access_tokens: %w[resource_owner_id application_id],
oauth_applications: %w[owner_id],
product_analytics_events_experimental: %w[event_id txn_id user_id],
project_build_artifacts_size_refreshes: %w[last_job_artifact_id],
project_group_links: %w[group_id],
project_statistics: %w[namespace_id],
projects: %w[creator_id ci_id mirror_user_id],

View file

@ -0,0 +1,27 @@
# frozen_string_literal: true
FactoryBot.define do
  # Factory for Projects::BuildArtifactsSizeRefresh records; each trait maps
  # to one of the refresh state machine's STATES values.
  factory :project_build_artifacts_size_refresh, class: 'Projects::BuildArtifactsSizeRefresh' do
    project factory: :project
    # Freshly scheduled refresh that has not started yet.
    trait :created do
      state { Projects::BuildArtifactsSizeRefresh::STATES[:created] }
    end
    # Refresh that has been picked up but is waiting to run.
    trait :pending do
      state { Projects::BuildArtifactsSizeRefresh::STATES[:pending] }
      refresh_started_at { Time.zone.now }
    end
    # Refresh currently in progress.
    trait :running do
      state { Projects::BuildArtifactsSizeRefresh::STATES[:running] }
      refresh_started_at { Time.zone.now }
    end
    # Running refresh whose timestamps are old enough to be considered stuck.
    trait :stale do
      running
      refresh_started_at { 30.days.ago }
      updated_at { 30.days.ago }
    end
  end
end

View file

@ -294,6 +294,28 @@ describe('CE IssuesListApp component', () => {
});
describe('initial url params', () => {
describe('page', () => {
it('page_after is set from the url params', () => {
setWindowLocation('?page_after=randomCursorString');
wrapper = mountComponent();
expect(findIssuableList().props('urlParams')).toMatchObject({
page_after: 'randomCursorString',
});
});
it('page_before is set from the url params', () => {
setWindowLocation('?page_before=anotherRandomCursorString');
wrapper = mountComponent();
expect(findIssuableList().props('urlParams')).toMatchObject({
page_before: 'anotherRandomCursorString',
});
});
});
describe('search', () => {
it('is set from the url params', () => {
setWindowLocation(locationSearch);
@ -881,7 +903,12 @@ describe('CE IssuesListApp component', () => {
});
it('does not update IssuableList with url params ', async () => {
const defaultParams = { sort: 'created_date', state: 'opened' };
const defaultParams = {
page_after: null,
page_before: null,
sort: 'created_date',
state: 'opened',
};
expect(findIssuableList().props('urlParams')).toEqual(defaultParams);
});

View file

@ -9,8 +9,8 @@ import {
urlParamsWithSpecialValues,
} from 'jest/issues/list/mock_data';
import {
defaultPageSizeParams,
largePageSizeParams,
PAGE_SIZE,
PAGE_SIZE_MANUAL,
RELATIVE_POSITION_ASC,
urlSortParams,
} from '~/issues/list/constants';
@ -29,10 +29,37 @@ describe('getInitialPageParams', () => {
it.each(Object.keys(urlSortParams))(
'returns the correct page params for sort key %s',
(sortKey) => {
const expectedPageParams =
sortKey === RELATIVE_POSITION_ASC ? largePageSizeParams : defaultPageSizeParams;
const firstPageSize = sortKey === RELATIVE_POSITION_ASC ? PAGE_SIZE_MANUAL : PAGE_SIZE;
expect(getInitialPageParams(sortKey)).toBe(expectedPageParams);
expect(getInitialPageParams(sortKey)).toEqual({ firstPageSize });
},
);
it.each(Object.keys(urlSortParams))(
'returns the correct page params for sort key %s with afterCursor',
(sortKey) => {
const firstPageSize = sortKey === RELATIVE_POSITION_ASC ? PAGE_SIZE_MANUAL : PAGE_SIZE;
const afterCursor = 'randomCursorString';
const beforeCursor = undefined;
expect(getInitialPageParams(sortKey, afterCursor, beforeCursor)).toEqual({
firstPageSize,
afterCursor,
});
},
);
it.each(Object.keys(urlSortParams))(
'returns the correct page params for sort key %s with beforeCursor',
(sortKey) => {
const firstPageSize = sortKey === RELATIVE_POSITION_ASC ? PAGE_SIZE_MANUAL : PAGE_SIZE;
const afterCursor = undefined;
const beforeCursor = 'anotherRandomCursorString';
expect(getInitialPageParams(sortKey, afterCursor, beforeCursor)).toEqual({
firstPageSize,
beforeCursor,
});
},
);
});

View file

@ -293,6 +293,30 @@ RSpec.describe Gitlab::Ci::Config::Entry::Bridge do
end
end
end
context 'when bridge trigger contains forward' do
let(:config) do
{ trigger: { project: 'some/project', forward: { pipeline_variables: true } } }
end
describe '#valid?' do
it { is_expected.to be_valid }
end
describe '#value' do
it 'returns a bridge job configuration hash' do
expect(subject.value).to eq(name: :my_bridge,
trigger: { project: 'some/project',
forward: { pipeline_variables: true } },
ignore: false,
stage: 'test',
only: { refs: %w[branches tags] },
job_variables: {},
root_variables_inheritance: true,
scheduling_type: :stage)
end
end
end
end
describe '#manual_action?' do

View file

@ -0,0 +1,64 @@
# frozen_string_literal: true
require 'spec_helper'
# Covers validation and value extraction of the `trigger:forward` entry:
# valid boolean configs, the empty config, unknown keys, and non-boolean values.
RSpec.describe Gitlab::Ci::Config::Entry::Trigger::Forward do
  subject(:entry) { described_class.new(config) }
  context 'when entry config is correct' do
    let(:config) do
      {
        yaml_variables: false,
        pipeline_variables: false
      }
    end
    it 'returns set values' do
      expect(entry.value).to eq(yaml_variables: false, pipeline_variables: false)
    end
    it { is_expected.to be_valid }
  end
  context 'when entry config value is empty' do
    let(:config) do
      {}
    end
    # An empty hash is valid: both keys are optional and defaults apply downstream.
    it 'returns empty' do
      expect(entry.value).to eq({})
    end
    it { is_expected.to be_valid }
  end
  context 'when entry value is not correct' do
    context 'invalid attribute' do
      let(:config) do
        {
          xxx_variables: true
        }
      end
      it { is_expected.not_to be_valid }
      # Unknown keys are rejected by the `allowed_keys` validation.
      it 'reports error' do
        expect(entry.errors).to include 'forward config contains unknown keys: xxx_variables'
      end
    end
    context 'non-boolean value' do
      let(:config) do
        {
          yaml_variables: 'okay'
        }
      end
      it { is_expected.not_to be_valid }
      # Values must be booleans; any other type fails the boolean validation.
      it 'reports error' do
        expect(entry.errors).to include 'forward yaml variables should be a boolean value'
      end
    end
  end
end

View file

@ -34,7 +34,7 @@ RSpec.describe Gitlab::Ci::Config::Entry::Trigger do
end
end
context 'when trigger is a hash' do
context 'when trigger is a hash - cross-project' do
context 'when branch is provided' do
let(:config) { { project: 'some/project', branch: 'feature' } }
@ -82,40 +82,6 @@ RSpec.describe Gitlab::Ci::Config::Entry::Trigger do
end
end
describe '#include' do
context 'with simple include' do
let(:config) { { include: 'path/to/config.yml' } }
it { is_expected.to be_valid }
it 'returns a trigger configuration hash' do
expect(subject.value).to eq(include: 'path/to/config.yml' )
end
end
context 'with project' do
let(:config) { { project: 'some/project', include: 'path/to/config.yml' } }
it { is_expected.not_to be_valid }
it 'returns an error' do
expect(subject.errors.first)
.to match /config contains unknown keys: project/
end
end
context 'with branch' do
let(:config) { { branch: 'feature', include: 'path/to/config.yml' } }
it { is_expected.not_to be_valid }
it 'returns an error' do
expect(subject.errors.first)
.to match /config contains unknown keys: branch/
end
end
end
context 'when config contains unknown keys' do
let(:config) { { project: 'some/project', unknown: 123 } }
@ -130,6 +96,72 @@ RSpec.describe Gitlab::Ci::Config::Entry::Trigger do
end
end
end
context 'with forward' do
let(:config) { { project: 'some/project', forward: { pipeline_variables: true } } }
before do
subject.compose!
end
it { is_expected.to be_valid }
it 'returns a trigger configuration hash' do
expect(subject.value).to eq(
project: 'some/project', forward: { pipeline_variables: true }
)
end
end
end
context 'when trigger is a hash - parent-child' do
context 'with simple include' do
let(:config) { { include: 'path/to/config.yml' } }
it { is_expected.to be_valid }
it 'returns a trigger configuration hash' do
expect(subject.value).to eq(include: 'path/to/config.yml' )
end
end
context 'with project' do
let(:config) { { project: 'some/project', include: 'path/to/config.yml' } }
it { is_expected.not_to be_valid }
it 'returns an error' do
expect(subject.errors.first)
.to match /config contains unknown keys: project/
end
end
context 'with branch' do
let(:config) { { branch: 'feature', include: 'path/to/config.yml' } }
it { is_expected.not_to be_valid }
it 'returns an error' do
expect(subject.errors.first)
.to match /config contains unknown keys: branch/
end
end
context 'with forward' do
let(:config) { { include: 'path/to/config.yml', forward: { yaml_variables: false } } }
before do
subject.compose!
end
it { is_expected.to be_valid }
it 'returns a trigger configuration hash' do
expect(subject.value).to eq(
include: 'path/to/config.yml', forward: { yaml_variables: false }
)
end
end
end
context 'when trigger configuration is not valid' do

View file

@ -27,15 +27,13 @@ RSpec.describe Gitlab::Ci::YamlProcessor::Dag do
end
end
context 'when there is a missing job' do
context 'when there are some missing jobs' do
let(:nodes) do
{ 'job_a' => %w(job_d), 'job_b' => %w(job_a) }
{ 'job_a' => %w(job_d job_f), 'job_b' => %w(job_a job_c job_e) }
end
it 'raises MissingNodeError' do
expect { result }.to raise_error(
Gitlab::Ci::YamlProcessor::Dag::MissingNodeError, 'node job_d is missing'
)
it 'ignores the missing ones and returns in a valid order' do
expect(result).to eq(%w(job_d job_f job_a job_c job_e job_b))
end
end
end

View file

@ -325,6 +325,40 @@ module Gitlab
end
end
end
describe 'bridge job' do
let(:config) do
YAML.dump(rspec: {
trigger: {
project: 'namespace/project',
branch: 'main'
}
})
end
it 'has the attributes' do
expect(subject[:options]).to eq(
trigger: { project: 'namespace/project', branch: 'main' }
)
end
context 'with forward' do
let(:config) do
YAML.dump(rspec: {
trigger: {
project: 'namespace/project',
forward: { pipeline_variables: true }
}
})
end
it 'has the attributes' do
expect(subject[:options]).to eq(
trigger: { project: 'namespace/project', forward: { pipeline_variables: true } }
)
end
end
end
end
describe '#stages_attributes' do

View file

@ -9,7 +9,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::QueryDetails do
let(:query) { "select date_trunc('day', $1::timestamptz) + $2 * (interval '1 hour')" }
let(:query_binds) { [Time.current, 3] }
let(:directory_path) { Dir.mktmpdir }
let(:log_file) { "#{directory_path}/#{migration_version}_#{migration_name}-query-details.json" }
let(:log_file) { "#{directory_path}/query-details.json" }
let(:query_details) { Gitlab::Json.parse(File.read(log_file)) }
let(:migration_version) { 20210422152437 }
let(:migration_name) { 'test' }

View file

@ -18,7 +18,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do
it 'writes a file with the query log' do
observe
expect(File.read("#{directory_path}/#{migration_version}_#{migration_name}.log")).to include(query)
expect(File.read("#{directory_path}/migration.log")).to include(query)
end
it 'does not change the default logger' do

View file

@ -7,7 +7,7 @@ RSpec.describe Gitlab::Database::Migrations::Observers::TransactionDuration do
let(:connection) { ActiveRecord::Migration.connection }
let(:observation) { Gitlab::Database::Migrations::Observation.new(version: migration_version, name: migration_name) }
let(:directory_path) { Dir.mktmpdir }
let(:log_file) { "#{directory_path}/#{migration_version}_#{migration_name}-transaction-duration.json" }
let(:log_file) { "#{directory_path}/transaction-duration.json" }
let(:transaction_duration) { Gitlab::Json.parse(File.read(log_file)) }
let(:migration_version) { 20210422152437 }
let(:migration_name) { 'test' }

View file

@ -79,6 +79,15 @@ RSpec.describe Gitlab::Database::Migrations::Runner do
expect(migration_runs.map(&:dir)).to match_array([:up, :up])
expect(migration_runs.map(&:version_to_migrate)).to eq(pending_migrations.map(&:version))
end
it 'writes a metadata file with the current schema version' do
up.run
metadata_file = result_dir.join('up', described_class::METADATA_FILENAME)
expect(metadata_file.exist?).to be_truthy
metadata = Gitlab::Json.parse(File.read(metadata_file))
expect(metadata).to match('version' => described_class::SCHEMA_VERSION)
end
end
end
@ -105,5 +114,14 @@ RSpec.describe Gitlab::Database::Migrations::Runner do
expect(migration_runs.map(&:version_to_migrate)).to eq(applied_migrations_this_branch.reverse.map(&:version))
end
end
it 'writes a metadata file with the current schema version' do
down.run
metadata_file = result_dir.join('down', described_class::METADATA_FILENAME)
expect(metadata_file.exist?).to be_truthy
metadata = Gitlab::Json.parse(File.read(metadata_file))
expect(metadata).to match('version' => described_class::SCHEMA_VERSION)
end
end
end

View file

@ -53,10 +53,6 @@ RSpec.describe Gitlab::Highlight do
stub_config(extra: { 'maximum_text_highlight_size_kilobytes' => 0.0001 } ) # 1.024 bytes
end
it 'increments the metric for oversized files' do
expect { result }.to change { over_highlight_size_limit('file size: 0.0001') }.by(1)
end
it 'returns plain version for long content' do
expect(result).to eq(%[<span id="LC1" class="line" lang="">(make-pathname :defaults name</span>\n<span id="LC2" class="line" lang="">:type "assem")</span>])
end
@ -126,79 +122,29 @@ RSpec.describe Gitlab::Highlight do
end
context 'timeout' do
subject { described_class.new('file.name', 'Contents') }
subject(:highlight) { described_class.new('file.rb', 'begin', language: 'ruby').highlight('Content') }
it 'utilizes timeout for web' do
expect(Timeout).to receive(:timeout).with(described_class::TIMEOUT_FOREGROUND).and_call_original
subject.highlight("Content")
highlight
end
it 'falls back to plaintext on timeout' do
allow(Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception)
expect(Timeout).to receive(:timeout).and_raise(Timeout::Error)
expect(Rouge::Lexers::PlainText).to receive(:lex).and_call_original
highlight
end
it 'utilizes longer timeout for sidekiq' do
allow(Gitlab::Runtime).to receive(:sidekiq?).and_return(true)
expect(Timeout).to receive(:timeout).with(described_class::TIMEOUT_BACKGROUND).and_call_original
subject.highlight("Content")
highlight
end
end
describe 'highlight timeouts' do
let(:result) { described_class.highlight(file_name, content, language: "ruby") }
context 'when there is an attempt' do
it "increments the attempt counter with a defined language" do
expect { result }.to change { highlight_attempt_total("ruby") }
end
it "increments the attempt counter with an undefined language" do
expect do
described_class.highlight(file_name, content)
end.to change { highlight_attempt_total("undefined") }
end
end
context 'when there is a timeout error while highlighting' do
before do
allow(Timeout).to receive(:timeout).twice.and_raise(Timeout::Error)
# This is done twice because it's rescued first and then
# calls the original exception
end
it "increments the foreground counter if it's in the foreground" do
expect { result }
.to raise_error(Timeout::Error)
.and change { highlight_timeout_total('foreground') }.by(1)
.and not_change { highlight_timeout_total('background') }
end
it "increments the background counter if it's in the background" do
allow(Gitlab::Runtime).to receive(:sidekiq?).and_return(true)
expect { result }
.to raise_error(Timeout::Error)
.and change { highlight_timeout_total('background') }.by(1)
.and not_change { highlight_timeout_total('foreground') }
end
end
end
end
def highlight_timeout_total(source)
Gitlab::Metrics
.counter(:highlight_timeout, 'Counts the times highlights have timed out')
.get(source: source)
end
def highlight_attempt_total(source)
Gitlab::Metrics
.counter(:file_highlighting_attempt, 'Counts the times highlighting has been attempted on a file')
.get(source: source)
end
def over_highlight_size_limit(source)
Gitlab::Metrics
.counter(:over_highlight_size_limit,
'Count the times text has been over the highlight size limit')
.get(source: source)
end
end

View file

@ -7,6 +7,10 @@ RSpec.describe Ci::Bridge do
let_it_be(:target_project) { create(:project, name: 'project', namespace: create(:namespace, name: 'my')) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
before_all do
create(:ci_pipeline_variable, pipeline: pipeline, key: 'PVAR1', value: 'PVAL1')
end
let(:bridge) do
create(:ci_bridge, :variables, status: :created,
options: options,
@ -215,6 +219,70 @@ RSpec.describe Ci::Bridge do
.to include(key: 'EXPANDED', value: '$EXPANDED')
end
end
context 'forward variables' do
using RSpec::Parameterized::TableSyntax
where(:yaml_variables, :pipeline_variables, :ff, :variables) do
nil | nil | true | %w[BRIDGE]
nil | false | true | %w[BRIDGE]
nil | true | true | %w[BRIDGE PVAR1]
false | nil | true | %w[]
false | false | true | %w[]
false | true | true | %w[PVAR1]
true | nil | true | %w[BRIDGE]
true | false | true | %w[BRIDGE]
true | true | true | %w[BRIDGE PVAR1]
nil | nil | false | %w[BRIDGE]
nil | false | false | %w[BRIDGE]
nil | true | false | %w[BRIDGE]
false | nil | false | %w[BRIDGE]
false | false | false | %w[BRIDGE]
false | true | false | %w[BRIDGE]
true | nil | false | %w[BRIDGE]
true | false | false | %w[BRIDGE]
true | true | false | %w[BRIDGE]
end
with_them do
let(:options) do
{
trigger: {
project: 'my/project',
branch: 'master',
forward: { yaml_variables: yaml_variables,
pipeline_variables: pipeline_variables }.compact
}
}
end
before do
stub_feature_flags(ci_trigger_forward_variables: ff)
end
it 'returns variables according to the forward value' do
expect(bridge.downstream_variables.map { |v| v[:key] }).to contain_exactly(*variables)
end
end
context 'when sending a variable via both yaml and pipeline' do
let(:pipeline) { create(:ci_pipeline, project: project) }
let(:options) do
{ trigger: { project: 'my/project', forward: { pipeline_variables: true } } }
end
before do
create(:ci_pipeline_variable, pipeline: pipeline, key: 'BRIDGE', value: 'new value')
end
it 'uses the pipeline variable' do
expect(bridge.downstream_variables).to contain_exactly(
{ key: 'BRIDGE', value: 'new value' }
)
end
end
end
end
describe 'metadata support' do

View file

@ -0,0 +1,227 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::BuildArtifactsSizeRefresh, type: :model do
describe 'associations' do
it { is_expected.to belong_to(:project) }
end
it_behaves_like 'having unique enum values'
describe 'validations' do
it { is_expected.to validate_presence_of(:project) }
end
describe 'scopes' do
let_it_be(:refresh_1) { create(:project_build_artifacts_size_refresh, :running, updated_at: 4.days.ago) }
let_it_be(:refresh_2) { create(:project_build_artifacts_size_refresh, :running, updated_at: 2.days.ago) }
let_it_be(:refresh_3) { create(:project_build_artifacts_size_refresh, :pending) }
let_it_be(:refresh_4) { create(:project_build_artifacts_size_refresh, :created) }
describe 'stale' do
it 'returns records in running state and has not been updated for more than 3 days' do
expect(described_class.stale).to eq([refresh_1])
end
end
describe 'remaining' do
it 'returns stale, created, and pending records' do
expect(described_class.remaining).to match_array([refresh_1, refresh_3, refresh_4])
end
end
end
describe 'state machine', :clean_gitlab_redis_shared_state do
around do |example|
freeze_time { example.run }
end
let(:now) { Time.zone.now }
describe 'initial state' do
let(:refresh) { create(:project_build_artifacts_size_refresh) }
it 'defaults to created' do
expect(refresh).to be_created
end
end
describe '#process!' do
context 'when refresh state is created' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:created,
updated_at: 2.days.ago,
refresh_started_at: nil,
last_job_artifact_id: nil
)
end
before do
stats = create(:project_statistics, project: refresh.project, build_artifacts_size: 120)
stats.increment_counter(:build_artifacts_size, 30)
end
it 'transitions the state to running' do
expect { refresh.process! }.to change { refresh.reload.state }.to(described_class::STATES[:running])
end
it 'sets the refresh_started_at' do
expect { refresh.process! }.to change { refresh.reload.refresh_started_at.to_i }.to(now.to_i)
end
it 'bumps the updated_at' do
expect { refresh.process! }.to change { refresh.reload.updated_at.to_i }.to(now.to_i)
end
it 'resets the build artifacts size stats' do
expect { refresh.process! }.to change { refresh.project.statistics.reload.build_artifacts_size }.to(0)
end
it 'resets the counter attribute to zero' do
expect { refresh.process! }.to change { refresh.project.statistics.get_counter_value(:build_artifacts_size) }.to(0)
end
end
context 'when refresh state is pending' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
updated_at: 2.days.ago
)
end
before do
create(:project_statistics, project: refresh.project)
end
it 'transitions the state to running' do
expect { refresh.process! }.to change { refresh.reload.state }.to(described_class::STATES[:running])
end
it 'bumps the updated_at' do
expect { refresh.process! }.to change { refresh.reload.updated_at.to_i }.to(now.to_i)
end
end
context 'when refresh state is running' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:running,
updated_at: 2.days.ago
)
end
before do
create(:project_statistics, project: refresh.project)
end
it 'keeps the state at running' do
expect { refresh.process! }.not_to change { refresh.reload.state }
end
it 'bumps the updated_at' do
# If this was a stale job, we want to bump the updated at now so that
# it won't be picked up by another worker while we're recalculating
expect { refresh.process! }.to change { refresh.reload.updated_at.to_i }.to(now.to_i)
end
end
end
describe '#requeue!' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:running,
updated_at: 2.days.ago,
last_job_artifact_id: 111
)
end
let(:last_job_artifact_id) { 123 }
it 'transitions refresh state from running to pending' do
expect { refresh.requeue!(last_job_artifact_id) }.to change { refresh.reload.state }.to(described_class::STATES[:pending])
end
it 'bumps updated_at' do
expect { refresh.requeue!(last_job_artifact_id) }.to change { refresh.reload.updated_at.to_i }.to(now.to_i)
end
it 'updates last_job_artifact_id' do
expect { refresh.requeue!(last_job_artifact_id) }.to change { refresh.reload.last_job_artifact_id.to_i }.to(last_job_artifact_id)
end
end
end
describe '.process_next_refresh!' do
let!(:refresh_running) { create(:project_build_artifacts_size_refresh, :running) }
let!(:refresh_created) { create(:project_build_artifacts_size_refresh, :created) }
let!(:refresh_stale) { create(:project_build_artifacts_size_refresh, :stale) }
let!(:refresh_pending) { create(:project_build_artifacts_size_refresh, :pending) }
subject(:processed_refresh) { described_class.process_next_refresh! }
it 'picks the first record from the remaining work' do
expect(processed_refresh).to eq(refresh_created)
expect(processed_refresh.reload).to be_running
end
end
describe '.enqueue_refresh' do
let_it_be(:project_1) { create(:project) }
let_it_be(:project_2) { create(:project) }
let(:projects) { [project_1, project_1, project_2] }
it 'creates refresh records for each given project, skipping duplicates' do
expect { described_class.enqueue_refresh(projects) }
.to change { described_class.count }.from(0).to(2)
expect(described_class.first).to have_attributes(
project_id: project_1.id,
last_job_artifact_id: nil,
refresh_started_at: nil,
state: described_class::STATES[:created]
)
expect(described_class.last).to have_attributes(
project_id: project_2.id,
last_job_artifact_id: nil,
refresh_started_at: nil,
state: described_class::STATES[:created]
)
end
end
describe '#next_batch' do
let!(:project) { create(:project) }
let!(:artifact_1) { create(:ci_job_artifact, project: project, created_at: 14.days.ago) }
let!(:artifact_2) { create(:ci_job_artifact, project: project, created_at: 13.days.ago) }
let!(:artifact_3) { create(:ci_job_artifact, project: project, created_at: 12.days.ago) }
# This should not be included in the recalculation as it is created later than the refresh start time
let!(:future_artifact) { create(:ci_job_artifact, project: project, size: 8, created_at: refresh.refresh_started_at + 1.second) }
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
project: project,
updated_at: 2.days.ago,
refresh_started_at: 10.days.ago,
last_job_artifact_id: artifact_1.id
)
end
subject(:batch) { refresh.next_batch(limit: 3) }
it 'returns the job artifact records that were created not later than the refresh_started_at and IDs greater than the last_job_artifact_id' do
expect(batch).to eq([artifact_2, artifact_3])
end
end
end

View file

@ -2,69 +2,236 @@
require 'spec_helper'
RSpec.describe Ci::AfterRequeueJobService do
let_it_be(:project) { create(:project) }
RSpec.describe Ci::AfterRequeueJobService, :sidekiq_inline do
let_it_be(:project) { create(:project, :empty_repo) }
let_it_be(:user) { project.first_owner }
let(:pipeline) { create(:ci_pipeline, project: project) }
let!(:build1) { create(:ci_build, name: 'build1', pipeline: pipeline, stage_idx: 0) }
let!(:test1) { create(:ci_build, :success, name: 'test1', pipeline: pipeline, stage_idx: 1) }
let!(:test2) { create(:ci_build, :skipped, name: 'test2', pipeline: pipeline, stage_idx: 1) }
let!(:test3) { create(:ci_build, :skipped, :dependent, name: 'test3', pipeline: pipeline, stage_idx: 1, needed: build1) }
let!(:deploy) { create(:ci_build, :skipped, :dependent, name: 'deploy', pipeline: pipeline, stage_idx: 2, needed: test3) }
subject(:execute_service) { described_class.new(project, user).execute(build1) }
shared_examples 'processing subsequent skipped jobs' do
it 'marks subsequent skipped jobs as processable' do
expect(test1.reload).to be_success
expect(test2.reload).to be_skipped
expect(test3.reload).to be_skipped
expect(deploy.reload).to be_skipped
execute_service
expect(test1.reload).to be_success
expect(test2.reload).to be_created
expect(test3.reload).to be_created
expect(deploy.reload).to be_created
end
before_all do
project.repository.create_file(user, 'init', 'init', message: 'init', branch_name: 'master')
end
it_behaves_like 'processing subsequent skipped jobs'
subject(:service) { described_class.new(project, user) }
context 'when there is a job need from the same stage' do
let!(:build2) do
create(:ci_build,
:skipped,
:dependent,
name: 'build2',
pipeline: pipeline,
stage_idx: 0,
scheduling_type: :dag,
needed: build1)
context 'stage-dag mixed pipeline' do
let(:config) do
<<-EOY
stages: [a, b, c]
a1:
stage: a
script: exit $(($RANDOM % 2))
a2:
stage: a
script: exit 0
needs: [a1]
b1:
stage: b
script: exit 0
needs: []
b2:
stage: b
script: exit 0
needs: [a2]
c1:
stage: c
script: exit 0
needs: [b2]
c2:
stage: c
script: exit 0
EOY
end
shared_examples 'processing the same stage job' do
it 'marks subsequent skipped jobs as processable' do
expect { execute_service }.to change { build2.reload.status }.from('skipped').to('created')
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
it_behaves_like 'processing subsequent skipped jobs'
it_behaves_like 'processing the same stage job'
end
context 'when the pipeline is a downstream pipeline and the bridge is depended' do
let!(:trigger_job) { create(:ci_bridge, :strategy_depend, name: 'trigger_job', status: 'success') }
let(:a1) { find_job('a1') }
let(:b1) { find_job('b1') }
before do
create(:ci_sources_pipeline, pipeline: pipeline, source_job: trigger_job)
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'pending',
b2: 'created',
c1: 'created',
c2: 'created'
)
b1.success!
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'success',
b2: 'created',
c1: 'created',
c2: 'created'
)
a1.drop!
check_jobs_statuses(
a1: 'failed',
a2: 'skipped',
b1: 'success',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a1 = Ci::RetryBuildService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'skipped',
b1: 'success',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
end
it 'marks source bridge as pending' do
expect { execute_service }.to change { trigger_job.reload.status }.from('success').to('pending')
it 'marks subsequent skipped jobs as processable' do
execute_after_requeue_service(a1)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'success',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
end
context 'stage-dag mixed pipeline with some same-stage needs' do
let(:config) do
<<-EOY
stages: [a, b, c]
a1:
stage: a
script: exit $(($RANDOM % 2))
a2:
stage: a
script: exit 0
needs: [a1]
b1:
stage: b
script: exit 0
needs: [b2]
b2:
stage: b
script: exit 0
c1:
stage: c
script: exit 0
needs: [b2]
c2:
stage: c
script: exit 0
EOY
end
let(:pipeline) do
Ci::CreatePipelineService.new(project, user, { ref: 'master' }).execute(:push).payload
end
let(:a1) { find_job('a1') }
before do
stub_ci_pipeline_yaml_file(config)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
a1.drop!
check_jobs_statuses(
a1: 'failed',
a2: 'skipped',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
new_a1 = Ci::RetryBuildService.new(project, user).clone!(a1)
new_a1.enqueue!
check_jobs_statuses(
a1: 'pending',
a2: 'skipped',
b1: 'skipped',
b2: 'skipped',
c1: 'skipped',
c2: 'skipped'
)
end
it 'marks subsequent skipped jobs as processable' do
execute_after_requeue_service(a1)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'created',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
context 'when the FF ci_fix_order_of_subsequent_jobs is disabled' do
before do
stub_feature_flags(ci_fix_order_of_subsequent_jobs: false)
end
it 'does not mark b1 as processable' do
execute_after_requeue_service(a1)
check_jobs_statuses(
a1: 'pending',
a2: 'created',
b1: 'skipped',
b2: 'created',
c1: 'created',
c2: 'created'
)
end
end
end
private
def find_job(name)
processables.find_by!(name: name)
end
def check_jobs_statuses(statuses)
expect(processables.order(:name).pluck(:name, :status)).to contain_exactly(*statuses.stringify_keys.to_a)
end
def processables
pipeline.processables.latest
end
def execute_after_requeue_service(processable)
service.execute(processable)
end
end

View file

@ -0,0 +1,102 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::RefreshBuildArtifactsSizeStatisticsService, :clean_gitlab_redis_shared_state do
let(:service) { described_class.new }
describe '#execute' do
let_it_be(:project) { create(:project) }
let_it_be(:artifact_1) { create(:ci_job_artifact, project: project, size: 1, created_at: 14.days.ago) }
let_it_be(:artifact_2) { create(:ci_job_artifact, project: project, size: 2, created_at: 13.days.ago) }
let_it_be(:artifact_3) { create(:ci_job_artifact, project: project, size: 5, created_at: 12.days.ago) }
# This should not be included in the recalculation as it is created later than the refresh start time
let_it_be(:future_artifact) { create(:ci_job_artifact, project: project, size: 8, created_at: 2.days.from_now) }
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:created,
project: project,
updated_at: 2.days.ago,
refresh_started_at: nil,
last_job_artifact_id: nil
)
end
let(:now) { Time.zone.now }
around do |example|
freeze_time { example.run }
end
before do
stub_const("#{described_class}::BATCH_SIZE", 2)
stats = create(:project_statistics, project: project, build_artifacts_size: 120)
stats.increment_counter(:build_artifacts_size, 30)
end
it 'resets the build artifacts size stats' do
expect { service.execute }.to change { project.statistics.reload.build_artifacts_size }.to(0)
end
it 'increments the counter attribute by the total size of the current batch of artifacts' do
expect { service.execute }.to change { project.statistics.get_counter_value(:build_artifacts_size) }.to(3)
end
it 'updates the last_job_artifact_id to the ID of the last artifact from the batch' do
expect { service.execute }.to change { refresh.reload.last_job_artifact_id.to_i }.to(artifact_2.id)
end
it 'requeues the refresh job' do
service.execute
expect(refresh.reload).to be_pending
end
context 'when an error happens after the recalculation has started' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
project: project,
last_job_artifact_id: artifact_2.id
)
end
before do
allow(Gitlab::Redis::SharedState).to receive(:with).and_raise(StandardError, 'error')
expect { service.execute }.to raise_error(StandardError)
end
it 'keeps the last_job_artifact_id unchanged' do
expect(refresh.reload.last_job_artifact_id).to eq(artifact_2.id)
end
it 'keeps the state of the refresh record at running' do
expect(refresh.reload).to be_running
end
end
context 'when there are no more artifacts to recalculate for the next refresh job' do
let!(:refresh) do
create(
:project_build_artifacts_size_refresh,
:pending,
project: project,
updated_at: 2.days.ago,
refresh_started_at: now,
last_job_artifact_id: artifact_3.id
)
end
it 'deletes the refresh record' do
service.execute
expect(Projects::BuildArtifactsSizeRefresh.where(id: refresh.id)).not_to exist
end
end
end
end

View file

@ -0,0 +1,48 @@
# frozen_string_literal: true
require 'rake_helper'
RSpec.describe 'gitlab:refresh_project_statistics_build_artifacts_size rake task', :silence_stdout do
let(:rake_task) { 'gitlab:refresh_project_statistics_build_artifacts_size' }
describe 'enqueuing build artifacts size statistics refresh for given list of project IDs' do
let_it_be(:project_1) { create(:project) }
let_it_be(:project_2) { create(:project) }
let_it_be(:project_3) { create(:project) }
let(:string_of_ids) { "#{project_1.id} #{project_2.id} #{project_3.id} 999999" }
before do
Rake.application.rake_require('tasks/gitlab/refresh_project_statistics_build_artifacts_size')
stub_const("BUILD_ARTIFACTS_SIZE_REFRESH_ENQUEUE_BATCH_SIZE", 2)
end
context 'when given a list of space-separated IDs through STDIN' do
before do
allow($stdin).to receive(:tty?).and_return(false)
allow($stdin).to receive(:read).and_return(string_of_ids)
end
it 'enqueues the projects for refresh' do
expect { run_rake_task(rake_task) }.to output(/Done/).to_stdout
expect(Projects::BuildArtifactsSizeRefresh.all.map(&:project)).to match_array([project_1, project_2, project_3])
end
end
context 'when given a list of space-separated IDs through rake argument' do
it 'enqueues the projects for refresh' do
expect { run_rake_task(rake_task, string_of_ids) }.to output(/Done/).to_stdout
expect(Projects::BuildArtifactsSizeRefresh.all.map(&:project)).to match_array([project_1, project_2, project_3])
end
end
context 'when not given any IDs' do
it 'returns an error message' do
expect { run_rake_task(rake_task) }.to output(/Please provide a string of space-separated project IDs/).to_stdout
end
end
end
end

View file

@ -395,6 +395,7 @@ RSpec.describe 'Every Sidekiq worker' do
'Projects::PostCreationWorker' => 3,
'Projects::ScheduleBulkRepositoryShardMovesWorker' => 3,
'Projects::UpdateRepositoryStorageWorker' => 3,
'Projects::RefreshBuildArtifactsSizeStatisticsWorker' => 0,
'Prometheus::CreateDefaultAlertsWorker' => 3,
'PropagateIntegrationGroupWorker' => 3,
'PropagateIntegrationInheritDescendantWorker' => 3,

View file

@ -0,0 +1,96 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::RefreshBuildArtifactsSizeStatisticsWorker do
let(:worker) { described_class.new }
describe '#perform_work' do
before do
expect_next_instance_of(Projects::RefreshBuildArtifactsSizeStatisticsService) do |instance|
expect(instance).to receive(:execute).and_return(refresh)
end
end
context 'when refresh job is present' do
let(:refresh) do
build(
:project_build_artifacts_size_refresh,
:running,
project_id: 77,
last_job_artifact_id: 123
)
end
it 'logs refresh information' do
expect(worker).to receive(:log_extra_metadata_on_done).with(:project_id, refresh.project_id)
expect(worker).to receive(:log_extra_metadata_on_done).with(:last_job_artifact_id, refresh.last_job_artifact_id)
expect(worker).to receive(:log_extra_metadata_on_done).with(:last_batch, refresh.destroyed?)
expect(worker).to receive(:log_extra_metadata_on_done).with(:refresh_started_at, refresh.refresh_started_at)
worker.perform_work
end
end
context 'when refresh job is not present' do
let(:refresh) { nil }
it 'logs refresh information' do
expect(worker).not_to receive(:log_extra_metadata_on_done)
worker.perform_work
end
end
end
describe '#remaining_work_count' do
subject { worker.remaining_work_count }
context 'and there are remaining refresh jobs' do
before do
create_list(:project_build_artifacts_size_refresh, 2, :pending)
end
it { is_expected.to eq(1) }
end
context 'and there are no remaining refresh jobs' do
it { is_expected.to eq(0) }
end
end
describe '#max_running_jobs' do
subject { worker.max_running_jobs }
context 'when all projects_build_artifacts_size_refresh flags are enabled' do
it { is_expected.to eq(described_class::MAX_RUNNING_HIGH) }
end
context 'when projects_build_artifacts_size_refresh_high flags is disabled' do
before do
stub_feature_flags(projects_build_artifacts_size_refresh_high: false)
end
it { is_expected.to eq(described_class::MAX_RUNNING_MEDIUM) }
end
context 'when projects_build_artifacts_size_refresh_high and projects_build_artifacts_size_refresh_medium flags are disabled' do
before do
stub_feature_flags(projects_build_artifacts_size_refresh_high: false)
stub_feature_flags(projects_build_artifacts_size_refresh_medium: false)
end
it { is_expected.to eq(described_class::MAX_RUNNING_LOW) }
end
context 'when all projects_build_artifacts_size_refresh flags are disabled' do
before do
stub_feature_flags(projects_build_artifacts_size_refresh_low: false)
stub_feature_flags(projects_build_artifacts_size_refresh_medium: false)
stub_feature_flags(projects_build_artifacts_size_refresh_high: false)
end
it { is_expected.to eq(0) }
end
end
end

View file

@ -0,0 +1,17 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::ScheduleRefreshBuildArtifactsSizeStatisticsWorker do
subject(:worker) { described_class.new }
describe '#perform' do
include_examples 'an idempotent worker' do
it 'schedules Projects::RefreshBuildArtifactsSizeStatisticsWorker to be performed with capacity' do
expect(Projects::RefreshBuildArtifactsSizeStatisticsWorker).to receive(:perform_with_capacity).twice
subject
end
end
end
end