Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-11-18 21:09:32 +00:00
parent 7ea5ca0bb5
commit ae0889b396
39 changed files with 608 additions and 148 deletions

View File

@ -1,6 +1,7 @@
<script>
import $ from 'jquery';
import 'select2';
import { loadCSSFile } from '~/lib/utils/css_utils';
export default {
// False positive i18n lint: https://gitlab.com/gitlab-org/frontend/eslint-plugin-i18n/issues/26
@ -20,10 +21,14 @@ export default {
},
mounted() {
$(this.$refs.dropdownInput)
.val(this.value)
.select2(this.options)
.on('change', event => this.$emit('input', event.target.value));
loadCSSFile(gon.select2_css_path)
.then(() => {
$(this.$refs.dropdownInput)
.val(this.value)
.select2(this.options)
.on('change', event => this.$emit('input', event.target.value));
})
.catch(() => {});
},
beforeDestroy() {

View File

@ -0,0 +1,46 @@
# frozen_string_literal: true
module Releases
class EvidencePipelineFinder
include Gitlab::Utils::StrongMemoize
attr_reader :project, :params
def initialize(project, params = {})
@project = project
@params = params
end
def execute
# TODO: remove this with the release creation moved to its own form https://gitlab.com/gitlab-org/gitlab/-/issues/214245
return params[:evidence_pipeline] if params[:evidence_pipeline]
sha = existing_tag&.dereferenced_target&.sha
sha ||= repository&.commit(ref)&.sha
return unless sha
project.ci_pipelines.for_sha(sha).last
end
private
def repository
strong_memoize(:repository) do
project.repository
end
end
def existing_tag
repository.find_tag(tag_name)
end
def tag_name
params[:tag]
end
def ref
params[:ref]
end
end
end
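
For context, a minimal usage sketch of the new finder (the tag name is hypothetical; `project` is any project with that tag):

```ruby
# Resolve the pipeline to attach as release evidence: the most recent
# CI pipeline for the tag's dereferenced SHA, or nil when none exists.
finder = Releases::EvidencePipelineFinder.new(project, tag: 'v1.2.3')
evidence_pipeline = finder.execute
```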

View File

@ -29,6 +29,8 @@ class Release < ApplicationRecord
scope :preloaded, -> { includes(:evidences, :milestones, project: [:project_feature, :route, { namespace: :route }]) }
scope :with_project_and_namespace, -> { includes(project: :namespace) }
scope :recent, -> { sorted.limit(MAX_NUMBER_TO_DISPLAY) }
scope :without_evidence, -> { left_joins(:evidences).where(::Releases::Evidence.arel_table[:id].eq(nil)) }
scope :released_within_2hrs, -> { where(released_at: Time.zone.now - 1.hour..Time.zone.now + 1.hour) }
# Sorting
scope :order_created, -> { reorder('created_at ASC') }
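
The two new scopes are meant to be chained by the hourly evidence sweep added below; a sketch of the intended query, assuming the worker's usage:

```ruby
# Releases released within the last or next hour that have no
# evidence record yet (the relation the cron worker iterates).
Release.without_evidence.released_within_2hrs.each do |release|
  # schedule evidence collection for release ...
end
```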

View File

@ -11,8 +11,6 @@ module Releases
@project, @current_user, @params = project, user, params.dup
end
delegate :repository, to: :project
def tag_name
params[:tag]
end
@ -39,22 +37,18 @@ module Releases
end
end
def existing_tag
strong_memoize(:existing_tag) do
repository.find_tag(tag_name)
end
end
def tag_exist?
existing_tag.present?
end
def repository
strong_memoize(:repository) do
project.repository
end
end
def existing_tag
strong_memoize(:existing_tag) do
repository.find_tag(tag_name)
end
end
def milestones
return [] unless param_for_milestone_titles_provided?

View File

@ -10,7 +10,7 @@ module Releases
# should be found before the creation of new tag
# because tag creation can spawn new pipeline
# which won't have any data for evidence yet
evidence_pipeline = find_evidence_pipeline
evidence_pipeline = Releases::EvidencePipelineFinder.new(project, params).execute
tag = ensure_tag
@ -78,26 +78,10 @@ module Releases
)
end
def find_evidence_pipeline
# TODO: remove this with the release creation moved to its own form https://gitlab.com/gitlab-org/gitlab/-/issues/214245
return params[:evidence_pipeline] if params[:evidence_pipeline]
sha = existing_tag&.dereferenced_target&.sha
sha ||= repository.commit(ref)&.sha
return unless sha
project.ci_pipelines.for_sha(sha).last
end
def create_evidence!(release, pipeline)
return if release.historical_release?
return if release.historical_release? || release.upcoming_release?
if release.upcoming_release?
CreateEvidenceWorker.perform_at(release.released_at, release.id, pipeline&.id)
else
CreateEvidenceWorker.perform_async(release.id, pipeline&.id)
end
::Releases::CreateEvidenceWorker.perform_async(release.id, pipeline&.id)
end
end
end

View File

@ -18,28 +18,28 @@
.gl-mt-3
= form.check_box :repository_update_events, class: 'float-left'
.prepend-left-20
.gl-ml-6
= form.label :repository_update_events, class: 'list-label' do
%strong Repository update events
%p.light
This URL will be triggered when repository is updated
%li
= form.check_box :push_events, class: 'float-left'
.prepend-left-20
.gl-ml-6
= form.label :push_events, class: 'list-label' do
%strong Push events
%p.light
This URL will be triggered for each branch updated to the repository
%li
= form.check_box :tag_push_events, class: 'float-left'
.prepend-left-20
.gl-ml-6
= form.label :tag_push_events, class: 'list-label' do
%strong Tag push events
%p.light
This URL will be triggered when a new tag is pushed to the repository
%li
= form.check_box :merge_requests_events, class: 'float-left'
.prepend-left-20
.gl-ml-6
= form.label :merge_requests_events, class: 'list-label' do
%strong Merge request events
%p.light

View File

@ -323,6 +323,22 @@
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:releases_create_evidence
:feature_category: :release_evidence
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:releases_manage_evidence
:feature_category: :release_evidence
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent:
:tags: []
- :name: cronjob:remove_expired_group_links
:feature_category: :authentication_and_authorization
:has_external_dependencies:
@ -1377,14 +1393,6 @@
:weight: 2
:idempotent: true
:tags: []
- :name: create_evidence
:feature_category: :release_evidence
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 2
:idempotent:
:tags: []
- :name: create_note_diff_file
:feature_category: :source_code_management
:has_external_dependencies:

View File

@ -1,20 +0,0 @@
# frozen_string_literal: true
class CreateEvidenceWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
feature_category :release_evidence
weight 2
# pipeline_id is optional for backward compatibility with existing jobs
# caller should always try to provide the pipeline and pass nil only
# if pipeline is absent
def perform(release_id, pipeline_id = nil)
release = Release.find_by_id(release_id)
return unless release
pipeline = Ci::Pipeline.find_by_id(pipeline_id)
::Releases::CreateEvidenceService.new(release, pipeline: pipeline).execute
end
end

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
module Releases
class CreateEvidenceWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
feature_category :release_evidence
# pipeline_id is optional for backward compatibility with existing jobs
# caller should always try to provide the pipeline and pass nil only
# if pipeline is absent
def perform(release_id, pipeline_id = nil)
release = Release.find_by_id(release_id)
return unless release
pipeline = Ci::Pipeline.find_by_id(pipeline_id)
::Releases::CreateEvidenceService.new(release, pipeline: pipeline).execute
end
end
end

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true
module Releases
class ManageEvidenceWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
feature_category :release_evidence
def perform
releases = Release.without_evidence.released_within_2hrs
releases.each do |release|
project = release.project
params = { tag: release.tag }
evidence_pipeline = Releases::EvidencePipelineFinder.new(project, params).execute
# evidence for upcoming releases is collected by this sweep instead of a job scheduled with perform_at(released_at)
::Releases::CreateEvidenceWorker.perform_async(release.id, evidence_pipeline&.id)
end
end
end
end
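
Since the job runs hourly (see the cron settings below) and the window spans two hours around the current time, every release falls inside at least one sweep, and the `without_evidence` scope keeps a later sweep from enqueuing duplicate work. A hypothetical manual invocation:

```ruby
# One run of the hourly sweep (normally triggered by sidekiq-cron):
# enqueues a Releases::CreateEvidenceWorker job, with the resolved
# evidence pipeline id, for every matching release.
Releases::ManageEvidenceWorker.new.perform
```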

View File

@ -2,10 +2,6 @@
class TrendingProjectsWorker # rubocop:disable Scalability/IdempotentWorker
include ApplicationWorker
# rubocop:disable Scalability/CronWorkerContext
# This worker does not perform work scoped to a context
include CronjobQueue
# rubocop:enable Scalability/CronWorkerContext
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
feature_category :source_code_management

View File

@ -0,0 +1,6 @@
---
title: Create `vulnerability_findings_remediations` and `vulnerability_remediations`
tables
merge_request: 47166
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Add cloud_license_enabled column to application_settings
merge_request: 47882
author:
type: added

View File

@ -0,0 +1,5 @@
---
title: Schedule CreateEvidenceWorker jobs in a sliding window
merge_request: 47638
author:
type: added

View File

@ -532,6 +532,9 @@ Settings.cron_jobs['member_invitation_reminder_emails_worker']['job_class'] = 'M
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker']['cron'] ||= '* * * * *'
Settings.cron_jobs['schedule_merge_request_cleanup_refs_worker']['job_class'] = 'ScheduleMergeRequestCleanupRefsWorker'
Settings.cron_jobs['manage_evidence_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['manage_evidence_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['manage_evidence_worker']['job_class'] = 'Releases::ManageEvidenceWorker'
Gitlab.ee do
Settings.cron_jobs['active_user_count_threshold_worker'] ||= Settingslogic.new({})

View File

@ -58,8 +58,6 @@
- 1
- - create_commit_signature
- 2
- - create_evidence
- 2
- - create_github_webhook
- 2
- - create_note_diff_file

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true
class AddIndexToReleases < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
INDEX_NAME = 'index_releases_on_released_at'
disable_ddl_transaction!
def up
add_concurrent_index :releases, :released_at, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :releases, INDEX_NAME
end
end

View File

@ -0,0 +1,26 @@
# frozen_string_literal: true
class CreateVulnerabilityRemediationsTable < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
disable_ddl_transaction!
def up
create_table :vulnerability_remediations, if_not_exists: true do |t|
t.timestamps_with_timezone
t.integer :file_store, limit: 2
t.text :summary, null: false
t.text :file, null: false
end
add_text_limit :vulnerability_remediations, :summary, 200
add_text_limit :vulnerability_remediations, :file, 255
end
def down
drop_table :vulnerability_remediations
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
class CreateVulnerabilityFindingsRemediationsJoinTable < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
create_table :vulnerability_findings_remediations do |t|
t.references :vulnerability_occurrence, index: false, foreign_key: { on_delete: :cascade }
t.references :vulnerability_remediation, index: { name: 'index_vulnerability_findings_remediations_on_remediation_id' }, foreign_key: { on_delete: :cascade }
t.timestamps_with_timezone
t.index [:vulnerability_occurrence_id, :vulnerability_remediation_id], unique: true, name: 'index_vulnerability_findings_remediations_on_unique_keys'
end
end
end

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class AddCloudLicenseEnabledToSettings < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :application_settings, :cloud_license_enabled, :boolean, null: false, default: false
end
end

View File

@ -0,0 +1 @@
764f08e3083985bb8e206bd25fb27209702110bb4848c8bbfc6546a2777d9157

View File

@ -0,0 +1 @@
27ee3c5429dba139e6c300961172c4f90d25397e3d1e13d0654e049b63ac3325

View File

@ -0,0 +1 @@
bdbf3cf39228c9b65b02391a9aa030bdeb06aa3fc9955e2fd53bd784bea37b66

View File

@ -0,0 +1 @@
119afd73a58c247522446bc9693ece5c83a25c279e4dd7dfb942f7febd5b7a82

View File

@ -9345,6 +9345,7 @@ CREATE TABLE application_settings (
encrypted_cloud_license_auth_token text,
encrypted_cloud_license_auth_token_iv text,
secret_detection_revocation_token_types_url text,
cloud_license_enabled boolean DEFAULT false NOT NULL,
CONSTRAINT app_settings_registry_exp_policies_worker_capacity_positive CHECK ((container_registry_expiration_policies_worker_capacity >= 0)),
CONSTRAINT check_2dba05b802 CHECK ((char_length(gitpod_url) <= 255)),
CONSTRAINT check_51700b31b5 CHECK ((char_length(default_branch_name) <= 255)),
@ -17190,6 +17191,23 @@ CREATE SEQUENCE vulnerability_finding_links_id_seq
ALTER SEQUENCE vulnerability_finding_links_id_seq OWNED BY vulnerability_finding_links.id;
CREATE TABLE vulnerability_findings_remediations (
id bigint NOT NULL,
vulnerability_occurrence_id bigint,
vulnerability_remediation_id bigint,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL
);
CREATE SEQUENCE vulnerability_findings_remediations_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE vulnerability_findings_remediations_id_seq OWNED BY vulnerability_findings_remediations.id;
CREATE TABLE vulnerability_historical_statistics (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
@ -17316,6 +17334,26 @@ CREATE SEQUENCE vulnerability_occurrences_id_seq
ALTER SEQUENCE vulnerability_occurrences_id_seq OWNED BY vulnerability_occurrences.id;
CREATE TABLE vulnerability_remediations (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
file_store smallint,
summary text NOT NULL,
file text NOT NULL,
CONSTRAINT check_ac0ccabff3 CHECK ((char_length(summary) <= 200)),
CONSTRAINT check_fe3325e3ba CHECK ((char_length(file) <= 255))
);
CREATE SEQUENCE vulnerability_remediations_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE vulnerability_remediations_id_seq OWNED BY vulnerability_remediations.id;
CREATE TABLE vulnerability_scanners (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
@ -18298,6 +18336,8 @@ ALTER TABLE ONLY vulnerability_feedback ALTER COLUMN id SET DEFAULT nextval('vul
ALTER TABLE ONLY vulnerability_finding_links ALTER COLUMN id SET DEFAULT nextval('vulnerability_finding_links_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_findings_remediations ALTER COLUMN id SET DEFAULT nextval('vulnerability_findings_remediations_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_historical_statistics ALTER COLUMN id SET DEFAULT nextval('vulnerability_historical_statistics_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_identifiers ALTER COLUMN id SET DEFAULT nextval('vulnerability_identifiers_id_seq'::regclass);
@ -18310,6 +18350,8 @@ ALTER TABLE ONLY vulnerability_occurrence_pipelines ALTER COLUMN id SET DEFAULT
ALTER TABLE ONLY vulnerability_occurrences ALTER COLUMN id SET DEFAULT nextval('vulnerability_occurrences_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_remediations ALTER COLUMN id SET DEFAULT nextval('vulnerability_remediations_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_scanners ALTER COLUMN id SET DEFAULT nextval('vulnerability_scanners_id_seq'::regclass);
ALTER TABLE ONLY vulnerability_statistics ALTER COLUMN id SET DEFAULT nextval('vulnerability_statistics_id_seq'::regclass);
@ -19753,6 +19795,9 @@ ALTER TABLE ONLY vulnerability_feedback
ALTER TABLE ONLY vulnerability_finding_links
ADD CONSTRAINT vulnerability_finding_links_pkey PRIMARY KEY (id);
ALTER TABLE ONLY vulnerability_findings_remediations
ADD CONSTRAINT vulnerability_findings_remediations_pkey PRIMARY KEY (id);
ALTER TABLE ONLY vulnerability_historical_statistics
ADD CONSTRAINT vulnerability_historical_statistics_pkey PRIMARY KEY (id);
@ -19771,6 +19816,9 @@ ALTER TABLE ONLY vulnerability_occurrence_pipelines
ALTER TABLE ONLY vulnerability_occurrences
ADD CONSTRAINT vulnerability_occurrences_pkey PRIMARY KEY (id);
ALTER TABLE ONLY vulnerability_remediations
ADD CONSTRAINT vulnerability_remediations_pkey PRIMARY KEY (id);
ALTER TABLE ONLY vulnerability_scanners
ADD CONSTRAINT vulnerability_scanners_pkey PRIMARY KEY (id);
@ -21793,6 +21841,8 @@ CREATE INDEX index_releases_on_author_id ON releases USING btree (author_id);
CREATE INDEX index_releases_on_project_id_and_tag ON releases USING btree (project_id, tag);
CREATE INDEX index_releases_on_released_at ON releases USING btree (released_at);
CREATE INDEX index_remote_mirrors_on_last_successful_update_at ON remote_mirrors USING btree (last_successful_update_at);
CREATE INDEX index_remote_mirrors_on_project_id ON remote_mirrors USING btree (project_id);
@ -22229,6 +22279,10 @@ CREATE INDEX index_vulnerability_feedback_on_merge_request_id ON vulnerability_f
CREATE INDEX index_vulnerability_feedback_on_pipeline_id ON vulnerability_feedback USING btree (pipeline_id);
CREATE INDEX index_vulnerability_findings_remediations_on_remediation_id ON vulnerability_findings_remediations USING btree (vulnerability_remediation_id);
CREATE UNIQUE INDEX index_vulnerability_findings_remediations_on_unique_keys ON vulnerability_findings_remediations USING btree (vulnerability_occurrence_id, vulnerability_remediation_id);
CREATE INDEX index_vulnerability_historical_statistics_on_date_and_id ON vulnerability_historical_statistics USING btree (date, id);
CREATE UNIQUE INDEX index_vulnerability_identifiers_on_project_id_and_fingerprint ON vulnerability_identifiers USING btree (project_id, fingerprint);
@ -23508,6 +23562,9 @@ ALTER TABLE ONLY project_alerting_settings
ALTER TABLE ONLY dast_site_validations
ADD CONSTRAINT fk_rails_285c617324 FOREIGN KEY (dast_site_token_id) REFERENCES dast_site_tokens(id) ON DELETE CASCADE;
ALTER TABLE ONLY vulnerability_findings_remediations
ADD CONSTRAINT fk_rails_28a8d0cf93 FOREIGN KEY (vulnerability_occurrence_id) REFERENCES vulnerability_occurrences(id) ON DELETE CASCADE;
ALTER TABLE ONLY resource_state_events
ADD CONSTRAINT fk_rails_29af06892a FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE;
@ -23868,6 +23925,9 @@ ALTER TABLE ONLY web_hook_logs
ALTER TABLE ONLY jira_imports
ADD CONSTRAINT fk_rails_675d38c03b FOREIGN KEY (label_id) REFERENCES labels(id) ON DELETE SET NULL;
ALTER TABLE ONLY vulnerability_findings_remediations
ADD CONSTRAINT fk_rails_681c85ae0f FOREIGN KEY (vulnerability_remediation_id) REFERENCES vulnerability_remediations(id) ON DELETE CASCADE;
ALTER TABLE ONLY resource_iteration_events
ADD CONSTRAINT fk_rails_6830c13ac1 FOREIGN KEY (merge_request_id) REFERENCES merge_requests(id) ON DELETE CASCADE;

View File

@ -24,19 +24,19 @@ The keywords available for jobs are:
| Keyword | Description |
|:---------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [`script`](#script) | Shell script that is executed by a runner. |
| [`after_script`](#after_script) | Override a set of commands that are executed after job. |
| [`allow_failure`](#allow_failure) | Allow job to fail. Failed job does not contribute to commit status. |
| [`allow_failure`](#allow_failure) | Allow job to fail. A failed job does not cause the pipeline to fail. |
| [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, and `artifacts:reports`. |
| [`before_script`](#before_script) | Override a set of commands that are executed before job. |
| [`cache`](#cache) | List of files that should be cached between subsequent runs. Also available: `cache:paths`, `cache:key`, `cache:untracked`, `cache:when`, and `cache:policy`. |
| [`coverage`](#coverage) | Code coverage settings for a given job. |
| [`dependencies`](#dependencies) | Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. |
| [`environment`](#environment) | Name of an environment to which the job deploys. Also available: `environment:name`, `environment:url`, `environment:on_stop`, `environment:auto_stop_in`, and `environment:action`. |
| [`except`](#onlyexcept-basic) | Limit when jobs are not created. Also available: [`except:refs`, `except:kubernetes`, `except:variables`, and `except:changes`](#onlyexcept-advanced). |
| [`extends`](#extends) | Configuration entries that this job inherits from. |
| [`image`](#image) | Use Docker images. Also available: `image:name` and `image:entrypoint`. |
| [`include`](#include) | Allows this job to include external YAML files. Also available: `include:local`, `include:file`, `include:template`, and `include:remote`. |
| [`include`](#include) | Include external YAML files. Also available: `include:local`, `include:file`, `include:template`, and `include:remote`. |
| [`interruptible`](#interruptible) | Defines if a job can be canceled when made redundant by a newer run. |
| [`only`](#onlyexcept-basic) | Limit when jobs are created. Also available: [`only:refs`, `only:kubernetes`, `only:variables`, and `only:changes`](#onlyexcept-advanced). |
| [`pages`](#pages) | Upload the result of a job to use with GitLab Pages. |
@ -47,7 +47,7 @@ The keywords available for jobs are:
| [`rules`](#rules) | List of conditions to evaluate and determine selected attributes of a job, and whether or not it's created. May not be used alongside `only`/`except`. |
| [`services`](#services) | Use Docker services images. Also available: `services:name`, `services:alias`, `services:entrypoint`, and `services:command`. |
| [`stage`](#stage) | Defines a job stage (default: `test`). |
| [`tags`](#tags) | List of tags that are used to select a runner. |
| [`timeout`](#timeout) | Define a custom job-level timeout that takes precedence over the project-wide setting. |
| [`trigger`](#trigger) | Defines a downstream pipeline trigger. |
| [`variables`](#variables) | Define job variables on a job level. |
@ -225,7 +225,7 @@ stages:
There are also two edge cases worth mentioning:
1. If no `stages` are defined in `.gitlab-ci.yml`, then the `build`,
`test` and `deploy` are allowed to be used as job's stage by default.
`test` and `deploy` can be used as job's stage by default.
1. If a job does not specify a `stage`, the job is assigned the `test` stage.
### `workflow:rules`
@ -488,8 +488,8 @@ include:
- remote: 'https://gitlab.com/awesome-project/raw/master/.gitlab-ci-template.yml'
```
All [nested includes](#nested-includes) are executed without context as public user, so only another remote
or public project, or template, is allowed.
All [nested includes](#nested-includes) are executed without context as a public user,
so you can only `include` public projects or templates.
#### `include:template`
@ -521,9 +521,9 @@ so it's possible to use project, remote or template includes.
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/56836) in GitLab 11.9.
Nested includes allow you to compose a set of includes.
Use nested includes to compose a set of includes.
A total of 100 includes is allowed, but duplicate includes are considered a configuration error.
You can have up to 100 includes, but you can't have duplicate includes.
In [GitLab 12.4](https://gitlab.com/gitlab-org/gitlab/-/issues/28212) and later, the time limit
to resolve all files is 30 seconds.
@ -713,7 +713,7 @@ You can use special syntax in [`script`](README.md#script) sections to:
### `stage`
`stage` is defined per-job and relies on [`stages`](#stages), which is defined
globally. It allows to group jobs into different stages, and jobs of the same
globally. Use `stage` to define which stage a job runs in, and jobs of the same
`stage` are executed in parallel (subject to [certain conditions](#using-your-own-runners)). For example:
```yaml
@ -989,7 +989,7 @@ If you attempt to use both keywords in the same job, the linter returns a
#### Rules attributes
The job attributes allowed by `rules` are:
The job attributes you can use with `rules` are:
- [`when`](#when): If not defined, defaults to `when: on_success`.
- If used as `when: delayed`, `start_in` is also required.
@ -1059,7 +1059,7 @@ In this example:
is added to the [merge request pipeline](../merge_request_pipelines/index.md)
with attributes of:
- `when: manual` (manual job)
- `allow_failure: true` (allows the pipeline to continue running even if the manual job is not run)
- `allow_failure: true` (the pipeline continues running even if the manual job is not run)
- If the pipeline is **not** for a merge request, the first rule doesn't match, and the
second rule is evaluated.
- If the pipeline is a scheduled pipeline, the second rule matches, and the job
@ -1098,8 +1098,8 @@ for more details.
Jobs defined with `rules` can trigger multiple pipelines with the same action. You
don't have to explicitly configure rules for each type of pipeline to trigger them
accidentally. Rules that are too loose (allowing too many types of pipelines) could
cause a second pipeline to run unexpectedly.
accidentally. Rules that are too broad could cause simultaneous pipelines of a different
type to run unexpectedly.
Some configurations that have the potential to cause duplicate pipelines cause a
[pipeline warning](../troubleshooting.md#pipeline-warnings) to be displayed.
@ -1124,8 +1124,8 @@ causes duplicated pipelines.
There are multiple ways to avoid this:
- Use [`workflow: rules`](#workflowrules) to specify which types of pipelines
can run. To eliminate duplicate pipelines, allow only merge request pipelines
or push (branch) pipelines.
can run. To eliminate duplicate pipelines, use merge request pipelines only
or push (branch) pipelines only.
- Rewrite the rules to run the job only in very specific cases,
and avoid using a final `when:` rule:
@ -1323,8 +1323,8 @@ docker build:
In this example:
- If the pipeline is a merge request pipeline, check `Dockerfile` for changes.
- If `Dockerfile` has changed, add the job to the pipeline as a manual job, and allow the pipeline
to continue running even if the job is not triggered (`allow_failure: true`).
- If `Dockerfile` has changed, add the job to the pipeline as a manual job, and the pipeline
continues running even if the job is not triggered (`allow_failure: true`).
- If `Dockerfile` has not changed, do not add job to any pipeline (same as `when: never`).
To use `rules: changes` with branch pipelines instead of merge request pipelines,
@ -1511,11 +1511,10 @@ There are a few rules that apply to the usage of job policy:
- `only` and `except` are inclusive. If both `only` and `except` are defined
in a job specification, the ref is filtered by `only` and `except`.
- `only` and `except` allow the use of regular expressions ([supported regexp syntax](#supported-onlyexcept-regexp-syntax)).
- `only` and `except` allow to specify a repository path to filter jobs for
forks.
- `only` and `except` can use regular expressions ([supported regexp syntax](#supported-onlyexcept-regexp-syntax)).
- `only` and `except` can specify a repository path to filter jobs for forks.
In addition, `only` and `except` allow the use of special keywords:
In addition, `only` and `except` can use special keywords:
| **Value** | **Description** |
|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@ -1636,8 +1635,8 @@ Only a subset of features provided by [Ruby Regexp](https://ruby-doc.org/core/Re
are now supported.
From GitLab 11.9.7 to GitLab 12.0, GitLab provided a feature flag to
let you use the unsafe regexp syntax. This flag allowed
compatibility with the previous syntax version so you could gracefully migrate to the new syntax.
let you use unsafe regexp syntax. After migrating to safe syntax, you should disable
this feature flag again:
```ruby
Feature.enable(:allow_unsafe_ruby_regexp)
@ -1935,8 +1934,8 @@ runs.
> - In GitLab 12.3, maximum number of jobs in `needs` array raised from five to 50.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/30631) in GitLab 12.8, `needs: []` lets jobs start immediately.
The `needs:` keyword enables executing jobs out-of-order, allowing you to implement
a [directed acyclic graph](../directed_acyclic_graph/index.md) in your `.gitlab-ci.yml`.
Use the `needs:` keyword to execute jobs out-of-order. Relationships between jobs
that use `needs` can be visualized as a [directed acyclic graph](../directed_acyclic_graph/index.md).
This lets you run some jobs without waiting for other ones, disregarding stage ordering
so you can have multiple stages running concurrently.
@ -2179,7 +2178,7 @@ The default value is `false`, except for [manual](#whenmanual) jobs using the
`when: manual` syntax, unless using [`rules:`](#rules) syntax, where all jobs
default to false, *including* `when: manual` jobs.
When `allow_failure` is enabled and the job fails, the job shows an orange warning in the UI.
When `allow_failure` is set to `true` and the job fails, the job shows an orange warning in the UI.
However, the logical flow of the pipeline considers the job a
success/passed, and is not blocked.
@ -2320,13 +2319,12 @@ can opt to disable it.
##### Protecting manual jobs **(PREMIUM)**
It's possible to use [protected environments](../environments/protected_environments.md)
to define a precise list of users authorized to run a manual job. By allowing only
users associated with a protected environment to trigger manual jobs, it's possible
to implement some special use cases, such as:
Use [protected environments](../environments/protected_environments.md)
to define a list of users authorized to run a manual job. You can authorize only
the users associated with a protected environment to trigger manual jobs, which can:
- More precisely limiting who can deploy to an environment.
- Enabling a pipeline to be blocked until an approved user "approves" it.
- More precisely limit who can deploy to an environment.
- Block a pipeline until an approved user "approves" it.
To do this, you must:
@ -2808,7 +2806,7 @@ If neither file was changed in any commits, the prefix is added to `default`, so
key in the example would be `test-default`.
Like `cache:key`, `prefix` can use any of the [predefined variables](../variables/README.md),
but the following are not allowed:
but cannot include:
- the `/` character (or the equivalent URI-encoded `%2F`)
- a value made only of `.` (or the equivalent URI-encoded `%2E`)
@ -4353,8 +4351,8 @@ job_name:
#### YAML anchors for variables
[YAML anchors](#anchors) can be used with `variables`, to easily repeat assignment
of variables across multiple jobs. It can also enable more flexibility when a job
[YAML anchors](#anchors) can be used with `variables`, to repeat assignment
of variables across multiple jobs. You can also use YAML anchors when a job
requires a specific `variables` block that would otherwise override the global variables.
In the example below, we override the `GIT_STRATEGY` variable without affecting

View File

@ -833,7 +833,7 @@ If your application contains `@client` queries, most probably you will have an A
```javascript
import createMockApollo from 'jest/helpers/mock_apollo_helper';
...
fakeApollo = createMockApollo(requestHandlers, {});
mockApollo = createMockApollo(requestHandlers, resolvers);
```
Sometimes we want to test a `result` hook of the local query. In order to have it triggered, we need to populate a cache with correct data to be fetched with this query:
@ -849,14 +849,14 @@ query fetchLocalUser {
```javascript
import fetchLocalUserQuery from '~/design_management/graphql/queries/fetch_local_user.query.graphql';
function createComponentWithApollo() {
function createMockApolloProvider() {
const requestHandlers = [
[getDesignListQuery, jest.fn().mockResolvedValue(designListQueryResponse)],
[permissionsQuery, jest.fn().mockResolvedValue(permissionsQueryResponse)],
];
fakeApollo = createMockApollo(requestHandlers, {});
fakeApollo.clients.defaultClient.cache.writeQuery({
mockApollo = createMockApollo(requestHandlers, {});
mockApollo.clients.defaultClient.cache.writeQuery({
query: fetchLocalUserQuery,
data: {
fetchLocalUser: {
@ -864,15 +864,107 @@ function createComponentWithApollo() {
name: 'Test',
},
},
})
});
wrapper = shallowMount(Index, {
return mockApollo;
}
function createComponent(options = {}) {
const { mockApollo } = options;
return shallowMount(Index, {
localVue,
apolloProvider: fakeApollo,
apolloProvider: mockApollo,
});
}
```
Sometimes it is necessary to control what the local resolver returns and inspect how it is called by the component. This can be done by mocking your local resolver:
```javascript
import fetchLocalUserQuery from '~/design_management/graphql/queries/fetch_local_user.query.graphql';
function createMockApolloProvider(options = {}) {
const { fetchLocalUserSpy } = options;
mockApollo = createMockApollo([], {
Query: {
fetchLocalUser: fetchLocalUserSpy,
},
});
// Necessary for local resolvers to be activated
mockApollo.clients.defaultClient.cache.writeQuery({
query: fetchLocalUserQuery,
data: {},
});
return mockApollo;
}
```
In the test you can then control what the spy is supposed to do and inspect the component after the request has returned:
```javascript
describe('My Index test with `createMockApollo`', () => {
let wrapper;
let fetchLocalUserSpy;
afterEach(() => {
wrapper.destroy();
wrapper = null;
fetchLocalUserSpy = null;
});
describe('when loading', () => {
beforeEach(() => {
const mockApollo = createMockApolloProvider();
wrapper = createComponent({ mockApollo });
});
it('displays the loader', () => {
// Assert that the loader is present
});
});
describe('with data', () => {
beforeEach(async () => {
fetchLocalUserSpy = jest.fn().mockResolvedValue(localUserQueryResponse);
const mockApollo = createMockApolloProvider(fetchLocalUserSpy);
wrapper = createComponent({ mockApollo });
await waitForPromises();
});
it('should fetch data once', () => {
expect(fetchLocalUserSpy).toHaveBeenCalledTimes(1);
});
it('displays data', () => {
// Assert that data is present
});
});
describe('with error', () => {
const error = 'Error!';
beforeEach(async () => {
fetchLocalUserSpy = jest.fn().mockRejectedValueOnce(error);
const mockApollo = createMockApolloProvider(fetchLocalUserSpy);
wrapper = createComponent({ mockApollo });
await waitForPromises();
});
it('should fetch data once', () => {
expect(fetchLocalUserSpy).toHaveBeenCalledTimes(1);
});
it('displays the error', () => {
// Assert that the error is displayed
});
});
});
```
## Handling errors
GitLab's GraphQL mutations currently have two distinct error modes: [Top-level](#top-level-errors) and [errors-as-data](#errors-as-data).

View File

@ -1,6 +1,6 @@
---
stage: none
group: unassigned
stage: Manage
group: Access
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: reference, howto
---

View File

@ -1,6 +1,6 @@
---
stage: none
group: unassigned
stage: Manage
group: Access
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: reference
---

View File

@ -1,6 +1,6 @@
---
stage: none
group: unassigned
stage: Manage
group: Access
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
type: howto
---

View File

@ -1,6 +1,6 @@
---
stage: none
group: unassigned
stage: Manage
group: Access
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
---

View File

@ -36,7 +36,7 @@ Merge Request Analytics could be used when:
## Visualizations and data
The following visualizations and data are available, representing all merge requests that were merged in the past 12 months.
The following visualizations and data are available, representing all merge requests that were merged in the given date range.
### Throughput chart
@ -46,7 +46,25 @@ The throughput chart shows the number of merge requests merged per month.
### Throughput table
Data table displaying a maximum of the 100 most recent merge requests merged for the time period.
[Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/232651) in GitLab 13.3.
The Throughput table displays the most recent merge requests merged in the date range. The
table displays up to 20 merge requests at a time. If there are more than 20 merge requests,
you can paginate to them. For each merge request, you can review the following data:
- Title (as a link to the merge request itself)
- ID
- Pipeline status
- Label count
- Comment count
- Approval count (if approved)
- Date merged
- Time to merge
- Milestone
- Commit count
- Pipeline count
- Line change counts
- Assignees
![Throughput table](img/mr_throughput_table_v13_3.png "Merge Request Analytics - Throughput table listing the 100 merge requests most recently merged")
@ -68,6 +86,17 @@ To filter results:
1. Click on the filter bar.
1. Select a parameter to filter by.
1. Select a value from the autocompleted results, or enter search text to refine the results.
1. Hit the "Return" key.
## Date range
The date range is set to the past 12 months by default. You can modify the date range by changing the "From" and/or "To" values that appear alongside the filter bar. After changing either value, the data displayed on the page will update automatically.
## Tip: Bookmark preferred settings
You can bookmark preferred filters and date ranges. After you have applied a change to the
filter bar or the date range, you'll see that information in the URL. You can create a
bookmark for those preferred settings in your browser.
## Permissions

View File

@ -55,7 +55,8 @@ module Gitlab
bulk_migrate_async(jobs) unless jobs.empty?
end
# Queues background migration jobs for an entire table, batched by ID range.
# Queues background migration jobs for an entire table in batches.
# The default batching column used is the standard primary key `id`.
# Each job is scheduled with a `delay_interval` in between.
# If you use a small interval, then some jobs may run at the same time.
#
@ -68,6 +69,7 @@ module Gitlab
# is scheduled to be run. These records can be used to trace execution of the background job, but there is no
# builtin support to manage that automatically at this time. You should only set this flag if you are aware of
# how it works, and intend to manually cleanup the database records in your background job.
# primary_column_name - The name of the primary key column if the primary key is not `id`
#
# *Returns the final migration delay*
#
@ -87,8 +89,9 @@ module Gitlab
# # do something
# end
# end
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_job_arguments: [], initial_delay: 0, track_jobs: false)
raise "#{model_class} does not have an ID to use for batch ranges" unless model_class.column_names.include?('id')
def queue_background_migration_jobs_by_range_at_intervals(model_class, job_class_name, delay_interval, batch_size: BACKGROUND_MIGRATION_BATCH_SIZE, other_job_arguments: [], initial_delay: 0, track_jobs: false, primary_column_name: :id)
raise "#{model_class} does not have an ID column of #{primary_column_name} to use for batch ranges" unless model_class.column_names.include?(primary_column_name.to_s)
raise "#{primary_column_name} is not an integer column" unless model_class.columns_hash[primary_column_name.to_s].type == :integer
# To not overload the worker too much we enforce a minimum interval both
# when scheduling and performing jobs.
@ -99,7 +102,7 @@ module Gitlab
final_delay = 0
model_class.each_batch(of: batch_size) do |relation, index|
start_id, end_id = relation.pluck(Arel.sql('MIN(id), MAX(id)')).first
start_id, end_id = relation.pluck(Arel.sql("MIN(#{primary_column_name}), MAX(#{primary_column_name})")).first
# `BackgroundMigrationWorker.bulk_perform_in` schedules all jobs for
# the same time, which is not helpful in most cases where we wish to
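
A sketch of a migration calling the helper with the new option, assuming batching over `project_id` rather than `id` (`'BackfillFoo'` is a placeholder job class):

```ruby
# Schedules one BackfillFoo job per batch of 10,000 rows, spaced
# 2 minutes apart, with ranges computed over project_id.
queue_background_migration_jobs_by_range_at_intervals(
  ContainerExpirationPolicy,
  'BackfillFoo',
  2.minutes,
  batch_size: 10_000,
  primary_column_name: :project_id
)
```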

View File

@ -65,6 +65,9 @@ module QA
deleted_pipeline = pipeline
!pipeline.empty?
end
raise "Pipeline response does not have a 'message' key: #{deleted_pipeline}" unless deleted_pipeline&.key?('message')
expect(deleted_pipeline['message'].downcase).to have_content('404 not found')
end
end

View File

@ -0,0 +1,44 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Releases::EvidencePipelineFinder, '#execute' do
let(:params) { {} }
let(:project) { create(:project, :repository) }
let(:tag_name) { project.repository.tag_names.first }
let(:sha) { project.repository.find_tag(tag_name).dereferenced_target.sha }
let!(:pipeline) { create(:ci_empty_pipeline, sha: sha, project: project) }
subject { described_class.new(project, params).execute }
context 'when the tag is passed' do
let(:params) { { tag: tag_name } }
it 'returns the evidence pipeline' do
expect(subject).to eq(pipeline)
end
end
context 'when the ref is passed' do
let(:params) { { ref: sha } }
it 'returns the evidence pipeline' do
expect(subject).to eq(pipeline)
end
end
context 'empty params' do
it 'returns nil' do
expect(subject).to be_nil
end
end
# TODO: remove this with the release creation moved to its own form https://gitlab.com/gitlab-org/gitlab/-/issues/214245
context 'params[:evidence_pipeline] is present' do
let(:params) { { evidence_pipeline: pipeline } }
it 'returns the passed evidence pipeline' do
expect(subject).to eq(pipeline)
end
end
end

View File

@ -189,7 +189,51 @@ RSpec.describe Gitlab::Database::Migrations::BackgroundMigrationHelpers do
end
end
context "when the model doesn't have an ID column" do
context 'when the model specifies a primary_column_name' do
let!(:id1) { create(:container_expiration_policy).id }
let!(:id2) { create(:container_expiration_policy).id }
let!(:id3) { create(:container_expiration_policy).id }
around do |example|
freeze_time { example.run }
end
before do
ContainerExpirationPolicy.class_eval do
include EachBatch
end
end
it 'returns the final expected delay', :aggregate_failures do
Sidekiq::Testing.fake! do
final_delay = model.queue_background_migration_jobs_by_range_at_intervals(ContainerExpirationPolicy, 'FooJob', 10.minutes, batch_size: 2, primary_column_name: :project_id)
expect(final_delay.to_f).to eq(20.minutes.to_f)
expect(BackgroundMigrationWorker.jobs[0]['args']).to eq(['FooJob', [id1, id2]])
expect(BackgroundMigrationWorker.jobs[0]['at']).to eq(10.minutes.from_now.to_f)
expect(BackgroundMigrationWorker.jobs[1]['args']).to eq(['FooJob', [id3, id3]])
expect(BackgroundMigrationWorker.jobs[1]['at']).to eq(20.minutes.from_now.to_f)
end
end
context "when the primary_column_name is not an integer" do
it 'raises error' do
expect do
model.queue_background_migration_jobs_by_range_at_intervals(ContainerExpirationPolicy, 'FooJob', 10.minutes, primary_column_name: :enabled)
end.to raise_error(StandardError, /is not an integer column/)
end
end
context "when the primary_column_name does not exist" do
it 'raises error' do
expect do
model.queue_background_migration_jobs_by_range_at_intervals(ContainerExpirationPolicy, 'FooJob', 10.minutes, primary_column_name: :foo)
end.to raise_error(StandardError, /does not have an ID column of foo/)
end
end
end
context "when the model doesn't have an ID or primary_column_name column" do
it 'raises error (for now)' do
expect do
model.queue_background_migration_jobs_by_range_at_intervals(ProjectAuthorization, 'FooJob', 10.seconds)

View File

@ -236,7 +236,7 @@ RSpec.describe Releases::CreateService do
let(:released_at) { 3.weeks.ago }
it 'does not execute CreateEvidenceWorker' do
expect { subject }.not_to change(CreateEvidenceWorker.jobs, :size)
expect { subject }.not_to change(Releases::CreateEvidenceWorker.jobs, :size)
end
it 'does not create an Evidence object', :sidekiq_inline do
@ -335,7 +335,7 @@ RSpec.describe Releases::CreateService do
end
it 'queues CreateEvidenceWorker' do
expect { subject }.to change(CreateEvidenceWorker.jobs, :size).by(1)
expect { subject }.to change(Releases::CreateEvidenceWorker.jobs, :size).by(1)
end
it 'creates Evidence', :sidekiq_inline do
@ -360,18 +360,12 @@ RSpec.describe Releases::CreateService do
context 'upcoming release' do
let(:released_at) { 1.day.from_now }
it 'queues CreateEvidenceWorker' do
expect { subject }.to change(CreateEvidenceWorker.jobs, :size).by(1)
it 'does not execute CreateEvidenceWorker' do
expect { subject }.not_to change(Releases::CreateEvidenceWorker.jobs, :size)
end
it 'queues CreateEvidenceWorker at the released_at timestamp' do
subject
expect(CreateEvidenceWorker.jobs.last['at'].to_i).to eq(released_at.to_i)
end
it 'creates Evidence', :sidekiq_inline do
expect { subject }.to change(Releases::Evidence, :count).by(1)
it 'does not create an Evidence object', :sidekiq_inline do
expect { subject }.not_to change(Releases::Evidence, :count)
end
it 'is not a historical release' do
@ -385,8 +379,6 @@ RSpec.describe Releases::CreateService do
expect(last_release.upcoming_release?).to be_truthy
end
include_examples 'uses the right pipeline for evidence'
end
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe CreateEvidenceWorker do
RSpec.describe Releases::CreateEvidenceWorker do
let(:project) { create(:project, :repository) }
let(:release) { create(:release, project: project) }
let(:pipeline) { create(:ci_empty_pipeline, sha: release.sha, project: project) }

View File

@ -0,0 +1,43 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Releases::ManageEvidenceWorker do
let(:project) { create(:project, :repository) }
shared_examples_for 'does not create a new Evidence record' do
specify :sidekiq_inline do
aggregate_failures do
expect(::Releases::CreateEvidenceService).not_to receive(:execute)
expect { described_class.new.perform }.to change(Releases::Evidence, :count).by(0)
end
end
end
context 'when `released_at` is inside the window' do
context 'when Evidence has not been created' do
let(:release) { create(:release, project: project, released_at: 1.hour.since) }
it 'creates a new Evidence record', :sidekiq_inline do
expect_next_instance_of(::Releases::CreateEvidenceService, release, { pipeline: nil }) do |service|
expect(service).to receive(:execute).and_call_original
end
expect { described_class.new.perform }.to change(Releases::Evidence, :count).by(1)
end
end
context 'when evidence has already been created' do
let(:release) { create(:release, project: project, released_at: 1.hour.since) }
let!(:evidence) { create(:evidence, release: release) }
it_behaves_like 'does not create a new Evidence record'
end
end
context 'when `released_at` is outside the window' do
let(:release) { create(:release, project: project, released_at: 300.minutes.since) }
it_behaves_like 'does not create a new Evidence record'
end
end