Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-05-25 09:10:54 +00:00
parent a1668610f7
commit 686c2375e1
43 changed files with 453 additions and 104 deletions

View file

@@ -2506,7 +2506,6 @@ Gitlab/NamespacedClass:
- 'ee/app/workers/elastic_full_index_worker.rb'
- 'ee/app/workers/elastic_index_bulk_cron_worker.rb'
- 'ee/app/workers/elastic_index_initial_bulk_cron_worker.rb'
- 'ee/app/workers/elastic_indexer_worker.rb'
- 'ee/app/workers/elastic_indexing_control_worker.rb'
- 'ee/app/workers/elastic_namespace_indexer_worker.rb'
- 'ee/app/workers/elastic_namespace_rollout_worker.rb'

View file

@@ -1246,6 +1246,10 @@ module Ci
end
end
def build_matchers
self.builds.build_matchers(project)
end
private
def add_message(severity, content)
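Per the `Ci::Pipeline` spec later in this commit, `#build_matchers` collapses builds that match on the same attributes into a single matcher carrying all of their build ids. A plain-Ruby sketch of that grouping, using stand-in structures rather than the actual GitLab classes:

```ruby
# Stand-in sketch: builds with identical matching attributes collapse into one
# matcher holding every build id, as the Ci::Pipeline spec below expects.
Build = Struct.new(:id, :tag_list, :protected_flag)

def build_matchers(builds)
  builds
    .group_by { |build| [build.tag_list.sort, build.protected_flag] }
    .map do |(tag_list, protected_flag), group|
      { tag_list: tag_list, protected: protected_flag, build_ids: group.map(&:id) }
    end
end

builds = [Build.new(1, %w[ruby], false), Build.new(2, %w[ruby], false)]
p build_matchers(builds)
# => [{:tag_list=>["ruby"], :protected=>false, :build_ids=>[1, 2]}]
```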

View file

@@ -433,13 +433,7 @@ module Ci
end
def matches_build?(build)
return false if self.ref_protected? && !build.protected?
accepting_tags?(build)
end
def accepting_tags?(build)
(run_untagged? || build.has_tags?) && (build.tag_list - tag_list).empty?
runner_matcher.matches?(build.build_matcher)
end
end
end
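The hunk above replaces the inline tag check with a delegated `runner_matcher.matches?(build.build_matcher)` call. A minimal plain-Ruby sketch of that delegation, with simplified stand-ins rather than the actual GitLab classes:

```ruby
# The runner-side matcher reproduces the retired inline rule:
# (run_untagged? || build.has_tags?) && (build.tag_list - tag_list).empty?
RunnerMatcher = Struct.new(:run_untagged, :tag_list, keyword_init: true) do
  def matches?(build_matcher)
    (run_untagged || build_matcher.has_tags?) &&
      (build_matcher.tag_list - tag_list).empty?
  end
end

BuildMatcher = Struct.new(:tag_list, keyword_init: true) do
  def has_tags?
    tag_list.any?
  end
end

runner = RunnerMatcher.new(run_untagged: false, tag_list: %w[ruby postgres])
build  = BuildMatcher.new(tag_list: %w[ruby])
puts runner.matches?(build) # => true: the build's tags are a subset of the runner's
```

Keeping both sides behind matcher objects is what lets `DropNotRunnableBuildsService` (added later in this commit) compare whole pipelines against runners without loading each build.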

View file

@@ -174,8 +174,11 @@ class CommitStatus < ApplicationRecord
next if commit_status.processed?
next unless commit_status.project
last_arg = transition.args.last
transition_options = last_arg.is_a?(Hash) && last_arg.extractable_options? ? last_arg : {}
commit_status.run_after_commit do
PipelineProcessWorker.perform_async(pipeline_id)
PipelineProcessWorker.perform_async(pipeline_id) unless transition_options[:skip_pipeline_processing]
ExpireJobCacheWorker.perform_async(id)
end
end
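Because state-machine transition arguments may or may not end with an options hash, the new code peels one off before deciding whether to enqueue `PipelineProcessWorker`. A hedged sketch of that extraction; `extractable_options?` comes from ActiveSupport, so a plain `is_a?(Hash)` check stands in for it here:

```ruby
# Simplified stand-in for the hunk above: pull a trailing options hash off the
# transition arguments, defaulting to an empty hash when none is given.
def extract_transition_options(args)
  last_arg = args.last
  last_arg.is_a?(Hash) ? last_arg : {}
end

options = extract_transition_options([:drop, { skip_pipeline_processing: true }])
puts options[:skip_pipeline_processing] # => true, so the worker is not enqueued
```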

View file

@@ -24,6 +24,7 @@ module Enums
project_deleted: 15,
ci_quota_exceeded: 16,
pipeline_loop_detected: 17,
no_matching_runner: 18,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
invalid_bridge_trigger: 1_003,

View file

@@ -25,7 +25,8 @@ class CommitStatusPresenter < Gitlab::View::Presenter::Delegated
reached_max_descendant_pipelines_depth: 'You reached the maximum depth of child pipelines',
project_deleted: 'The job belongs to a deleted project',
user_blocked: 'The user who created this job is blocked',
ci_quota_exceeded: 'No more CI minutes available'
ci_quota_exceeded: 'No more CI minutes available',
no_matching_runner: 'No matching runner available'
}.freeze
private_constant :CALLOUT_FAILURE_MESSAGES
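Taken together with the enum hunk above: the integer `18` is what gets persisted as the failure reason, and the presenter maps the corresponding symbol back to a user-facing callout. An illustrative lookup using the values from this commit:

```ruby
# Values copied from the two hunks above; the lookup chain itself is illustrative.
FAILURE_REASONS = { ci_quota_exceeded: 16, pipeline_loop_detected: 17, no_matching_runner: 18 }.freeze

CALLOUT_FAILURE_MESSAGES = {
  ci_quota_exceeded: 'No more CI minutes available',
  no_matching_runner: 'No matching runner available'
}.freeze

reason = FAILURE_REASONS.key(18)            # => :no_matching_runner
puts CALLOUT_FAILURE_MESSAGES.fetch(reason) # => "No matching runner available"
```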

View file

@@ -0,0 +1,91 @@
# frozen_string_literal: true
module Ci
module PipelineCreation
class DropNotRunnableBuildsService
include Gitlab::Utils::StrongMemoize
def initialize(pipeline)
@pipeline = pipeline
end
##
# We want to run this service exactly once,
# before the first pipeline processing call
#
def execute
return unless ::Feature.enabled?(:ci_drop_new_builds_when_ci_quota_exceeded, project, default_enabled: :yaml)
return unless pipeline.created?
load_runners
validate_build_matchers
end
private
attr_reader :pipeline
attr_reader :instance_runners, :private_runners
delegate :project, to: :pipeline
def load_runners
@instance_runners, @private_runners = project
.all_runners
.active
.online
.runner_matchers
.partition(&:instance_type?)
end
def validate_build_matchers
pipeline.build_matchers.each do |build_matcher|
failure_reason = validate_build_matcher(build_matcher)
next unless failure_reason
drop_all_builds(build_matcher.build_ids, failure_reason)
end
end
def validate_build_matcher(build_matcher)
return if matching_private_runners?(build_matcher)
return if matching_instance_runners_available?(build_matcher)
matching_failure_reason(build_matcher)
end
##
# We skip pipeline processing until we drop all required builds. Otherwise
# as we drop the first build, the remaining builds to be dropped could
# transition to other states by `PipelineProcessWorker` running async.
#
def drop_all_builds(build_ids, failure_reason)
pipeline.builds.id_in(build_ids).each do |build|
build.drop(failure_reason, skip_pipeline_processing: true)
end
end
def matching_private_runners?(build_matcher)
private_runners
.find { |matcher| matcher.matches?(build_matcher) }
.present?
end
# Overridden in EE to include more conditions
def matching_instance_runners_available?(build_matcher)
matching_instance_runners?(build_matcher)
end
def matching_instance_runners?(build_matcher)
instance_runners
.find { |matcher| matcher.matches?(build_matcher) }
.present?
end
# Overridden in EE
def matching_failure_reason(build_matcher)
:no_matching_runner
end
end
end
end
Ci::PipelineCreation::DropNotRunnableBuildsService.prepend_mod_with('Ci::PipelineCreation::DropNotRunnableBuildsService')
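A hedged usage sketch for the new service, runnable from a Rails console inside the GitLab app; the pipeline lookup is hypothetical:

```ruby
# Hypothetical invocation: validate a freshly created pipeline's builds against
# the project's online runners, dropping any build no runner could pick up.
pipeline = Ci::Pipeline.find(42) # hypothetical pipeline id
Ci::PipelineCreation::DropNotRunnableBuildsService.new(pipeline).execute
```

In practice the service is not called directly: `StartPipelineService` (next file) runs it exactly once before the first processing pass.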

View file

@@ -0,0 +1,18 @@
# frozen_string_literal: true
module Ci
module PipelineCreation
class StartPipelineService
attr_reader :pipeline
def initialize(pipeline)
@pipeline = pipeline
end
def execute
DropNotRunnableBuildsService.new(pipeline).execute
Ci::ProcessPipelineService.new(pipeline).execute
end
end
end
end
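As the worker hunk later in this commit shows, `Ci::InitialPipelineProcessWorker` becomes the single caller of this composition. A sketch of the call path, assuming the GitLab Rails environment:

```ruby
# Mirrors the InitialPipelineProcessWorker change below: look the pipeline up,
# run the drop-builds validation, then hand off to regular processing.
def start_pipeline(pipeline_id)
  Ci::Pipeline.find_by_id(pipeline_id).try do |pipeline|
    Ci::PipelineCreation::StartPipelineService.new(pipeline).execute
  end
end
```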

View file

@@ -184,7 +184,7 @@ class IssuableBaseService < ::BaseProjectService
params[:assignee_ids] = process_assignee_ids(params, extra_assignee_ids: issuable.assignee_ids.to_a)
end
issuable.assign_attributes(params)
issuable.assign_attributes(allowed_create_params(params))
before_create(issuable)
@@ -194,6 +194,7 @@ class IssuableBaseService < ::BaseProjectService
if issuable_saved
create_system_notes(issuable, is_update: false) unless skip_system_notes
handle_changes(issuable, { params: params })
after_create(issuable)
execute_hooks(issuable)
@@ -233,7 +234,7 @@ class IssuableBaseService < ::BaseProjectService
assign_requested_assignees(issuable)
if issuable.changed? || params.present?
issuable.assign_attributes(params)
issuable.assign_attributes(allowed_update_params(params))
if has_title_or_description_changed?(issuable)
issuable.assign_attributes(last_edited_at: Time.current, last_edited_by: current_user)
@@ -260,7 +261,7 @@ class IssuableBaseService < ::BaseProjectService
issuable, old_labels: old_associations[:labels], old_milestone: old_associations[:milestone]
)
handle_changes(issuable, old_associations: old_associations)
handle_changes(issuable, old_associations: old_associations, params: params)
new_assignees = issuable.assignees.to_a
affected_assignees = (old_associations[:assignees] + new_assignees) - (old_associations[:assignees] & new_assignees)
@@ -505,6 +506,14 @@ class IssuableBaseService < ::BaseProjectService
def update_timestamp?(issuable)
issuable.changes.keys != ["relative_position"]
end
def allowed_create_params(params)
params
end
def allowed_update_params(params)
params
end
end
IssuableBaseService.prepend_mod_with('IssuableBaseService')
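The two new no-op hooks exist so EE code prepended via `prepend_mod_with` can restrict which attributes reach `assign_attributes`. A hypothetical override sketch; the module and attribute names are illustrative, not the actual EE code:

```ruby
# Hypothetical EE-style override: strip an attribute from the update params
# before the base service assigns them (Hash#except: Ruby 3.0+/ActiveSupport).
module EE
  module IssuableBaseServiceOverride # illustrative name
    def allowed_update_params(params)
      super.except(:sprint_id) # e.g. an attribute EE guards separately
    end
  end
end
```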

View file

@@ -40,6 +40,20 @@ module Issues
super
end
def handle_changes(issue, options)
super
old_associations = options.fetch(:old_associations, {})
old_assignees = old_associations.fetch(:assignees, [])
handle_assignee_changes(issue, old_assignees)
end
def handle_assignee_changes(issue, old_assignees)
return if issue.assignees == old_assignees
create_assignee_note(issue, old_assignees)
end
def resolve_discussions_with_issue(issue)
return if discussions_to_resolve.empty?

View file

@@ -43,6 +43,7 @@ module Issues
end
def handle_changes(issue, options)
super
old_associations = options.fetch(:old_associations, {})
old_labels = old_associations.fetch(:labels, [])
old_mentioned_users = old_associations.fetch(:mentioned_users, [])

View file

@@ -27,6 +27,33 @@ module MergeRequests
enqueue_jira_connect_messages_for(merge_request)
end
def handle_changes(merge_request, options)
old_associations = options.fetch(:old_associations, {})
old_assignees = old_associations.fetch(:assignees, [])
old_reviewers = old_associations.fetch(:reviewers, [])
handle_assignees_change(merge_request, old_assignees) if merge_request.assignees != old_assignees
handle_reviewers_change(merge_request, old_reviewers) if merge_request.reviewers != old_reviewers
end
def handle_assignees_change(merge_request, old_assignees)
MergeRequests::HandleAssigneesChangeService
.new(project: project, current_user: current_user)
.async_execute(merge_request, old_assignees)
end
def handle_reviewers_change(merge_request, old_reviewers)
affected_reviewers = (old_reviewers + merge_request.reviewers) - (old_reviewers & merge_request.reviewers)
create_reviewer_note(merge_request, old_reviewers)
notification_service.async.changed_reviewer_of_merge_request(merge_request, current_user, old_reviewers)
todo_service.reassigned_reviewable(merge_request, current_user, old_reviewers)
invalidate_cache_counts(merge_request, users: affected_reviewers.compact)
new_reviewers = merge_request.reviewers - old_reviewers
merge_request_activity_counter.track_users_review_requested(users: new_reviewers)
merge_request_activity_counter.track_reviewers_changed_action(user: current_user)
end
def cleanup_environments(merge_request)
Ci::StopEnvironmentsService.new(merge_request.source_project, current_user)
.execute_for_merge_request(merge_request)

View file

@@ -15,6 +15,7 @@ module MergeRequests
end
def handle_changes(merge_request, options)
super
old_associations = options.fetch(:old_associations, {})
old_labels = old_associations.fetch(:labels, [])
old_mentioned_users = old_associations.fetch(:mentioned_users, [])
@@ -31,8 +32,6 @@ module MergeRequests
end
handle_target_branch_change(merge_request)
handle_assignees_change(merge_request, old_assignees) if merge_request.assignees != old_assignees
handle_reviewers_change(merge_request, old_reviewers) if merge_request.reviewers != old_reviewers
handle_milestone_change(merge_request)
handle_draft_status_change(merge_request, changed_fields)
@@ -220,24 +219,6 @@ module MergeRequests
end
end
def handle_assignees_change(merge_request, old_assignees)
MergeRequests::HandleAssigneesChangeService
.new(project: project, current_user: current_user)
.async_execute(merge_request, old_assignees)
end
def handle_reviewers_change(merge_request, old_reviewers)
affected_reviewers = (old_reviewers + merge_request.reviewers) - (old_reviewers & merge_request.reviewers)
create_reviewer_note(merge_request, old_reviewers)
notification_service.async.changed_reviewer_of_merge_request(merge_request, current_user, old_reviewers)
todo_service.reassigned_reviewable(merge_request, current_user, old_reviewers)
invalidate_cache_counts(merge_request, users: affected_reviewers.compact)
new_reviewers = merge_request.reviewers - old_reviewers
merge_request_activity_counter.track_users_review_requested(users: new_reviewers)
merge_request_activity_counter.track_reviewers_changed_action(user: current_user)
end
def create_branch_change_note(issuable, branch_type, event_type, old_branch, new_branch)
SystemNoteService.change_branch(
issuable, issuable.project, current_user, branch_type, event_type,

View file

@@ -15,7 +15,7 @@ module Ci
def perform(pipeline_id)
Ci::Pipeline.find_by_id(pipeline_id).try do |pipeline|
Ci::ProcessPipelineService
Ci::PipelineCreation::StartPipelineService
.new(pipeline)
.execute
end

View file

@@ -0,0 +1,8 @@
---
name: ci_drop_new_builds_when_ci_quota_exceeded
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61166
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/326709
milestone: '14.0'
type: development
group: group::continuous integration
default_enabled: false
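For reference, this flag gates the guard clause at the top of `DropNotRunnableBuildsService#execute` (earlier in this commit); excerpted here as a sketch, since `Feature`, `project`, and `pipeline` exist only inside the GitLab codebase:

```ruby
# Excerpt from the service above: the validation runs only while the flag is
# enabled for the pipeline's project and the pipeline is still in `created`.
def execute
  return unless ::Feature.enabled?(:ci_drop_new_builds_when_ci_quota_exceeded, project, default_enabled: :yaml)
  return unless pipeline.created?

  load_runners
  validate_build_matchers
end
```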

View file

@@ -120,8 +120,6 @@
- 1
- - elastic_full_index
- 1
- - elastic_indexer
- 1
- - elastic_indexing_control
- 1
- - elastic_namespace_indexer

View file

@@ -0,0 +1,11 @@
# frozen_string_literal: true
class MakeSnapshotSegmentIdOptional < ActiveRecord::Migration[6.0]
def up
change_column_null(:analytics_devops_adoption_snapshots, :segment_id, true)
end
def down
change_column_null(:analytics_devops_adoption_snapshots, :segment_id, false)
end
end

View file

@@ -0,0 +1,19 @@
# frozen_string_literal: true
require Rails.root.join('db', 'post_migrate', '20210430134202_copy_adoption_snapshot_namespace.rb')
class RequireSnapshotNamespace < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
disable_ddl_transaction!
def up
CopyAdoptionSnapshotNamespace.new.up
add_not_null_constraint(:analytics_devops_adoption_snapshots, :namespace_id)
end
def down
remove_not_null_constraint(:analytics_devops_adoption_snapshots, :namespace_id)
end
end

View file

@@ -0,0 +1 @@
3bcc5ae97f3185ea33e568f42b90d1bfd31ac7c5126dab4580b64bd9b4603721

View file

@@ -0,0 +1 @@
1944c983dd384029cef6e456108a1ccfdb9c991c65343d3b7f26aff51f244816

View file

@@ -9130,7 +9130,7 @@ ALTER SEQUENCE analytics_devops_adoption_segments_id_seq OWNED BY analytics_devo
CREATE TABLE analytics_devops_adoption_snapshots (
id bigint NOT NULL,
segment_id bigint NOT NULL,
segment_id bigint,
recorded_at timestamp with time zone NOT NULL,
issue_opened boolean NOT NULL,
merge_request_opened boolean NOT NULL,
@@ -9142,7 +9142,8 @@ CREATE TABLE analytics_devops_adoption_snapshots (
end_time timestamp with time zone NOT NULL,
total_projects_count integer,
code_owners_used_count integer,
namespace_id integer
namespace_id integer,
CONSTRAINT check_3f472de131 CHECK ((namespace_id IS NOT NULL))
);
CREATE SEQUENCE analytics_devops_adoption_snapshots_id_seq

View file

@@ -60,7 +60,7 @@ the difference between Geo and Gitaly Cluster, see [Gitaly Cluster compared to G
## How it works
Your Geo instance can be used for cloning and fetching projects, in addition to reading any data. This will make working with large repositories over large distances much faster.
Your Geo instance can be used for cloning and fetching projects, in addition to reading any data. This makes working with large repositories over large distances much faster.
![Geo overview](replication/img/geo_overview.png)
@@ -150,17 +150,17 @@ NOTE:
When using HTTP or HTTPS proxying, your load balancer must be configured to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the [web terminal](../integration/terminal.md) integration guide for more details.
NOTE:
When using HTTPS protocol for port 443, you will need to add an SSL certificate to the load balancers.
When using HTTPS protocol for port 443, you need to add an SSL certificate to the load balancers.
If you wish to terminate SSL at the GitLab application server instead, use TCP protocol.
### LDAP
We recommend that if you use LDAP on your **primary** site, you also set up secondary LDAP servers on each **secondary** site. Otherwise, users will not be able to perform Git operations over HTTP(s) on the **secondary** site using HTTP Basic Authentication. However, Git via SSH and personal access tokens will still work.
We recommend that if you use LDAP on your **primary** site, you also set up secondary LDAP servers on each **secondary** site. Otherwise, users are unable to perform Git operations over HTTP(s) on the **secondary** site using HTTP Basic Authentication. However, Git via SSH and personal access tokens still works.
NOTE:
It is possible for all **secondary** sites to share an LDAP server, but additional latency can be an issue. Also, consider what LDAP server will be available in a [disaster recovery](disaster_recovery/index.md) scenario if a **secondary** site is promoted to be a **primary** site.
It is possible for all **secondary** sites to share an LDAP server, but additional latency can be an issue. Also, consider what LDAP server is available in a [disaster recovery](disaster_recovery/index.md) scenario if a **secondary** site is promoted to be a **primary** site.
Check for instructions on how to set up replication in your LDAP service. Instructions will be different depending on the software or service used. For example, OpenLDAP provides [these instructions](https://www.openldap.org/doc/admin24/replication.html).
Check for instructions on how to set up replication in your LDAP service. Instructions are different depending on the software or service used. For example, OpenLDAP provides [these instructions](https://www.openldap.org/doc/admin24/replication.html).
### Geo Tracking Database
@@ -179,9 +179,9 @@ This daemon:
- Reads a log of events replicated by the **primary** site to the **secondary** database instance.
- Updates the Geo Tracking Database instance with changes that need to be executed.
When something is marked to be updated in the tracking database instance, asynchronous jobs running on the **secondary** site will execute the required operations and update the state.
When something is marked to be updated in the tracking database instance, asynchronous jobs running on the **secondary** site execute the required operations and update the state.
This new architecture allows GitLab to be resilient to connectivity issues between the sites. It doesn't matter how long the **secondary** site is disconnected from the **primary** site as it will be able to replay all the events in the correct order and become synchronized with the **primary** site again.
This new architecture allows GitLab to be resilient to connectivity issues between the sites. It doesn't matter how long the **secondary** site is disconnected from the **primary** site as it is able to replay all the events in the correct order and become synchronized with the **primary** site again.
## Limitations
@@ -280,7 +280,7 @@ For an example of how to set up a location-aware Git remote URL with AWS Route53
### Backfill
Once a **secondary** site is set up, it will start replicating missing data from
Once a **secondary** site is set up, it starts replicating missing data from
the **primary** site in a process known as **backfill**. You can monitor the
synchronization process on each Geo site from the **primary** site's **Geo Nodes**
dashboard in your browser.

View file

@@ -38,11 +38,11 @@ To start multiple processes:
process, and values in each item determine the queues it works on.
For example, the following setting creates three Sidekiq processes, one to run on
`elastic_indexer`, one to run on `mailers`, and one process running on all queues:
`elastic_commit_indexer`, one to run on `mailers`, and one process running on all queues:
```ruby
sidekiq['queue_groups'] = [
"elastic_indexer",
"elastic_commit_indexer",
"mailers",
"*"
]
@@ -53,7 +53,7 @@ To start multiple processes:
```ruby
sidekiq['queue_groups'] = [
"elastic_indexer, elastic_commit_indexer",
"elastic_commit_indexer, elastic_association_indexer",
"mailers",
"*"
]

View file

@@ -26,7 +26,7 @@ you want using steps 1 and 2 from the GitLab downloads page.
## Optional: Enable extra Sidekiq processes
sidekiq_cluster['enable'] = true
sidekiq['queue_groups'] = [
"elastic_indexer",
"elastic_commit_indexer",
"*"
]
```

View file

@@ -8184,7 +8184,7 @@ Segment.
| ---- | ---- | ----------- |
| <a id="devopsadoptionsegmentid"></a>`id` | [`ID!`](#id) | ID of the segment. |
| <a id="devopsadoptionsegmentlatestsnapshot"></a>`latestSnapshot` | [`DevopsAdoptionSnapshot`](#devopsadoptionsnapshot) | The latest adoption metrics for the segment. |
| <a id="devopsadoptionsegmentnamespace"></a>`namespace` | [`Namespace`](#namespace) | Segment namespace. |
| <a id="devopsadoptionsegmentnamespace"></a>`namespace` | [`Namespace`](#namespace) | Namespace which should be calculated. |
### `DevopsAdoptionSnapshot`

View file

@@ -622,7 +622,7 @@ Sidekiq processes](../administration/operations/extra_sidekiq_processes.md).
This enqueues a Sidekiq job for each project that needs to be indexed.
You can view the jobs in **Admin Area > Monitoring > Background Jobs > Queues Tab**
and click `elastic_indexer`, or you can query indexing status using a Rake task:
and click `elastic_commit_indexer`, or you can query indexing status using a Rake task:
```shell
# Omnibus installations

View file

@@ -9,26 +9,29 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/6861) in [GitLab Premium](https://about.gitlab.com/pricing/) 11.6.
Custom project templates are useful for organizations that need to create many similar types of [projects](../project/index.md) and want to start from the same jumping-off point.
Custom project templates are useful for organizations that need to create many similar types of
[projects](../project/index.md).
Projects created from these templates serve as a common starting point.
## Setting up group-level project templates
To use a custom project template for a new project you need to:
To use a custom project template for a new project:
1. [Create a 'templates' subgroup](subgroups/index.md).
1. [Add repositories (projects) to the that new subgroup](index.md#add-projects-to-a-group), as your templates.
1. Edit your group's settings to look to your 'templates' subgroup for templates:
1. In the left-hand menu, click **{settings}** **Settings > General**.
1. [Create a `templates` subgroup](subgroups/index.md).
1. [Add repositories (projects) to that new subgroup](index.md#add-projects-to-a-group),
as your templates.
1. Edit your group's settings to look to your _templates_ subgroup for templates:
NOTE:
If you don't have access to the group's settings, you may not have sufficient privileges (for example, you may need developer or higher permissions).
1. Scroll to **Custom project templates** and click **Expand**. If no **Custom project templates** section displays, make sure you've created a subgroup, and added a project (repository) to it.
1. Select the 'templates' subgroup.
1. In the left menu, select **Settings > General**. If you don't have access to the
group's settings, you may not have sufficient privileges (for example, you may need developer
or higher permissions).
1. Scroll to **Custom project templates** and select **Expand**. If no **Custom project templates**
section displays, make sure you've created a subgroup and added a project (repository) to it.
1. Select the **templates** subgroup.
### Example structure
Here is a sample group/project structure for a hypothetical "Acme Co" for project templates:
Here's a sample group/project structure for project templates, for a hypothetical _Acme Co_:
```plaintext
# GitLab instance and group
@@ -53,24 +56,22 @@ gitlab.com/acmeco/
### Adjust Settings
Users can configure a GitLab group that serves as template
source under a group's **Settings > General > Custom project templates**.
Users can configure a GitLab group that serves as template source under a group's
**Settings > General > Custom project templates**.
NOTE:
GitLab administrators can
[set project templates for an entire GitLab instance](../admin_area/custom_project_templates.md).
GitLab administrators can [set project templates for an entire GitLab instance](../admin_area/custom_project_templates.md).
Within this section, you can configure the group where all the custom project
templates are sourced. Every project _template_ directly under the group namespace is
available to every signed-in user, if all enabled [project features](../project/settings/index.md#sharing-and-permissions) except for GitLab Pages are set to **Everyone With Access**.
Within this section, you can configure the group where all the custom project templates are sourced.
If all enabled [project features](../project/settings/index.md#sharing-and-permissions)
(except for GitLab Pages) are set to **Everyone With Access**, then every project template directly
under the group namespace is available to every signed-in user. However, private projects are
available only if the user is a member of the project. Also note that only direct subgroups can be
set as the template source. Projects of nested subgroups of a selected template source cannot be
used.
However, private projects will be available only if the user is a member of the project.
NOTE:
Only direct subgroups can be set as the template source. Projects of nested subgroups of a selected template source cannot be used.
Repository and database information that are copied over to each new project are
identical to the data exported with the [GitLab Project Import/Export](../project/settings/import_export.md).
Repository and database information that are copied over to each new project are identical to the
data exported with the [GitLab Project Import/Export](../project/settings/import_export.md).
<!-- ## Troubleshooting

View file

@@ -30,7 +30,8 @@ module Gitlab
reached_max_descendant_pipelines_depth: 'reached maximum depth of child pipelines',
project_deleted: 'pipeline project was deleted',
user_blocked: 'pipeline user was blocked',
ci_quota_exceeded: 'no more CI minutes available'
ci_quota_exceeded: 'no more CI minutes available',
no_matching_runner: 'no matching runner available'
}.freeze
private_constant :REASONS

View file

@@ -13,6 +13,10 @@ module GoogleApi
LEAST_TOKEN_LIFE_TIME = 10.minutes
CLUSTER_MASTER_AUTH_USERNAME = 'admin'
CLUSTER_IPV4_CIDR_BLOCK = '/16'
# Don't upgrade to > 1.18 before we move away from Basic Auth
# See issue: https://gitlab.com/gitlab-org/gitlab/-/issues/331582
# Possible solution: https://gitlab.com/groups/gitlab-org/-/epics/6049
GKE_VERSION = '1.18'
CLUSTER_OAUTH_SCOPES = [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
@@ -90,6 +94,7 @@ module GoogleApi
cluster: {
name: cluster_name,
initial_node_count: cluster_size,
initial_cluster_version: GKE_VERSION,
node_config: {
machine_type: machine_type,
oauth_scopes: CLUSTER_OAUTH_SCOPES
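With the pin in place, the cluster-creation request body carries the version explicitly. An illustrative sketch of the resulting options hash; all values except `initial_cluster_version` are placeholders:

```ruby
# Placeholder values; only initial_cluster_version reflects the pinned constant,
# which stays at 1.18 while GitLab still relies on Basic Auth (see comment above).
GKE_VERSION = '1.18'

cluster_options = {
  cluster: {
    name: 'example-cluster',
    initial_node_count: 3,
    initial_cluster_version: GKE_VERSION,
    node_config: { machine_type: 'n1-standard-2' }
  }
}

puts cluster_options.dig(:cluster, :initial_cluster_version) # => "1.18"
```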

View file

@@ -23334,6 +23334,9 @@ msgstr ""
msgid "Otherwise, click the link below to complete the process:"
msgstr ""
msgid "Our team has been notified. Please try again."
msgstr ""
msgid "Out-of-compliance with this project's policies and should be removed"
msgstr ""
@@ -36377,9 +36380,6 @@ msgstr ""
msgid "We don't have enough data to show this stage."
msgstr ""
msgid "We encountered an error and our team has been notified. Please try again."
msgstr ""
msgid "We have found the following errors:"
msgstr ""

View file

@@ -25,6 +25,8 @@ RSpec.describe 'Merge request > User sees pipelines triggered by merge request',
}
end
let_it_be(:runner) { create(:ci_runner, :online) }
before do
stub_application_setting(auto_devops_enabled: false)
stub_ci_pipeline_yaml_file(YAML.dump(config))

View file

@@ -91,6 +91,7 @@ RSpec.describe GoogleApi::CloudPlatform::Client do
cluster: {
name: cluster_name,
initial_node_count: cluster_size,
initial_cluster_version: '1.18',
node_config: {
machine_type: machine_type,
oauth_scopes: [

View file

@@ -1,15 +1,15 @@
# frozen_string_literal: true
#
require 'spec_helper'
require Rails.root.join('db', 'post_migrate', '20210430134202_copy_adoption_snapshot_namespace.rb')
RSpec.describe CopyAdoptionSnapshotNamespace, :migration do
RSpec.describe CopyAdoptionSnapshotNamespace, :migration, schema: 20210430124630 do
let(:namespaces_table) { table(:namespaces) }
let(:segments_table) { table(:analytics_devops_adoption_segments) }
let(:snapshots_table) { table(:analytics_devops_adoption_snapshots) }
before do
it 'updates all snapshots without namespace set' do
namespaces_table.create!(id: 123, name: 'group1', path: 'group1')
namespaces_table.create!(id: 124, name: 'group2', path: 'group2')
@@ -19,9 +19,7 @@ RSpec.describe CopyAdoptionSnapshotNamespace, :migration do
create_snapshot(id: 1, segment_id: 1)
create_snapshot(id: 2, segment_id: 2)
create_snapshot(id: 3, segment_id: 2, namespace_id: 123)
end
it 'updates all snapshots without namespace set' do
migrate!
expect(snapshots_table.find(1).namespace_id).to eq 123

View file

@@ -4512,4 +4512,17 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
.not_to exceed_query_limit(control_count)
end
end
describe '#build_matchers' do
let_it_be(:pipeline) { create(:ci_pipeline) }
let_it_be(:builds) { create_list(:ci_build, 2, pipeline: pipeline, project: pipeline.project) }
subject(:matchers) { pipeline.build_matchers }
it 'returns build matchers' do
expect(matchers.size).to eq(1)
expect(matchers).to all be_a(Gitlab::Ci::Matching::BuildMatcher)
expect(matchers.first.build_ids).to match_array(builds.map(&:id))
end
end
end

View file

@@ -53,6 +53,8 @@ RSpec.describe Ci::CreatePipelineService, '#execute' do
end
context 'when sidekiq processes the job', :sidekiq_inline do
let_it_be(:runner) { create(:ci_runner, :online) }
it 'transitions to pending status and triggers a downstream pipeline' do
pipeline = create_pipeline!

View file

@@ -202,6 +202,11 @@ RSpec.describe Ci::CreatePipelineService do
YAML
end
context 'when there are runners matching the builds' do
before do
create(:ci_runner, :online)
end
it 'creates a pipeline with build_a and test_b pending; deploy_b manual', :sidekiq_inline do
processables = pipeline.processables
@@ -211,7 +216,7 @@ RSpec.describe Ci::CreatePipelineService do
deploy_a = processables.find { |processable| processable.name == 'deploy_a' }
deploy_b = processables.find { |processable| processable.name == 'deploy_b' }
expect(pipeline).to be_persisted
expect(pipeline).to be_created_successfully
expect(build_a.status).to eq('pending')
expect(test_a.status).to eq('created')
expect(test_b.status).to eq('pending')
@@ -220,6 +225,17 @@ RSpec.describe Ci::CreatePipelineService do
end
end
context 'when there are no runners matching the builds' do
it 'creates a pipeline but all jobs failed', :sidekiq_inline do
processables = pipeline.processables
expect(pipeline).to be_created_successfully
expect(processables).to all be_failed
expect(processables.map(&:failure_reason)).to all eq('no_matching_runner')
end
end
end
context 'when needs is empty hash' do
let(:config) do
<<~YAML

View file

@@ -7,6 +7,7 @@ RSpec.describe Ci::CreatePipelineService do
let_it_be(:project, reload: true) { create(:project, :repository) }
let_it_be(:user, reload: true) { project.owner }
let_it_be(:runner) { create(:ci_runner, :online, tag_list: %w[postgres mysql ruby]) }
let(:ref_name) { 'refs/heads/master' }

View file

@@ -0,0 +1,80 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineCreation::DropNotRunnableBuildsService do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be_with_reload(:pipeline) do
create(:ci_pipeline, project: project, status: :created)
end
let_it_be_with_reload(:job) do
create(:ci_build, project: project, pipeline: pipeline)
end
describe '#execute' do
subject(:execute) { described_class.new(pipeline).execute }
shared_examples 'jobs allowed to run' do
it 'does not drop the jobs' do
expect { execute }.not_to change { job.reload.status }
end
end
context 'when the feature flag is disabled' do
before do
stub_feature_flags(ci_drop_new_builds_when_ci_quota_exceeded: false)
end
it_behaves_like 'jobs allowed to run'
end
context 'when the pipeline status is running' do
before do
pipeline.update!(status: :running)
end
it_behaves_like 'jobs allowed to run'
end
context 'when there are no runners available' do
let_it_be(:offline_project_runner) do
create(:ci_runner, runner_type: :project_type, projects: [project])
end
it 'drops the job' do
execute
job.reload
expect(job).to be_failed
expect(job.failure_reason).to eq('no_matching_runner')
end
end
context 'with project runners' do
let_it_be(:project_runner) do
create(:ci_runner, :online, runner_type: :project_type, projects: [project])
end
it_behaves_like 'jobs allowed to run'
end
context 'with group runners' do
let_it_be(:group_runner) do
create(:ci_runner, :online, runner_type: :group_type, groups: [group])
end
it_behaves_like 'jobs allowed to run'
end
context 'with instance runners' do
let_it_be(:instance_runner) do
create(:ci_runner, :online, runner_type: :instance_type)
end
it_behaves_like 'jobs allowed to run'
end
end
end

View file

@@ -0,0 +1,29 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PipelineCreation::StartPipelineService do
let(:pipeline) { build(:ci_pipeline) }
subject(:service) { described_class.new(pipeline) }
describe '#execute' do
it 'calls the pipeline runners matching validation service' do
expect(Ci::PipelineCreation::DropNotRunnableBuildsService)
.to receive(:new)
.with(pipeline)
.and_return(double('service', execute: true))
service.execute
end
it 'calls the pipeline process service' do
expect(Ci::ProcessPipelineService)
.to receive(:new)
.with(pipeline)
.and_return(double('service', execute: true))
service.execute
end
end
end

View file

@@ -859,6 +859,8 @@ RSpec.shared_examples 'Pipeline Processing Service' do
end
context 'when a bridge job has parallel:matrix config', :sidekiq_inline do
let_it_be(:runner) { create(:ci_runner, :online) }
let(:parent_config) do
<<-EOY
test:

View file

@@ -3,6 +3,7 @@
RSpec.shared_context 'Pipeline Processing Service Tests With Yaml' do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { project.owner }
let_it_be(:runner) { create(:ci_runner, :online) }
where(:test_file_path) do
Dir.glob(Rails.root.join('spec/services/ci/pipeline_processing/test_cases/*.yml'))

View file

@@ -4,11 +4,18 @@ require 'spec_helper'
RSpec.describe Ci::InitialPipelineProcessWorker do
describe '#perform' do
let_it_be(:pipeline) { create(:ci_pipeline, :with_job, status: :created) }
let_it_be_with_reload(:pipeline) do
create(:ci_pipeline, :with_job, status: :created)
end
include_examples 'an idempotent worker' do
let(:job_args) { pipeline.id }
context 'when there are runners available' do
before do
create(:ci_runner, :online)
end
it 'marks the pipeline as pending' do
expect(pipeline).to be_created
@@ -17,5 +24,14 @@ RSpec.describe Ci::InitialPipelineProcessWorker do
expect(pipeline.reload).to be_pending
end
end
it 'marks the pipeline as failed' do
expect(pipeline).to be_created
subject
expect(pipeline.reload).to be_failed
end
end
end
end

View file

@@ -212,7 +212,6 @@ RSpec.describe 'Every Sidekiq worker' do
'ElasticCommitIndexerWorker' => 2,
'ElasticDeleteProjectWorker' => 2,
'ElasticFullIndexWorker' => 2,
'ElasticIndexerWorker' => 2,
'ElasticIndexingControlWorker' => 3,
'ElasticNamespaceIndexerWorker' => 2,
'ElasticNamespaceRolloutWorker' => 2,