Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-05-26 06:08:37 +00:00
parent 47ef8c6c53
commit 864dae0d98
37 changed files with 289 additions and 523 deletions

@@ -11,10 +11,8 @@ tasks:
 (
 set -e
 cd /workspace/gitlab-development-kit
-# GitLab FOSS
-[[ -d /workspace/gitlab-foss ]] && ln -fs /workspace/gitlab-foss /workspace/gitlab-development-kit/gitlab
-# GitLab
-[[ -d /workspace/gitlab ]] && ln -fs /workspace/gitlab /workspace/gitlab-development-kit/gitlab
+# Ensure GitLab directory is symlinked under the GDK
+ln -nfs "$GITPOD_REPO_ROOT" /workspace/gitlab-development-kit/gitlab
 mv /workspace/gitlab-development-kit/secrets.yml /workspace/gitlab-development-kit/gitlab/config
 # ensure gdk.yml has correct instance settings
 gdk config set gitlab.rails.port 443

@@ -121,7 +121,7 @@ export default {
 @error="error = $event"
 @updated="$emit('workItemUpdated')"
 />
-<work-item-links :work-item-id="workItem.id" />
+<work-item-links v-if="glFeatures.workItemsHierarchy" :work-item-id="workItem.id" />
 </template>
 </section>
 </template>

@@ -30,6 +30,11 @@ export default {
 toggleIcon() {
 return this.isOpen ? 'angle-up' : 'angle-down';
 },
+toggleLabel() {
+return this.isOpen
+? s__('WorkItem|Collapse child items')
+: s__('WorkItem|Expand child items');
+},
 },
 methods: {
 toggle() {
@@ -60,6 +65,7 @@ export default {
 <gl-button
 category="tertiary"
 :icon="toggleIcon"
+:aria-label="toggleLabel"
 data-testid="toggle-links"
 @click="toggle"
 />

@@ -4,6 +4,7 @@ class Projects::WorkItemsController < Projects::ApplicationController
 before_action do
 push_force_frontend_feature_flag(:work_items, project&.work_items_feature_flag_enabled?)
 push_frontend_feature_flag(:work_item_assignees)
+push_frontend_feature_flag(:work_items_hierarchy, project)
 end
 feature_category :team_planning

@@ -215,14 +215,10 @@ module Ci
 end
 def downstream_variables
-if ::Feature.enabled?(:ci_trigger_forward_variables, project)
-calculate_downstream_variables
-.reverse # variables priority
-.uniq { |var| var[:key] } # only one variable key to pass
-.reverse
-else
-legacy_downstream_variables
-end
+calculate_downstream_variables
+.reverse # variables priority
+.uniq { |var| var[:key] } # only one variable key to pass
+.reverse
 end
 def target_revision_ref
@@ -268,16 +264,6 @@
 }
 end
-def legacy_downstream_variables
-variables = scoped_variables.concat(pipeline.persisted_variables)
-variables.to_runner_variables.yield_self do |all_variables|
-yaml_variables.to_a.map do |hash|
-{ key: hash[:key], value: ::ExpandVariables.expand(hash[:value], all_variables) }
-end
-end
-end
 def calculate_downstream_variables
 expand_variables = scoped_variables
 .concat(pipeline.persisted_variables)
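As a side note on the change above: the `.reverse` / `.uniq` / `.reverse` chain keeps exactly one entry per variable key, so the later, higher-priority definition wins. A minimal standalone Ruby sketch, with invented sample data, illustrates the effect:

```ruby
# Illustration only: when the same key appears twice, the later entry wins.
vars = [
  { key: 'VAR', value: 'from yaml' },
  { key: 'OTHER', value: 'kept' },
  { key: 'VAR', value: 'from pipeline' } # appended later, higher priority
]

deduped = vars
  .reverse                  # walk from highest priority to lowest
  .uniq { |var| var[:key] } # keep the first (highest-priority) entry per key
  .reverse                  # restore the original ordering

# deduped now contains OTHER => "kept" and VAR => "from pipeline"
puts deduped.inspect
```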

@@ -2416,24 +2416,6 @@
 :weight: 1
 :idempotent: true
 :tags: []
-- :name: issue_placement
-:worker_name: IssuePlacementWorker
-:feature_category: :team_planning
-:has_external_dependencies:
-:urgency: :high
-:resource_boundary: :cpu
-:weight: 2
-:idempotent: true
-:tags: []
-- :name: issue_rebalancing
-:worker_name: IssueRebalancingWorker
-:feature_category: :team_planning
-:has_external_dependencies:
-:urgency: :low
-:resource_boundary: :unknown
-:weight: 1
-:idempotent: true
-:tags: []
 - :name: issues_placement
 :worker_name: Issues::PlacementWorker
 :feature_category: :team_planning

@@ -1,71 +0,0 @@
# frozen_string_literal: true
# DEPRECATED. Will be removed in 14.7 https://gitlab.com/gitlab-org/gitlab/-/merge_requests/72803
# Please use Issues::PlacementWorker instead
#
# todo: remove this worker and it's queue definition from all_queues after Issues::PlacementWorker is deployed
# We want to keep it for one release in case some jobs are already scheduled in the old queue so we need the worker
# to be available to finish those. All new jobs will be queued into the new queue.
class IssuePlacementWorker
include ApplicationWorker
data_consistency :always
sidekiq_options retry: 3
idempotent!
deduplicate :until_executed, including_scheduled: true
feature_category :team_planning
urgency :high
worker_resource_boundary :cpu
weight 2
# Move at most the most recent 100 issues
QUERY_LIMIT = 100
# rubocop: disable CodeReuse/ActiveRecord
def perform(issue_id, project_id = nil)
issue = find_issue(issue_id, project_id)
return unless issue
# Temporary disable moving null elements because of performance problems
# For more information check https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4321
return if issue.blocked_for_repositioning?
# Move the oldest 100 unpositioned items to the end.
# This is to deal with out-of-order execution of the worker,
# while preserving creation order.
to_place = Issue
.relative_positioning_query_base(issue)
.with_null_relative_position
.order({ created_at: :asc }, { id: :asc })
.limit(QUERY_LIMIT + 1)
.to_a
leftover = to_place.pop if to_place.count > QUERY_LIMIT
Issue.move_nulls_to_end(to_place)
Issues::BaseService.new(project: nil).rebalance_if_needed(to_place.max_by(&:relative_position))
Issues::PlacementWorker.perform_async(nil, leftover.project_id) if leftover.present?
rescue RelativePositioning::NoSpaceLeft => e
Gitlab::ErrorTracking.log_exception(e, issue_id: issue_id, project_id: project_id)
Issues::RebalancingWorker.perform_async(nil, *root_namespace_id_to_rebalance(issue, project_id))
end
def find_issue(issue_id, project_id)
return Issue.id_in(issue_id).take if issue_id
project = Project.id_in(project_id).take
return unless project
project.issues.take
end
# rubocop: enable CodeReuse/ActiveRecord
private
def root_namespace_id_to_rebalance(issue, project_id)
project_id = project_id.presence || issue.project_id
Project.find(project_id)&.self_or_root_group_ids
end
end

@@ -1,56 +0,0 @@
# frozen_string_literal: true
# DEPRECATED. Will be removed in 14.7 https://gitlab.com/gitlab-org/gitlab/-/merge_requests/72803
# Please use Issues::RebalancingWorker instead
#
# todo: remove this worker and it's queue definition from all_queues after Issue::RebalancingWorker is released.
# We want to keep it for one release in case some jobs are already scheduled in the old queue so we need the worker
# to be available to finish those. All new jobs will be queued into the new queue.
class IssueRebalancingWorker
include ApplicationWorker
data_consistency :always
sidekiq_options retry: 3
idempotent!
urgency :low
feature_category :team_planning
deduplicate :until_executed, including_scheduled: true
def perform(ignore = nil, project_id = nil, root_namespace_id = nil)
# we need to have exactly one of the project_id and root_namespace_id params be non-nil
raise ArgumentError, "Expected only one of the params project_id: #{project_id} and root_namespace_id: #{root_namespace_id}" if project_id && root_namespace_id
return if project_id.nil? && root_namespace_id.nil?
return if ::Gitlab::Issues::Rebalancing::State.rebalance_recently_finished?(project_id, root_namespace_id)
# pull the projects collection to be rebalanced either the project if namespace is not a group(i.e. user namesapce)
# or the root namespace, this also makes the worker backward compatible with previous version where a project_id was
# passed as the param
projects_to_rebalance = projects_collection(project_id, root_namespace_id)
# something might have happened with the namespace between scheduling the worker and actually running it,
# maybe it was removed.
if projects_to_rebalance.blank?
Gitlab::ErrorTracking.log_exception(
ArgumentError.new("Projects to be rebalanced not found for arguments: project_id #{project_id}, root_namespace_id: #{root_namespace_id}"),
{ project_id: project_id, root_namespace_id: root_namespace_id })
return
end
Issues::RelativePositionRebalancingService.new(projects_to_rebalance).execute
rescue Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances => e
Gitlab::ErrorTracking.log_exception(e, root_namespace_id: root_namespace_id, project_id: project_id)
end
private
def projects_collection(project_id, root_namespace_id)
# we can have either project_id(older version) or project_id if project is part of a user namespace and not a group
# or root_namespace_id(newer version) never both.
return Project.id_in([project_id]) if project_id
Namespace.find_by_id(root_namespace_id)&.all_projects
end
end

@@ -1,8 +1,8 @@
 ---
-name: ci_trigger_forward_variables
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82676
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/355572
-milestone: '14.9'
+name: work_items_hierarchy
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/88504
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/363447
+milestone: '15.1'
 type: development
-group: group::pipeline authoring
-default_enabled: true
+group: group::product planning
+default_enabled: false

@@ -233,10 +233,6 @@
 - 1
 - - issuables_clear_groups_issue_counter
 - 1
-- - issue_placement
-- 2
-- - issue_rebalancing
-- 1
 - - issues_placement
 - 2
 - - issues_rebalancing

@@ -144,6 +144,7 @@ The following metrics are available from the `/metrics` endpoint:
 - `gitaly_praefect_node_latency_bucket`, a histogram measuring the latency in Gitaly returning
 health check information to Praefect. This indicates Praefect connection saturation. Available in
 GitLab 12.10 and later.
+- `gitaly_praefect_connections_total`, the total number of connections to Praefect. [Introduced](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/4220) in GitLab 14.7.
 To monitor [strong consistency](index.md#strong-consistency), you can use the following Prometheus metrics:

@@ -3706,12 +3706,9 @@ successfully complete before starting.
 #### `trigger:forward`
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/213729) in GitLab 14.9 [with a flag](../../administration/feature_flags.md) named `ci_trigger_forward_variables`. Disabled by default.
-FLAG:
-On self-managed GitLab, by default this feature is not available. To make it available,
-ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `ci_trigger_forward_variables`.
-The feature is not ready for production use.
+> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/213729) in GitLab 14.9 [with a flag](../../administration/feature_flags.md) named `ci_trigger_forward_variables`. Disabled by default.
+> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/355572) in GitLab 14.10.
+> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/355572) in GitLab 15.1. [Feature flag ci_trigger_forward_variables](https://gitlab.com/gitlab-org/gitlab/-/issues/355572) removed.
 Use `trigger:forward` to specify what to forward to the downstream pipeline. You can control
 what is forwarded to both [parent-child pipelines](../pipelines/parent_child_pipelines.md)

@@ -215,6 +215,39 @@ In this case, the `total time` and `top-level time` numbers match more closely:
 8 8 0.0477s 0.0477s 0.0477s namespace
 ```
+#### Stubbing methods within factories
+You should avoid using `allow(object).to receive(:method)` in factories, as this makes the factory unable to be used with `let_it_be`.
+Instead, you can use `stub_method` to stub the method:
+```ruby
+before(:create) do |user, evaluator|
+# Stub a method.
+stub_method(user, :some_method) { 'stubbed!' }
+# Or with arguments, including named ones
+stub_method(user, :some_method) { |var1| "Returning #{var1}!" }
+stub_method(user, :some_method) { |var1: 'default'| "Returning #{var1}!" }
+end
+# Un-stub the method.
+# This may be useful where the stubbed object is created with `let_it_be`
+# and you want to reset the method between tests.
+after(:create) do |user, evaluator|
+restore_original_method(user, :some_method)
+# or
+restore_original_methods(user)
+end
+```
+NOTE:
+`stub_method` does not work when used in conjunction with `let_it_be_with_refind`. This is because `stub_method` will stub a method on an instance and `let_it_be_with_refind` will create a new instance of the object for each run.
+`stub_method` does not support method existence and method arity checks.
+WARNING:
+`stub_method` is supposed to be used in factories only. It's strongly discouraged to be used elsewhere. Please consider using [RSpec's mocks](https://relishapp.com/rspec/rspec-mocks/v/3-10/docs/basics) if available.
 #### Identify slow tests
 Running a spec with profiling is a good way to start optimizing a spec. This can
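To make the factory guidance above concrete, here is a minimal sketch of how a trait built on `stub_method` might be consumed from a spec using `let_it_be`. The `:user_with_stubbed_storage` factory, the `storage_size` method, and the spec itself are hypothetical, invented purely for illustration:

```ruby
require 'spec_helper'

# Hypothetical factory: stubs a method without rspec-mocks, so records
# created this way stay safe to share across examples via let_it_be.
FactoryBot.define do
  factory :user_with_stubbed_storage, parent: :user do
    after(:build) do |user|
      stub_method(user, :storage_size) { 42 }
    end
  end
end

RSpec.describe 'storage reporting' do
  # The stub is a singleton method on the record itself, not rspec-mocks
  # state, so it persists on the shared let_it_be record between examples.
  let_it_be(:user) { create(:user_with_stubbed_storage) }

  it 'uses the stubbed value' do
    expect(user.storage_size).to eq(42)
  end
end
```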

@@ -182,10 +182,12 @@ Follow the Google Workspace documentation on
 [setting up SSO with Google as your identity provider](https://support.google.com/a/answer/6087519?hl=en)
 with the notes below for consideration.
 | GitLab setting | Google Workspace field |
-|:-------------------------------|:-----------------------|
+|:-------------------------------------|:-----------------------|
 | Identifier | Entity ID |
 | Assertion consumer service URL | ACS URL |
+| GitLab single sign-on URL | Start URL |
+| Identity provider single sign-on URL | SSO URL |
 You must download the certificate to get the SHA1 certificate fingerprint.
@@ -200,8 +202,7 @@ For NameID, the following settings are recommended:
 - **Name ID format** is set to `EMAIL`.
 - **NameID** set to `Basic Information > Primary email`.
-When selecting **Verify SAML Configuration** on the GitLab SAML SSO page, disregard the warning about the NameID format
-"persistent" being recommended.
+When selecting **Verify SAML Configuration** on the GitLab SAML SSO page, disregard the warning recommending setting the NameID format to "persistent".
 See the [troubleshooting page](../../../administration/troubleshooting/group_saml_scim.md#google-workspace) for an example configuration.

@@ -42939,6 +42939,9 @@ msgstr ""
 msgid "WorkItem|Child items"
 msgstr ""
+msgid "WorkItem|Collapse child items"
+msgstr ""
 msgid "WorkItem|Convert to work item"
 msgstr ""
@@ -42948,6 +42951,9 @@ msgstr ""
 msgid "WorkItem|Delete work item"
 msgstr ""
+msgid "WorkItem|Expand child items"
+msgstr ""
 msgid "WorkItem|New Task"
 msgstr ""

@@ -1,13 +1,7 @@
 # frozen_string_literal: true
 module QA
-# TODO:
-# Remove FF :ci_trigger_forward_variables
-# when https://gitlab.com/gitlab-org/gitlab/-/issues/355572 is closed
-RSpec.describe 'Verify', :runner, feature_flag: {
-name: 'ci_trigger_forward_variables',
-scope: :global
-} do
+RSpec.describe 'Verify', :runner do
 describe 'Pipeline API defined variable inheritance' do
 include_context 'variable inheritance test prep'

@@ -36,7 +36,7 @@ module QA
 mr_page.rebase!
-expect(mr_page).to have_merge_button
+expect { mr_page.has_merge_button? }.to eventually_be_truthy.within(max_duration: 60, reload_page: mr_page)
 mr_page.merge!

@@ -1,13 +1,7 @@
 # frozen_string_literal: true
 module QA
-# TODO:
-# Remove FF :ci_trigger_forward_variables
-# when https://gitlab.com/gitlab-org/gitlab/-/issues/355572 is closed
-RSpec.describe 'Verify', :runner, feature_flag: {
-name: 'ci_trigger_forward_variables',
-scope: :global
-} do
+RSpec.describe 'Verify', :runner do
 describe 'UI defined variable' do
 include_context 'variable inheritance test prep'

@@ -1,13 +1,7 @@
 # frozen_string_literal: true
 module QA
-# TODO:
-# Remove FF :ci_trigger_forward_variables
-# when https://gitlab.com/gitlab-org/gitlab/-/issues/355572 is closed
-RSpec.describe 'Verify', :runner, feature_flag: {
-name: 'ci_trigger_forward_variables',
-scope: :global
-} do
+RSpec.describe 'Verify', :runner do
 describe 'UI defined variable' do
 include_context 'variable inheritance test prep'

@@ -45,13 +45,11 @@ module QA
 end
 before do
-Runtime::Feature.enable(:ci_trigger_forward_variables)
 Flow::Login.sign_in
 end
 after do
 runner.remove_via_api!
-Runtime::Feature.disable(:ci_trigger_forward_variables)
 end
 def start_pipeline_with_variable

@@ -494,13 +494,15 @@ FactoryBot.define do
 trait :with_commit do
 after(:build) do |build|
-allow(build).to receive(:commit).and_return build(:commit, :without_author)
+commit = build(:commit, :without_author)
+stub_method(build, :commit) { commit }
 end
 end
 trait :with_commit_and_author do
 after(:build) do |build|
-allow(build).to receive(:commit).and_return build(:commit)
+commit = build(:commit)
+stub_method(build, :commit) { commit }
 end
 end

@@ -10,19 +10,18 @@ FactoryBot.define do
 before(:create) do |_record, evaluator|
 if evaluator.helm_installed
-allow(Gitlab::Kubernetes::Helm::V2::Certificate).to receive(:generate_root)
-.and_return(
-double(
-key_string: File.read(Rails.root.join('spec/fixtures/clusters/sample_key.key')),
-cert_string: File.read(Rails.root.join('spec/fixtures/clusters/sample_cert.pem'))
-)
-)
+stub_method(Gitlab::Kubernetes::Helm::V2::Certificate, :generate_root) do
+OpenStruct.new( # rubocop: disable Style/OpenStructUse
+key_string: File.read(Rails.root.join('spec/fixtures/clusters/sample_key.key')),
+cert_string: File.read(Rails.root.join('spec/fixtures/clusters/sample_cert.pem'))
+)
+end
 end
 end
 after(:create) do |_record, evaluator|
 if evaluator.helm_installed
-allow(Gitlab::Kubernetes::Helm::V2::Certificate).to receive(:generate_root).and_call_original
+restore_original_methods(Gitlab::Kubernetes::Helm::V2::Certificate)
 end
 end

@@ -28,19 +28,20 @@ FactoryBot.define do
 end
 after(:build) do |commit, evaluator|
-allow(commit).to receive(:author).and_return(evaluator.author || build_stubbed(:author))
-allow(commit).to receive(:parent_ids).and_return([])
+author = evaluator.author || build_stubbed(:author)
+stub_method(commit, :author) { author }
+stub_method(commit, :parent_ids) { [] }
 end
 trait :merge_commit do
 after(:build) do |commit|
-allow(commit).to receive(:parent_ids).and_return(Array.new(2) { SecureRandom.hex(20) })
+stub_method(commit, :parent_ids) { Array.new(2) { SecureRandom.hex(20) } }
 end
 end
 trait :without_author do
 after(:build) do |commit|
-allow(commit).to receive(:author).and_return nil
+stub_method(commit, :author) { nil }
 end
 end
 end

@@ -85,13 +85,12 @@ FactoryBot.define do
 tags = evaluator.tags
 # convert Array into Hash
 tags = tags.product(['sha256:4c8e63ca4cb663ce6c688cb06f1c372b088dac5b6d7ad7d49cd620d85cf72a15']).to_h unless tags.is_a?(Hash)
-allow(repository.client)
-.to receive(:repository_tags)
-.and_return({
+stub_method(repository.client, :repository_tags) do |*args|
+{
 'name' => repository.path,
 'tags' => tags.keys
-})
+}
+end
 tags.each_pair do |tag, digest|
 allow(repository.client)

@@ -15,7 +15,7 @@ FactoryBot.define do
 deployment.user ||= deployment.project.creator
 unless deployment.project.repository_exists?
-allow(deployment.project.repository).to receive(:create_ref)
+stub_method(deployment.project.repository, :create_ref) { nil }
 end
 if deployment.cluster && deployment.cluster.project_type? && deployment.cluster.project.nil?

@@ -320,7 +320,7 @@ FactoryBot.define do
 # Fake `fetch_ref!` if we don't have repository
 # We have too many existing tests relying on this behaviour
 unless [target_project, source_project].all?(&:repository_exists?)
-allow(merge_request).to receive(:fetch_ref!)
+stub_method(merge_request, :fetch_ref!) { nil }
 end
 end

@@ -301,8 +301,8 @@ FactoryBot.define do
 trait :stubbed_repository do
 after(:build) do |project|
-allow(project).to receive(:empty_repo?).and_return(false)
-allow(project.repository).to receive(:empty?).and_return(false)
+stub_method(project, :empty_repo?) { false }
+stub_method(project.repository, :empty?) { false }
 end
 end

@@ -8,8 +8,7 @@ RSpec.describe ::Routing::PseudonymizationHelper do
 let_it_be(:project) { create(:project, group: group) }
 let_it_be(:subproject) { create(:project, group: subgroup) }
 let_it_be(:issue) { create(:issue, project: project) }
-let(:merge_request) { create(:merge_request, source_project: project) }
+let_it_be(:merge_request) { create(:merge_request, source_project: project) }
 let(:subject) { helper.masked_page_url(group: group, project: project) }

@@ -3,9 +3,10 @@
 require 'spec_helper'
 RSpec.describe Gitlab::Ci::Status::Build::Play do
-let(:user) { create(:user) }
-let(:project) { create(:project, :stubbed_repository) }
-let(:build) { create(:ci_build, :manual, project: project) }
+let_it_be(:user) { create(:user) }
+let_it_be(:project) { create(:project, :stubbed_repository) }
+let_it_be_with_refind(:build) { create(:ci_build, :manual, project: project) }
 let(:status) { Gitlab::Ci::Status::Core.new(build, user) }
 subject { described_class.new(status) }

@@ -3,8 +3,9 @@
 require 'spec_helper'
 RSpec.describe Gitlab::Ci::Status::Build::Scheduled do
-let(:user) { create(:user) }
-let(:project) { create(:project, :stubbed_repository) }
+let_it_be(:user) { create(:user) }
+let_it_be(:project) { create(:project, :stubbed_repository) }
 let(:build) { create(:ci_build, :scheduled, project: project) }
 let(:status) { Gitlab::Ci::Status::Core.new(build, user) }

@@ -259,25 +259,16 @@ RSpec.describe Ci::Bridge do
 context 'forward variables' do
 using RSpec::Parameterized::TableSyntax
-where(:yaml_variables, :pipeline_variables, :ff, :variables) do
-nil | nil | true | %w[BRIDGE]
-nil | false | true | %w[BRIDGE]
-nil | true | true | %w[BRIDGE PVAR1]
-false | nil | true | %w[]
-false | false | true | %w[]
-false | true | true | %w[PVAR1]
-true | nil | true | %w[BRIDGE]
-true | false | true | %w[BRIDGE]
-true | true | true | %w[BRIDGE PVAR1]
-nil | nil | false | %w[BRIDGE]
-nil | false | false | %w[BRIDGE]
-nil | true | false | %w[BRIDGE]
-false | nil | false | %w[BRIDGE]
-false | false | false | %w[BRIDGE]
-false | true | false | %w[BRIDGE]
-true | nil | false | %w[BRIDGE]
-true | false | false | %w[BRIDGE]
-true | true | false | %w[BRIDGE]
+where(:yaml_variables, :pipeline_variables, :variables) do
+nil | nil | %w[BRIDGE]
+nil | false | %w[BRIDGE]
+nil | true | %w[BRIDGE PVAR1]
+false | nil | %w[]
+false | false | %w[]
+false | true | %w[PVAR1]
+true | nil | %w[BRIDGE]
+true | false | %w[BRIDGE]
+true | true | %w[BRIDGE PVAR1]
 end
 with_them do
@@ -292,10 +283,6 @@
 }
 end
-before do
-stub_feature_flags(ci_trigger_forward_variables: ff)
-end
 it 'returns variables according to the forward value' do
 expect(bridge.downstream_variables.map { |v| v[:key] }).to contain_exactly(*variables)
 end

@@ -2,6 +2,7 @@
 FactoryBot::SyntaxRunner.class_eval do
 include RSpec::Mocks::ExampleMethods
+include StubMethodCalls
 # FactoryBot doesn't allow yet to add a helper that can be used in factories
 # While the fixture_file_upload helper is reasonable to be used there:

@@ -0,0 +1,66 @@
# frozen_string_literal: true
# Used to stub methods for factories where we can't
# use rspec-mocks.
#
# Examples:
# stub_method(user, :some_method) { |var1, var2| var1 + var2 }
# stub_method(user, :some_method) { true }
# stub_method(user, :some_method) => nil
# stub_method(user, :some_method) do |*args|
# true
# end
#
# restore_original_method(user, :some_method)
# restore_original_methods(user)
#
module StubMethodCalls
AlreadyImplementedError = Class.new(StandardError)
def stub_method(object, method, &block)
Backup.stub_method(object, method, &block)
end
def restore_original_method(object, method)
Backup.restore_method(object, method)
end
def restore_original_methods(object)
Backup.stubbed_methods(object).each_key { |method, backed_up_method| restore_original_method(object, method) }
end
module Backup
def self.stubbed_methods(object)
return {} unless object.respond_to?(:_stubbed_methods)
object._stubbed_methods
end
def self.backup_method(object, method)
backed_up_methods = stubbed_methods(object)
backed_up_methods[method] = object.respond_to?(method) ? object.method(method) : nil
object.define_singleton_method(:_stubbed_methods) { backed_up_methods }
end
def self.stub_method(object, method, &block)
raise ArgumentError, "Block is required" unless block_given?
backup_method(object, method) unless backed_up_method?(object, method)
object.define_singleton_method(method, &block)
end
def self.restore_method(object, method)
raise NotImplementedError, "#{method} has not been stubbed on #{object}" unless backed_up_method?(object, method)
object.singleton_class.remove_method(method)
backed_up_method = stubbed_methods(object)[method]
object.define_singleton_method(method, backed_up_method) if backed_up_method
end
def self.backed_up_method?(object, method)
stubbed_methods(object).key?(method)
end
end
end
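For readers unfamiliar with the mechanism in the helper above: `stub_method` works by defining a singleton method on the target object, and `restore_method` removes it again so the original implementation shows through. A minimal standalone Ruby demo, using a hypothetical `Widget` class invented for illustration, sketches that behaviour:

```ruby
# Standalone sketch of the singleton-method mechanism used by the helper.
class Widget
  def label
    'original'
  end
end

widget = Widget.new

# "Stub": define a singleton method that shadows the instance method.
widget.define_singleton_method(:label) { 'stubbed' }
puts widget.label # => "stubbed"

# "Restore": remove the singleton method so the original is visible again.
widget.singleton_class.remove_method(:label)
puts widget.label # => "original"
```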

@@ -0,0 +1,107 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe StubMethodCalls do
include described_class
let(:object) do
Class.new do
def self.test_method
'test'
end
def self.test_method_two(response: nil)
response || 'test_two'
end
end
end
describe '#stub_method' do
let(:method_to_stub) { :test_method }
it 'stubs the method response' do
stub_method(object, method_to_stub) { true }
expect(object.send(method_to_stub)).to eq(true)
end
context 'when calling it on an already stubbed method' do
before do
stub_method(object, method_to_stub) { false }
end
it 'stubs correctly' do
stub_method(object, method_to_stub) { true }
expect(object.send(method_to_stub)).to eq(true)
end
end
context 'methods that accept arguments' do
it 'stubs correctly' do
stub_method(object, method_to_stub) { |a, b| a + b }
expect(object.send(method_to_stub, 1, 2)).to eq(3)
end
context 'methods that use named arguments' do
let(:method_to_stub) { :test_method_two }
it 'stubs correctly' do
stub_method(object, method_to_stub) { |a: 'test'| a }
expect(object.send(method_to_stub, a: 'testing')).to eq('testing')
expect(object.send(method_to_stub)).to eq('test')
end
context 'stubbing non-existent method' do
let(:method_to_stub) { :another_method }
it 'stubs correctly' do
stub_method(object, method_to_stub) { |a: 'test'| a }
expect(object.send(method_to_stub, a: 'testing')).to eq('testing')
expect(object.send(method_to_stub)).to eq('test')
end
end
end
end
end
describe '#restore_original_method' do
before do
stub_method(object, :test_method) { true }
end
it 'restores original behaviour' do
expect(object.test_method).to eq(true)
restore_original_method(object, :test_method)
expect(object.test_method).to eq('test')
end
context 'method is not stubbed' do
specify do
expect do
restore_original_method(object, 'some_other_method')
end.to raise_error(NotImplementedError, "some_other_method has not been stubbed on #{object}")
end
end
end
describe '#restore_original_methods' do
before do
stub_method(object, :test_method) { true }
stub_method(object, :test_method_two) { true }
end
it 'restores original behaviour' do
restore_original_methods(object)
expect(object.test_method).to eq('test')
expect(object.test_method_two).to eq('test_two')
end
end
end

@@ -308,8 +308,6 @@ RSpec.describe 'Every Sidekiq worker' do
 'InvalidGpgSignatureUpdateWorker' => 3,
 'IrkerWorker' => 3,
 'IssuableExportCsvWorker' => 3,
-'IssuePlacementWorker' => 3,
-'IssueRebalancingWorker' => 3,
 'Issues::PlacementWorker' => 3,
 'Issues::RebalancingWorker' => 3,
 'IterationsUpdateStatusWorker' => 3,

@@ -1,151 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe IssuePlacementWorker do
describe '#perform' do
let_it_be(:time) { Time.now.utc }
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:author) { create(:user) }
let_it_be(:common_attrs) { { author: author, project: project } }
let_it_be(:unplaced) { common_attrs.merge(relative_position: nil) }
let_it_be_with_reload(:issue) { create(:issue, **unplaced, created_at: time) }
let_it_be_with_reload(:issue_a) { create(:issue, **unplaced, created_at: time - 1.minute) }
let_it_be_with_reload(:issue_b) { create(:issue, **unplaced, created_at: time - 2.minutes) }
let_it_be_with_reload(:issue_c) { create(:issue, **unplaced, created_at: time + 1.minute) }
let_it_be_with_reload(:issue_d) { create(:issue, **unplaced, created_at: time + 2.minutes) }
let_it_be_with_reload(:issue_e) { create(:issue, **common_attrs, relative_position: 10, created_at: time + 1.minute) }
let_it_be_with_reload(:issue_f) { create(:issue, **unplaced, created_at: time + 1.minute) }
let_it_be(:irrelevant) { create(:issue, relative_position: nil, created_at: time) }
shared_examples 'running the issue placement worker' do
let(:issue_id) { issue.id }
let(:project_id) { project.id }
it 'places all issues created at most 5 minutes before this one at the end, most recent last' do
expect { run_worker }.not_to change { irrelevant.reset.relative_position }
expect(project.issues.order_by_relative_position)
.to eq([issue_e, issue_b, issue_a, issue, issue_c, issue_f, issue_d])
expect(project.issues.where(relative_position: nil)).not_to exist
end
it 'schedules rebalancing if needed' do
issue_a.update!(relative_position: RelativePositioning::MAX_POSITION)
expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
run_worker
end
context 'there are more than QUERY_LIMIT unplaced issues' do
before_all do
# Ensure there are more than N issues in this set
n = described_class::QUERY_LIMIT
create_list(:issue, n - 5, **unplaced)
end
it 'limits the sweep to QUERY_LIMIT records, and reschedules placement' do
expect(Issue).to receive(:move_nulls_to_end)
.with(have_attributes(count: described_class::QUERY_LIMIT))
.and_call_original
expect(Issues::PlacementWorker).to receive(:perform_async).with(nil, project.id)
run_worker
expect(project.issues.where(relative_position: nil)).to exist
end
it 'is eventually correct' do
prefix = project.issues.where.not(relative_position: nil).order(:relative_position).to_a
moved = project.issues.where.not(id: prefix.map(&:id))
run_worker
expect(project.issues.where(relative_position: nil)).to exist
run_worker
expect(project.issues.where(relative_position: nil)).not_to exist
expect(project.issues.order(:relative_position)).to eq(prefix + moved.order(:created_at, :id))
end
end
context 'we are passed bad IDs' do
let(:issue_id) { non_existing_record_id }
let(:project_id) { non_existing_record_id }
def max_positions_by_project
Issue
.group(:project_id)
.pluck(:project_id, Issue.arel_table[:relative_position].maximum.as('max_relative_position'))
.to_h
end
it 'does move any issues to the end' do
expect { run_worker }.not_to change { max_positions_by_project }
end
context 'the project_id refers to an empty project' do
let!(:project_id) { create(:project).id }
it 'does move any issues to the end' do
expect { run_worker }.not_to change { max_positions_by_project }
end
end
end
it 'anticipates the failure to place the issues, and schedules rebalancing' do
allow(Issue).to receive(:move_nulls_to_end) { raise RelativePositioning::NoSpaceLeft }
expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
expect(Gitlab::ErrorTracking)
.to receive(:log_exception)
.with(RelativePositioning::NoSpaceLeft, worker_arguments)
run_worker
end
end
context 'passing an issue ID' do
def run_worker
described_class.new.perform(issue_id)
end
let(:worker_arguments) { { issue_id: issue_id, project_id: nil } }
it_behaves_like 'running the issue placement worker'
context 'when block_issue_repositioning is enabled' do
let(:issue_id) { issue.id }
let(:project_id) { project.id }
before do
stub_feature_flags(block_issue_repositioning: group)
end
it 'does not run repositioning tasks' do
expect { run_worker }.not_to change { issue.reset.relative_position }
end
end
end
context 'passing a project ID' do
def run_worker
described_class.new.perform(nil, project_id)
end
let(:worker_arguments) { { issue_id: nil, project_id: project_id } }
it_behaves_like 'running the issue placement worker'
end
end
it 'has the `until_executed` deduplicate strategy' do
expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
end
end

@@ -1,104 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe IssueRebalancingWorker, :clean_gitlab_redis_shared_state do
describe '#perform' do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:issue) { create(:issue, project: project) }
shared_examples 'running the worker' do
it 'runs an instance of Issues::RelativePositionRebalancingService' do
service = double(execute: nil)
service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
described_class.new.perform(*arguments)
end
it 'anticipates there being too many concurent rebalances' do
service = double
service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
allow(service).to receive(:execute).and_raise(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances)
expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances, include(project_id: arguments.second, root_namespace_id: arguments.third))
described_class.new.perform(*arguments)
end
it 'takes no action if the value is nil' do
expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
described_class.new.perform # all arguments are nil
end
it 'does not schedule a new rebalance if it finished under 1h ago' do
container_type = arguments.second.present? ? ::Gitlab::Issues::Rebalancing::State::PROJECT : ::Gitlab::Issues::Rebalancing::State::NAMESPACE
container_id = arguments.second || arguments.third
Gitlab::Redis::SharedState.with do |redis|
redis.set(::Gitlab::Issues::Rebalancing::State.send(:recently_finished_key, container_type, container_id), true)
end
expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
described_class.new.perform(*arguments)
end
end
shared_examples 'safely handles non-existent ids' do
it 'anticipates the inability to find the issue' do
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(ArgumentError, include(project_id: arguments.second, root_namespace_id: arguments.third))
expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
described_class.new.perform(*arguments)
end
end
context 'without root_namespace param' do
it_behaves_like 'running the worker' do
let(:arguments) { [-1, project.id] }
end
it_behaves_like 'safely handles non-existent ids' do
let(:arguments) { [nil, -1] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [-1, project.id] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, -1] }
end
end
context 'with root_namespace param' do
it_behaves_like 'running the worker' do
let(:arguments) { [nil, nil, group.id] }
end
it_behaves_like 'safely handles non-existent ids' do
let(:arguments) { [nil, nil, -1] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, nil, group.id] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, nil, -1] }
end
end
end
it 'has the `until_executed` deduplicate strategy' do
expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
end
end