Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-10-25 15:12:11 +00:00
parent e5d3d8c323
commit 82c63f420c
32 changed files with 1442 additions and 87 deletions

.gitignore vendored
View File

@@ -74,6 +74,7 @@ eslint-report.html
/.gitlab_kas_secret
/webpack-report/
/crystalball/
/test_results/
/deprecations/
/knapsack/
/rspec_flaky/

View File

@@ -885,5 +885,24 @@ fail-pipeline-early:
- install_gitlab_gem
script:
- fail_pipeline_early
rspec rspec-pg12-rerun-previous-failed-tests:
extends:
- .rspec-base-pg12
- .rails:rules:rerun-previous-failed-tests
stage: test
needs: ["setup-test-env", "compile-test-assets", "detect-previous-failed-tests"]
script:
- !reference [.base-script, script]
- rspec_rerun_previous_failed_tests tmp/previous_failed_tests/rspec_failed_files.txt
rspec rspec-ee-pg12-rerun-previous-failed-tests:
extends:
- "rspec rspec-pg12-rerun-previous-failed-tests"
- .rspec-ee-base-pg12
script:
- !reference [.base-script, script]
- rspec_rerun_previous_failed_tests tmp/previous_failed_tests/rspec_ee_failed_files.txt
# EE: Canonical MR pipelines
##################################################

View File

@@ -1198,6 +1198,18 @@
- changes: *code-backstage-patterns
- <<: *if-merge-request-labels-run-all-rspec
.rails:rules:detect-previous-failed-tests:
rules:
- <<: *if-merge-request-labels-run-all-rspec
- <<: *if-merge-request
changes: *code-backstage-patterns
.rails:rules:rerun-previous-failed-tests:
rules:
- <<: *if-merge-request-labels-run-all-rspec
- <<: *if-merge-request
changes: *code-backstage-patterns
.rails:rules:rspec-foss-impact:
rules:
- <<: *if-not-ee

View File

@@ -102,6 +102,23 @@ detect-tests as-if-foss:
before_script:
- '[ "$FOSS_ONLY" = "1" ] && rm -rf ee/ qa/spec/ee/ qa/qa/specs/features/ee/ qa/qa/ee/ qa/qa/ee.rb'
detect-previous-failed-tests:
extends:
- .detect-test-base
- .rails:rules:detect-previous-failed-tests
variables:
PREVIOUS_FAILED_TESTS_DIR: tmp/previous_failed_tests/
RSPEC_PG_REGEX: /rspec .+ pg12( .+)?/
RSPEC_EE_PG_REGEX: /rspec-ee .+ pg12( .+)?/
script:
- source ./scripts/utils.sh
- source ./scripts/rspec_helpers.sh
- retrieve_previous_failed_tests ${PREVIOUS_FAILED_TESTS_DIR} "${RSPEC_PG_REGEX}" "${RSPEC_EE_PG_REGEX}"
artifacts:
expire_in: 7d
paths:
- ${PREVIOUS_FAILED_TESTS_DIR}
add-jh-folder:
extends: .setup:rules:add-jh-folder
image: ${GITLAB_DEPENDENCY_PROXY}alpine:edge

View File

@@ -113,6 +113,7 @@ Naming/FileName:
- 'config.ru'
- 'config/**/*'
- 'ee/config/**/*'
- 'jh/config/**/*'
- 'db/**/*'
- 'ee/db/**/*'
- 'ee/elastic/migrate/*'
@@ -124,6 +125,7 @@ Naming/FileName:
- 'spec/**/*'
- 'tooling/bin/*'
- 'ee/spec/**/*'
- 'jh/spec/**/*'
- 'qa/bin/*'
- 'qa/spec/**/*'
- 'qa/qa/specs/**/*'

View File

@@ -398,7 +398,7 @@ group :development, :test do
end
group :development, :test, :danger do
gem 'gitlab-dangerfiles', '~> 2.3.0', require: false
gem 'gitlab-dangerfiles', '~> 2.3.1', require: false
end
group :development, :test, :coverage do

View File

@@ -458,7 +458,7 @@ GEM
terminal-table (~> 1.5, >= 1.5.1)
gitlab-chronic (0.10.5)
numerizer (~> 0.2)
gitlab-dangerfiles (2.3.0)
gitlab-dangerfiles (2.3.1)
danger (>= 8.3.1)
danger-gitlab (>= 8.0.0)
gitlab-experiment (0.6.4)
@@ -1460,7 +1460,7 @@ DEPENDENCIES
gitaly (~> 14.3.0.pre.rc2)
github-markup (~> 1.7.0)
gitlab-chronic (~> 0.10.5)
gitlab-dangerfiles (~> 2.3.0)
gitlab-dangerfiles (~> 2.3.1)
gitlab-experiment (~> 0.6.4)
gitlab-fog-azure-rm (~> 1.2.0)
gitlab-labkit (~> 0.21.1)

View File

@@ -2285,6 +2285,24 @@
:weight: 1
:idempotent: true
:tags: []
- :name: issues_placement
:worker_name: Issues::PlacementWorker
:feature_category: :issue_tracking
:has_external_dependencies:
:urgency: :high
:resource_boundary: :cpu
:weight: 2
:idempotent: true
:tags: []
- :name: issues_rebalancing
:worker_name: Issues::RebalancingWorker
:feature_category: :issue_tracking
:has_external_dependencies:
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: mailers
:worker_name: ActionMailer::MailDeliveryJob
:feature_category: :not_owned

View File

@@ -14,6 +14,7 @@ module ApplicationWorker
LOGGING_EXTRA_KEY = 'extra'
DEFAULT_DELAY_INTERVAL = 1
SAFE_PUSH_BULK_LIMIT = 1000
included do
set_queue
@@ -135,24 +136,47 @@ module ApplicationWorker
end
def bulk_perform_async(args_list)
Sidekiq::Client.push_bulk('class' => self, 'args' => args_list)
if Feature.enabled?(:sidekiq_push_bulk_in_batches)
in_safe_limit_batches(args_list) do |args_batch, _|
Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch)
end
else
Sidekiq::Client.push_bulk('class' => self, 'args' => args_list)
end
end
def bulk_perform_in(delay, args_list, batch_size: nil, batch_delay: nil)
now = Time.now.to_i
schedule = now + delay.to_i
base_schedule_at = now + delay.to_i
if schedule <= now
raise ArgumentError, _('The schedule time must be in the future!')
if base_schedule_at <= now
raise ArgumentError, 'The schedule time must be in the future!'
end
schedule_at = base_schedule_at
if batch_size && batch_delay
args_list.each_slice(batch_size.to_i).with_index do |args_batch, idx|
batch_schedule = schedule + idx * batch_delay.to_i
Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch, 'at' => batch_schedule)
batch_size = batch_size.to_i
batch_delay = batch_delay.to_i
raise ArgumentError, 'batch_size should be greater than 0' unless batch_size > 0
raise ArgumentError, 'batch_delay should be greater than 0' unless batch_delay > 0
# build an array of schedules corresponding to each item in `args_list`
bulk_schedule_at = Array.new(args_list.size) do |index|
batch_number = index / batch_size
base_schedule_at + (batch_number * batch_delay)
end
schedule_at = bulk_schedule_at
end
if Feature.enabled?(:sidekiq_push_bulk_in_batches)
in_safe_limit_batches(args_list, schedule_at) do |args_batch, schedule_at_for_batch|
Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch, 'at' => schedule_at_for_batch)
end
else
Sidekiq::Client.push_bulk('class' => self, 'args' => args_list, 'at' => schedule)
Sidekiq::Client.push_bulk('class' => self, 'args' => args_list, 'at' => schedule_at)
end
end
@@ -161,5 +185,34 @@
def delay_interval
DEFAULT_DELAY_INTERVAL.seconds
end
private
def in_safe_limit_batches(args_list, schedule_at = nil, safe_limit = SAFE_PUSH_BULK_LIMIT)
# `schedule_at` could be one of
# - nil.
# - a single Numeric that represents time, like `30.minutes.from_now.to_i`.
# - an array, where each element is a Numeric that represents time.
#   Each element in this array corresponds to the time at which
#   the job in `args_list` at the same index needs to be scheduled.
# In the case where `schedule_at` is an array of Numeric, it needs to be sliced
# in the same manner as the `args_list`, with each slice containing `safe_limit`
# number of elements.
schedule_at = schedule_at.each_slice(safe_limit).to_a if schedule_at.is_a?(Array)
args_list.each_slice(safe_limit).with_index.flat_map do |args_batch, index|
schedule_at_for_batch = process_schedule_at_for_batch(schedule_at, index)
yield(args_batch, schedule_at_for_batch)
end
end
def process_schedule_at_for_batch(schedule_at, index)
return unless schedule_at
return schedule_at[index] if schedule_at.is_a?(Array) && schedule_at.all?(Array)
schedule_at
end
end
end
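A worked sketch (not part of the diff) of the per-batch scheduling math in `bulk_perform_in` above, using plain Ruby and illustrative values:

# Five jobs, batches of two, each batch delayed a further 10 minutes.
now              = Time.now.to_i
base_schedule_at = now + 60                # delay of 1 minute
batch_size       = 2
batch_delay      = 600                     # 10 minutes, in seconds
args_list        = [['Foo', [1]], ['Foo', [2]], ['Foo', [3]], ['Foo', [4]], ['Foo', [5]]]
bulk_schedule_at = Array.new(args_list.size) do |index|
  batch_number = index / batch_size        # 0, 0, 1, 1, 2
  base_schedule_at + (batch_number * batch_delay)
end
# Jobs 0-1 are scheduled at the base time, jobs 2-3 at base + 600s, and
# job 4 at base + 1200s — the per-batch 'at' values asserted in the specs below.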

View File

@@ -1,5 +1,8 @@
# frozen_string_literal: true
# TODO: remove this worker and its queue definition from all_queues after Issues::PlacementWorker is deployed.
# We keep it around for one release in case some jobs are still scheduled in the old queue, so the worker
# stays available to finish those. All new jobs are queued into the new queue.
class IssuePlacementWorker
include ApplicationWorker

View File

@@ -1,5 +1,8 @@
# frozen_string_literal: true
# TODO: remove this worker and its queue definition from all_queues after Issues::RebalancingWorker is released.
# We keep it around for one release in case some jobs are still scheduled in the old queue, so the worker
# stays available to finish those. All new jobs are queued into the new queue.
class IssueRebalancingWorker
include ApplicationWorker

View File

@@ -0,0 +1,67 @@
# frozen_string_literal: true
module Issues
class PlacementWorker
include ApplicationWorker
data_consistency :always
sidekiq_options retry: 3
idempotent!
deduplicate :until_executed, including_scheduled: true
feature_category :issue_tracking
urgency :high
worker_resource_boundary :cpu
weight 2
# Move at most 100 issues at a time
QUERY_LIMIT = 100
# rubocop: disable CodeReuse/ActiveRecord
def perform(issue_id, project_id = nil)
issue = find_issue(issue_id, project_id)
return unless issue
# Temporarily disable moving null elements because of performance problems
# For more information check https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4321
return if issue.blocked_for_repositioning?
# Move the oldest 100 unpositioned items to the end.
# This is to deal with out-of-order execution of the worker,
# while preserving creation order.
to_place = Issue
.relative_positioning_query_base(issue)
.with_null_relative_position
.order({ created_at: :asc }, { id: :asc })
.limit(QUERY_LIMIT + 1)
.to_a
leftover = to_place.pop if to_place.count > QUERY_LIMIT
Issue.move_nulls_to_end(to_place)
Issues::BaseService.new(project: nil).rebalance_if_needed(to_place.max_by(&:relative_position))
Issues::PlacementWorker.perform_async(nil, leftover.project_id) if leftover.present?
rescue RelativePositioning::NoSpaceLeft => e
Gitlab::ErrorTracking.log_exception(e, issue_id: issue_id, project_id: project_id)
Issues::RebalancingWorker.perform_async(nil, *root_namespace_id_to_rebalance(issue, project_id))
end
def find_issue(issue_id, project_id)
return Issue.id_in(issue_id).take if issue_id
project = Project.id_in(project_id).take
return unless project
project.issues.take
end
# rubocop: enable CodeReuse/ActiveRecord
private
def root_namespace_id_to_rebalance(issue, project_id)
project_id = project_id.presence || issue.project_id
Project.find(project_id)&.self_or_root_group_ids
end
end
end
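A minimal usage sketch (not part of the diff) of the two argument shapes this worker accepts, as exercised by the specs further down; `issue` and `project` are assumed to be persisted records:

# Place unpositioned issues relative to a given issue...
Issues::PlacementWorker.perform_async(issue.id)
# ...or place unpositioned issues within a given project (first argument unused).
Issues::PlacementWorker.perform_async(nil, project.id)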

View File

@@ -0,0 +1,51 @@
# frozen_string_literal: true
module Issues
class RebalancingWorker
include ApplicationWorker
data_consistency :always
sidekiq_options retry: 3
idempotent!
urgency :low
feature_category :issue_tracking
deduplicate :until_executed, including_scheduled: true
def perform(ignore = nil, project_id = nil, root_namespace_id = nil)
# at most one of the project_id and root_namespace_id params may be non-nil; the worker is a no-op when both are nil
raise ArgumentError, "Expected only one of the params project_id: #{project_id} and root_namespace_id: #{root_namespace_id}" if project_id && root_namespace_id
return if project_id.nil? && root_namespace_id.nil?
# pull the collection of projects to be rebalanced: either the single project, when its namespace is not
# a group (i.e. a user namespace), or all projects under the root namespace. This also keeps the worker
# backward compatible with the previous version, where a project_id was passed as the param.
projects_to_rebalance = projects_collection(project_id, root_namespace_id)
# something might have happened with the namespace between scheduling the worker and actually running it,
# maybe it was removed.
if projects_to_rebalance.blank?
Gitlab::ErrorTracking.log_exception(
ArgumentError.new("Projects to be rebalanced not found for arguments: project_id #{project_id}, root_namespace_id: #{root_namespace_id}"),
{ project_id: project_id, root_namespace_id: root_namespace_id })
return
end
Issues::RelativePositionRebalancingService.new(projects_to_rebalance).execute
rescue Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances => e
Gitlab::ErrorTracking.log_exception(e, root_namespace_id: root_namespace_id, project_id: project_id)
end
private
def projects_collection(project_id, root_namespace_id)
# we receive either project_id (from the older version, or when the project belongs to a user namespace
# rather than a group) or root_namespace_id (the newer version), never both.
return Project.id_in([project_id]) if project_id
Namespace.find_by_id(root_namespace_id)&.all_projects
end
end
end
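A minimal usage sketch (not part of the diff) of the argument shapes covered by the specs below; `project` and `root_group` are assumed to be persisted records:

# Older shape: rebalance a single project (user-namespace case).
Issues::RebalancingWorker.perform_async(nil, project.id)
# Newer shape: rebalance every project under a root namespace.
Issues::RebalancingWorker.perform_async(nil, nil, root_group.id)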

View File

@@ -0,0 +1,8 @@
---
name: sidekiq_push_bulk_in_batches
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/72263
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/343740
milestone: '14.5'
type: development
group: group::access
default_enabled: false

View File

@@ -213,6 +213,10 @@
- 2
- - issue_rebalancing
- 1
- - issues_placement
- 2
- - issues_rebalancing
- 1
- - iterations
- 1
- - jira_connect

View File

@@ -811,10 +811,6 @@ the note content.
Regardless of the tag names, the relative order of the reference tags determines the rendered
numbering.
Reference tags can use letters and other characters. Avoid using lowercase `w` or an underscore
(`_`) in footnote tag names until [this bug](https://gitlab.com/gitlab-org/gitlab/-/issues/24423) is
resolved.
<!--
Do not edit the following codeblock. It uses HTML to skip the Vale ReferenceLinks test.
-->

View File

@@ -212,20 +212,21 @@ requests that modify locked files. Unlock the file to allow changes.
To lock a file:
1. Open the file or directory in GitLab.
1. Click the **Lock** button, located near the Web IDE button.
1. On the top right, above the file, select **Lock**.
1. On the confirmation dialog box, select **OK**.
![Locking file](img/file_lock.png)
If you do not have permission to lock the file, the button is not enabled.
An **Unlock** button is displayed if the file is already locked, and
is disabled if you do not have permission to unlock the file.
If you did not lock the file, hovering your cursor over the button shows
who locked the file.
To view the user who locked the file (if it was not you), hover over the button.
### View and remove existing locks
The **Locked Files** page, accessed from the **Project > Repository** left menu, lists
all file and directory locks. Locks can be removed by their author, or any user
with the [Maintainer role](../permissions.md) and above.
To view and remove file locks:
1. On the top bar, select **Menu > Projects** and find your project.
1. On the left sidebar, select **Repository > Locked Files**.
This list shows all the files locked either through LFS or GitLab UI.
Locks can be removed by their author, or any user
with at least the [Maintainer role](../permissions.md).

Binary file not shown (img/file_lock.png, 20 KiB, removed)

View File

@@ -67,7 +67,8 @@ Usage: rake "gitlab:gitaly:install[/installation/dir,/storage/path]")
env["BUNDLE_DEPLOYMENT"] = 'false'
end
Gitlab::Popen.popen([make_cmd, 'all', 'git'], nil, env)
output, status = Gitlab::Popen.popen([make_cmd, 'all', 'git'], nil, env)
raise "Gitaly failed to compile: #{output}" unless status&.zero?
end
end
end

View File

@@ -34237,9 +34237,6 @@ msgstr ""
msgid "The same shared runner executes code from multiple projects, unless you configure autoscaling with %{link} set to 1 (which it is on GitLab.com)."
msgstr ""
msgid "The schedule time must be in the future!"
msgstr ""
msgid "The snippet can be accessed without any authentication."
msgstr ""

View File

@@ -9,3 +9,10 @@ module API
endpoint: ENV['CI_API_V4_URL'] || 'https://gitlab.com/api/v4'
}.freeze
end
module Host
DEFAULT_OPTIONS = {
instance_base_url: ENV['CI_SERVER_URL'],
mr_id: ENV['CI_MERGE_REQUEST_ID']
}.freeze
end

scripts/failed_tests.rb Executable file
View File

@@ -0,0 +1,122 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
require 'optparse'
require 'fileutils'
require 'uri'
require 'json'
require 'set'
class FailedTests
def initialize(options)
@filename = options.delete(:previous_tests_report_path)
@output_directory = options.delete(:output_directory)
@rspec_pg_regex = options.delete(:rspec_pg_regex)
@rspec_ee_pg_regex = options.delete(:rspec_ee_pg_regex)
end
def output_failed_test_files
create_output_dir
failed_files_for_suite_collection.each do |suite_collection_name, suite_collection_files|
failed_test_files = suite_collection_files.map { |filepath| filepath.delete_prefix('./') }.join(' ')
output_file = File.join(output_directory, "#{suite_collection_name}_failed_files.txt")
File.open(output_file, 'w') do |file|
file.write(failed_test_files)
end
end
end
def failed_files_for_suite_collection
suite_map.each_with_object(Hash.new { |h, k| h[k] = Set.new }) do |(suite_collection_name, suite_collection_regex), hash|
failed_suites.each do |suite|
hash[suite_collection_name].merge(failed_files(suite)) if suite['name'] =~ suite_collection_regex
end
end
end
def suite_map
@suite_map ||= {
rspec: rspec_pg_regex,
rspec_ee: rspec_ee_pg_regex,
jest: /jest/
}
end
private
attr_reader :filename, :output_directory, :rspec_pg_regex, :rspec_ee_pg_regex
def file_contents
@file_contents ||= begin
File.read(filename)
rescue Errno::ENOENT
'{}'
end
end
def file_contents_as_json
@file_contents_as_json ||= begin
JSON.parse(file_contents)
rescue JSON::ParserError
{}
end
end
def failed_suites
return [] unless file_contents_as_json['suites']
file_contents_as_json['suites'].select { |suite| suite['failed_count'] > 0 }
end
def failed_files(suite)
return [] unless suite
suite['test_cases'].each_with_object([]) do |failure_hash, failed_cases|
failed_cases << failure_hash['file'] if failure_hash['status'] == 'failed'
end
end
def create_output_dir
return if File.directory?(output_directory)
puts 'Creating output directory...'
FileUtils.mkdir_p(output_directory)
end
end
if $0 == __FILE__
options = {
previous_tests_report_path: 'test_results/previous/test_reports.json',
output_directory: 'tmp/previous_failed_tests/',
rspec_pg_regex: /rspec .+ pg12( .+)?/,
rspec_ee_pg_regex: /rspec-ee .+ pg12( .+)?/
}
OptionParser.new do |opts|
opts.on("-p", "--previous-tests-report-path PREVIOUS_TESTS_REPORT_PATH", String, "Path of the file listing previous test failures") do |value|
options[:previous_tests_report_path] = value
end
opts.on("-o", "--output-directory OUTPUT_DIRECTORY", String, "Output directory for failed test files") do |value|
options[:output_directory] = value
end
opts.on("--rspec-pg-regex RSPEC_PG_REGEX", Regexp, "Regex to use when finding matching RSpec jobs") do |value|
options[:rspec_pg_regex] = value
end
opts.on("--rspec-ee-pg-regex RSPEC_EE_PG_REGEX", Regexp, "Regex to use when finding matching RSpec EE jobs") do |value|
options[:rspec_ee_pg_regex] = value
end
opts.on("-h", "--help", "Prints this help") do
puts opts
exit
end
end.parse!
FailedTests.new(options).output_failed_test_files
end
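A minimal sketch (not part of the diff) of driving the class directly from Ruby rather than through the CLI, reusing the script's own defaults:

require_relative 'scripts/failed_tests'
FailedTests.new(
  previous_tests_report_path: 'test_results/previous/test_reports.json',
  output_directory: 'tmp/previous_failed_tests/',
  rspec_pg_regex: /rspec .+ pg12( .+)?/,
  rspec_ee_pg_regex: /rspec-ee .+ pg12( .+)?/
).output_failed_test_files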

View File

@@ -0,0 +1,153 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
require 'optparse'
require 'time'
require 'fileutils'
require 'uri'
require 'cgi'
require 'net/http'
require 'json'
require_relative 'api/default_options'
# Request list of pipelines for MR
# https://gitlab.com/api/v4/projects/gitlab-org%2Fgitlab/merge_requests/69053/pipelines
# Find latest failed pipeline
# Retrieve list of failed builds for test stage in pipeline
# https://gitlab.com/api/v4/projects/gitlab-org%2Fgitlab/pipelines/363788864/jobs/?scope=failed
# Retrieve test reports for these builds
# https://gitlab.com/gitlab-org/gitlab/-/pipelines/363788864/tests/suite.json?build_ids[]=1555608749
# Push into expected format for failed tests
class PipelineTestReportBuilder
def initialize(options)
@project = options.delete(:project)
@mr_id = options.delete(:mr_id) || Host::DEFAULT_OPTIONS[:mr_id]
@instance_base_url = options.delete(:instance_base_url) || Host::DEFAULT_OPTIONS[:instance_base_url]
@output_file_path = options.delete(:output_file_path)
end
def test_report_for_latest_pipeline
build_test_report_json_for_pipeline(previous_pipeline)
end
def execute
if output_file_path
FileUtils.mkdir_p(File.dirname(output_file_path))
end
File.open(output_file_path, 'w') do |file|
file.write(test_report_for_latest_pipeline)
end
end
private
attr_reader :project, :mr_id, :instance_base_url, :output_file_path
def project_api_base_url
"#{instance_base_url}/api/v4/projects/#{CGI.escape(project)}"
end
def project_base_url
"#{instance_base_url}/#{project}"
end
def previous_pipeline
# Top of the list will always be the current pipeline
# Second from top will be the previous pipeline
pipelines_for_mr.sort_by { |a| -Time.parse(a['created_at']).to_i }[1]
end
def pipelines_for_mr
fetch("#{project_api_base_url}/merge_requests/#{mr_id}/pipelines")
end
def failed_builds_for_pipeline(pipeline_id)
fetch("#{project_api_base_url}/pipelines/#{pipeline_id}/jobs?scope=failed&per_page=100")
end
# Method uses the test suite endpoint to gather test results for a particular build.
# Here we request individual builds, even though it is possible to supply multiple build IDs.
# The reason is that the job context and name can be lost when requesting multiple builds.
# For more info, see: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69053#note_709939709
def test_report_for_build(pipeline_id, build_id)
fetch("#{project_base_url}/-/pipelines/#{pipeline_id}/tests/suite.json?build_ids[]=#{build_id}")
end
def build_test_report_json_for_pipeline(pipeline)
# empty file if no previous failed pipeline
return {}.to_json if pipeline.nil? || pipeline['status'] != 'failed'
test_report = {}
puts "Discovered last failed pipeline (#{pipeline['id']}) for MR!#{mr_id}"
failed_builds_for_test_stage = failed_builds_for_pipeline(pipeline['id']).select do |failed_build|
failed_build['stage'] == 'test'
end
puts "#{failed_builds_for_test_stage.length} failed builds in test stage found..."
if failed_builds_for_test_stage.any?
test_report['suites'] ||= []
failed_builds_for_test_stage.each do |failed_build|
test_report['suites'] << test_report_for_build(pipeline['id'], failed_build['id'])
end
end
test_report.to_json
end
def fetch(uri_str)
uri = URI(uri_str)
puts "URL: #{uri}"
request = Net::HTTP::Get.new(uri)
body = ''
Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
http.request(request) do |response|
case response
when Net::HTTPSuccess
body = response.read_body
else
raise "Unexpected response: #{response.value}"
end
end
end
JSON.parse(body)
end
end
if $0 == __FILE__
options = Host::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
opts.on("-p", "--project PROJECT", String, "Project where to find the merge request(defaults to $CI_PROJECT_ID)") do |value|
options[:project] = value
end
opts.on("-m", "--mr-id MR_ID", String, "A merge request ID") do |value|
options[:mr_id] = value
end
opts.on("-i", "--instance-base-url INSTANCE_BASE_URL", String, "URL of the instance where project and merge request resides") do |value|
options[:instance_base_url] = value
end
opts.on("-o", "--output-file-path OUTPUT_PATH", String, "A path for output file") do |value|
options[:output_file_path] = value
end
opts.on("-h", "--help", "Prints this help") do
puts opts
exit
end
end.parse!
PipelineTestReportBuilder.new(options).execute
end
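A minimal sketch (not part of the diff) of invoking the builder from Ruby; the MR ID here is hypothetical and the output path mirrors the one used by `retrieve_previous_failed_tests` below:

require_relative 'scripts/pipeline_test_report_builder'
PipelineTestReportBuilder.new(
  project: 'gitlab-org/gitlab',
  mr_id: '999',                            # hypothetical MR ID
  instance_base_url: 'https://gitlab.com',
  output_file_path: 'test_results/previous/test_reports.json'
).execute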

View File

@@ -89,6 +89,22 @@ function crystalball_rspec_data_exists() {
compgen -G "crystalball/rspec*.yml" >/dev/null
}
function retrieve_previous_failed_tests() {
local directory_for_output_reports="${1}"
local rspec_pg_regex="${2}"
local rspec_ee_pg_regex="${3}"
local pipeline_report_path="test_results/previous/test_reports.json"
local project_path="gitlab-org/gitlab"
echo 'Attempting to build pipeline test report...'
scripts/pipeline_test_report_builder.rb --instance-base-url "https://gitlab.com" --project "${project_path}" --mr-id "${CI_MERGE_REQUEST_IID}" --output-file-path "${pipeline_report_path}"
echo 'Generating failed tests lists...'
scripts/failed_tests.rb --previous-tests-report-path "${pipeline_report_path}" --output-directory "${directory_for_output_reports}" --rspec-pg-regex "${rspec_pg_regex}" --rspec-ee-pg-regex "${rspec_ee_pg_regex}"
}
function rspec_simple_job() {
local rspec_opts="${1}"
@@ -172,6 +188,25 @@ function rspec_paralellized_job() {
date
}
function rspec_rerun_previous_failed_tests() {
local test_file_count_threshold=${RSPEC_PREVIOUS_FAILED_TEST_FILE_COUNT_THRESHOLD:-10}
local matching_tests_file=${1}
local rspec_opts=${2}
local test_files="$(cat "${matching_tests_file}")"
local test_file_count=$(wc -w "${matching_tests_file}" | awk '{print $1}')
if [[ "${test_file_count}" -gt "${test_file_count_threshold}" ]]; then
echo "This job is intentionally failed because there are more than ${test_file_count_threshold} test files to rerun."
exit 1
fi
if [[ -n $test_files ]]; then
rspec_simple_job "${test_files}"
else
echo "No failed test files to rerun"
fi
}
function rspec_fail_fast() {
local test_file_count_threshold=${RSPEC_FAIL_FAST_TEST_FILE_COUNT_THRESHOLD:-10}
local matching_tests_file=${1}

spec/fixtures/scripts/test_report.json vendored Normal file
View File

@@ -0,0 +1,36 @@
{
"suites": [
{
"name": "rspec unit pg12",
"total_time": 975.6635620000018,
"total_count": 3811,
"success_count": 3800,
"failed_count": 1,
"skipped_count": 10,
"error_count": 0,
"suite_error": null,
"test_cases": [
{
"status": "failed",
"name": "Note associations is expected not to belong to project required: ",
"classname": "spec.models.note_spec",
"file": "./spec/models/note_spec.rb",
"execution_time": 0.209091,
"system_output": "Failure/Error: it { is_expected.not_to belong_to(:project) }\n Did not expect Note to have a belongs_to association called project\n./spec/models/note_spec.rb:9:in `block (3 levels) in <top (required)>'\n./spec/spec_helper.rb:392:in `block (3 levels) in <top (required)>'\n./spec/support/sidekiq_middleware.rb:9:in `with_sidekiq_server_middleware'\n./spec/spec_helper.rb:383:in `block (2 levels) in <top (required)>'\n./spec/spec_helper.rb:379:in `block (3 levels) in <top (required)>'\n./lib/gitlab/application_context.rb:31:in `with_raw_context'\n./spec/spec_helper.rb:379:in `block (2 levels) in <top (required)>'\n./spec/support/database/prevent_cross_joins.rb:95:in `block (3 levels) in <top (required)>'\n./spec/support/database/prevent_cross_joins.rb:62:in `with_cross_joins_prevented'\n./spec/support/database/prevent_cross_joins.rb:95:in `block (2 levels) in <top (required)>'",
"stack_trace": null,
"recent_failures": null
},
{
"status": "success",
"name": "Gitlab::ImportExport yields the initial tree when importing and exporting it again",
"classname": "spec.lib.gitlab.import_export.import_export_equivalence_spec",
"file": "./spec/lib/gitlab/import_export/import_export_equivalence_spec.rb",
"execution_time": 17.084198,
"system_output": null,
"stack_trace": null,
"recent_failures": null
}
]
}
]
}

View File

@@ -0,0 +1,127 @@
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../scripts/failed_tests'
RSpec.describe FailedTests do
let(:report_file) { 'spec/fixtures/scripts/test_report.json' }
let(:output_directory) { 'tmp/previous_test_results' }
let(:rspec_pg_regex) { /rspec .+ pg12( .+)?/ }
let(:rspec_ee_pg_regex) { /rspec-ee .+ pg12( .+)?/ }
subject { described_class.new(previous_tests_report_path: report_file, output_directory: output_directory, rspec_pg_regex: rspec_pg_regex, rspec_ee_pg_regex: rspec_ee_pg_regex) }
describe '#output_failed_test_files' do
it 'writes the file for the suite' do
expect(File).to receive(:open).with(File.join(output_directory, "rspec_failed_files.txt"), 'w').once
subject.output_failed_test_files
end
end
describe '#failed_files_for_suite_collection' do
let(:failure_path) { 'path/to/fail_file_spec.rb' }
let(:other_failure_path) { 'path/to/fail_file_spec_2.rb' }
let(:file_contents_as_json) do
{
'suites' => [
{
'failed_count' => 1,
'name' => 'rspec unit pg12 10/12',
'test_cases' => [
{
'status' => 'failed',
'file' => failure_path
}
]
},
{
'failed_count' => 1,
'name' => 'rspec-ee unit pg12',
'test_cases' => [
{
'status' => 'failed',
'file' => failure_path
}
]
},
{
'failed_count' => 1,
'name' => 'rspec unit pg13 10/12',
'test_cases' => [
{
'status' => 'failed',
'file' => other_failure_path
}
]
}
]
}
end
before do
allow(subject).to receive(:file_contents_as_json).and_return(file_contents_as_json)
end
it 'returns a list of failed file paths for suite collection' do
result = subject.failed_files_for_suite_collection
expect(result[:rspec].to_a).to match_array(failure_path)
expect(result[:rspec_ee].to_a).to match_array(failure_path)
end
end
describe 'empty report' do
let(:file_content) do
'{}'
end
before do
allow(subject).to receive(:file_contents).and_return(file_content)
end
it 'does not fail for output files' do
subject.output_failed_test_files
end
it 'returns empty results for suite failures' do
result = subject.failed_files_for_suite_collection
expect(result.values.flatten).to be_empty
end
end
describe 'invalid report' do
let(:file_content) do
''
end
before do
allow(subject).to receive(:file_contents).and_return(file_content)
end
it 'does not fail for output files' do
subject.output_failed_test_files
end
it 'returns empty results for suite failures' do
result = subject.failed_files_for_suite_collection
expect(result.values.flatten).to be_empty
end
end
describe 'missing report file' do
let(:report_file) { 'unknownfile.json' }
it 'does not fail for output files' do
subject.output_failed_test_files
end
it 'returns empty results for suite failures' do
result = subject.failed_files_for_suite_collection
expect(result.values.flatten).to be_empty
end
end
end

View File

@@ -0,0 +1,137 @@
# frozen_string_literal: true
require 'spec_helper'
require_relative '../../scripts/pipeline_test_report_builder'
RSpec.describe PipelineTestReportBuilder do
let(:report_file) { 'spec/fixtures/scripts/test_report.json' }
let(:output_file_path) { 'tmp/previous_test_results/output_file.json' }
subject do
described_class.new(
project: 'gitlab-org/gitlab',
mr_id: '999',
instance_base_url: 'https://gitlab.com',
output_file_path: output_file_path
)
end
let(:mr_pipelines) do
[
{
'status' => 'running',
'created_at' => DateTime.now.to_s
},
{
'status' => 'failed',
'created_at' => (DateTime.now - 5).to_s
}
]
end
let(:failed_builds_for_pipeline) do
[
{
'id' => 9999,
'stage' => 'test'
}
]
end
let(:test_report_for_build) do
{
"name": "rspec-ee system pg11 geo",
"failed_count": 41,
"test_cases": [
{
"status": "failed",
"name": "example",
"classname": "ee.spec.features.geo_node_spec",
"file": "./ee/spec/features/geo_node_spec.rb",
"execution_time": 6.324748,
"system_output": {
"__content__": "\n",
"message": "RSpec::Core::MultipleExceptionError",
"type": "RSpec::Core::MultipleExceptionError"
}
}
]
}
end
before do
allow(subject).to receive(:pipelines_for_mr).and_return(mr_pipelines)
allow(subject).to receive(:failed_builds_for_pipeline).and_return(failed_builds_for_pipeline)
allow(subject).to receive(:test_report_for_build).and_return(test_report_for_build)
end
describe '#test_report_for_latest_pipeline' do
context 'no previous pipeline' do
let(:mr_pipelines) { [] }
it 'returns empty hash' do
expect(subject.test_report_for_latest_pipeline).to eq("{}")
end
end
context 'first pipeline scenario' do
let(:mr_pipelines) do
[
{
'status' => 'running',
'created_at' => DateTime.now.to_s
}
]
end
it 'returns empty hash' do
expect(subject.test_report_for_latest_pipeline).to eq("{}")
end
end
context 'no previous failed pipeline' do
let(:mr_pipelines) do
[
{
'status' => 'running',
'created_at' => DateTime.now.to_s
},
{
'status' => 'success',
'created_at' => (DateTime.now - 5).to_s
}
]
end
it 'returns empty hash' do
expect(subject.test_report_for_latest_pipeline).to eq("{}")
end
end
context 'no failed test builds' do
let(:failed_builds_for_pipeline) do
[
{
'id' => 9999,
'stage' => 'prepare'
}
]
end
it 'returns empty hash' do
expect(subject.test_report_for_latest_pipeline).to eq("{}")
end
end
context 'failed pipeline and failed test builds' do
it 'returns populated test list for suites' do
actual = subject.test_report_for_latest_pipeline
expected = {
'suites' => [test_report_for_build]
}.to_json
expect(actual).to eq(expected)
end
end
end
end

View File

@@ -67,21 +67,42 @@ RSpec.describe 'gitlab:gitaly namespace rake task', :silence_stdout do
end
it 'calls gmake in the gitaly directory' do
expect(Gitlab::Popen).to receive(:popen).with(%w[which gmake]).and_return(['/usr/bin/gmake', 0])
expect(Gitlab::Popen).to receive(:popen).with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }).and_return(true)
expect(Gitlab::Popen).to receive(:popen)
.with(%w[which gmake])
.and_return(['/usr/bin/gmake', 0])
expect(Gitlab::Popen).to receive(:popen)
.with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil })
.and_return(['ok', 0])
subject
end
context 'when gmake fails' do
it 'aborts process' do
expect(Gitlab::Popen).to receive(:popen)
.with(%w[which gmake])
.and_return(['/usr/bin/gmake', 0])
expect(Gitlab::Popen).to receive(:popen)
.with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil })
.and_return(['output', 1])
expect { subject }.to raise_error /Gitaly failed to compile: output/
end
end
end
context 'gmake is not available' do
before do
expect(main_object).to receive(:checkout_or_clone_version)
expect(Gitlab::Popen).to receive(:popen).with(%w[which gmake]).and_return(['', 42])
expect(Gitlab::Popen).to receive(:popen)
.with(%w[which gmake])
.and_return(['', 42])
end
it 'calls make in the gitaly directory' do
expect(Gitlab::Popen).to receive(:popen).with(%w[make all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }).and_return(true)
expect(Gitlab::Popen).to receive(:popen)
.with(%w[make all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil })
.and_return(['output', 0])
subject
end
@@ -94,7 +115,9 @@ RSpec.describe 'gitlab:gitaly namespace rake task', :silence_stdout do
end
it 'calls make in the gitaly directory with BUNDLE_DEPLOYMENT and GEM_HOME variables' do
expect(Gitlab::Popen).to receive(:popen).with(command, nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil, "BUNDLE_DEPLOYMENT" => 'false', "GEM_HOME" => Bundler.bundle_path.to_s }).and_return(true)
expect(Gitlab::Popen).to receive(:popen)
.with(command, nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil, "BUNDLE_DEPLOYMENT" => 'false', "GEM_HOME" => Bundler.bundle_path.to_s })
.and_return(['/usr/bin/gmake', 0])
subject
end

View File

@@ -285,48 +285,38 @@ RSpec.describe ApplicationWorker do
end
end
describe '.bulk_perform_async' do
before do
stub_const(worker.name, worker)
end
it 'enqueues jobs in bulk' do
Sidekiq::Testing.fake! do
worker.bulk_perform_async([['Foo', [1]], ['Foo', [2]]])
expect(worker.jobs.count).to eq 2
expect(worker.jobs).to all(include('enqueued_at'))
end
end
end
describe '.bulk_perform_in' do
before do
stub_const(worker.name, worker)
end
context 'when delay is valid' do
it 'correctly schedules jobs' do
Sidekiq::Testing.fake! do
worker.bulk_perform_in(1.minute, [['Foo', [1]], ['Foo', [2]]])
expect(worker.jobs.count).to eq 2
expect(worker.jobs).to all(include('at'))
end
context 'different kinds of push_bulk' do
shared_context 'disable the `sidekiq_push_bulk_in_batches` feature flag' do
before do
stub_feature_flags(sidekiq_push_bulk_in_batches: false)
end
end
context 'when delay is invalid' do
it 'raises an ArgumentError exception' do
expect { worker.bulk_perform_in(-60, [['Foo']]) }
.to raise_error(ArgumentError)
shared_context 'set safe limit beyond the number of jobs to be enqueued' do
before do
stub_const("#{described_class}::SAFE_PUSH_BULK_LIMIT", args.count + 1)
end
end
context 'with batches' do
let(:batch_delay) { 1.minute }
shared_context 'set safe limit below the number of jobs to be enqueued' do
before do
stub_const("#{described_class}::SAFE_PUSH_BULK_LIMIT", 2)
end
end
it 'correctly schedules jobs' do
shared_examples_for 'returns job_id of all enqueued jobs' do
let(:job_id_regex) { /[0-9a-f]{12}/ }
it 'returns job_id of all enqueued jobs' do
job_ids = perform_action
expect(job_ids.count).to eq(args.count)
expect(job_ids).to all(match(job_id_regex))
end
end
shared_examples_for 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit' do
it 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit' do
expect(Sidekiq::Client).to(
receive(:push_bulk).with(hash_including('args' => [['Foo', [1]], ['Foo', [2]]]))
.ordered
@@ -337,28 +327,257 @@ RSpec.describe ApplicationWorker do
.and_call_original)
expect(Sidekiq::Client).to(
receive(:push_bulk).with(hash_including('args' => [['Foo', [5]]]))
.ordered
.and_call_original)
.ordered
.and_call_original)
worker.bulk_perform_in(
1.minute,
[['Foo', [1]], ['Foo', [2]], ['Foo', [3]], ['Foo', [4]], ['Foo', [5]]],
batch_size: 2, batch_delay: batch_delay)
perform_action
expect(worker.jobs.count).to eq 5
expect(worker.jobs[0]['at']).to eq(worker.jobs[1]['at'])
expect(worker.jobs[2]['at']).to eq(worker.jobs[3]['at'])
expect(worker.jobs[2]['at'] - worker.jobs[1]['at']).to eq(batch_delay)
expect(worker.jobs[4]['at'] - worker.jobs[3]['at']).to eq(batch_delay)
expect(worker.jobs.count).to eq args.count
expect(worker.jobs).to all(include('enqueued_at'))
end
end
shared_examples_for 'enqueues jobs in one go' do
it 'enqueues jobs in one go' do
expect(Sidekiq::Client).to(
receive(:push_bulk).with(hash_including('args' => args)).once.and_call_original)
perform_action
expect(worker.jobs.count).to eq args.count
expect(worker.jobs).to all(include('enqueued_at'))
end
end
before do
stub_const(worker.name, worker)
end
let(:args) do
[
['Foo', [1]],
['Foo', [2]],
['Foo', [3]],
['Foo', [4]],
['Foo', [5]]
]
end
describe '.bulk_perform_async' do
shared_examples_for 'does not schedule the jobs for any specific time' do
it 'does not schedule the jobs for any specific time' do
perform_action
expect(worker.jobs).to all(exclude('at'))
end
end
context 'when batch_size is invalid' do
it 'raises an ArgumentError exception' do
expect do
worker.bulk_perform_in(1.minute,
[['Foo']],
batch_size: -1, batch_delay: batch_delay)
end.to raise_error(ArgumentError)
subject(:perform_action) do
worker.bulk_perform_async(args)
end
context 'push_bulk in safe limit batches' do
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'does not schedule the jobs for any specific time'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'does not schedule the jobs for any specific time'
end
context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'does not schedule the jobs for any specific time'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'does not schedule the jobs for any specific time'
end
end
end
end
describe '.bulk_perform_in' do
context 'without batches' do
shared_examples_for 'schedules all the jobs at a specific time' do
it 'schedules all the jobs at a specific time' do
perform_action
worker.jobs.each do |job_detail|
expect(job_detail['at']).to be_within(3.seconds).of(expected_scheduled_at_time)
end
end
end
let(:delay) { 3.minutes }
let(:expected_scheduled_at_time) { Time.current.to_i + delay.to_i }
subject(:perform_action) do
worker.bulk_perform_in(delay, args)
end
context 'when the scheduled time falls in the past' do
let(:delay) { -60 }
it 'raises an ArgumentError exception' do
expect { perform_action }
.to raise_error(ArgumentError)
end
end
context 'push_bulk in safe limit batches' do
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time'
end
context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time'
end
end
end
end
context 'with batches' do
shared_examples_for 'schedules all the jobs at a specific time, per batch' do
it 'schedules all the jobs at a specific time, per batch' do
perform_action
expect(worker.jobs[0]['at']).to eq(worker.jobs[1]['at'])
expect(worker.jobs[2]['at']).to eq(worker.jobs[3]['at'])
expect(worker.jobs[2]['at'] - worker.jobs[1]['at']).to eq(batch_delay)
expect(worker.jobs[4]['at'] - worker.jobs[3]['at']).to eq(batch_delay)
end
end
let(:delay) { 1.minute }
let(:batch_size) { 2 }
let(:batch_delay) { 10.minutes }
subject(:perform_action) do
worker.bulk_perform_in(delay, args, batch_size: batch_size, batch_delay: batch_delay)
end
context 'when the `batch_size` is invalid' do
context 'when `batch_size` is 0' do
let(:batch_size) { 0 }
it 'raises an ArgumentError exception' do
expect { perform_action }
.to raise_error(ArgumentError)
end
end
context 'when `batch_size` is negative' do
let(:batch_size) { -3 }
it 'raises an ArgumentError exception' do
expect { perform_action }
.to raise_error(ArgumentError)
end
end
end
context 'when the `batch_delay` is invalid' do
context 'when `batch_delay` is 0' do
let(:batch_delay) { 0.minutes }
it 'raises an ArgumentError exception' do
expect { perform_action }
.to raise_error(ArgumentError)
end
end
context 'when `batch_delay` is negative' do
let(:batch_delay) { -3.minutes }
it 'raises an ArgumentError exception' do
expect { perform_action }
.to raise_error(ArgumentError)
end
end
end
context 'push_bulk in safe limit batches' do
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time, per batch'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time, per batch'
end
context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
context 'when the number of jobs to be enqueued does not exceed the safe limit' do
include_context 'set safe limit beyond the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time, per batch'
end
context 'when the number of jobs to be enqueued exceeds safe limit' do
include_context 'set safe limit below the number of jobs to be enqueued'
it_behaves_like 'enqueues jobs in one go'
it_behaves_like 'returns job_id of all enqueued jobs'
it_behaves_like 'schedules all the jobs at a specific time, per batch'
end
end
end
end
end

View File

@@ -316,6 +316,8 @@ RSpec.describe 'Every Sidekiq worker' do
'IssuableExportCsvWorker' => 3,
'IssuePlacementWorker' => 3,
'IssueRebalancingWorker' => 3,
'Issues::PlacementWorker' => 3,
'Issues::RebalancingWorker' => 3,
'IterationsUpdateStatusWorker' => 3,
'JiraConnect::SyncBranchWorker' => 3,
'JiraConnect::SyncBuildsWorker' => 3,

View File

@@ -0,0 +1,151 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Issues::PlacementWorker do
describe '#perform' do
let_it_be(:time) { Time.now.utc }
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:author) { create(:user) }
let_it_be(:common_attrs) { { author: author, project: project } }
let_it_be(:unplaced) { common_attrs.merge(relative_position: nil) }
let_it_be_with_reload(:issue) { create(:issue, **unplaced, created_at: time) }
let_it_be_with_reload(:issue_a) { create(:issue, **unplaced, created_at: time - 1.minute) }
let_it_be_with_reload(:issue_b) { create(:issue, **unplaced, created_at: time - 2.minutes) }
let_it_be_with_reload(:issue_c) { create(:issue, **unplaced, created_at: time + 1.minute) }
let_it_be_with_reload(:issue_d) { create(:issue, **unplaced, created_at: time + 2.minutes) }
let_it_be_with_reload(:issue_e) { create(:issue, **common_attrs, relative_position: 10, created_at: time + 1.minute) }
let_it_be_with_reload(:issue_f) { create(:issue, **unplaced, created_at: time + 1.minute) }
let_it_be(:irrelevant) { create(:issue, relative_position: nil, created_at: time) }
shared_examples 'running the issue placement worker' do
let(:issue_id) { issue.id }
let(:project_id) { project.id }
it 'places all issues created at most 5 minutes before this one at the end, most recent last' do
expect { run_worker }.not_to change { irrelevant.reset.relative_position }
expect(project.issues.order_by_relative_position)
.to eq([issue_e, issue_b, issue_a, issue, issue_c, issue_f, issue_d])
expect(project.issues.where(relative_position: nil)).not_to exist
end
it 'schedules rebalancing if needed' do
issue_a.update!(relative_position: RelativePositioning::MAX_POSITION)
expect(IssueRebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
run_worker
end
context 'there are more than QUERY_LIMIT unplaced issues' do
before_all do
# Ensure there are more than N issues in this set
n = described_class::QUERY_LIMIT
create_list(:issue, n - 5, **unplaced)
end
it 'limits the sweep to QUERY_LIMIT records, and reschedules placement' do
expect(Issue).to receive(:move_nulls_to_end)
.with(have_attributes(count: described_class::QUERY_LIMIT))
.and_call_original
expect(described_class).to receive(:perform_async).with(nil, project.id)
run_worker
expect(project.issues.where(relative_position: nil)).to exist
end
it 'is eventually correct' do
prefix = project.issues.where.not(relative_position: nil).order(:relative_position).to_a
moved = project.issues.where.not(id: prefix.map(&:id))
run_worker
expect(project.issues.where(relative_position: nil)).to exist
run_worker
expect(project.issues.where(relative_position: nil)).not_to exist
expect(project.issues.order(:relative_position)).to eq(prefix + moved.order(:created_at, :id))
end
end
context 'we are passed bad IDs' do
let(:issue_id) { non_existing_record_id }
let(:project_id) { non_existing_record_id }
def max_positions_by_project
Issue
.group(:project_id)
.pluck(:project_id, Issue.arel_table[:relative_position].maximum.as('max_relative_position'))
.to_h
end
it 'does not move any issues to the end' do
expect { run_worker }.not_to change { max_positions_by_project }
end
context 'the project_id refers to an empty project' do
let!(:project_id) { create(:project).id }
it 'does not move any issues to the end' do
expect { run_worker }.not_to change { max_positions_by_project }
end
end
end
it 'anticipates the failure to place the issues, and schedules rebalancing' do
allow(Issue).to receive(:move_nulls_to_end) { raise RelativePositioning::NoSpaceLeft }
expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
expect(Gitlab::ErrorTracking)
.to receive(:log_exception)
.with(RelativePositioning::NoSpaceLeft, worker_arguments)
run_worker
end
end
context 'passing an issue ID' do
def run_worker
described_class.new.perform(issue_id)
end
let(:worker_arguments) { { issue_id: issue_id, project_id: nil } }
it_behaves_like 'running the issue placement worker'
context 'when block_issue_repositioning is enabled' do
let(:issue_id) { issue.id }
let(:project_id) { project.id }
before do
stub_feature_flags(block_issue_repositioning: group)
end
it 'does not run repositioning tasks' do
expect { run_worker }.not_to change { issue.reset.relative_position }
end
end
end
context 'passing a project ID' do
def run_worker
described_class.new.perform(nil, project_id)
end
let(:worker_arguments) { { issue_id: nil, project_id: project_id } }
it_behaves_like 'running the issue placement worker'
end
end
it 'has the `until_executed` deduplicate strategy' do
expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
end
end

View File

@@ -0,0 +1,90 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Issues::RebalancingWorker do
describe '#perform' do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, group: group) }
let_it_be(:issue) { create(:issue, project: project) }
shared_examples 'running the worker' do
it 'runs an instance of Issues::RelativePositionRebalancingService' do
service = double(execute: nil)
service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
described_class.new.perform(*arguments)
end
it 'anticipates there being too many concurrent rebalances' do
service = double
service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
allow(service).to receive(:execute).and_raise(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances)
expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances, include(project_id: arguments.second, root_namespace_id: arguments.third))
described_class.new.perform(*arguments)
end
it 'takes no action if the value is nil' do
expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
described_class.new.perform # all arguments are nil
end
end
shared_examples 'safely handles non-existent ids' do
it 'anticipates the inability to find the issue' do
expect(Gitlab::ErrorTracking).to receive(:log_exception).with(ArgumentError, include(project_id: arguments.second, root_namespace_id: arguments.third))
expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
described_class.new.perform(*arguments)
end
end
context 'without root_namespace param' do
it_behaves_like 'running the worker' do
let(:arguments) { [-1, project.id] }
end
it_behaves_like 'safely handles non-existent ids' do
let(:arguments) { [nil, -1] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [-1, project.id] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, -1] }
end
end
context 'with root_namespace param' do
it_behaves_like 'running the worker' do
let(:arguments) { [nil, nil, group.id] }
end
it_behaves_like 'safely handles non-existent ids' do
let(:arguments) { [nil, nil, -1] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, nil, group.id] }
end
include_examples 'an idempotent worker' do
let(:job_args) { [nil, nil, -1] }
end
end
end
it 'has the `until_executed` deduplicate strategy' do
expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
end
end