From 82c63f420c7fc2229a44aa336d656a9712884f83 Mon Sep 17 00:00:00 2001 From: GitLab Bot Date: Mon, 25 Oct 2021 15:12:11 +0000 Subject: [PATCH] Add latest changes from gitlab-org/gitlab@master --- .gitignore | 1 + .gitlab/ci/rails.gitlab-ci.yml | 19 + .gitlab/ci/rules.gitlab-ci.yml | 12 + .gitlab/ci/setup.gitlab-ci.yml | 17 + .rubocop.yml | 2 + Gemfile | 2 +- Gemfile.lock | 4 +- app/workers/all_queues.yml | 18 + app/workers/concerns/application_worker.rb | 69 +++- app/workers/issue_placement_worker.rb | 3 + app/workers/issue_rebalancing_worker.rb | 3 + app/workers/issues/placement_worker.rb | 67 ++++ app/workers/issues/rebalancing_worker.rb | 51 +++ .../sidekiq_push_bulk_in_batches.yml | 8 + config/sidekiq_queues.yml | 4 + doc/user/markdown.md | 4 - doc/user/project/file_lock.md | 21 +- doc/user/project/img/file_lock.png | Bin 20461 -> 0 bytes lib/tasks/gitlab/gitaly.rake | 3 +- locale/gitlab.pot | 3 - scripts/api/default_options.rb | 7 + scripts/failed_tests.rb | 122 +++++++ scripts/pipeline_test_report_builder.rb | 153 +++++++++ scripts/rspec_helpers.sh | 35 ++ spec/fixtures/scripts/test_report.json | 36 ++ spec/scripts/failed_tests_spec.rb | 127 +++++++ .../pipeline_test_report_builder_spec.rb | 137 ++++++++ spec/tasks/gitlab/gitaly_rake_spec.rb | 33 +- .../concerns/application_worker_spec.rb | 325 +++++++++++++++--- spec/workers/every_sidekiq_worker_spec.rb | 2 + spec/workers/issues/placement_worker_spec.rb | 151 ++++++++ .../workers/issues/rebalancing_worker_spec.rb | 90 +++++ 32 files changed, 1442 insertions(+), 87 deletions(-) create mode 100644 app/workers/issues/placement_worker.rb create mode 100644 app/workers/issues/rebalancing_worker.rb create mode 100644 config/feature_flags/development/sidekiq_push_bulk_in_batches.yml delete mode 100644 doc/user/project/img/file_lock.png create mode 100755 scripts/failed_tests.rb create mode 100755 scripts/pipeline_test_report_builder.rb create mode 100644 spec/fixtures/scripts/test_report.json create mode 100644 spec/scripts/failed_tests_spec.rb create mode 100644 spec/scripts/pipeline_test_report_builder_spec.rb create mode 100644 spec/workers/issues/placement_worker_spec.rb create mode 100644 spec/workers/issues/rebalancing_worker_spec.rb diff --git a/.gitignore b/.gitignore index 5152ef20575..bff82967fc6 100644 --- a/.gitignore +++ b/.gitignore @@ -74,6 +74,7 @@ eslint-report.html /.gitlab_kas_secret /webpack-report/ /crystalball/ +/test_results/ /deprecations/ /knapsack/ /rspec_flaky/ diff --git a/.gitlab/ci/rails.gitlab-ci.yml b/.gitlab/ci/rails.gitlab-ci.yml index e82068092e0..b85a0f6c1e8 100644 --- a/.gitlab/ci/rails.gitlab-ci.yml +++ b/.gitlab/ci/rails.gitlab-ci.yml @@ -885,5 +885,24 @@ fail-pipeline-early: - install_gitlab_gem script: - fail_pipeline_early + +rspec rspec-pg12-rerun-previous-failed-tests: + extends: + - .rspec-base-pg12 + - .rails:rules:rerun-previous-failed-tests + stage: test + needs: ["setup-test-env", "compile-test-assets", "detect-previous-failed-tests"] + script: + - !reference [.base-script, script] + - rspec_rerun_previous_failed_tests tmp/previous_failed_tests/rspec_failed_files.txt + +rspec rspec-ee-pg12-rerun-previous-failed-tests: + extends: + - "rspec rspec-pg12-rerun-previous-failed-tests" + - .rspec-ee-base-pg12 + script: + - !reference [.base-script, script] + - rspec_rerun_previous_failed_tests tmp/previous_failed_tests/rspec_ee_failed_files.txt + # EE: Canonical MR pipelines ################################################## diff --git a/.gitlab/ci/rules.gitlab-ci.yml 
b/.gitlab/ci/rules.gitlab-ci.yml index 6dc2f23f737..283fd0ddb76 100644 --- a/.gitlab/ci/rules.gitlab-ci.yml +++ b/.gitlab/ci/rules.gitlab-ci.yml @@ -1198,6 +1198,18 @@ - changes: *code-backstage-patterns - <<: *if-merge-request-labels-run-all-rspec +.rails:rules:detect-previous-failed-tests: + rules: + - <<: *if-merge-request-labels-run-all-rspec + - <<: *if-merge-request + changes: *code-backstage-patterns + +.rails:rules:rerun-previous-failed-tests: + rules: + - <<: *if-merge-request-labels-run-all-rspec + - <<: *if-merge-request + changes: *code-backstage-patterns + .rails:rules:rspec-foss-impact: rules: - <<: *if-not-ee diff --git a/.gitlab/ci/setup.gitlab-ci.yml b/.gitlab/ci/setup.gitlab-ci.yml index eb7a5afad3d..d2ad9d99d65 100644 --- a/.gitlab/ci/setup.gitlab-ci.yml +++ b/.gitlab/ci/setup.gitlab-ci.yml @@ -102,6 +102,23 @@ detect-tests as-if-foss: before_script: - '[ "$FOSS_ONLY" = "1" ] && rm -rf ee/ qa/spec/ee/ qa/qa/specs/features/ee/ qa/qa/ee/ qa/qa/ee.rb' +detect-previous-failed-tests: + extends: + - .detect-test-base + - .rails:rules:detect-previous-failed-tests + variables: + PREVIOUS_FAILED_TESTS_DIR: tmp/previous_failed_tests/ + RSPEC_PG_REGEX: /rspec .+ pg12( .+)?/ + RSPEC_EE_PG_REGEX: /rspec-ee .+ pg12( .+)?/ + script: + - source ./scripts/utils.sh + - source ./scripts/rspec_helpers.sh + - retrieve_previous_failed_tests ${PREVIOUS_FAILED_TESTS_DIR} "${RSPEC_PG_REGEX}" "${RSPEC_EE_PG_REGEX}" + artifacts: + expire_in: 7d + paths: + - ${PREVIOUS_FAILED_TESTS_DIR} + add-jh-folder: extends: .setup:rules:add-jh-folder image: ${GITLAB_DEPENDENCY_PROXY}alpine:edge diff --git a/.rubocop.yml b/.rubocop.yml index c4f99914174..59ac81bb4de 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -113,6 +113,7 @@ Naming/FileName: - 'config.ru' - 'config/**/*' - 'ee/config/**/*' + - 'jh/config/**/*' - 'db/**/*' - 'ee/db/**/*' - 'ee/elastic/migrate/*' @@ -124,6 +125,7 @@ Naming/FileName: - 'spec/**/*' - 'tooling/bin/*' - 'ee/spec/**/*' + - 'jh/spec/**/*' - 'qa/bin/*' - 'qa/spec/**/*' - 'qa/qa/specs/**/*' diff --git a/Gemfile b/Gemfile index a2ed0fa9f26..11bef61d487 100644 --- a/Gemfile +++ b/Gemfile @@ -398,7 +398,7 @@ group :development, :test do end group :development, :test, :danger do - gem 'gitlab-dangerfiles', '~> 2.3.0', require: false + gem 'gitlab-dangerfiles', '~> 2.3.1', require: false end group :development, :test, :coverage do diff --git a/Gemfile.lock b/Gemfile.lock index be5272e3e5d..40b75863c65 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -458,7 +458,7 @@ GEM terminal-table (~> 1.5, >= 1.5.1) gitlab-chronic (0.10.5) numerizer (~> 0.2) - gitlab-dangerfiles (2.3.0) + gitlab-dangerfiles (2.3.1) danger (>= 8.3.1) danger-gitlab (>= 8.0.0) gitlab-experiment (0.6.4) @@ -1460,7 +1460,7 @@ DEPENDENCIES gitaly (~> 14.3.0.pre.rc2) github-markup (~> 1.7.0) gitlab-chronic (~> 0.10.5) - gitlab-dangerfiles (~> 2.3.0) + gitlab-dangerfiles (~> 2.3.1) gitlab-experiment (~> 0.6.4) gitlab-fog-azure-rm (~> 1.2.0) gitlab-labkit (~> 0.21.1) diff --git a/app/workers/all_queues.yml b/app/workers/all_queues.yml index 41db45ec708..a1ecd8ccf25 100644 --- a/app/workers/all_queues.yml +++ b/app/workers/all_queues.yml @@ -2285,6 +2285,24 @@ :weight: 1 :idempotent: true :tags: [] +- :name: issues_placement + :worker_name: Issues::PlacementWorker + :feature_category: :issue_tracking + :has_external_dependencies: + :urgency: :high + :resource_boundary: :cpu + :weight: 2 + :idempotent: true + :tags: [] +- :name: issues_rebalancing + :worker_name: Issues::RebalancingWorker + :feature_category: :issue_tracking + 
:has_external_dependencies:
+  :urgency: :low
+  :resource_boundary: :unknown
+  :weight: 1
+  :idempotent: true
+  :tags: []
 - :name: mailers
   :worker_name: ActionMailer::MailDeliveryJob
   :feature_category: :not_owned
diff --git a/app/workers/concerns/application_worker.rb b/app/workers/concerns/application_worker.rb
index 3399a4f9b57..caf3fcf01fc 100644
--- a/app/workers/concerns/application_worker.rb
+++ b/app/workers/concerns/application_worker.rb
@@ -14,6 +14,7 @@ module ApplicationWorker
   LOGGING_EXTRA_KEY = 'extra'
   DEFAULT_DELAY_INTERVAL = 1
+  SAFE_PUSH_BULK_LIMIT = 1000
 
   included do
     set_queue
@@ -135,24 +136,47 @@ module ApplicationWorker
     end
 
     def bulk_perform_async(args_list)
-      Sidekiq::Client.push_bulk('class' => self, 'args' => args_list)
+      if Feature.enabled?(:sidekiq_push_bulk_in_batches)
+        in_safe_limit_batches(args_list) do |args_batch, _|
+          Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch)
+        end
+      else
+        Sidekiq::Client.push_bulk('class' => self, 'args' => args_list)
+      end
     end
 
     def bulk_perform_in(delay, args_list, batch_size: nil, batch_delay: nil)
       now = Time.now.to_i
-      schedule = now + delay.to_i
+      base_schedule_at = now + delay.to_i
 
-      if schedule <= now
-        raise ArgumentError, _('The schedule time must be in the future!')
+      if base_schedule_at <= now
+        raise ArgumentError, 'The schedule time must be in the future!'
       end
 
+      schedule_at = base_schedule_at
+
       if batch_size && batch_delay
-        args_list.each_slice(batch_size.to_i).with_index do |args_batch, idx|
-          batch_schedule = schedule + idx * batch_delay.to_i
-          Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch, 'at' => batch_schedule)
+        batch_size = batch_size.to_i
+        batch_delay = batch_delay.to_i
+
+        raise ArgumentError, 'batch_size should be greater than 0' unless batch_size > 0
+        raise ArgumentError, 'batch_delay should be greater than 0' unless batch_delay > 0
+
+        # build an array of schedules corresponding to each item in `args_list`
+        bulk_schedule_at = Array.new(args_list.size) do |index|
+          batch_number = index / batch_size
+          base_schedule_at + (batch_number * batch_delay)
+        end
+
+        schedule_at = bulk_schedule_at
+      end
+
+      if Feature.enabled?(:sidekiq_push_bulk_in_batches)
+        in_safe_limit_batches(args_list, schedule_at) do |args_batch, schedule_at_for_batch|
+          Sidekiq::Client.push_bulk('class' => self, 'args' => args_batch, 'at' => schedule_at_for_batch)
         end
       else
-        Sidekiq::Client.push_bulk('class' => self, 'args' => args_list, 'at' => schedule)
+        Sidekiq::Client.push_bulk('class' => self, 'args' => args_list, 'at' => schedule_at)
       end
     end
 
@@ -161,5 +185,34 @@ module ApplicationWorker
     def delay_interval
       DEFAULT_DELAY_INTERVAL.seconds
     end
+
+    private
+
+    def in_safe_limit_batches(args_list, schedule_at = nil, safe_limit = SAFE_PUSH_BULK_LIMIT)
+      # `schedule_at` can be one of:
+      # - nil.
+      # - a single Numeric that represents a time, like `30.minutes.from_now.to_i`.
+      # - an array where each element is a Numeric that represents a time; each
+      #   element corresponds to the time at which the job in `args_list` at the
+      #   same index needs to be scheduled.
+      #
+      # When `schedule_at` is an array of Numerics, it needs to be sliced in the
+      # same manner as `args_list`, with each slice containing `safe_limit`
+      # elements.
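+      #
+      # Illustrative example (hypothetical values): with `safe_limit = 2`,
+      #   args_list   = [['Foo', [1]], ['Foo', [2]], ['Foo', [3]]]
+      #   schedule_at = [t1, t1, t2]
+      # is sliced into args batches [['Foo', [1]], ['Foo', [2]]] and [['Foo', [3]]]
+      # with matching schedule slices [t1, t1] and [t2].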
+      schedule_at = schedule_at.each_slice(safe_limit).to_a if schedule_at.is_a?(Array)
+
+      args_list.each_slice(safe_limit).with_index.flat_map do |args_batch, index|
+        schedule_at_for_batch = process_schedule_at_for_batch(schedule_at, index)
+
+        yield(args_batch, schedule_at_for_batch)
+      end
+    end
+
+    def process_schedule_at_for_batch(schedule_at, index)
+      return unless schedule_at
+      return schedule_at[index] if schedule_at.is_a?(Array) && schedule_at.all?(Array)
+
+      schedule_at
+    end
   end
 end
diff --git a/app/workers/issue_placement_worker.rb b/app/workers/issue_placement_worker.rb
index 22e2a8e95f4..5a66c8d79ea 100644
--- a/app/workers/issue_placement_worker.rb
+++ b/app/workers/issue_placement_worker.rb
@@ -1,5 +1,8 @@
 # frozen_string_literal: true
 
+# TODO: remove this worker and its queue definition from all_queues after Issues::PlacementWorker is deployed.
+# We want to keep it for one release in case some jobs are already scheduled in the old queue, so the worker
+# needs to stay available to finish those. All new jobs will be queued into the new queue.
 class IssuePlacementWorker
   include ApplicationWorker
 
diff --git a/app/workers/issue_rebalancing_worker.rb b/app/workers/issue_rebalancing_worker.rb
index 01984197aae..9c2a6355d2b 100644
--- a/app/workers/issue_rebalancing_worker.rb
+++ b/app/workers/issue_rebalancing_worker.rb
@@ -1,5 +1,8 @@
 # frozen_string_literal: true
 
+# TODO: remove this worker and its queue definition from all_queues after Issues::RebalancingWorker is released.
+# We want to keep it for one release in case some jobs are already scheduled in the old queue, so the worker
+# needs to stay available to finish those. All new jobs will be queued into the new queue.
 class IssueRebalancingWorker
   include ApplicationWorker
 
diff --git a/app/workers/issues/placement_worker.rb b/app/workers/issues/placement_worker.rb
new file mode 100644
index 00000000000..0aa6b21622d
--- /dev/null
+++ b/app/workers/issues/placement_worker.rb
@@ -0,0 +1,67 @@
+# frozen_string_literal: true
+
+module Issues
+  class PlacementWorker
+    include ApplicationWorker
+
+    data_consistency :always
+
+    sidekiq_options retry: 3
+
+    idempotent!
+    deduplicate :until_executed, including_scheduled: true
+    feature_category :issue_tracking
+    urgency :high
+    worker_resource_boundary :cpu
+    weight 2
+
+    # Place at most 100 issues per run.
+    QUERY_LIMIT = 100
+
+    # rubocop: disable CodeReuse/ActiveRecord
+    def perform(issue_id, project_id = nil)
+      issue = find_issue(issue_id, project_id)
+      return unless issue
+
+      # Temporarily disable moving null elements because of performance problems.
+      # For more information, check https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4321
+      return if issue.blocked_for_repositioning?
+
+      # Move the oldest 100 unpositioned items to the end.
+      # This is to deal with out-of-order execution of the worker,
+      # while preserving creation order.
+      to_place = Issue
+        .relative_positioning_query_base(issue)
+        .with_null_relative_position
+        .order({ created_at: :asc }, { id: :asc })
+        .limit(QUERY_LIMIT + 1)
+        .to_a
+
+      leftover = to_place.pop if to_place.count > QUERY_LIMIT
+
+      Issue.move_nulls_to_end(to_place)
+      Issues::BaseService.new(project: nil).rebalance_if_needed(to_place.max_by(&:relative_position))
+      Issues::PlacementWorker.perform_async(nil, leftover.project_id) if leftover.present?
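+      # Illustrative flow (hypothetical sizes): if 101 unpositioned issues are
+      # fetched, the newest one is popped off as `leftover`; this run places the
+      # oldest 100, and the follow-up job scheduled above keeps draining the
+      # backlog in creation order.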
+    rescue RelativePositioning::NoSpaceLeft => e
+      Gitlab::ErrorTracking.log_exception(e, issue_id: issue_id, project_id: project_id)
+      Issues::RebalancingWorker.perform_async(nil, *root_namespace_id_to_rebalance(issue, project_id))
+    end
+
+    def find_issue(issue_id, project_id)
+      return Issue.id_in(issue_id).take if issue_id
+
+      project = Project.id_in(project_id).take
+      return unless project
+
+      project.issues.take
+    end
+    # rubocop: enable CodeReuse/ActiveRecord
+
+    private
+
+    def root_namespace_id_to_rebalance(issue, project_id)
+      project_id = project_id.presence || issue.project_id
+      Project.find(project_id)&.self_or_root_group_ids
+    end
+  end
+end
diff --git a/app/workers/issues/rebalancing_worker.rb b/app/workers/issues/rebalancing_worker.rb
new file mode 100644
index 00000000000..05455800860
--- /dev/null
+++ b/app/workers/issues/rebalancing_worker.rb
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+module Issues
+  class RebalancingWorker
+    include ApplicationWorker
+
+    data_consistency :always
+
+    sidekiq_options retry: 3
+
+    idempotent!
+    urgency :low
+    feature_category :issue_tracking
+    deduplicate :until_executed, including_scheduled: true
+
+    def perform(ignore = nil, project_id = nil, root_namespace_id = nil)
+      # We need exactly one of the project_id and root_namespace_id params to be non-nil.
+      raise ArgumentError, "Expected only one of the params project_id: #{project_id} and root_namespace_id: #{root_namespace_id}" if project_id && root_namespace_id
+      return if project_id.nil? && root_namespace_id.nil?
+
+      # Pull the collection of projects to be rebalanced: either the single project, if its
+      # namespace is not a group (i.e. a user namespace), or all projects under the root
+      # namespace. This also keeps the worker backward compatible with the previous version,
+      # where a project_id was passed as the param.
+      projects_to_rebalance = projects_collection(project_id, root_namespace_id)
+
+      # Something might have happened to the namespace between scheduling the worker and
+      # actually running it; maybe it was removed.
+      if projects_to_rebalance.blank?
+        Gitlab::ErrorTracking.log_exception(
+          ArgumentError.new("Projects to be rebalanced not found for arguments: project_id #{project_id}, root_namespace_id: #{root_namespace_id}"),
+          { project_id: project_id, root_namespace_id: root_namespace_id })
+
+        return
+      end
+
+      Issues::RelativePositionRebalancingService.new(projects_to_rebalance).execute
+    rescue Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances => e
+      Gitlab::ErrorTracking.log_exception(e, root_namespace_id: root_namespace_id, project_id: project_id)
+    end
+
+    private
+
+    def projects_collection(project_id, root_namespace_id)
+      # We can have either a project_id (older version, also used when the project belongs to
+      # a user namespace rather than a group) or a root_namespace_id (newer version), never both.
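+      # For example (hypothetical IDs): perform(nil, 42) rebalances only project 42,
+      # while perform(nil, nil, 9) rebalances every project under the root namespace
+      # with ID 9.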
+      return Project.id_in([project_id]) if project_id
+
+      Namespace.find_by_id(root_namespace_id)&.all_projects
+    end
+  end
+end
diff --git a/config/feature_flags/development/sidekiq_push_bulk_in_batches.yml b/config/feature_flags/development/sidekiq_push_bulk_in_batches.yml
new file mode 100644
index 00000000000..ea4c5253856
--- /dev/null
+++ b/config/feature_flags/development/sidekiq_push_bulk_in_batches.yml
@@ -0,0 +1,8 @@
+---
+name: sidekiq_push_bulk_in_batches
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/72263
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/343740
+milestone: '14.5'
+type: development
+group: group::access
+default_enabled: false
diff --git a/config/sidekiq_queues.yml b/config/sidekiq_queues.yml
index c69e0bf170d..1e43dc9d3c6 100644
--- a/config/sidekiq_queues.yml
+++ b/config/sidekiq_queues.yml
@@ -213,6 +213,10 @@
   - 2
 - - issue_rebalancing
   - 1
+- - issues_placement
+  - 2
+- - issues_rebalancing
+  - 1
 - - iterations
   - 1
 - - jira_connect
diff --git a/doc/user/markdown.md b/doc/user/markdown.md
index e4141799ff7..60ad0b9fcd2 100644
--- a/doc/user/markdown.md
+++ b/doc/user/markdown.md
@@ -811,10 +811,6 @@ the note content.
 
 Regardless of the tag names, the relative order of the reference tags
 determines the rendered numbering.
 
-Reference tags can use letters and other characters. Avoid using lowercase `w` or an underscore
-(`_`) in footnote tag names until [this bug](https://gitlab.com/gitlab-org/gitlab/-/issues/24423) is
-resolved.
-
diff --git a/doc/user/project/file_lock.md b/doc/user/project/file_lock.md
index db8c6f24063..10dcbddac17 100644
--- a/doc/user/project/file_lock.md
+++ b/doc/user/project/file_lock.md
@@ -212,20 +212,21 @@ requests that modify locked files. Unlock the file to allow changes.
 To lock a file:
 
 1. Open the file or directory in GitLab.
-1. Click the **Lock** button, located near the Web IDE button.
+1. On the top right, above the file, select **Lock**.
+1. On the confirmation dialog, select **OK**.
 
-   ![Locking file](img/file_lock.png)
+If you do not have permission to lock the file, the button is not enabled.
 
-An **Unlock** button is displayed if the file is already locked, and
-is disabled if you do not have permission to unlock the file.
-
-If you did not lock the file, hovering your cursor over the button shows
-who locked the file.
+To view the user who locked the file (if it was not you), hover over the button.
 
 ### View and remove existing locks
 
-The **Locked Files**, accessed from **Project > Repository** left menu, lists
-all file and directory locks. Locks can be removed by their author, or any user
-with the [Maintainer role](../permissions.md) and above.
+To view and remove file locks:
+
+1. On the top bar, select **Menu > Projects** and find your project.
+1. On the left sidebar, select **Repository > Locked Files**. This list shows all
+   the files locked through either LFS or the GitLab UI.
+
+Locks can be removed by their author, or any user
+with at least the [Maintainer role](../permissions.md).
diff --git a/doc/user/project/img/file_lock.png b/doc/user/project/img/file_lock.png
deleted file mode 100644
index e881442630bb8f0133b7dfbd3e1d08d44d7062c8..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 20461
[20,461 bytes of base85-encoded binary image data omitted]

diff --git a/scripts/failed_tests.rb b/scripts/failed_tests.rb
new file mode 100755
--- /dev/null
+++ b/scripts/failed_tests.rb
@@ -0,0 +1,122 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'optparse'
+require 'fileutils'
+require 'json'
+require 'set'
+
+class FailedTests
+  def initialize(options)
+    @filename = options.delete(:previous_tests_report_path)
+    @output_directory = options.delete(:output_directory)
+    @rspec_pg_regex = options.delete(:rspec_pg_regex)
+    @rspec_ee_pg_regex = options.delete(:rspec_ee_pg_regex)
+  end
+
+  def output_failed_test_files
+    create_output_dir
+
+    failed_files_for_suite_collection.each do |suite_collection_name, suite_collection_files|
+      failed_test_files = suite_collection_files.to_a.join(' ')
+
+      File.open(File.join(output_directory, "#{suite_collection_name}_failed_files.txt"), 'w') do |file|
+        file.write(failed_test_files)
+      end
+    end
+  end
+
+  def failed_files_for_suite_collection
+    suite_collection_regexs = { rspec: rspec_pg_regex, rspec_ee: rspec_ee_pg_regex }
+
+    suite_collection_regexs.each_with_object(Hash.new { |h, k| h[k] = Set.new }) do |(suite_collection_name, suite_collection_regex), hash|
+      failed_suites.each do |suite|
+        hash[suite_collection_name].merge(failed_files(suite)) if suite['name'] =~ suite_collection_regex
+      end
+    end
+  end
+
+  def file_contents
+    @file_contents ||= begin
+      File.read(filename)
+    rescue Errno::ENOENT
+      '{}'
+    end
+  end
+
+  def file_contents_as_json
+    @file_contents_as_json ||= begin
+      JSON.parse(file_contents)
+    rescue JSON::ParserError
+      {}
+    end
+  end
+
+  private
+
+  attr_reader :filename, :output_directory, :rspec_pg_regex, :rspec_ee_pg_regex
+
+  def failed_suites
+    return [] unless file_contents_as_json['suites']
+
+    file_contents_as_json['suites'].select { |suite| suite['failed_count'] > 0 }
+  end
+
+  def failed_files(suite)
+    return [] unless suite
+
+    suite['test_cases'].each_with_object([]) do |failure_hash, failed_cases|
+      failed_cases << failure_hash['file'] if failure_hash['status'] == 'failed'
+    end
+  end
+
+  def create_output_dir
+    return if File.directory?(output_directory)
+
+    puts 'Creating output directory...'
+    FileUtils.mkdir_p(output_directory)
+  end
+end
+
+if $0 == __FILE__
+  options = {
+    previous_tests_report_path: 'test_results/previous/test_reports.json',
+    output_directory: 'tmp/previous_failed_tests/',
+    rspec_pg_regex: /rspec .+ pg12( .+)?/,
+    rspec_ee_pg_regex: /rspec-ee .+ pg12( .+)?/
+  }
+
+  OptionParser.new do |opts|
+    opts.on("-p", "--previous-tests-report-path PREVIOUS_TESTS_REPORT_PATH", String, "Path of the file listing previous test failures") do |value|
+      options[:previous_tests_report_path] = value
+    end
+
+    opts.on("-o", "--output-directory OUTPUT_DIRECTORY", String, "Output directory for failed test files") do |value|
+      options[:output_directory] = value
+    end
+
+    opts.on("--rspec-pg-regex RSPEC_PG_REGEX", Regexp, "Regex to use when finding matching RSpec jobs") do |value|
+      options[:rspec_pg_regex] = value
+    end
+
+    opts.on("--rspec-ee-pg-regex RSPEC_EE_PG_REGEX", Regexp, "Regex to use when finding matching RSpec EE jobs") do |value|
+      options[:rspec_ee_pg_regex] = value
+    end
+
+    opts.on("-h", "--help", "Prints this help") do
+      puts opts
+      exit
+    end
+  end.parse!
+
+  FailedTests.new(options).output_failed_test_files
+end
diff --git a/scripts/pipeline_test_report_builder.rb b/scripts/pipeline_test_report_builder.rb
new file mode 100755
index 00000000000..56491d40a3e
--- /dev/null
+++ b/scripts/pipeline_test_report_builder.rb
@@ -0,0 +1,153 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'optparse'
+require 'time'
+require 'fileutils'
+require 'uri'
+require 'cgi'
+require 'net/http'
+require 'json'
+require_relative 'api/default_options'
+
+# Request the list of pipelines for the MR:
+# https://gitlab.com/api/v4/projects/gitlab-org%2Fgitlab/merge_requests/69053/pipelines
+# Find the latest failed pipeline.
+# Retrieve the list of failed builds for the test stage in that pipeline:
+# https://gitlab.com/api/v4/projects/gitlab-org%2Fgitlab/pipelines/363788864/jobs/?scope=failed
+# Retrieve the test reports for these builds:
+# https://gitlab.com/gitlab-org/gitlab/-/pipelines/363788864/tests/suite.json?build_ids[]=1555608749
+# Push them into the expected format for failed tests.
+class PipelineTestReportBuilder
+  def initialize(options)
+    @project = options.delete(:project)
+    @mr_id = options.delete(:mr_id) || Host::DEFAULT_OPTIONS[:mr_id]
+    @instance_base_url = options.delete(:instance_base_url) || Host::DEFAULT_OPTIONS[:instance_base_url]
+    @output_file_path = options.delete(:output_file_path)
+  end
+
+  def test_report_for_latest_pipeline
+    build_test_report_json_for_pipeline(previous_pipeline)
+  end
+
+  def execute
+    if output_file_path
+      FileUtils.mkdir_p(File.dirname(output_file_path))
+    end
+
+    File.open(output_file_path, 'w') do |file|
+      file.write(test_report_for_latest_pipeline)
+    end
+  end
+
+  private
+
+  attr_reader :project, :mr_id, :instance_base_url, :output_file_path
+
+  def project_api_base_url
+    "#{instance_base_url}/api/v4/projects/#{CGI.escape(project)}"
+  end
+
+  def project_base_url
+    "#{instance_base_url}/#{project}"
+  end
+
+  def previous_pipeline
+    # Top of the list will always be the current pipeline.
+    # Second from the top will be the previous pipeline.
+    pipelines_for_mr.sort_by { |a| -Time.parse(a['created_at']).to_i }[1]
+  end
+
+  def pipelines_for_mr
+    fetch("#{project_api_base_url}/merge_requests/#{mr_id}/pipelines")
+  end
+
+  def failed_builds_for_pipeline(pipeline_id)
+    fetch("#{project_api_base_url}/pipelines/#{pipeline_id}/jobs?scope=failed&per_page=100")
+  end
+
+  # This method uses the test suite endpoint to gather test results for a particular build.
+  # Here we request individual builds, even though it is possible to supply multiple build IDs.
+  # The reason for this is that it is possible to lose the job context and name when requesting multiple builds.
+  # For more info, please see: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69053#note_709939709
+  def test_report_for_build(pipeline_id, build_id)
+    fetch("#{project_base_url}/-/pipelines/#{pipeline_id}/tests/suite.json?build_ids[]=#{build_id}")
+  end
+
+  def build_test_report_json_for_pipeline(pipeline)
+    # Return an empty JSON object if there is no previous failed pipeline.
+    return {}.to_json if pipeline.nil? || pipeline['status'] != 'failed'
+
+    test_report = {}
+
+    puts "Discovered last failed pipeline (#{pipeline['id']}) for MR!#{mr_id}"
+
+    failed_builds_for_test_stage = failed_builds_for_pipeline(pipeline['id']).select do |failed_build|
+      failed_build['stage'] == 'test'
+    end
+
+    puts "#{failed_builds_for_test_stage.length} failed builds in test stage found..."
+
+    if failed_builds_for_test_stage.any?
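+      # Each report appended below is the parsed `suite.json` payload for one failed
+      # build, so the output mirrors the `suites` array shape of GitLab's test report
+      # JSON (compare spec/fixtures/scripts/test_report.json).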
+      test_report['suites'] ||= []
+
+      failed_builds_for_test_stage.each do |failed_build|
+        test_report['suites'] << test_report_for_build(pipeline['id'], failed_build['id'])
+      end
+    end
+
+    test_report.to_json
+  end
+
+  def fetch(uri_str)
+    uri = URI(uri_str)
+
+    puts "URL: #{uri}"
+
+    request = Net::HTTP::Get.new(uri)
+
+    body = ''
+
+    Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
+      http.request(request) do |response|
+        case response
+        when Net::HTTPSuccess
+          body = response.read_body
+        else
+          raise "Unexpected response: #{response.value}"
+        end
+      end
+    end
+
+    JSON.parse(body)
+  end
+end
+
+if $0 == __FILE__
+  options = Host::DEFAULT_OPTIONS.dup
+
+  OptionParser.new do |opts|
+    opts.on("-p", "--project PROJECT", String, "Project where the merge request can be found (defaults to $CI_PROJECT_ID)") do |value|
+      options[:project] = value
+    end
+
+    opts.on("-m", "--mr-id MR_ID", String, "A merge request ID") do |value|
+      options[:mr_id] = value
+    end
+
+    opts.on("-i", "--instance-base-url INSTANCE_BASE_URL", String, "URL of the instance where the project and merge request reside") do |value|
+      options[:instance_base_url] = value
+    end
+
+    opts.on("-o", "--output-file-path OUTPUT_PATH", String, "Path of the output file") do |value|
+      options[:output_file_path] = value
+    end
+
+    opts.on("-h", "--help", "Prints this help") do
+      puts opts
+      exit
+    end
+  end.parse!
+
+  PipelineTestReportBuilder.new(options).execute
+end
diff --git a/scripts/rspec_helpers.sh b/scripts/rspec_helpers.sh
index accc52a7ece..2aec8a67734 100644
--- a/scripts/rspec_helpers.sh
+++ b/scripts/rspec_helpers.sh
@@ -89,6 +89,22 @@ function crystalball_rspec_data_exists() {
   compgen -G "crystalball/rspec*.yml" >/dev/null
 }
 
+function retrieve_previous_failed_tests() {
+  local directory_for_output_reports="${1}"
+  local rspec_pg_regex="${2}"
+  local rspec_ee_pg_regex="${3}"
+  local pipeline_report_path="test_results/previous/test_reports.json"
+  local project_path="gitlab-org/gitlab"
+
+  echo 'Attempting to build pipeline test report...'
+
+  scripts/pipeline_test_report_builder.rb --instance-base-url "https://gitlab.com" --project "${project_path}" --mr-id "${CI_MERGE_REQUEST_IID}" --output-file-path "${pipeline_report_path}"
+
+  echo 'Generating failed tests lists...'
+
+  scripts/failed_tests.rb --previous-tests-report-path "${pipeline_report_path}" --output-directory "${directory_for_output_reports}" --rspec-pg-regex "${rspec_pg_regex}" --rspec-ee-pg-regex "${rspec_ee_pg_regex}"
+}
+
 function rspec_simple_job() {
   local rspec_opts="${1}"
 
@@ -172,6 +188,25 @@ function rspec_paralellized_job() {
   date
 }
 
+function rspec_rerun_previous_failed_tests() {
+  local test_file_count_threshold=${RSPEC_PREVIOUS_FAILED_TEST_FILE_COUNT_THRESHOLD:-10}
+  local matching_tests_file=${1}
+  local rspec_opts=${2}
+  local test_files="$(cat "${matching_tests_file}")"
+  local test_file_count=$(wc -w "${matching_tests_file}" | awk {'print $1'})
+
+  if [[ "${test_file_count}" -gt "${test_file_count_threshold}" ]]; then
+    echo "This job is intentionally failed because there are more than ${test_file_count_threshold} test files to rerun."
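+    # (Rationale, inferred: a failure count above the threshold points at a broad
+    # breakage rather than a few flaky files, so it is cheaper to fail fast here
+    # than to rerun a large part of the suite.)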
+ exit 1 + fi + + if [[ -n $test_files ]]; then + rspec_simple_job "${test_files}" + else + echo "No failed test files to rerun" + fi +} + function rspec_fail_fast() { local test_file_count_threshold=${RSPEC_FAIL_FAST_TEST_FILE_COUNT_THRESHOLD:-10} local matching_tests_file=${1} diff --git a/spec/fixtures/scripts/test_report.json b/spec/fixtures/scripts/test_report.json new file mode 100644 index 00000000000..29fd9a4bcb5 --- /dev/null +++ b/spec/fixtures/scripts/test_report.json @@ -0,0 +1,36 @@ +{ + "suites": [ + { + "name": "rspec unit pg12", + "total_time": 975.6635620000018, + "total_count": 3811, + "success_count": 3800, + "failed_count": 1, + "skipped_count": 10, + "error_count": 0, + "suite_error": null, + "test_cases": [ + { + "status": "failed", + "name": "Note associations is expected not to belong to project required: ", + "classname": "spec.models.note_spec", + "file": "./spec/models/note_spec.rb", + "execution_time": 0.209091, + "system_output": "Failure/Error: it { is_expected.not_to belong_to(:project) }\n Did not expect Note to have a belongs_to association called project\n./spec/models/note_spec.rb:9:in `block (3 levels) in '\n./spec/spec_helper.rb:392:in `block (3 levels) in '\n./spec/support/sidekiq_middleware.rb:9:in `with_sidekiq_server_middleware'\n./spec/spec_helper.rb:383:in `block (2 levels) in '\n./spec/spec_helper.rb:379:in `block (3 levels) in '\n./lib/gitlab/application_context.rb:31:in `with_raw_context'\n./spec/spec_helper.rb:379:in `block (2 levels) in '\n./spec/support/database/prevent_cross_joins.rb:95:in `block (3 levels) in '\n./spec/support/database/prevent_cross_joins.rb:62:in `with_cross_joins_prevented'\n./spec/support/database/prevent_cross_joins.rb:95:in `block (2 levels) in '", + "stack_trace": null, + "recent_failures": null + }, + { + "status": "success", + "name": "Gitlab::ImportExport yields the initial tree when importing and exporting it again", + "classname": "spec.lib.gitlab.import_export.import_export_equivalence_spec", + "file": "./spec/lib/gitlab/import_export/import_export_equivalence_spec.rb", + "execution_time": 17.084198, + "system_output": null, + "stack_trace": null, + "recent_failures": null + } + ] + } + ] +} diff --git a/spec/scripts/failed_tests_spec.rb b/spec/scripts/failed_tests_spec.rb new file mode 100644 index 00000000000..92eae75b3be --- /dev/null +++ b/spec/scripts/failed_tests_spec.rb @@ -0,0 +1,127 @@ +# frozen_string_literal: true + +require 'spec_helper' +require_relative '../../scripts/failed_tests' + +RSpec.describe FailedTests do + let(:report_file) { 'spec/fixtures/scripts/test_report.json' } + let(:output_directory) { 'tmp/previous_test_results' } + let(:rspec_pg_regex) { /rspec .+ pg12( .+)?/ } + let(:rspec_ee_pg_regex) { /rspec-ee .+ pg12( .+)?/ } + + subject { described_class.new(previous_tests_report_path: report_file, output_directory: output_directory, rspec_pg_regex: rspec_pg_regex, rspec_ee_pg_regex: rspec_ee_pg_regex) } + + describe '#output_failed_test_files' do + it 'writes the file for the suite' do + expect(File).to receive(:open).with(File.join(output_directory, "rspec_failed_files.txt"), 'w').once + + subject.output_failed_test_files + end + end + + describe '#failed_files_for_suite_collection' do + let(:failure_path) { 'path/to/fail_file_spec.rb' } + let(:other_failure_path) { 'path/to/fail_file_spec_2.rb' } + let(:file_contents_as_json) do + { + 'suites' => [ + { + 'failed_count' => 1, + 'name' => 'rspec unit pg12 10/12', + 'test_cases' => [ + { + 'status' => 'failed', + 'file' => failure_path 
+ } + ] + }, + { + 'failed_count' => 1, + 'name' => 'rspec-ee unit pg12', + 'test_cases' => [ + { + 'status' => 'failed', + 'file' => failure_path + } + ] + }, + { + 'failed_count' => 1, + 'name' => 'rspec unit pg13 10/12', + 'test_cases' => [ + { + 'status' => 'failed', + 'file' => other_failure_path + } + ] + } + ] + } + end + + before do + allow(subject).to receive(:file_contents_as_json).and_return(file_contents_as_json) + end + + it 'returns a list of failed file paths for suite collection' do + result = subject.failed_files_for_suite_collection + + expect(result[:rspec].to_a).to match_array(failure_path) + expect(result[:rspec_ee].to_a).to match_array(failure_path) + end + end + + describe 'empty report' do + let(:file_content) do + '{}' + end + + before do + allow(subject).to receive(:file_contents).and_return(file_content) + end + + it 'does not fail for output files' do + subject.output_failed_test_files + end + + it 'returns empty results for suite failures' do + result = subject.failed_files_for_suite_collection + + expect(result.values.flatten).to be_empty + end + end + + describe 'invalid report' do + let(:file_content) do + '' + end + + before do + allow(subject).to receive(:file_contents).and_return(file_content) + end + + it 'does not fail for output files' do + subject.output_failed_test_files + end + + it 'returns empty results for suite failures' do + result = subject.failed_files_for_suite_collection + + expect(result.values.flatten).to be_empty + end + end + + describe 'missing report file' do + let(:report_file) { 'unknownfile.json' } + + it 'does not fail for output files' do + subject.output_failed_test_files + end + + it 'returns empty results for suite failures' do + result = subject.failed_files_for_suite_collection + + expect(result.values.flatten).to be_empty + end + end +end diff --git a/spec/scripts/pipeline_test_report_builder_spec.rb b/spec/scripts/pipeline_test_report_builder_spec.rb new file mode 100644 index 00000000000..8a5388f4db8 --- /dev/null +++ b/spec/scripts/pipeline_test_report_builder_spec.rb @@ -0,0 +1,137 @@ +# frozen_string_literal: true + +require 'spec_helper' +require_relative '../../scripts/pipeline_test_report_builder' + +RSpec.describe PipelineTestReportBuilder do + let(:report_file) { 'spec/fixtures/scripts/test_report.json' } + let(:output_file_path) { 'tmp/previous_test_results/output_file.json' } + + subject do + described_class.new( + project: 'gitlab-org/gitlab', + mr_id: '999', + instance_base_url: 'https://gitlab.com', + output_file_path: output_file_path + ) + end + + let(:mr_pipelines) do + [ + { + 'status' => 'running', + 'created_at' => DateTime.now.to_s + }, + { + 'status' => 'failed', + 'created_at' => (DateTime.now - 5).to_s + } + ] + end + + let(:failed_builds_for_pipeline) do + [ + { + 'id' => 9999, + 'stage' => 'test' + } + ] + end + + let(:test_report_for_build) do + { + "name": "rspec-ee system pg11 geo", + "failed_count": 41, + "test_cases": [ + { + "status": "failed", + "name": "example", + "classname": "ee.spec.features.geo_node_spec", + "file": "./ee/spec/features/geo_node_spec.rb", + "execution_time": 6.324748, + "system_output": { + "__content__": "\n", + "message": "RSpec::Core::MultipleExceptionError", + "type": "RSpec::Core::MultipleExceptionError" + } + } + ] + } + end + + before do + allow(subject).to receive(:pipelines_for_mr).and_return(mr_pipelines) + allow(subject).to receive(:failed_builds_for_pipeline).and_return(failed_builds_for_pipeline) + allow(subject).to 
receive(:test_report_for_build).and_return(test_report_for_build) + end + + describe '#test_report_for_latest_pipeline' do + context 'no previous pipeline' do + let(:mr_pipelines) { [] } + + it 'returns empty hash' do + expect(subject.test_report_for_latest_pipeline).to eq("{}") + end + end + + context 'first pipeline scenario' do + let(:mr_pipelines) do + [ + { + 'status' => 'running', + 'created_at' => DateTime.now.to_s + } + ] + end + + it 'returns empty hash' do + expect(subject.test_report_for_latest_pipeline).to eq("{}") + end + end + + context 'no previous failed pipeline' do + let(:mr_pipelines) do + [ + { + 'status' => 'running', + 'created_at' => DateTime.now.to_s + }, + { + 'status' => 'success', + 'created_at' => (DateTime.now - 5).to_s + } + ] + end + + it 'returns empty hash' do + expect(subject.test_report_for_latest_pipeline).to eq("{}") + end + end + + context 'no failed test builds' do + let(:failed_builds_for_pipeline) do + [ + { + 'id' => 9999, + 'stage' => 'prepare' + } + ] + end + + it 'returns empty hash' do + expect(subject.test_report_for_latest_pipeline).to eq("{}") + end + end + + context 'failed pipeline and failed test builds' do + it 'returns populated test list for suites' do + actual = subject.test_report_for_latest_pipeline + expected = { + 'suites' => [test_report_for_build] + }.to_json + + expect(actual).to eq(expected) + end + end + end +end diff --git a/spec/tasks/gitlab/gitaly_rake_spec.rb b/spec/tasks/gitlab/gitaly_rake_spec.rb index 22bd9414925..c5625db922d 100644 --- a/spec/tasks/gitlab/gitaly_rake_spec.rb +++ b/spec/tasks/gitlab/gitaly_rake_spec.rb @@ -67,21 +67,42 @@ RSpec.describe 'gitlab:gitaly namespace rake task', :silence_stdout do end it 'calls gmake in the gitaly directory' do - expect(Gitlab::Popen).to receive(:popen).with(%w[which gmake]).and_return(['/usr/bin/gmake', 0]) - expect(Gitlab::Popen).to receive(:popen).with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }).and_return(true) + expect(Gitlab::Popen).to receive(:popen) + .with(%w[which gmake]) + .and_return(['/usr/bin/gmake', 0]) + expect(Gitlab::Popen).to receive(:popen) + .with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }) + .and_return(['ok', 0]) subject end + + context 'when gmake fails' do + it 'aborts process' do + expect(Gitlab::Popen).to receive(:popen) + .with(%w[which gmake]) + .and_return(['/usr/bin/gmake', 0]) + expect(Gitlab::Popen).to receive(:popen) + .with(%w[gmake all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }) + .and_return(['output', 1]) + + expect { subject }.to raise_error /Gitaly failed to compile: output/ + end + end end context 'gmake is not available' do before do expect(main_object).to receive(:checkout_or_clone_version) - expect(Gitlab::Popen).to receive(:popen).with(%w[which gmake]).and_return(['', 42]) + expect(Gitlab::Popen).to receive(:popen) + .with(%w[which gmake]) + .and_return(['', 42]) end it 'calls make in the gitaly directory' do - expect(Gitlab::Popen).to receive(:popen).with(%w[make all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }).and_return(true) + expect(Gitlab::Popen).to receive(:popen) + .with(%w[make all git], nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil }) + .and_return(['output', 0]) subject end @@ -94,7 +115,9 @@ RSpec.describe 'gitlab:gitaly namespace rake task', :silence_stdout do end it 'calls make in the gitaly directory with BUNDLE_DEPLOYMENT and GEM_HOME variables' do - expect(Gitlab::Popen).to receive(:popen).with(command, nil, { "BUNDLE_GEMFILE" => 
nil, "RUBYOPT" => nil, "BUNDLE_DEPLOYMENT" => 'false', "GEM_HOME" => Bundler.bundle_path.to_s }).and_return(true) + expect(Gitlab::Popen).to receive(:popen) + .with(command, nil, { "BUNDLE_GEMFILE" => nil, "RUBYOPT" => nil, "BUNDLE_DEPLOYMENT" => 'false', "GEM_HOME" => Bundler.bundle_path.to_s }) + .and_return(['/usr/bin/gmake', 0]) subject end diff --git a/spec/workers/concerns/application_worker_spec.rb b/spec/workers/concerns/application_worker_spec.rb index af038c81b9e..cae3440f11f 100644 --- a/spec/workers/concerns/application_worker_spec.rb +++ b/spec/workers/concerns/application_worker_spec.rb @@ -285,48 +285,38 @@ RSpec.describe ApplicationWorker do end end - describe '.bulk_perform_async' do - before do - stub_const(worker.name, worker) - end - - it 'enqueues jobs in bulk' do - Sidekiq::Testing.fake! do - worker.bulk_perform_async([['Foo', [1]], ['Foo', [2]]]) - - expect(worker.jobs.count).to eq 2 - expect(worker.jobs).to all(include('enqueued_at')) - end - end - end - - describe '.bulk_perform_in' do - before do - stub_const(worker.name, worker) - end - - context 'when delay is valid' do - it 'correctly schedules jobs' do - Sidekiq::Testing.fake! do - worker.bulk_perform_in(1.minute, [['Foo', [1]], ['Foo', [2]]]) - - expect(worker.jobs.count).to eq 2 - expect(worker.jobs).to all(include('at')) - end + context 'different kinds of push_bulk' do + shared_context 'disable the `sidekiq_push_bulk_in_batches` feature flag' do + before do + stub_feature_flags(sidekiq_push_bulk_in_batches: false) end end - context 'when delay is invalid' do - it 'raises an ArgumentError exception' do - expect { worker.bulk_perform_in(-60, [['Foo']]) } - .to raise_error(ArgumentError) + shared_context 'set safe limit beyond the number of jobs to be enqueued' do + before do + stub_const("#{described_class}::SAFE_PUSH_BULK_LIMIT", args.count + 1) end end - context 'with batches' do - let(:batch_delay) { 1.minute } + shared_context 'set safe limit below the number of jobs to be enqueued' do + before do + stub_const("#{described_class}::SAFE_PUSH_BULK_LIMIT", 2) + end + end - it 'correctly schedules jobs' do + shared_examples_for 'returns job_id of all enqueued jobs' do + let(:job_id_regex) { /[0-9a-f]{12}/ } + + it 'returns job_id of all enqueued jobs' do + job_ids = perform_action + + expect(job_ids.count).to eq(args.count) + expect(job_ids).to all(match(job_id_regex)) + end + end + + shared_examples_for 'enqueues the jobs in a batched fashion, with each batch enqueing jobs as per the set safe limit' do + it 'enqueues the jobs in a batched fashion, with each batch enqueing jobs as per the set safe limit' do expect(Sidekiq::Client).to( receive(:push_bulk).with(hash_including('args' => [['Foo', [1]], ['Foo', [2]]])) .ordered @@ -337,28 +327,257 @@ RSpec.describe ApplicationWorker do .and_call_original) expect(Sidekiq::Client).to( receive(:push_bulk).with(hash_including('args' => [['Foo', [5]]])) - .ordered - .and_call_original) + .ordered + .and_call_original) - worker.bulk_perform_in( - 1.minute, - [['Foo', [1]], ['Foo', [2]], ['Foo', [3]], ['Foo', [4]], ['Foo', [5]]], - batch_size: 2, batch_delay: batch_delay) + perform_action - expect(worker.jobs.count).to eq 5 - expect(worker.jobs[0]['at']).to eq(worker.jobs[1]['at']) - expect(worker.jobs[2]['at']).to eq(worker.jobs[3]['at']) - expect(worker.jobs[2]['at'] - worker.jobs[1]['at']).to eq(batch_delay) - expect(worker.jobs[4]['at'] - worker.jobs[3]['at']).to eq(batch_delay) + expect(worker.jobs.count).to eq args.count + expect(worker.jobs).to 
+      end
+    end
+
+    shared_examples_for 'enqueues jobs in one go' do
+      it 'enqueues jobs in one go' do
+        expect(Sidekiq::Client).to(
+          receive(:push_bulk).with(hash_including('args' => args)).once.and_call_original)
+
+        perform_action
+
+        expect(worker.jobs.count).to eq args.count
+        expect(worker.jobs).to all(include('enqueued_at'))
+      end
+    end
+
+    before do
+      stub_const(worker.name, worker)
+    end
+
+    let(:args) do
+      [
+        ['Foo', [1]],
+        ['Foo', [2]],
+        ['Foo', [3]],
+        ['Foo', [4]],
+        ['Foo', [5]]
+      ]
+    end
+
+    describe '.bulk_perform_async' do
+      shared_examples_for 'does not schedule the jobs for any specific time' do
+        it 'does not schedule the jobs for any specific time' do
+          perform_action
+
+          expect(worker.jobs).to all(exclude('at'))
+        end
       end
 
-      context 'when batch_size is invalid' do
-        it 'raises an ArgumentError exception' do
-          expect do
-            worker.bulk_perform_in(1.minute,
-              [['Foo']],
-              batch_size: -1, batch_delay: batch_delay)
-          end.to raise_error(ArgumentError)
+      subject(:perform_action) do
+        worker.bulk_perform_async(args)
+      end
+
+      context 'push_bulk in safe limit batches' do
+        context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+          include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+          it_behaves_like 'enqueues jobs in one go'
+          it_behaves_like 'returns job_id of all enqueued jobs'
+          it_behaves_like 'does not schedule the jobs for any specific time'
+        end
+
+        context 'when the number of jobs to be enqueued exceeds safe limit' do
+          include_context 'set safe limit below the number of jobs to be enqueued'
+
+          it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
+          it_behaves_like 'returns job_id of all enqueued jobs'
+          it_behaves_like 'does not schedule the jobs for any specific time'
+        end
+
+        context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
+          include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
+
+          context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+            include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues jobs in one go'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'does not schedule the jobs for any specific time'
+          end
+
+          context 'when the number of jobs to be enqueued exceeds safe limit' do
+            include_context 'set safe limit below the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues jobs in one go'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'does not schedule the jobs for any specific time'
+          end
+        end
+      end
+    end
+
+    describe '.bulk_perform_in' do
+      context 'without batches' do
+        shared_examples_for 'schedules all the jobs at a specific time' do
+          it 'schedules all the jobs at a specific time' do
+            perform_action
+
+            worker.jobs.each do |job_detail|
+              expect(job_detail['at']).to be_within(3.seconds).of(expected_scheduled_at_time)
+            end
+          end
+        end
+
+        let(:delay) { 3.minutes }
+        let(:expected_scheduled_at_time) { Time.current.to_i + delay.to_i }
+
+        subject(:perform_action) do
+          worker.bulk_perform_in(delay, args)
+        end
+
+        context 'when the scheduled time falls in the past' do
+          let(:delay) { -60 }
+
+          it 'raises an ArgumentError exception' do
+            expect { perform_action }
+              .to raise_error(ArgumentError)
+          end
+        end
+
+        context 'push_bulk in safe limit batches' do
+          context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+            include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues jobs in one go'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'schedules all the jobs at a specific time'
+          end
+
+          context 'when the number of jobs to be enqueued exceeds safe limit' do
+            include_context 'set safe limit below the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'schedules all the jobs at a specific time'
+          end
+
+          context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
+            include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
+
+            context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+              include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+              it_behaves_like 'enqueues jobs in one go'
+              it_behaves_like 'returns job_id of all enqueued jobs'
+              it_behaves_like 'schedules all the jobs at a specific time'
+            end
+
+            context 'when the number of jobs to be enqueued exceeds safe limit' do
+              include_context 'set safe limit below the number of jobs to be enqueued'
+
+              it_behaves_like 'enqueues jobs in one go'
+              it_behaves_like 'returns job_id of all enqueued jobs'
+              it_behaves_like 'schedules all the jobs at a specific time'
+            end
+          end
+        end
+      end
+
+      context 'with batches' do
+        shared_examples_for 'schedules all the jobs at a specific time, per batch' do
+          it 'schedules all the jobs at a specific time, per batch' do
+            perform_action
+
+            expect(worker.jobs[0]['at']).to eq(worker.jobs[1]['at'])
+            expect(worker.jobs[2]['at']).to eq(worker.jobs[3]['at'])
+            expect(worker.jobs[2]['at'] - worker.jobs[1]['at']).to eq(batch_delay)
+            expect(worker.jobs[4]['at'] - worker.jobs[3]['at']).to eq(batch_delay)
+          end
+        end
+
+        let(:delay) { 1.minute }
+        let(:batch_size) { 2 }
+        let(:batch_delay) { 10.minutes }
+
+        subject(:perform_action) do
+          worker.bulk_perform_in(delay, args, batch_size: batch_size, batch_delay: batch_delay)
+        end
+
+        context 'when the `batch_size` is invalid' do
+          context 'when `batch_size` is 0' do
+            let(:batch_size) { 0 }
+
+            it 'raises an ArgumentError exception' do
+              expect { perform_action }
+                .to raise_error(ArgumentError)
+            end
+          end
+
+          context 'when `batch_size` is negative' do
+            let(:batch_size) { -3 }
+
+            it 'raises an ArgumentError exception' do
+              expect { perform_action }
+                .to raise_error(ArgumentError)
+            end
+          end
+        end
+
+        context 'when the `batch_delay` is invalid' do
+          context 'when `batch_delay` is 0' do
+            let(:batch_delay) { 0.minutes }
+
+            it 'raises an ArgumentError exception' do
+              expect { perform_action }
+                .to raise_error(ArgumentError)
+            end
+          end
+
+          context 'when `batch_delay` is negative' do
+            let(:batch_delay) { -3.minutes }
+
+            it 'raises an ArgumentError exception' do
+              expect { perform_action }
+                .to raise_error(ArgumentError)
+            end
+          end
+        end
+
+        context 'push_bulk in safe limit batches' do
+          context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+            include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues jobs in one go'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'schedules all the jobs at a specific time, per batch'
+          end
+
+          context 'when the number of jobs to be enqueued exceeds safe limit' do
+            include_context 'set safe limit below the number of jobs to be enqueued'
+
+            it_behaves_like 'enqueues the jobs in a batched fashion, with each batch enqueuing jobs as per the set safe limit'
+            it_behaves_like 'returns job_id of all enqueued jobs'
+            it_behaves_like 'schedules all the jobs at a specific time, per batch'
+          end
+
+          context 'when the feature flag `sidekiq_push_bulk_in_batches` is disabled' do
+            include_context 'disable the `sidekiq_push_bulk_in_batches` feature flag'
+
+            context 'when the number of jobs to be enqueued does not exceed the safe limit' do
+              include_context 'set safe limit beyond the number of jobs to be enqueued'
+
+              it_behaves_like 'enqueues jobs in one go'
+              it_behaves_like 'returns job_id of all enqueued jobs'
+              it_behaves_like 'schedules all the jobs at a specific time, per batch'
+            end
+
+            context 'when the number of jobs to be enqueued exceeds safe limit' do
+              include_context 'set safe limit below the number of jobs to be enqueued'
+
+              it_behaves_like 'enqueues jobs in one go'
+              it_behaves_like 'returns job_id of all enqueued jobs'
+              it_behaves_like 'schedules all the jobs at a specific time, per batch'
+            end
+          end
         end
       end
     end
diff --git a/spec/workers/every_sidekiq_worker_spec.rb b/spec/workers/every_sidekiq_worker_spec.rb
index 9a4b27997e9..d00243672f9 100644
--- a/spec/workers/every_sidekiq_worker_spec.rb
+++ b/spec/workers/every_sidekiq_worker_spec.rb
@@ -316,6 +316,8 @@ RSpec.describe 'Every Sidekiq worker' do
       'IssuableExportCsvWorker' => 3,
       'IssuePlacementWorker' => 3,
       'IssueRebalancingWorker' => 3,
+      'Issues::PlacementWorker' => 3,
+      'Issues::RebalancingWorker' => 3,
       'IterationsUpdateStatusWorker' => 3,
       'JiraConnect::SyncBranchWorker' => 3,
       'JiraConnect::SyncBuildsWorker' => 3,
diff --git a/spec/workers/issues/placement_worker_spec.rb b/spec/workers/issues/placement_worker_spec.rb
new file mode 100644
index 00000000000..694cdd2ef37
--- /dev/null
+++ b/spec/workers/issues/placement_worker_spec.rb
@@ -0,0 +1,151 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Issues::PlacementWorker do
+  describe '#perform' do
+    let_it_be(:time) { Time.now.utc }
+    let_it_be(:group) { create(:group) }
+    let_it_be(:project) { create(:project, group: group) }
+    let_it_be(:author) { create(:user) }
+    let_it_be(:common_attrs) { { author: author, project: project } }
+    let_it_be(:unplaced) { common_attrs.merge(relative_position: nil) }
+    let_it_be_with_reload(:issue) { create(:issue, **unplaced, created_at: time) }
+    let_it_be_with_reload(:issue_a) { create(:issue, **unplaced, created_at: time - 1.minute) }
+    let_it_be_with_reload(:issue_b) { create(:issue, **unplaced, created_at: time - 2.minutes) }
+    let_it_be_with_reload(:issue_c) { create(:issue, **unplaced, created_at: time + 1.minute) }
+    let_it_be_with_reload(:issue_d) { create(:issue, **unplaced, created_at: time + 2.minutes) }
+    let_it_be_with_reload(:issue_e) { create(:issue, **common_attrs, relative_position: 10, created_at: time + 1.minute) }
+    let_it_be_with_reload(:issue_f) { create(:issue, **unplaced, created_at: time + 1.minute) }
+
+    let_it_be(:irrelevant) { create(:issue, relative_position: nil, created_at: time) }
+
+    shared_examples 'running the issue placement worker' do
+      let(:issue_id) { issue.id }
+      let(:project_id) { project.id }
+
+      it 'places all issues created at most 5 minutes before this one at the end, most recent last' do
+        expect { run_worker }.not_to change { irrelevant.reset.relative_position }
+
+        expect(project.issues.order_by_relative_position)
+          .to eq([issue_e, issue_b, issue_a, issue, issue_c, issue_f, issue_d])
+        expect(project.issues.where(relative_position: nil)).not_to exist
+      end
+
+      it 'schedules rebalancing if needed' do
+        issue_a.update!(relative_position: RelativePositioning::MAX_POSITION)
+
+        expect(IssueRebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
+
+        run_worker
+      end
+
+      context 'there are more than QUERY_LIMIT unplaced issues' do
+        before_all do
+          # Ensure there are more than N issues in this set
+          n = described_class::QUERY_LIMIT
+          create_list(:issue, n - 5, **unplaced)
+        end
+
+        it 'limits the sweep to QUERY_LIMIT records, and reschedules placement' do
+          expect(Issue).to receive(:move_nulls_to_end)
+            .with(have_attributes(count: described_class::QUERY_LIMIT))
+            .and_call_original
+
+          expect(described_class).to receive(:perform_async).with(nil, project.id)
+
+          run_worker
+
+          expect(project.issues.where(relative_position: nil)).to exist
+        end
+
+        it 'is eventually correct' do
+          prefix = project.issues.where.not(relative_position: nil).order(:relative_position).to_a
+          moved = project.issues.where.not(id: prefix.map(&:id))
+
+          run_worker
+
+          expect(project.issues.where(relative_position: nil)).to exist
+
+          run_worker
+
+          expect(project.issues.where(relative_position: nil)).not_to exist
+          expect(project.issues.order(:relative_position)).to eq(prefix + moved.order(:created_at, :id))
+        end
+      end
+
+      context 'we are passed bad IDs' do
+        let(:issue_id) { non_existing_record_id }
+        let(:project_id) { non_existing_record_id }
+
+        def max_positions_by_project
+          Issue
+            .group(:project_id)
+            .pluck(:project_id, Issue.arel_table[:relative_position].maximum.as('max_relative_position'))
+            .to_h
+        end
+
+        it 'does not move any issues to the end' do
+          expect { run_worker }.not_to change { max_positions_by_project }
+        end
+
+        context 'the project_id refers to an empty project' do
+          let!(:project_id) { create(:project).id }
+
+          it 'does not move any issues to the end' do
+            expect { run_worker }.not_to change { max_positions_by_project }
+          end
+        end
+      end
+
+      it 'anticipates the failure to place the issues, and schedules rebalancing' do
+        allow(Issue).to receive(:move_nulls_to_end) { raise RelativePositioning::NoSpaceLeft }
+
+        expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
+        expect(Gitlab::ErrorTracking)
+          .to receive(:log_exception)
+          .with(RelativePositioning::NoSpaceLeft, worker_arguments)
+
+        run_worker
+      end
+    end
+
+    context 'passing an issue ID' do
+      def run_worker
+        described_class.new.perform(issue_id)
+      end
+
+      let(:worker_arguments) { { issue_id: issue_id, project_id: nil } }
+
+      it_behaves_like 'running the issue placement worker'
+
+      context 'when block_issue_repositioning is enabled' do
+        let(:issue_id) { issue.id }
+        let(:project_id) { project.id }
+
+        before do
+          stub_feature_flags(block_issue_repositioning: group)
+        end
+
+        it 'does not run repositioning tasks' do
+          expect { run_worker }.not_to change { issue.reset.relative_position }
+        end
+      end
+    end
+
+    context 'passing a project ID' do
+      def run_worker
+        described_class.new.perform(nil, project_id)
+      end
+
+      let(:worker_arguments) { { issue_id: nil, project_id: project_id } }
+
+      it_behaves_like 'running the issue placement worker'
+    end
+  end
+
+  it 'has the `until_executed` deduplicate strategy' do
+    expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
+    expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
+  end
+end
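[Editor's note: both new worker specs close by asserting the `until_executed` deduplication strategy with `including_scheduled: true`. For context, a GitLab worker typically declares this through the ApplicationWorker DSL roughly as below; ExampleWorker is a made-up name, and the real declarations for these workers live in app/workers/issues/ earlier in this patch:

    class ExampleWorker # hypothetical, for illustration only
      include ApplicationWorker

      idempotent!
      deduplicate :until_executed, including_scheduled: true

      # Accepts the same nil-able ID arguments the specs pass in.
      def perform(issue_id = nil, project_id = nil); end
    end

Deduplicating until executed drops identical jobs queued while an equivalent job is still pending, which is what makes the idempotency shared examples in these specs meaningful.]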
diff --git a/spec/workers/issues/rebalancing_worker_spec.rb b/spec/workers/issues/rebalancing_worker_spec.rb
new file mode 100644
index 00000000000..438edd85f66
--- /dev/null
+++ b/spec/workers/issues/rebalancing_worker_spec.rb
@@ -0,0 +1,90 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Issues::RebalancingWorker do
+  describe '#perform' do
+    let_it_be(:group) { create(:group) }
+    let_it_be(:project) { create(:project, group: group) }
+    let_it_be(:issue) { create(:issue, project: project) }
+
+    shared_examples 'running the worker' do
+      it 'runs an instance of Issues::RelativePositionRebalancingService' do
+        service = double(execute: nil)
+        service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
+
+        expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
+
+        described_class.new.perform(*arguments)
+      end
+
+      it 'anticipates there being too many concurrent rebalances' do
+        service = double
+        service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
+
+        allow(service).to receive(:execute).and_raise(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances)
+        expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
+        expect(Gitlab::ErrorTracking).to receive(:log_exception).with(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances, include(project_id: arguments.second, root_namespace_id: arguments.third))
+
+        described_class.new.perform(*arguments)
+      end
+
+      it 'takes no action if the value is nil' do
+        expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
+        expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
+
+        described_class.new.perform # all arguments are nil
+      end
+    end
+
+    shared_examples 'safely handles non-existent ids' do
+      it 'anticipates the inability to find the issue' do
+        expect(Gitlab::ErrorTracking).to receive(:log_exception).with(ArgumentError, include(project_id: arguments.second, root_namespace_id: arguments.third))
+        expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
+
+        described_class.new.perform(*arguments)
+      end
+    end
+
+    context 'without root_namespace param' do
+      it_behaves_like 'running the worker' do
+        let(:arguments) { [-1, project.id] }
+      end
+
+      it_behaves_like 'safely handles non-existent ids' do
+        let(:arguments) { [nil, -1] }
+      end
+
+      include_examples 'an idempotent worker' do
+        let(:job_args) { [-1, project.id] }
+      end
+
+      include_examples 'an idempotent worker' do
+        let(:job_args) { [nil, -1] }
+      end
+    end
+
+    context 'with root_namespace param' do
+      it_behaves_like 'running the worker' do
+        let(:arguments) { [nil, nil, group.id] }
+      end
+
+      it_behaves_like 'safely handles non-existent ids' do
+        let(:arguments) { [nil, nil, -1] }
+      end
+
+      include_examples 'an idempotent worker' do
+        let(:job_args) { [nil, nil, group.id] }
+      end
+
+      include_examples 'an idempotent worker' do
+        let(:job_args) { [nil, nil, -1] }
+      end
+    end
+  end
+
+  it 'has the `until_executed` deduplicate strategy' do
+    expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
+    expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
+  end
+end
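[Editor's note: the application_worker_spec.rb changes above all exercise one behavior: when the `sidekiq_push_bulk_in_batches` feature flag is enabled, bulk pushes are split into slices of at most SAFE_PUSH_BULK_LIMIT jobs, each sent with its own Sidekiq::Client.push_bulk call; when the flag is disabled, everything goes out in a single call. A rough sketch of that logic, assuming the constant and flag introduced earlier in this patch (the helper name is illustrative, not ApplicationWorker's actual internals):

    # Push `payload['args']` to Sidekiq in bounded slices and return
    # the job IDs of every enqueued job, batched or not.
    def push_in_safe_limit_batches(payload)
      unless Feature.enabled?(:sidekiq_push_bulk_in_batches)
        return Sidekiq::Client.push_bulk(payload)
      end

      payload['args'].each_slice(SAFE_PUSH_BULK_LIMIT).flat_map do |args_batch|
        Sidekiq::Client.push_bulk(payload.merge('args' => args_batch))
      end
    end

This is why the shared examples assert both the ordered per-batch push_bulk calls and that the returned job IDs cover every enqueued job regardless of batching.]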