Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2020-03-20 18:09:31 +00:00
parent 194b499aa8
commit ae4ef81757
31 changed files with 383 additions and 83 deletions

1
.gitignore vendored
View File

@ -34,6 +34,7 @@ eslint-report.html
/config/database*.yml
/config/gitlab.yml
/config/gitlab_ci.yml
/config/Gitlab.gitlab-license
/config/initializers/rack_attack.rb
/config/initializers/smtp_settings.rb
/config/initializers/relative_url.rb

View File

@ -138,7 +138,7 @@ export default {
variables: { id: this.snippet.id },
})
.then(({ data }) => {
if (data?.destroySnippet?.errors) {
if (data?.destroySnippet?.errors.length) {
throw new Error(data?.destroySnippet?.errors[0]);
}
this.isDeleting = false;

View File

@ -90,8 +90,6 @@ module Types
# proc because we set complexity depending on arguments and number of
# items which can be loaded.
proc do |ctx, args, child_complexity|
next base_complexity unless resolver_complexity_enabled?(ctx)
# Resolvers may add extra complexity depending on used arguments
complexity = child_complexity + self.resolver&.try(:resolver_complexity, args, child_complexity: child_complexity).to_i
complexity += 1 if calls_gitaly?
@ -101,10 +99,6 @@ module Types
end
end
def resolver_complexity_enabled?(ctx)
ctx.fetch(:graphql_resolver_complexity_flag) { |key| ctx[key] = Feature.enabled?(:graphql_resolver_complexity) }
end
def connection_complexity_multiplier(ctx, args)
# Resolvers may add extra complexity depending on number of items being loaded.
field_defn = to_graphql

View File

@ -245,7 +245,7 @@
= link_to repository_admin_application_settings_path, title: _('Repository'), class: 'qa-admin-settings-repository-item' do
%span
= _('Repository')
- if template_exists?('admin/application_settings/templates')
- if Gitlab.ee? && License.feature_available?(:custom_file_templates)
= nav_link(path: 'application_settings#templates') do
= link_to templates_admin_application_settings_path, title: _('Templates'), class: 'qa-admin-settings-template-item' do
%span

View File

@ -92,10 +92,10 @@
.form-check
= f.check_box :forward_deployment_enabled, { class: 'form-check-input' }
= f.label :forward_deployment_enabled, class: 'form-check-label' do
%strong= _("Skip older, pending deployment jobs")
%strong= _("Skip outdated deployment jobs")
.form-text.text-muted
= _("When a deployment job is successful, skip older deployment jobs that are still pending")
= link_to icon('question-circle'), help_page_path('ci/pipelines/settings', anchor: 'skip-older-pending-deployment-jobs'), target: '_blank'
= link_to icon('question-circle'), help_page_path('ci/pipelines/settings', anchor: 'skip-outdated-deployment-jobs'), target: '_blank'
%hr
.form-group

View File

@ -84,7 +84,7 @@
= link_to @user.public_email, "mailto:#{@user.public_email}", class: 'text-link'
- if @user.bio.present?
.cover-desc.cgray
%p.profile-user-bio.font-italic
%p.profile-user-bio
= @user.bio
- unless profile_tabs.empty?

View File

@ -9,6 +9,15 @@ module WaitableWorker
# Short-circuit: it's more efficient to do small numbers of jobs inline
return bulk_perform_inline(args_list) if args_list.size <= 3
# Don't wait if there's too many jobs to be waited for. Not including the
# waiter allows them to be deduplicated and it skips waiting for jobs that
# are not likely to finish within the timeout. This assumes we can process
# 10 jobs per second:
# https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/205
if ::Feature.enabled?(:skip_job_waiter_for_large_batches)
return bulk_perform_async(args_list) if args_list.length >= 10 * timeout
end
waiter = Gitlab::JobWaiter.new(args_list.size, worker_label: self.to_s)
# Point all the bulk jobs at the same JobWaiter. Converts, [[1], [2], [3]]

View File

@ -0,0 +1,5 @@
---
title: Revert user bio back to non-italicized font to fix rendering of emojis
merge_request: 27693
author:
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Rename feature on the FE and locale
merge_request:
author:
type: changed

View File

@ -0,0 +1,5 @@
---
title: Fix processing of GraphQL query complexity based on used resolvers.
merge_request: 27652
author:
type: fixed

View File

@ -252,6 +252,10 @@
- 1
- - upload_checksum
- 1
- - vulnerability_exports_export
- 1
- - vulnerability_exports_export_deletion
- 1
- - web_hook
- 1
- - x509_certificate_revoke

View File

@ -0,0 +1,24 @@
# frozen_string_literal: true

# Creates the vulnerability_exports table, which records background export
# jobs for a project's vulnerability list: lifecycle timestamps, job status,
# the generated file, and who requested the export.
#
# Foreign keys for project_id and author_id are added by the follow-up
# migrations AddVulnerabilityExportProjectForeignKey and
# AddVulnerabilityExportUserForeignKey.
class CreateVulnerabilityExports < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  # Creating a new table requires no downtime.
  DOWNTIME = false

  def change
    create_table :vulnerability_exports do |t|
      t.timestamps_with_timezone null: false
      t.datetime_with_timezone :started_at   # when processing of the export began
      t.datetime_with_timezone :finished_at  # when processing of the export completed
      t.string :status, limit: 255, null: false
      t.string :file, limit: 255             # name of the generated export file
      t.bigint :project_id, null: false
      t.bigint :author_id, null: false
      t.integer :file_store                  # NOTE(review): presumably the object-storage location flag — confirm against uploader
      t.integer :format, limit: 2, null: false, default: 0 # smallint enum, defaults to the first format
      # Composite unique index doubles as the lookup index for project_id,
      # so the FK migration can skip creating a single-column index.
      t.index %i[project_id id], unique: true
      t.index %i[author_id]
    end
  end
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true

# Adds the vulnerability_exports(project_id) -> projects foreign key.
# Rows are removed when the owning project is deleted (cascade).
class AddVulnerabilityExportProjectForeignKey < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  def up
    with_lock_retries do
      # index: false — the [project_id, id] composite index created by
      # CreateVulnerabilityExports already covers this column.
      add_foreign_key :vulnerability_exports, :projects, column: :project_id, on_delete: :cascade, index: false # rubocop:disable Migration/AddConcurrentForeignKey
    end
  end

  def down
    with_lock_retries do
      remove_foreign_key :vulnerability_exports, column: :project_id
    end
  end
end

View File

@ -0,0 +1,19 @@
# frozen_string_literal: true

# Adds the vulnerability_exports(author_id) -> users foreign key.
# Exports are removed when the requesting user is deleted (cascade).
class AddVulnerabilityExportUserForeignKey < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  def up
    with_lock_retries do
      # index: false — the author_id index created by
      # CreateVulnerabilityExports already covers this column.
      add_foreign_key :vulnerability_exports, :users, column: :author_id, on_delete: :cascade, index: false # rubocop:disable Migration/AddConcurrentForeignKey
    end
  end

  def down
    with_lock_retries do
      remove_foreign_key :vulnerability_exports, column: :author_id
    end
  end
end

View File

@ -4535,6 +4535,21 @@ ActiveRecord::Schema.define(version: 2020_03_19_203901) do
t.index ["updated_by_id"], name: "index_vulnerabilities_on_updated_by_id"
end
create_table "vulnerability_exports", force: :cascade do |t|
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
t.datetime_with_timezone "started_at"
t.datetime_with_timezone "finished_at"
t.string "status", limit: 255, null: false
t.string "file", limit: 255
t.bigint "project_id", null: false
t.bigint "author_id", null: false
t.integer "file_store"
t.integer "format", limit: 2, default: 0, null: false
t.index ["author_id"], name: "index_vulnerability_exports_on_author_id"
t.index ["project_id", "id"], name: "index_vulnerability_exports_on_project_id_and_id", unique: true
end
create_table "vulnerability_feedback", id: :serial, force: :cascade do |t|
t.datetime_with_timezone "created_at", null: false
t.datetime_with_timezone "updated_at", null: false
@ -5220,6 +5235,8 @@ ActiveRecord::Schema.define(version: 2020_03_19_203901) do
add_foreign_key "vulnerabilities", "users", column: "last_edited_by_id", name: "fk_1302949740", on_delete: :nullify
add_foreign_key "vulnerabilities", "users", column: "resolved_by_id", name: "fk_76bc5f5455", on_delete: :nullify
add_foreign_key "vulnerabilities", "users", column: "updated_by_id", name: "fk_7ac31eacb9", on_delete: :nullify
add_foreign_key "vulnerability_exports", "projects", on_delete: :cascade
add_foreign_key "vulnerability_exports", "users", column: "author_id", on_delete: :cascade
add_foreign_key "vulnerability_feedback", "ci_pipelines", column: "pipeline_id", on_delete: :nullify
add_foreign_key "vulnerability_feedback", "issues", on_delete: :nullify
add_foreign_key "vulnerability_feedback", "merge_requests", name: "fk_563ff1912e", on_delete: :nullify

View File

@ -44,6 +44,7 @@ package (highly recommended), follow the steps below:
1. [Configuring the Praefect proxy/router](#praefect)
1. [Configuring each Gitaly node](#gitaly) (once for each Gitaly node)
1. [Updating the GitLab server configuration](#gitlab)
1. [Configure Grafana](#grafana)
### Preparation
@ -532,8 +533,6 @@ Particular attention should be shown to:
]
}
]
grafana['disable_login_form'] = false
```
1. Save the changes to `/etc/gitlab/gitlab.rb` and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure):
@ -548,12 +547,6 @@ Particular attention should be shown to:
gitlab-rake gitlab:gitaly:check
```
1. Set the Grafana admin password. This command will prompt you to enter a new password:
```shell
gitlab-ctl set-grafana-password
```
1. Update the **Repository storage** settings from **Admin Area > Settings >
Repository > Repository storage** to make the newly configured Praefect
cluster the storage location for new Git repositories.
@ -566,11 +559,6 @@ Particular attention should be shown to:
repository that viewed. If the project is created, and you can see the
README file, it works!
1. Inspect metrics by browsing to `/-/grafana` on your GitLab server.
Log in with `admin` / `GRAFANA_PASSWORD`. Go to 'Explore' and query
`gitlab_build_info` to verify that you are getting metrics from all your
machines.
Congratulations! You have configured a highly available Praefect cluster.
### Failover
@ -694,6 +682,49 @@ for example behind a load balancer, `failover_enabled` should be disabled. The r
is no coordination that currently happens across different Praefect instances, so there could be a situation where
two Praefect instances think two different Gitaly nodes are the primary.
## Grafana
Grafana is included with GitLab, and can be used to monitor your Praefect
cluster. See [Grafana Dashboard
Service](https://docs.gitlab.com/omnibus/settings/grafana.html)
for detailed documentation.
To get started quickly:
1. SSH into the **GitLab** node and login as root:
```shell
sudo -i
```
1. Enable the Grafana login form by editing `/etc/gitlab/gitlab.rb`.
```ruby
grafana['disable_login_form'] = false
```
1. Save the changes to `/etc/gitlab/gitlab.rb` and [reconfigure
GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure):
```shell
gitlab-ctl reconfigure
```
1. Set the Grafana admin password. This command will prompt you to enter a new
password:
```shell
gitlab-ctl set-grafana-password
```
1. In your web browser, open `/-/grafana` (e.g.
`https://gitlab.example.com/-/grafana`) on your GitLab server.
Login using the password you set, and the username `admin`.
1. Go to **Explore** and query `gitlab_build_info` to verify that you are
getting metrics from all your machines.
## Migrating existing repositories to Praefect
If your GitLab instance already has repositories, these won't be migrated

View File

@ -192,7 +192,7 @@ you can enable this in the project settings:
1. Check the **Auto-cancel redundant, pending pipelines** checkbox.
1. Click **Save changes**.
## Skip older, pending deployment jobs
## Skip outdated deployment jobs
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/25276) in GitLab 12.9.
@ -206,7 +206,7 @@ To avoid this scenario:
1. Go to **{settings}** **Settings > CI / CD**.
1. Expand **General pipelines**.
1. Check the **Skip older, pending deployment jobs** checkbox.
1. Check the **Skip outdated deployment jobs** checkbox.
1. Click **Save changes**.
The pending deployment jobs will be skipped.

View File

@ -153,36 +153,67 @@ A few notes:
## Customizable Value Stream Analytics
The default stages are designed to work straight out of the box, but they might not be suitable for all teams. Different teams use different approaches to building software, so some teams might want to customize their Value Stream Analytics. From GitLab 12.9, users can hide default stages and create custom stages that align better to their development workflow.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/12196) in GitLab 12.9.
The default stages are designed to work straight out of the box, but they might not be suitable for
all teams. Different teams use different approaches to building software, so some teams might want
to customize their Value Stream Analytics.
GitLab allows users to hide default stages and create custom stages that align better to their
development workflow.
### Adding a stage
In the following example we're creating a new stage that measures and tracks issues from creation time until they are closed.
In the following example we're creating a new stage that measures and tracks issues from creation
time until they are closed.
1. Navigate to your group page.
1. Open Value Stream Analytics from the sidebar: **Analytics > Value Stream**
1. Click the "Add a stage" button.
1. Navigate to your group's **Analytics > Value Stream**.
1. Click the **Add a stage** button.
1. Fill in the new stage form:
- Name: Issue start to finish
- Start event: Issue created
- End event: Issue closed
1. Click the "Add stage" button.
- Name: Issue start to finish.
- Start event: Issue created.
- End event: Issue closed.
1. Click the **Add stage** button.
![New Value Stream Analytics Stage](img/new_vsm_stage_v12_9.png "Form for creating a new stage")
The new stage is persisted and it will always show up on the value stream analytics page for your group. In case you want to alter or delete the stage you can easily do that for customized stages by hovering over the stage and clicking the three-dot icon that appears.
The new stage is persisted and it will always show up on the Value Stream Analytics page for your
group.
If you want to alter or delete the stage, you can easily do that for customized stages by:
1. Hovering over the stage.
1. Clicking the vertical ellipsis (**{ellipsis_v}**) button that appears.
![Value Stream Analytics Stages](img/vsm_stage_list_v12_9.png)
Creating a custom stage requires specifying two events, a start and an end. Be careful to choose a start event that occurs *before* your end event. For example, consider if a stage started when an issue is added to a board, and ended when the issue is created. This stage would not work because the end event has already happened when the start event occurs. To prevent such invalid stages, the form prohibits incompatible start and end events. After you select the start event, the stop event dropdown will only list the compatible events.
Creating a custom stage requires specifying two events:
Note: The ability to re-order the stages is a [planned enhancement](https://gitlab.com/gitlab-org/gitlab/issues/196698).
- A start.
- An end.
Be careful to choose a start event that occurs *before* your end event. For example, consider a
stage that:
- Started when an issue is added to a board.
- Ended when the issue is created.
This stage would not work because the end event has already happened when the start event occurs.
To prevent such invalid stages, the UI prohibits incompatible start and end events. After you select
the start event, the stop event dropdown will only list the compatible events.
NOTE: **Note:**
The ability to re-order the stages is [planned](https://gitlab.com/gitlab-org/gitlab/issues/196698).
### Label based stages
The pre-defined start and end events can cover many use cases involving both issues and merge requests. For supporting more complex workflows, we can use stages based on group labels. These events are based on labels being added/removed. In particular, [scoped labels](../project/labels.md#scoped-labels-premium) are useful for complex workflows.
The pre-defined start and end events can cover many use cases involving both issues and merge requests.
In this example we'd like to measure more accurate code review times. The workflow is the following:
For supporting more complex workflows, use stages based on group labels. These events are based on
labels being added or removed. In particular, [scoped labels](../project/labels.md#scoped-labels-premium)
are useful for complex workflows.
In this example, we'd like to measure more accurate code review times. The workflow is the following:
- When the code review starts, the reviewer adds `workflow::code_review_start` label to the merge request.
- When the code review is finished, the reviewer adds `workflow::code_review_complete` label to the merge request.
@ -193,12 +224,17 @@ Creating a new stage called "Code Review":
### Hiding unused stages
Sometimes certain default stages are not relevant to a team. In this case you can easily hide stages so they no longer appear in the list. First, add a custom stage to activate customizability. Then hover over the default stage you want to hide, click the three-dot icon that appears and select "Hide stage".
Sometimes certain default stages are not relevant to a team. In this case, you can easily hide stages
so they no longer appear in the list. To hide stages:
1. Add a custom stage to activate customizability.
1. Hover over the default stage you want to hide.
1. Click the vertical ellipsis (**{ellipsis_v}**) button that appears and select **Hide stage**.
To recover a default stage that was previously hidden:
1. Click "Add a stage" button.
1. In the top right corner open the "Recover hidden stage" dropdown.
1. Click **Add a stage** button.
1. In the top right corner open the **Recover hidden stage** dropdown.
1. Select a stage.
## Days to completion chart

View File

@ -6,15 +6,15 @@ type: reference
> - Introduced in [GitLab Ultimate](https://about.gitlab.com/pricing/) 10.5.
> - In [GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/issues/198062), Roadmaps were moved to the Premium tier.
> - In [GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/issues/5164) and later, the epic bars show their title, progress, and completed weight percentage.
An Epic within a group containing **Start date** and/or **Due date**
can be visualized in the form of a timeline (e.g. a Gantt chart). The Epics Roadmap page
shows such a visualization for all the epics which are under a group and/or its subgroups.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/5164) in GitLab 12.9.
On the epic bars, you can see their title, progress, and completed weight percentage.
When you hover over an epic bar, a popover appears with its title, start and due dates, and weight completed.
When you hover over an epic bar, a popover appears with its title, start and due dates, and weight
completed.
![roadmap view](img/roadmap_view_v12_9.png)

View File

@ -104,9 +104,9 @@ This feature is similar to the [Credentials inventory for self-managed instances
> [Introduced](https://gitlab.com/gitlab-org/gitlab/issues/34648) in GitLab 12.9.
Groups with enabled group-managed accounts can allow or disallow forking of projects outside of root group
by using separate toggle. If forking is disallowed any project of given root group or its subgroups can be forked to
a subgroup of the same root group only.
Groups with group-managed accounts can disallow forking of projects to destinations outside the group.
To do so, enable the "Prohibit outer forks" option in **Settings > SAML SSO**.
When enabled, projects within the group can only be forked to other destinations within the group (including its subgroups).
##### Other restrictions for Group-managed accounts

View File

@ -5,14 +5,18 @@ module Gitlab
HEADER_NAME = 'Poll-Interval'
def self.set_header(response, interval:)
if polling_enabled?
multiplier = Gitlab::CurrentSettings.polling_interval_multiplier
value = (interval * multiplier).to_i
else
value = -1
end
response.headers[HEADER_NAME] = polling_interval_value(interval).to_s
end
response.headers[HEADER_NAME] = value.to_s
def self.set_api_header(context, interval:)
context.header HEADER_NAME, polling_interval_value(interval).to_s
end
def self.polling_interval_value(interval)
return -1 unless polling_enabled?
multiplier = Gitlab::CurrentSettings.polling_interval_multiplier
(interval * multiplier).to_i
end
def self.polling_enabled?

View File

@ -18323,7 +18323,7 @@ msgstr ""
msgid "Size settings for static websites"
msgstr ""
msgid "Skip older, pending deployment jobs"
msgid "Skip outdated deployment jobs"
msgstr ""
msgid "Skip this for now"

View File

@ -0,0 +1,25 @@
# frozen_string_literal: true

module RuboCop
  module Cop
    # Flags source files whose full path exceeds FILEPATH_MAX_BYTES or whose
    # basename exceeds FILENAME_MAX_BYTES. The path check takes precedence
    # when both limits are exceeded. Sizes are measured in bytes, so
    # multi-byte characters count more than once.
    class FilenameLength < Cop
      include RangeHelp

      FILEPATH_MAX_BYTES = 256
      FILENAME_MAX_BYTES = 100
      MSG_FILEPATH_LEN = "This file path is too long. It should be #{FILEPATH_MAX_BYTES} or less"
      MSG_FILENAME_LEN = "This file name is too long. It should be #{FILENAME_MAX_BYTES} or less"

      def investigate(processed_source)
        path = processed_source.file_path
        return if config.file_to_exclude?(path)

        # Pick at most one message; full-path violations win over basename ones.
        message =
          if path.bytesize > FILEPATH_MAX_BYTES
            MSG_FILEPATH_LEN
          elsif File.basename(path).bytesize > FILENAME_MAX_BYTES
            MSG_FILENAME_LEN
          end

        return unless message

        # Anchor the offense to the first character of the file since the
        # problem is the file's name, not any particular node.
        add_offense(nil, location: source_range(processed_source.buffer, 1, 0, 1), message: message)
      end
    end
  end
end

View File

@ -32,7 +32,9 @@ describe('Snippet header component', () => {
const errorMsg = 'Foo bar';
const err = { message: errorMsg };
const resolveMutate = jest.fn(() => Promise.resolve({ data: {} }));
const resolveMutate = jest.fn(() =>
Promise.resolve({ data: { destroySnippet: { errors: [] } } }),
);
const rejectMutation = jest.fn(() => Promise.reject(err));
const mutationTypes = {

View File

@ -67,17 +67,6 @@ describe Types::BaseField do
expect(field.to_graphql.complexity.call({}, { first: 1 }, 2)).to eq 2
expect(field.to_graphql.complexity.call({}, { first: 1, foo: true }, 2)).to eq 4
end
context 'when graphql_resolver_complexity is disabled' do
before do
stub_feature_flags(graphql_resolver_complexity: false)
end
it 'sets default field complexity' do
expect(field.to_graphql.complexity.call({}, {}, 2)).to eq 1
expect(field.to_graphql.complexity.call({}, { first: 50 }, 2)).to eq 1
end
end
end
context 'and is not a connection' do

View File

@ -431,6 +431,7 @@ project:
- sourced_pipelines
- prometheus_metrics
- vulnerabilities
- vulnerability_exports
- vulnerability_findings
- vulnerability_feedback
- vulnerability_identifiers

View File

@ -33,4 +33,36 @@ describe Gitlab::PollingInterval do
end
end
end
describe '.set_api_header' do
let(:context) { double(Grape::Endpoint) }
before do
allow(context).to receive(:header)
end
context 'when polling is disabled' do
before do
stub_application_setting(polling_interval_multiplier: 0)
end
it 'sets value to -1' do
expect(context).to receive(:header).with('Poll-Interval', '-1')
polling_interval.set_api_header(context, interval: 10_000)
end
end
context 'when polling is enabled' do
before do
stub_application_setting(polling_interval_multiplier: 0.33333)
end
it 'applies modifier to base interval' do
expect(context).to receive(:header).with('Poll-Interval', '3333')
polling_interval.set_api_header(context, interval: 10_000)
end
end
end
end

View File

@ -165,7 +165,6 @@ describe 'GraphQL' do
before do
stub_const('GitlabSchema::DEFAULT_MAX_COMPLEXITY', 6)
stub_feature_flags(graphql_resolver_complexity: true)
end
context 'when fetching single resource' do
@ -186,18 +185,6 @@ describe 'GraphQL' do
expect_graphql_errors_to_include(/which exceeds max complexity/)
end
context 'when graphql_resolver_complexity is disabled' do
before do
stub_feature_flags(graphql_resolver_complexity: false)
end
it 'processes the query' do
post_graphql(query)
expect(graphql_errors).to be_nil
end
end
end
end
end

View File

@ -0,0 +1,51 @@
# frozen_string_literal: true

require 'spec_helper'
require 'rubocop'
require 'rubocop/rspec/support'
require_relative '../../../rubocop/cop/filename_length'
require_relative '../../support/helpers/expect_offense'

# Specs for RuboCop::Cop::FilenameLength. The cop flags files whose basename
# exceeds 100 bytes or whose full path exceeds 256 bytes; the offense message
# text below must match the cop's MSG_* constants exactly.
describe RuboCop::Cop::FilenameLength do
  subject(:cop) { described_class.new }

  it 'does not flag files with names 100 characters long' do
    expect_no_offenses('puts "it does not matter"', 'a' * 100)
  end

  it 'tags files with names 101 characters long' do
    filename = 'a' * 101
    expect_offense(<<~SOURCE, filename)
    source code
    ^ This file name is too long. It should be 100 or less
    SOURCE
  end

  # A 256-byte name is also a 256-byte path, but the path check is strictly
  # greater-than, so only the basename limit fires here.
  it 'tags files with names 256 characters long' do
    filename = 'a' * 256
    expect_offense(<<~SOURCE, filename)
    source code
    ^ This file name is too long. It should be 100 or less
    SOURCE
  end

  # Path is exactly 256 bytes ('a' + separator + 254 bytes) — at the path
  # limit, so only the long basename is reported.
  it 'tags files with filepath 256 characters long' do
    filepath = File.join 'a', 'b' * 254
    expect_offense(<<~SOURCE, filepath)
    source code
    ^ This file name is too long. It should be 100 or less
    SOURCE
  end

  # Path is 257 bytes — over the limit, so the path message takes precedence.
  it 'tags files with filepath 257 characters long' do
    filepath = File.join 'a', 'b' * 255
    expect_offense(<<~SOURCE, filepath)
    source code
    ^ This file path is too long. It should be 256 or less
    SOURCE
  end
end

View File

@ -91,4 +91,20 @@ describe 'layouts/nav/sidebar/_admin' do
it_behaves_like 'page has active tab', 'Monitoring'
it_behaves_like 'page has active sub tab', 'Background Jobs'
end
context 'on settings' do
before do
render
end
it 'includes General link' do
expect(rendered).to have_link('General', href: general_admin_application_settings_path)
end
context 'when GitLab FOSS' do
it 'does not include Templates link' do
expect(rendered).not_to have_link('Templates', href: '/admin/application_settings/templates')
end
end
end
end

View File

@ -44,11 +44,35 @@ describe WaitableWorker do
expect(worker.counter).to eq(6)
end
it 'runs > 3 jobs using sidekiq' do
it 'runs > 3 jobs using sidekiq and a waiter key' do
expect(worker).to receive(:bulk_perform_async)
.with([[1, anything], [2, anything], [3, anything], [4, anything]])
worker.bulk_perform_and_wait([[1], [2], [3], [4]])
end
it 'runs > 10 * timeout jobs using sidekiq and no waiter key' do
arguments = 1.upto(21).map { |i| [i] }
expect(worker).to receive(:bulk_perform_async).with(arguments)
worker.bulk_perform_and_wait(arguments, timeout: 2)
end
context 'when the skip_job_waiter_for_large_batches flag is disabled' do
before do
stub_feature_flags(skip_job_waiter_for_large_batches: false)
end
it 'runs jobs over 10 * the timeout using a waiter key' do
arguments = 1.upto(21).map { |i| [i] }
arguments_with_waiter = arguments.map { |arg| arg + [anything] }
expect(worker).to receive(:bulk_perform_async).with(arguments_with_waiter)
worker.bulk_perform_and_wait(arguments, timeout: 2)
end
end
end
describe '.bulk_perform_inline' do