Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-05-19 15:10:40 +00:00
parent f304336f5e
commit a6508d0028
65 changed files with 1339 additions and 415 deletions

View File

@ -3,6 +3,7 @@ import { pickBy } from 'lodash';
import { mapActions } from 'vuex';
import { updateHistory, setUrlParams } from '~/lib/utils/url_utility';
import { __ } from '~/locale';
import { FILTERED_SEARCH_TERM } from '~/vue_shared/components/filtered_search_bar/constants';
import FilteredSearch from '~/vue_shared/components/filtered_search_bar/filtered_search_bar_root.vue';
export default {
@ -104,7 +105,9 @@ export default {
},
getFilterParams(filters = []) {
const notFilters = filters.filter((item) => item.value.operator === '!=');
const equalsFilters = filters.filter((item) => item.value.operator === '=');
const equalsFilters = filters.filter(
(item) => item?.value?.operator === '=' || item.type === FILTERED_SEARCH_TERM,
);
return { ...this.generateParams(equalsFilters), not: { ...this.generateParams(notFilters) } };
},

View File

@ -1,3 +1,4 @@
import { getParameterValues } from '~/lib/utils/url_utility';
import { __, n__ } from '~/locale';
import {
PARALLEL_DIFF_VIEW_TYPE,
@ -172,4 +173,6 @@ export function suggestionCommitMessage(state, _, rootState) {
}
export const isVirtualScrollingEnabled = (state) =>
!state.viewDiffsFileByFile && window.gon?.features?.diffsVirtualScrolling;
!state.viewDiffsFileByFile &&
(window.gon?.features?.diffsVirtualScrolling ||
getParameterValues('virtual_scrolling')[0] === 'true');

View File

@ -43,7 +43,7 @@ module GroupsHelper
end
def group_information_title(group)
if Feature.enabled?(:sidebar_refactor, current_user)
if Feature.enabled?(:sidebar_refactor, current_user, default_enabled: :yaml)
group.subgroup? ? _('Subgroup information') : _('Group information')
else
group.subgroup? ? _('Subgroup overview') : _('Group overview')

View File

@ -12,7 +12,7 @@ module NavHelper
def page_with_sidebar_class
class_name = page_gutter_class
class_name << 'page-with-contextual-sidebar' if defined?(@left_sidebar) && @left_sidebar
class_name << 'sidebar-refactoring' if Feature.enabled?(:sidebar_refactor, current_user)
class_name << 'sidebar-refactoring' if Feature.enabled?(:sidebar_refactor, current_user, default_enabled: :yaml)
class_name << 'page-with-icon-sidebar' if collapsed_sidebar? && @left_sidebar
class_name -= ['right-sidebar-expanded'] if defined?(@right_sidebar) && !@right_sidebar

View File

@ -612,12 +612,12 @@ module ProjectsHelper
end
def settings_container_registry_expiration_policy_available?(project)
Feature.disabled?(:sidebar_refactor, current_user) &&
Feature.disabled?(:sidebar_refactor, current_user, default_enabled: :yaml) &&
can_destroy_container_registry_image?(current_user, project)
end
def settings_packages_and_registries_enabled?(project)
Feature.enabled?(:sidebar_refactor, current_user) &&
Feature.enabled?(:sidebar_refactor, current_user, default_enabled: :yaml) &&
can_destroy_container_registry_image?(current_user, project)
end

View File

@ -38,6 +38,7 @@ module Ci
has_one :deployment, as: :deployable, class_name: 'Deployment'
has_one :pending_state, class_name: 'Ci::BuildPendingState', inverse_of: :build
has_one :queuing_entry, class_name: 'Ci::PendingBuild', foreign_key: :build_id
has_many :trace_sections, class_name: 'Ci::BuildTraceSection'
has_many :trace_chunks, class_name: 'Ci::BuildTraceChunk', foreign_key: :build_id, inverse_of: :build
has_many :report_results, class_name: 'Ci::BuildReportResult', inverse_of: :build
@ -305,12 +306,20 @@ module Ci
end
end
after_transition any => [:pending] do |build|
# rubocop:disable CodeReuse/ServiceClass
after_transition any => [:pending] do |build, transition|
Ci::UpdateBuildQueueService.new.push(build, transition)
build.run_after_commit do
BuildQueueWorker.perform_async(id)
end
end
after_transition pending: any do |build, transition|
Ci::UpdateBuildQueueService.new.pop(build, transition)
end
# rubocop:enable CodeReuse/ServiceClass
after_transition pending: :running do |build|
build.deployment&.run
@ -1067,6 +1076,14 @@ module Ci
options.dig(:allow_failure_criteria, :exit_codes).present?
end
def all_queuing_entries
# We can have only one queuing entry, because there is a unique index on
# `build_id`, but we need a relation to remove this single queuing entry
# more efficiently in a single statement without actually load data.
::Ci::PendingBuild.where(build_id: self.id)
end
protected
def run_status_commit_hooks!

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true
module Ci
class PendingBuild < ApplicationRecord
extend Gitlab::Ci::Model
belongs_to :project
belongs_to :build, class_name: 'Ci::Build'
def self.upsert_from_build!(build)
entry = self.new(build: build, project: build.project)
entry.validate!
self.upsert(entry.attributes.compact, returning: %w[build_id], unique_by: :build_id)
end
end
end

View File

@ -2,13 +2,62 @@
module Ci
class UpdateBuildQueueService
def execute(build, metrics = ::Gitlab::Ci::Queue::Metrics)
tick_for(build, build.project.all_runners, metrics)
InvalidQueueTransition = Class.new(StandardError)
attr_reader :metrics
def initialize(metrics = ::Gitlab::Ci::Queue::Metrics)
@metrics = metrics
end
##
# Add a build to the pending builds queue
#
def push(build, transition)
return unless maintain_pending_builds_queue?(build)
raise InvalidQueueTransition unless transition.to == 'pending'
transition.within_transaction do
result = ::Ci::PendingBuild.upsert_from_build!(build)
unless result.empty?
metrics.increment_queue_operation(:build_queue_push)
result.rows.dig(0, 0)
end
end
end
##
# Remove a build from the pending builds queue
#
def pop(build, transition)
return unless maintain_pending_builds_queue?(build)
raise InvalidQueueTransition unless transition.from == 'pending'
transition.within_transaction do
removed = build.all_queuing_entries.delete_all
if removed > 0
metrics.increment_queue_operation(:build_queue_pop)
build.id
end
end
end
##
# Unblock runner associated with given project / build
#
def tick(build)
tick_for(build, build.project.all_runners)
end
private
def tick_for(build, runners, metrics)
def tick_for(build, runners)
runners = runners.with_recent_runner_queue
runners = runners.with_tags if Feature.enabled?(:ci_preload_runner_tags, default_enabled: :yaml)
@ -20,5 +69,9 @@ module Ci
runner.pick_build!(build)
end
end
def maintain_pending_builds_queue?(build)
Feature.enabled?(:ci_pending_builds_queue_maintain, build.project, default_enabled: :yaml)
end
end
end

View File

@ -18,7 +18,7 @@
= nav_link(path: paths, unless: -> { current_path?('groups/contribution_analytics#show') }, html_options: { class: 'home' }) do
= link_to group_path(@group) do
.nav-icon-container
- sprite = Feature.enabled?(:sidebar_refactor, current_user) ? 'group' : 'home'
- sprite = Feature.enabled?(:sidebar_refactor, current_user, default_enabled: :yaml) ? 'group' : 'home'
= sprite_icon(sprite)
%span.nav-item-name
= group_information_title(@group)
@ -30,7 +30,7 @@
= group_information_title(@group)
%li.divider.fly-out-top-item
- if Feature.disabled?(:sidebar_refactor, current_user)
- if Feature.disabled?(:sidebar_refactor, current_user, default_enabled: :yaml)
= nav_link(path: ['groups#show', 'groups#details', 'groups#subgroups'], html_options: { class: 'home' }) do
= link_to details_group_path(@group), title: _('Group details') do
%span

View File

@ -14,7 +14,7 @@ class BuildQueueWorker # rubocop:disable Scalability/IdempotentWorker
# rubocop: disable CodeReuse/ActiveRecord
def perform(build_id)
Ci::Build.find_by(id: build_id).try do |build|
Ci::UpdateBuildQueueService.new.execute(build)
Ci::UpdateBuildQueueService.new.tick(build)
end
end
# rubocop: enable CodeReuse/ActiveRecord

View File

@ -0,0 +1,5 @@
---
title: Accelerate builds queuing using a denormalized accelerated table
merge_request: 61581
author:
type: performance

View File

@ -0,0 +1,5 @@
---
title: Compress oversized Sidekiq job payload before dispatching into Redis
merge_request: 61667
author:
type: added

View File

@ -0,0 +1,8 @@
---
name: ci_pending_builds_queue_maintain
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61581
rollout_issue_url:
milestone: '13.12'
type: development
group: group::continuous integration
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: load_balancing_for_bulk_cron_workers
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/58345
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/326721
milestone: '13.11'
type: development
group: group::global search
default_enabled: false

View File

@ -6,7 +6,7 @@ product_stage: secure
product_group: group::static analysis
product_category: dependency_scanning
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: code_quality
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: accessibility_testing
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: testing
value_type: number
status: implemented
status: data_available
milestone: "13.11"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/59316
time_frame: 28d

View File

@ -6,7 +6,7 @@ product_stage: secure
product_group: group::static analysis
product_category: dependency_scanning
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: code_quality
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: accessibility_testing
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/57133
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: verify
product_group: group::testing
product_category: testing
value_type: number
status: implemented
status: data_available
milestone: "13.11"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/59316
time_frame: 7d

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: all

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: all

View File

@ -6,7 +6,7 @@ product_stage: configure
product_group: group::configure
product_category: infrastructure_as_code
value_type: number
status: implemented
status: data_available
milestone: '13.11'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55018
time_frame: all

View File

@ -6,7 +6,7 @@ product_stage: create
product_group: group::gitaly
product_category: gitaly
value_type: number
status: implemented
status: data_available
milestone: "13.11"
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47040
time_frame: none

View File

@ -0,0 +1,15 @@
# frozen_string_literal: true
class AddPendingBuildsTable < ActiveRecord::Migration[6.0]
def up
create_table :ci_pending_builds do |t|
t.references :build, index: { unique: true }, null: false, foreign_key: { to_table: :ci_builds, on_delete: :cascade }
t.references :project, index: true, null: false, foreign_key: { on_delete: :cascade }
t.datetime_with_timezone :created_at, null: false, default: -> { 'NOW()' }
end
end
def down
drop_table :ci_pending_builds
end
end

View File

@ -0,0 +1 @@
1acc251417e3230c9b0a46e294cb9a6e8768f31978b8d4f439101f8de4e9269e

View File

@ -10775,6 +10775,22 @@ CREATE SEQUENCE ci_namespace_monthly_usages_id_seq
ALTER SEQUENCE ci_namespace_monthly_usages_id_seq OWNED BY ci_namespace_monthly_usages.id;
CREATE TABLE ci_pending_builds (
id bigint NOT NULL,
build_id bigint NOT NULL,
project_id bigint NOT NULL,
created_at timestamp with time zone DEFAULT now() NOT NULL
);
CREATE SEQUENCE ci_pending_builds_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE ci_pending_builds_id_seq OWNED BY ci_pending_builds.id;
CREATE TABLE ci_pipeline_artifacts (
id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
@ -19578,6 +19594,8 @@ ALTER TABLE ONLY ci_job_variables ALTER COLUMN id SET DEFAULT nextval('ci_job_va
ALTER TABLE ONLY ci_namespace_monthly_usages ALTER COLUMN id SET DEFAULT nextval('ci_namespace_monthly_usages_id_seq'::regclass);
ALTER TABLE ONLY ci_pending_builds ALTER COLUMN id SET DEFAULT nextval('ci_pending_builds_id_seq'::regclass);
ALTER TABLE ONLY ci_pipeline_artifacts ALTER COLUMN id SET DEFAULT nextval('ci_pipeline_artifacts_id_seq'::regclass);
ALTER TABLE ONLY ci_pipeline_chat_data ALTER COLUMN id SET DEFAULT nextval('ci_pipeline_chat_data_id_seq'::regclass);
@ -20757,6 +20775,9 @@ ALTER TABLE ONLY ci_job_variables
ALTER TABLE ONLY ci_namespace_monthly_usages
ADD CONSTRAINT ci_namespace_monthly_usages_pkey PRIMARY KEY (id);
ALTER TABLE ONLY ci_pending_builds
ADD CONSTRAINT ci_pending_builds_pkey PRIMARY KEY (id);
ALTER TABLE ONLY ci_pipeline_artifacts
ADD CONSTRAINT ci_pipeline_artifacts_pkey PRIMARY KEY (id);
@ -22673,6 +22694,10 @@ CREATE UNIQUE INDEX index_ci_job_variables_on_key_and_job_id ON ci_job_variables
CREATE UNIQUE INDEX index_ci_namespace_monthly_usages_on_namespace_id_and_date ON ci_namespace_monthly_usages USING btree (namespace_id, date);
CREATE UNIQUE INDEX index_ci_pending_builds_on_build_id ON ci_pending_builds USING btree (build_id);
CREATE INDEX index_ci_pending_builds_on_project_id ON ci_pending_builds USING btree (project_id);
CREATE INDEX index_ci_pipeline_artifacts_failed_verification ON ci_pipeline_artifacts USING btree (verification_retry_at NULLS FIRST) WHERE (verification_state = 3);
CREATE INDEX index_ci_pipeline_artifacts_needs_verification ON ci_pipeline_artifacts USING btree (verification_state) WHERE ((verification_state = 0) OR (verification_state = 3));
@ -26444,6 +26469,9 @@ ALTER TABLE ONLY vulnerability_feedback
ALTER TABLE ONLY user_custom_attributes
ADD CONSTRAINT fk_rails_47b91868a8 FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_pending_builds
ADD CONSTRAINT fk_rails_480669c3b3 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_pipeline_artifacts
ADD CONSTRAINT fk_rails_4a70390ca6 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -26690,6 +26718,9 @@ ALTER TABLE ONLY list_user_preferences
ALTER TABLE ONLY project_custom_attributes
ADD CONSTRAINT fk_rails_719c3dccc5 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_pending_builds
ADD CONSTRAINT fk_rails_725a2644a3 FOREIGN KEY (build_id) REFERENCES ci_builds(id) ON DELETE CASCADE;
ALTER TABLE ONLY security_findings
ADD CONSTRAINT fk_rails_729b763a54 FOREIGN KEY (scanner_id) REFERENCES vulnerability_scanners(id) ON DELETE CASCADE;

View File

@ -722,18 +722,21 @@ Example response:
}
```
### Disabling the results limit
### Disable the results limit **(FREE SELF)**
The 100 results limit can be disabled if it breaks integrations developed using GitLab
12.4 and earlier.
The 100 results limit can break integrations developed using GitLab 12.4 and earlier.
To disable the limit while migrating to using the [list a group's projects](#list-a-groups-projects) endpoint, ask a GitLab administrator
with Rails console access to run the following command:
For GitLab 12.5 to GitLab 13.12, the limit can be disabled while migrating to using the
[list a group's projects](#list-a-groups-projects) endpoint.
Ask a GitLab administrator with Rails console access to run the following command:
```ruby
Feature.disable(:limit_projects_in_groups_api)
```
For GitLab 14.0 and later, the [limit cannot be disabled](https://gitlab.com/gitlab-org/gitlab/-/issues/257829).
## New group
Creates a new project group. Available only for users who can create groups.
@ -918,19 +921,21 @@ Example response:
}
```
### Disabling the results limit
### Disable the results limit **(FREE SELF)**
The 100 results limit can be disabled if it breaks integrations developed using GitLab
12.4 and earlier.
The 100 results limit can break integrations developed using GitLab 12.4 and earlier.
To disable the limit while migrating to using the
[list a group's projects](#list-a-groups-projects) endpoint, ask a GitLab administrator
with Rails console access to run the following command:
For GitLab 12.5 to GitLab 13.12, the limit can be disabled while migrating to using the
[list a group's projects](#list-a-groups-projects) endpoint.
Ask a GitLab administrator with Rails console access to run the following command:
```ruby
Feature.disable(:limit_projects_in_groups_api)
```
For GitLab 14.0 and later, the [limit cannot be disabled](https://gitlab.com/gitlab-org/gitlab/-/issues/257829).
### Options for `shared_runners_setting`
The `shared_runners_setting` attribute determines whether shared runners are enabled for a group's subgroups and projects.

View File

@ -194,8 +194,10 @@ NOTE:
For a detailed flow diagram, see the [RFC specification](https://tools.ietf.org/html/rfc6749#section-4.2).
WARNING:
The Implicit grant flow is inherently insecure. The IETF plans to remove it in
[OAuth 2.1](https://oauth.net/2.1/).
Implicit grant flow is inherently insecure and the IETF has removed it in [OAuth 2.1](https://oauth.net/2.1/).
For this reason, [support for it is deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/288516).
In GitLab 14.0, new applications can't be created using it. In GitLab 14.4, support for it is
scheduled to be removed for existing applications.
We recommend that you use [Authorization code with PKCE](#authorization-code-with-proof-key-for-code-exchange-pkce) instead. If you choose to use Implicit flow, be sure to verify the
`application id` (or `client_id`) associated with the access token before granting

View File

@ -39,9 +39,20 @@ so when in doubt don't use `aria-*`, `role`, and `tabindex` and stick with seman
- [Clickable icons](#icons-that-are-clickable) are buttons, that is, `<gl-button icon="close" />` is used and not `<gl-icon />`.
- Icon-only buttons have an `aria-label`.
- Interactive elements can be [accessed with the Tab key](#support-keyboard-only-use) and have a visible focus state.
- Elements with [tooltips](#tooltips) are focusable using the Tab key.
- Are any `role`, `tabindex` or `aria-*` attributes unnecessary?
- Can any `div` or `span` elements be replaced with a more semantic [HTML element](https://developer.mozilla.org/en-US/docs/Web/HTML/Element) like `p`, `button`, or `time`?
## Provide a good document outline
[Headings are the primary mechanism used by screen reader users to navigate content](https://webaim.org/projects/screenreadersurvey8/#finding).
Therefore, the structure of headings on a page should make sense, like a good table of contents.
We should ensure that:
- There is only one `h1` element on the page.
- Heading levels are not skipped.
- Heading levels are nested correctly.
## Provide accessible names for screen readers
To provide markup with accessible names, ensure every:
@ -257,6 +268,9 @@ Image examples:
<!-- SVGs implicitly have a graphics role so if it is semantically an image we should apply `role="img"` -->
<svg role="img" :alt="__('A description of the image')" />
<!-- A decorative image, hidden from screen readers -->
<img :src="imagePath" :alt="" />
```
#### Buttons and links with descriptive accessible names
@ -275,6 +289,14 @@ Buttons and links should have accessible names that are descriptive enough to be
<gl-link :href="url">{{ __("GitLab's accessibility page") }}</gl-link>
```
#### Links styled like buttons
Links can be styled like buttons using `GlButton`.
```html
<gl-button :href="url">{{ __('Link styled as a button') }}</gl-button>
```
## Role
In general, avoid using `role`.
@ -336,7 +358,7 @@ Once the markup is semantically complete, use CSS to update it to its desired vi
<div role="button" tabindex="0" @click="expand">Expand</div>
<!-- good -->
<gl-button @click="expand">Expand</gl-button>
<gl-button class="gl-p-0!" category="tertiary" @click="expand">Expand</gl-button>
```
### Do not use `tabindex="0"` on interactive elements
@ -423,6 +445,30 @@ Icons that are clickable are semantically buttons, so they should be rendered as
<gl-button icon="close" category="tertiary" :aria-label="__('Close')" @click="handleClick" />
```
## Tooltips
When adding tooltips, we must ensure that the element with the tooltip can receive focus so keyboard users can see the tooltip.
If the element is a static one, such as an icon, we can enclose it in a button, which already is
focusable, so we don't have to add `tabindex=0` to the icon.
The following code snippet is a good example of an icon with a tooltip.
- It is automatically focusable, as it is a button.
- It is given an accessible name with `aria-label`, as it is a button with no text.
- We can use the `gl-hover-bg-transparent!` class if we don't want the button's background to become gray on hover.
- We can use the `gl-p-0!` class to remove the button padding, if needed.
```html
<gl-button
v-gl-tooltip
class="gl-hover-bg-transparent! gl-p-0!"
icon="warning"
category="tertiary"
:title="tooltipText"
:aria-label="__('Warning')"
/>
```
## Hiding elements
Use the following table to hide elements from users, when appropriate.
@ -478,5 +524,3 @@ We have two options for Web accessibility testing:
- [The A11Y Project](https://www.a11yproject.com/) is a good resource for accessibility
- [Awesome Accessibility](https://github.com/brunopulis/awesome-a11y)
is a compilation of accessibility-related material
- You can read [Chrome Accessibility Developer Tools'](https://github.com/GoogleChrome/accessibility-developer-tools)
rules on its [Audit Rules page](https://github.com/GoogleChrome/accessibility-developer-tools/wiki/Audit-Rules)

View File

@ -1126,54 +1126,6 @@ Status: `data_available`
Tiers: `free`
### `counts.g_project_management_users_checking_epic_task_monthly`
Counts of MAU checking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210421080207_g_project_management_users_checking_epic_task_monthly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `counts.g_project_management_users_checking_epic_task_weekly`
Counts of WAU checking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210421075943_g_project_management_users_checking_epic_task_weekly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `counts.g_project_management_users_unchecking_epic_task_monthly`
Counts of MAU unchecking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210421102516_g_project_management_users_unchecking_epic_task_monthly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `counts.g_project_management_users_unchecking_epic_task_weekly`
Counts of WAU unchecking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210421102812_g_project_management_users_unchecking_epic_task_weekly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `counts.geo_event_log_max_id`
Number of replication events on a Geo primary
@ -4086,7 +4038,7 @@ Total count of Terraform Module packages delete events
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -4098,7 +4050,7 @@ Total count of pull Terraform Module packages events
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -4110,7 +4062,7 @@ Total count of push Terraform Module packages events
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -7938,7 +7890,7 @@ Counts visits to DevOps Adoption page per month
Group: `group::optimize`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -7950,7 +7902,7 @@ Counts visits to DevOps Adoption page per week
Group: `group::optimize`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10338,7 +10290,7 @@ Number of distinct users authorized via deploy token creating Terraform Module p
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -10350,7 +10302,7 @@ Number of distinct users authorized via deploy token creating Terraform Module p
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -10690,6 +10642,30 @@ Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
### `redis_hll_counters.epic_boards_usage.epic_boards_usage_total_unique_counts_monthly`
Missing description
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210507171840_epic_boards_usage_total_unique_counts_monthly.yml)
Group: ``
Status: `implemented`
Tiers: `ultimate`
### `redis_hll_counters.epic_boards_usage.epic_boards_usage_total_unique_counts_weekly`
Missing description
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210507171838_epic_boards_usage_total_unique_counts_weekly.yml)
Group: ``
Status: `implemented`
Tiers: `ultimate`
### `redis_hll_counters.epic_boards_usage.g_project_management_users_creating_epic_boards_monthly`
Count of MAU creating epic boards
@ -10770,7 +10746,7 @@ Total monthly users count for epics_usage
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10782,7 +10758,7 @@ Total weekly users count for epics_usage
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10794,7 +10770,7 @@ Counts of MAU closing epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10866,7 +10842,7 @@ Count of MAU destroying epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10878,7 +10854,7 @@ Count of WAU destroying epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10890,7 +10866,7 @@ Count of MAU adding issues to epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10902,7 +10878,7 @@ Count of WAU adding issues to epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10914,7 +10890,7 @@ Counts of MAU moving epic issues between projects
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10926,7 +10902,7 @@ Counts of WAU moving epic issues between projects
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10938,7 +10914,7 @@ Count of MAU removing issues from epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10950,7 +10926,7 @@ Counts of WAU removing issues from epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10962,7 +10938,7 @@ Counts of MAU closing epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10974,7 +10950,7 @@ Counts of WAU re-opening epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10986,7 +10962,7 @@ Count of MAU chaging the epic lables
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -10998,7 +10974,7 @@ Count of WAU chaging the epic lables
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11010,7 +10986,7 @@ Count of MAU promoting issues to epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11022,7 +10998,7 @@ Counts of WAU promoting issues to epics
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11082,7 +11058,7 @@ Counts of MAU destroying epic notes
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11094,7 +11070,7 @@ Counts of WAU destroying epic notes
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11130,7 +11106,7 @@ Count of MAU making epics confidential
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11142,7 +11118,7 @@ Count of WAU making epics confidential
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11154,7 +11130,7 @@ Counts of MAU setting epic due date as inherited
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11166,7 +11142,7 @@ Counts of WAU setting epic due date as fixed
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11178,7 +11154,7 @@ Counts of MAU setting epic due date as inherited
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11190,7 +11166,7 @@ Counts of WAU setting epic due date as inherited
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11202,7 +11178,7 @@ Counts of MAU setting epic start date as fixed
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11214,7 +11190,7 @@ Counts of WAU setting epic start date as fixed
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11226,7 +11202,7 @@ Counts of MAU setting epic start date as inherited
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11238,7 +11214,7 @@ Counts of WAU setting epic start date as inherited
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11250,7 +11226,7 @@ Count of MAU making epics visible
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11262,7 +11238,7 @@ Count of WAU making epics visible
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11274,7 +11250,7 @@ Counts of MAU changing epic descriptions
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11286,7 +11262,7 @@ Counts of WAU changing epic descriptions
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11298,7 +11274,7 @@ Counts of MAU updating epic notes
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11310,7 +11286,7 @@ Counts of WAU updating epic notes
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11346,7 +11322,7 @@ Counts of MAU changing epic titles
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11358,7 +11334,7 @@ Counts of WAU changing epic titles
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11370,7 +11346,7 @@ Counts of MAU manually updating fixed due date
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11382,7 +11358,7 @@ Counts of WAU manually updating fixed due date
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11394,7 +11370,7 @@ Counts of MAU manually updating fixed start date
Group: `group::product planning`
Status: `implemented`
Status: `data_available`
Tiers: `premium`, `ultimate`
@ -11406,6 +11382,54 @@ Counts of WAU manually updating fixed start date
Group: `group::product planning`
Status: `data_available`
Tiers: `premium`, `ultimate`
### `redis_hll_counters.epics_usage.project_management_users_checking_epic_task_monthly`
Counts of MAU checking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210421080207_g_project_management_users_checking_epic_task_monthly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `redis_hll_counters.epics_usage.project_management_users_checking_epic_task_weekly`
Counts of WAU checking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210421075943_g_project_management_users_checking_epic_task_weekly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `redis_hll_counters.epics_usage.project_management_users_unchecking_epic_task_monthly`
Counts of MAU unchecking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210421102516_g_project_management_users_unchecking_epic_task_monthly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `redis_hll_counters.epics_usage.project_management_users_unchecking_epic_task_weekly`
Counts of WAU unchecking epic task
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210421102812_g_project_management_users_unchecking_epic_task_weekly.yml)
Group: `group::product planning`
Status: `implemented`
Tiers: `premium`, `ultimate`
@ -12768,15 +12792,15 @@ Tiers: `free`
### `redis_hll_counters.pipeline_authoring.o_pipeline_authoring_unique_users_committing_ciconfigfile_weekly`
Missing description
Monthly unique user count doing commits which contains the CI config file
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_7d/20210216184301_o_pipeline_authoring_unique_users_committing_ciconfigfile_weekly.yml)
Group: ``
Group: `group::pipeline authoring`
Status: `data_available`
Tiers:
Tiers: `free`, `premium`, `ultimate`
### `redis_hll_counters.pipeline_authoring.o_pipeline_authoring_unique_users_pushing_mr_ciconfigfile_monthly`
@ -14562,7 +14586,7 @@ Count of expanding the security report widget
Group: `group::static analysis`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -14574,7 +14598,7 @@ Count of expanding the security report widget
Group: `group::static analysis`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -14922,7 +14946,7 @@ Unique users that expand the test summary merge request widget by month
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -14934,7 +14958,7 @@ Unique users that expand the test summary merge request widget by week
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15018,7 +15042,7 @@ Count of expanding the accessibility report widget
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15030,7 +15054,7 @@ Count of expanding the accessibility report widget
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15042,7 +15066,7 @@ Count of expanding the code quality widget
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15054,7 +15078,7 @@ Count of expanding the code quality widget
Group: `group::testing`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15354,7 +15378,7 @@ Number of distinct users creating Terraform Module packages in recent 28 days
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15366,7 +15390,7 @@ Number of distinct users creating Terraform Module packages in recent 7 days
Group: `group::configure`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -15474,7 +15498,7 @@ Gitaly application performance
Group: `group::gitaly`
Status: `implemented`
Status: `data_available`
Tiers: `free`, `premium`, `ultimate`
@ -18074,6 +18098,18 @@ Status: `data_available`
Tiers: `free`
### `usage_activity_by_stage_monthly.manage.custom_compliance_frameworks`
Monthly count of all custom compliance framework labels
[YAML definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/config/metrics/counts_28d/20210507165054_custom_compliance_frameworks.yml)
Group: `compliance`
Status: `implemented`
Tiers: `premium`, `ultimate`
### `usage_activity_by_stage_monthly.manage.events`
Missing description

View File

@ -5,7 +5,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
type: reference
---
# Requirements **(FREE SELF)**
# Installation requirements **(FREE SELF)**
This page includes useful information on the supported Operating Systems as well
as the hardware requirements that are needed to install and use GitLab.

View File

@ -5,9 +5,9 @@ info: To determine the technical writer assigned to the Stage/Group associated w
type: howto, reference
---
# Email from GitLab **(STARTER ONLY)**
# Email from GitLab **(PREMIUM SELF)**
GitLab provides a simple tool to administrators for emailing all users, or users of
GitLab provides a tool to administrators for emailing all users, or users of
a chosen group or project, right from the Admin Area. Users receive the email
at their primary email address.

View File

@ -90,6 +90,35 @@ default branch or commit SHA when the project is configured to have a private
repository. This is by design, as badges are intended to be used publicly. Avoid
using these placeholders if the information is sensitive.
## Use custom badge images
Use custom badge images in a project or a group if you want to use badges other than the default
ones.
Prerequisites:
- A valid URL that points directly to the desired image for the badge.
If the image is located in a GitLab repository, use the raw link to the image.
Using placeholders, here is an example badge image URL referring to a raw image at the root of a repository:
```plaintext
https://gitlab.example.com/<project_path>/-/raw/<default_branch>/my-image.svg
```
To add a new badge to a group or project with a custom image:
1. Go to your group or project and select **Settings > General**.
1. Expand **Badges**.
1. Under **Name**, enter the name for the badge.
1. Under **Link**, enter the URL that the badge should point to.
1. Under **Badge image URL**, enter the URL that points directly to the custom image that should be
displayed.
1. Select **Add badge**.
To learn how to use custom images generated via a pipeline, see our documentation on
[accessing the latest job artifacts by URL](../../ci/pipelines/job_artifacts.md#access-the-latest-job-artifacts-by-url).
## API
You can also configure badges via the GitLab API. As in the settings, there is

View File

@ -20,6 +20,8 @@ module Gitlab
:build_can_pick,
:build_not_pick,
:build_not_pending,
:build_queue_push,
:build_queue_pop,
:build_temporary_locked,
:build_conflict_lock,
:build_conflict_exception,
@ -77,11 +79,7 @@ module Gitlab
# rubocop: enable CodeReuse/ActiveRecord
def increment_queue_operation(operation)
if !Rails.env.production? && !OPERATION_COUNTERS.include?(operation)
raise ArgumentError, "unknown queue operation: #{operation}"
end
self.class.queue_operations_total.increment(operation: operation)
self.class.increment_queue_operation(operation)
end
def observe_queue_depth(queue, size)
@ -121,6 +119,14 @@ module Gitlab
result
end
def self.increment_queue_operation(operation)
if !Rails.env.production? && !OPERATION_COUNTERS.include?(operation)
raise ArgumentError, "unknown queue operation: #{operation}"
end
queue_operations_total.increment(operation: operation)
end
def self.observe_active_runners(runners_proc)
return unless Feature.enabled?(:gitlab_ci_builds_queuing_metrics, default_enabled: false)

View File

@ -70,6 +70,8 @@ ee:
- :award_emoji
- events:
- :push_event_payload
- label_links:
- :label
- notes:
- :author
- :award_emoji

View File

@ -72,6 +72,8 @@ ee:
- :award_emoji
- events:
- :push_event_payload
- label_links:
- :label
- notes:
- :author
- :award_emoji

View File

@ -14,6 +14,9 @@ module Gitlab
job = job.except('error_backtrace', 'error_class', 'error_message')
job['class'] = job.delete('wrapped') if job['wrapped'].present?
job['job_size_bytes'] = Sidekiq.dump_json(job['args']).bytesize
job['args'] = ['[COMPRESSED]'] if ::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor.compressed?(job)
# Add process id params
job['pid'] = ::Process.pid

View File

@ -55,8 +55,6 @@ module Gitlab
scheduling_latency_s = ::Gitlab::InstrumentationHelper.queue_duration_for_job(payload)
payload['scheduling_latency_s'] = scheduling_latency_s if scheduling_latency_s
payload['job_size_bytes'] = Sidekiq.dump_json(job).bytesize
payload
end

View File

@ -9,6 +9,8 @@ module Gitlab
# eg: `config.server_middleware(&Gitlab::SidekiqMiddleware.server_configurator)`
def self.server_configurator(metrics: true, arguments_logger: true, memory_killer: true)
lambda do |chain|
# Size limiter should be placed at the top
chain.add ::Gitlab::SidekiqMiddleware::SizeLimiter::Server
chain.add ::Gitlab::SidekiqMiddleware::Monitor
chain.add ::Gitlab::SidekiqMiddleware::ServerMetrics if metrics
chain.add ::Gitlab::SidekiqMiddleware::ArgumentsLogger if arguments_logger

View File

@ -0,0 +1,52 @@
# frozen_string_literal: true

module Gitlab
  module SidekiqMiddleware
    module SizeLimiter
      # Compresses and decompresses the `args` portion of a Sidekiq job
      # payload so that oversized jobs can still fit within the size
      # limiter's byte budget.
      class Compressor
        # Raised when a compressed job carries an unexpected argument list,
        # which indicates another middleware mutated the payload after
        # compression.
        PayloadDecompressionConflictError = Class.new(StandardError)
        # Raised when the compressed argument blob cannot be decoded or
        # inflated back into the original argument list.
        PayloadDecompressionError = Class.new(StandardError)

        # Level 5 is a good trade-off between space and time
        # https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1054#note_568129605
        COMPRESS_LEVEL = 5
        ORIGINAL_SIZE_KEY = 'original_job_size_bytes'
        COMPRESSED_KEY = 'compressed'

        # Returns truthy when the job hash carries the compression marker.
        # Accepts nil (returns nil) so callers don't need a presence check.
        def self.compressed?(job)
          job&.key?(COMPRESSED_KEY)
        end

        # Deflates and Base64-encodes the serialized argument list, then
        # replaces the job's `args` with a single-element array holding the
        # encoded blob. Records the original size and sets the marker
        # consumed by `.compressed?`.
        #
        # job      - the mutable Sidekiq job payload Hash
        # job_args - the JSON-serialized argument list String
        #
        # Returns the encoded, compressed argument String.
        def self.compress(job, job_args)
          compressed_args = Base64.strict_encode64(Zlib::Deflate.deflate(job_args, COMPRESS_LEVEL))

          job[COMPRESSED_KEY] = true
          job[ORIGINAL_SIZE_KEY] = job_args.bytesize
          job['args'] = [compressed_args]

          compressed_args
        end

        # Restores a job compressed by `.compress` in place: decodes and
        # inflates the argument blob and removes the bookkeeping keys.
        # No-op for jobs that were never compressed.
        def self.decompress(job)
          return unless compressed?(job)

          validate_args!(job)

          job.except!(ORIGINAL_SIZE_KEY, COMPRESSED_KEY)
          job['args'] = Sidekiq.load_json(Zlib::Inflate.inflate(Base64.strict_decode64(job['args'].first)))
        rescue Zlib::Error, ArgumentError
          # ArgumentError covers payloads that are not valid strict Base64;
          # Zlib::Error covers corrupt or truncated deflate streams.
          raise PayloadDecompressionError, 'Fail to decompress Sidekiq job payload'
        end

        # Ensures a compressed job still has exactly one argument (the
        # encoded blob). Anything else means another middleware interfered
        # with the payload after compression.
        def self.validate_args!(job)
          return unless job['args'] && job['args'].length != 1

          exception = PayloadDecompressionConflictError.new(
            'Sidekiq argument list should include 1 argument. ' \
            'This means that there is another middleware interfering with the job payload. ' \
            'That conflicts with the payload compressor'
          )
          ::Gitlab::ErrorTracking.track_and_raise_exception(exception)
        end
      end
    end
  end
end

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true

module Gitlab
  module SidekiqMiddleware
    module SizeLimiter
      # Sidekiq server middleware that restores compressed job payloads
      # before the job reaches its worker.
      class Server
        # Decompression happens unconditionally — independent of the
        # limiter mode or size limit — so that compressed payloads never
        # linger in queues the workers would be unable to process.
        def call(worker, job, queue, &block)
          Compressor.decompress(job)

          block.call
        end
      end
    end
  end
end

View File

@ -3,63 +3,58 @@
module Gitlab
module SidekiqMiddleware
module SizeLimiter
# Validate a Sidekiq job payload limit based on current configuration.
# Handle a Sidekiq job payload limit based on current configuration.
# This validator pulls the configuration from the environment variables:
#
# - GITLAB_SIDEKIQ_SIZE_LIMITER_MODE: the current mode of the size
# limiter. This must be either `track` or `raise`.
#
# limiter. This must be either `track` or `compress`.
# - GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES: the
# threshold before the input job payload is compressed.
# - GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES: the size limit in bytes.
#
# If the size of job payload after serialization exceeds the limit, an
# error is tracked raised adhering to the mode.
# In track mode, if a job payload limit exceeds the size limit, an
# event is sent to Sentry and the job is scheduled like normal.
#
# In compress mode, if a job payload limit exceeds the threshold, it is
# then compressed. If the compressed payload still exceeds the limit, the
# job is discarded, and a ExceedLimitError exception is raised.
class Validator
def self.validate!(worker_class, job)
new(worker_class, job).validate!
end
DEFAULT_SIZE_LIMIT = 0
DEFAULT_COMPRESION_THRESHOLD_BYTES = 100_000 # 100kb
MODES = [
TRACK_MODE = 'track',
RAISE_MODE = 'raise'
COMPRESS_MODE = 'compress'
].freeze
attr_reader :mode, :size_limit
attr_reader :mode, :size_limit, :compression_threshold
def initialize(
worker_class, job,
mode: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_MODE'],
compression_threshold: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES'],
size_limit: ENV['GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES']
)
@worker_class = worker_class
@job = job
@mode = (mode || TRACK_MODE).to_s.strip
unless MODES.include?(@mode)
::Sidekiq.logger.warn "Invalid Sidekiq size limiter mode: #{@mode}. Fallback to #{TRACK_MODE} mode."
@mode = TRACK_MODE
end
@size_limit = (size_limit || DEFAULT_SIZE_LIMIT).to_i
if @size_limit < 0
::Sidekiq.logger.warn "Invalid Sidekiq size limiter limit: #{@size_limit}"
end
set_mode(mode)
set_compression_threshold(compression_threshold)
set_size_limit(size_limit)
end
def validate!
return unless @size_limit > 0
return if allow_big_payload?
return if job_size <= @size_limit
exception = ExceedLimitError.new(@worker_class, job_size, @size_limit)
# This should belong to Gitlab::ErrorTracking. We'll remove this
# after this epic is done:
# https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/396
exception.set_backtrace(backtrace)
job_args = compress_if_necessary(::Sidekiq.dump_json(@job['args']))
return if job_args.bytesize <= @size_limit
if raise_mode?
exception = exceed_limit_error(job_args)
if compress_mode?
raise exception
else
track(exception)
@ -68,11 +63,43 @@ module Gitlab
private
def job_size
# This maynot be the optimal solution, but can be acceptable solution
# for now. Internally, Sidekiq calls Sidekiq.dump_json everywhere.
# There is no clean way to intefere to prevent double serialization.
@job_size ||= ::Sidekiq.dump_json(@job).bytesize
def set_mode(mode)
@mode = (mode || TRACK_MODE).to_s.strip
unless MODES.include?(@mode)
::Sidekiq.logger.warn "Invalid Sidekiq size limiter mode: #{@mode}. Fallback to #{TRACK_MODE} mode."
@mode = TRACK_MODE
end
end
def set_compression_threshold(compression_threshold)
@compression_threshold = (compression_threshold || DEFAULT_COMPRESION_THRESHOLD_BYTES).to_i
if @compression_threshold <= 0
::Sidekiq.logger.warn "Invalid Sidekiq size limiter compression threshold: #{@compression_threshold}"
@compression_threshold = DEFAULT_COMPRESION_THRESHOLD_BYTES
end
end
def set_size_limit(size_limit)
@size_limit = (size_limit || DEFAULT_SIZE_LIMIT).to_i
if @size_limit < 0
::Sidekiq.logger.warn "Invalid Sidekiq size limiter limit: #{@size_limit}"
end
end
def exceed_limit_error(job_args)
ExceedLimitError.new(@worker_class, job_args.bytesize, @size_limit).tap do |exception|
# This should belong to Gitlab::ErrorTracking. We'll remove this
# after this epic is done:
# https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/396
exception.set_backtrace(backtrace)
end
end
def compress_if_necessary(job_args)
return job_args unless compress_mode?
return job_args if job_args.bytesize < @compression_threshold
::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor.compress(@job, job_args)
end
def allow_big_payload?
@ -80,8 +107,8 @@ module Gitlab
worker_class.respond_to?(:big_payload?) && worker_class.big_payload?
end
def raise_mode?
@mode == RAISE_MODE
def compress_mode?
@mode == COMPRESS_MODE
end
def track(exception)

View File

@ -6,7 +6,7 @@ module Sidebars
class InfrastructureMenu < ::Sidebars::Menu
override :configure_menu_items
def configure_menu_items
return false if Feature.disabled?(:sidebar_refactor, context.current_user)
return false if Feature.disabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
return false unless context.project.feature_available?(:operations, context.current_user)
add_item(kubernetes_menu_item)

View File

@ -98,7 +98,7 @@ module Sidebars
end
def labels_menu_item
if Feature.enabled?(:sidebar_refactor, context.current_user)
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
return ::Sidebars::NilMenuItem.new(item_id: :labels)
end

View File

@ -40,7 +40,7 @@ module Sidebars
override :render?
def render?
return false if Feature.enabled?(:sidebar_refactor, context.current_user)
return false if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
can?(context.current_user, :read_label, context.project) && !context.project.issues_enabled?
end

View File

@ -139,7 +139,7 @@ module Sidebars
end
def serverless_menu_item
if Feature.enabled?(:sidebar_refactor, context.current_user) ||
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml) ||
!can?(context.current_user, :read_cluster, context.project)
return ::Sidebars::NilMenuItem.new(item_id: :serverless)
end
@ -153,7 +153,7 @@ module Sidebars
end
def terraform_menu_item
if Feature.enabled?(:sidebar_refactor, context.current_user) ||
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml) ||
!can?(context.current_user, :read_terraform_state, context.project)
return ::Sidebars::NilMenuItem.new(item_id: :terraform)
end
@ -167,7 +167,7 @@ module Sidebars
end
def kubernetes_menu_item
if Feature.enabled?(:sidebar_refactor, context.current_user) ||
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml) ||
!can?(context.current_user, :read_cluster, context.project)
return ::Sidebars::NilMenuItem.new(item_id: :kubernetes)
end

View File

@ -34,7 +34,7 @@ module Sidebars
override :title
def title
if Feature.enabled?(:sidebar_refactor, context.current_user)
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
_('Project information')
else
_('Project overview')
@ -43,7 +43,7 @@ module Sidebars
override :sprite_icon
def sprite_icon
if Feature.enabled?(:sidebar_refactor, context.current_user)
if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
'project'
else
'home'
@ -52,7 +52,7 @@ module Sidebars
override :active_routes
def active_routes
return {} if Feature.disabled?(:sidebar_refactor, context.current_user)
return {} if Feature.disabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
{ path: 'projects#show' }
end
@ -60,7 +60,7 @@ module Sidebars
private
def details_menu_item
return if Feature.enabled?(:sidebar_refactor, context.current_user)
return if Feature.enabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
::Sidebars::MenuItem.new(
title: _('Details'),
@ -103,7 +103,7 @@ module Sidebars
end
def labels_menu_item
if Feature.disabled?(:sidebar_refactor, context.current_user)
if Feature.disabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml)
return ::Sidebars::NilMenuItem.new(item_id: :labels)
end

View File

@ -136,7 +136,7 @@ module Sidebars
def packages_and_registries_menu_item
if !Gitlab.config.registry.enabled ||
Feature.disabled?(:sidebar_refactor, context.current_user) ||
Feature.disabled?(:sidebar_refactor, context.current_user, default_enabled: :yaml) ||
!can?(context.current_user, :destroy_container_image, context.project)
return ::Sidebars::NilMenuItem.new(item_id: :packages_and_registries)
end

View File

@ -278,8 +278,9 @@ RSpec.describe 'Merge request > User creates image diff notes', :js do
end
def create_image_diff_note
expand_text = 'Click to expand it.'
page.all('a', text: expand_text, wait: false).each do |element|
wait_for_all_requests
page.all('a', text: 'Click to expand it.', wait: false).each do |element|
element.click
end

View File

@ -303,6 +303,39 @@ RSpec.describe Gitlab::SidekiqLogging::StructuredLogger do
expect { subject.call(job.dup, 'test_queue') {} }.not_to raise_error
end
end
context 'when the job payload is compressed' do
let(:compressed_args) { "eJyLVspIzcnJV4oFAA88AxE=" }
let(:expected_start_payload) do
start_payload.merge(
'args' => ['[COMPRESSED]'],
'job_size_bytes' => Sidekiq.dump_json([compressed_args]).bytesize,
'compressed' => true
)
end
let(:expected_end_payload) do
end_payload.merge(
'args' => ['[COMPRESSED]'],
'job_size_bytes' => Sidekiq.dump_json([compressed_args]).bytesize,
'compressed' => true
)
end
it 'logs it in the done log' do
Timecop.freeze(timestamp) do
expect(logger).to receive(:info).with(expected_start_payload).ordered
expect(logger).to receive(:info).with(expected_end_payload).ordered
job['args'] = [compressed_args]
job['compressed'] = true
call_subject(job, 'test_queue') do
::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor.decompress(job)
end
end
end
end
end
describe '#add_time_keys!' do

View File

@ -0,0 +1,200 @@
# frozen_string_literal: true

require 'spec_helper'

# Specs for the Sidekiq job payload compressor used by the size limiter
# middleware (compress/decompress of the serialized `args` list).
RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Compressor do
  using RSpec::Parameterized::TableSyntax

  # A representative Sidekiq job payload without any `args`.
  let(:base_payload) do
    {
      "class" => "ARandomWorker",
      "queue" => "a_worker",
      "retry" => true,
      "jid" => "d774900367dc8b2962b2479c",
      "created_at" => 1234567890,
      "enqueued_at" => 1234567890
    }
  end

  describe '.compressed?' do
    where(:job, :result) do
      {} | false
      base_payload.merge("args" => [123, 'hello', ['world']]) | false
      base_payload.merge("args" => ['eJzLSM3JyQcABiwCFQ=='], 'compressed' => true) | true
    end

    with_them do
      it 'returns whether the job payload is compressed' do
        expect(described_class.compressed?(job)).to eql(result)
      end
    end
  end

  describe '.compress' do
    # Argument lists of increasing complexity, including nested structures,
    # non-ASCII text, and emoji, to exercise the serialize/deflate/encode path.
    where(:args) do
      [
        nil,
        [],
        ['hello'],
        [
          {
            "job_class" => "SomeWorker",
            "job_id" => "b4a577edbccf1d805744efa9",
            "provider_job_id" => nil,
            "queue_name" => "default",
            "arguments" => ["some", ["argument"]],
            "executions" => 0,
            "locale" => "en",
            "attempt_number" => 1
          },
          nil,
          'hello',
          12345678901234567890,
          ['nice']
        ],
        [
          '2021-05-13_09:59:37.57483 rails-background-jobs : {"severity":"ERROR","time":"2021-05-13T09:59:37.574Z"',
          'bonne journée - ขอให้มีความสุขในวันนี้ - một ngày mới tốt lành - 좋은 하루 되세요 - ごきげんよう',
          '🤝 - 🦊'
        ]
      ]
    end

    with_them do
      let(:payload) { base_payload.merge("args" => args) }

      it 'injects compressed data' do
        serialized_args = Sidekiq.dump_json(args)
        described_class.compress(payload, serialized_args)

        expect(payload['args'].length).to be(1)
        expect(payload['args'].first).to be_a(String)
        expect(payload['compressed']).to be(true)
        expect(payload['original_job_size_bytes']).to eql(serialized_args.bytesize)
        # The compressed payload must remain JSON-serializable by Sidekiq.
        expect do
          Sidekiq.dump_json(payload)
        end.not_to raise_error
      end

      it 'can decompress the payload' do
        original_payload = payload.deep_dup
        described_class.compress(payload, Sidekiq.dump_json(args))
        described_class.decompress(payload)

        expect(payload).to eql(original_payload)
      end
    end
  end

  describe '.decompress' do
    context 'job payload is not compressed' do
      let(:payload) { base_payload.merge("args" => ['hello']) }

      it 'preserves the payload after decompression' do
        original_payload = payload.deep_dup
        described_class.decompress(payload)

        expect(payload).to eql(original_payload)
      end
    end

    context 'job payload is compressed with a default level' do
      let(:payload) do
        base_payload.merge(
          'args' => ['eF6LVspIzcnJV9JRKs8vyklRigUAMq0FqQ=='],
          'compressed' => true
        )
      end

      it 'decompresses and clean up the job payload' do
        described_class.decompress(payload)

        expect(payload['args']).to eql(%w[hello world])
        expect(payload).not_to have_key('compressed')
      end
    end

    # Decompression must work regardless of the deflate level used by the
    # producer; here the fixture is compressed at level 9.
    context 'job payload is compressed with a different level' do
      let(:payload) do
        base_payload.merge(
          'args' => [Base64.strict_encode64(Zlib::Deflate.deflate(Sidekiq.dump_json(%w[hello world]), 9))],
          'compressed' => true
        )
      end

      it 'decompresses and clean up the job payload' do
        described_class.decompress(payload)

        expect(payload['args']).to eql(%w[hello world])
        expect(payload).not_to have_key('compressed')
      end
    end

    # A compressed job must carry exactly one argument; extra entries mean
    # another middleware interfered with the payload.
    context 'job payload argument list is malformed' do
      let(:payload) do
        base_payload.merge(
          'args' => ['eNqLVspIzcnJV9JRKs8vyklRigUAMq0FqQ==', 'something else'],
          'compressed' => true
        )
      end

      it 'tracks the conflicting exception' do
        expect(::Gitlab::ErrorTracking).to receive(:track_and_raise_exception).with(
          be_a(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor::PayloadDecompressionConflictError)
        )

        described_class.decompress(payload)

        expect(payload['args']).to eql(%w[hello world])
        expect(payload).not_to have_key('compressed')
      end
    end

    context 'job payload is not a valid base64 string' do
      let(:payload) do
        base_payload.merge(
          'args' => ['hello123'],
          'compressed' => true
        )
      end

      it 'raises an exception' do
        expect do
          described_class.decompress(payload)
        end.to raise_error(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor::PayloadDecompressionError)
      end
    end

    context 'job payload compression does not contain a valid Gzip header' do
      let(:payload) do
        base_payload.merge(
          'args' => ['aGVsbG8='],
          'compressed' => true
        )
      end

      it 'raises an exception' do
        expect do
          described_class.decompress(payload)
        end.to raise_error(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor::PayloadDecompressionError)
      end
    end

    context 'job payload compression does not contain a valid Gzip body' do
      let(:payload) do
        base_payload.merge(
          'args' => ["eNqLVspIzcnJVw=="],
          'compressed' => true
        )
      end

      it 'raises an exception' do
        expect do
          described_class.decompress(payload)
        end.to raise_error(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor::PayloadDecompressionError)
      end
    end
  end
end

View File

@ -0,0 +1,33 @@
# frozen_string_literal: true

require 'spec_helper'

# Specs for the Sidekiq server middleware that decompresses job payloads
# before they are handed to the worker.
# rubocop: disable RSpec/MultipleMemoizedHelpers
RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Server, :clean_gitlab_redis_queues do
  subject(:middleware) { described_class.new }

  # A stand-in worker class; the middleware never inspects it.
  let(:worker) { Class.new }
  # A representative (uncompressed) Sidekiq job payload.
  let(:job) do
    {
      "class" => "ARandomWorker",
      "queue" => "a_worker",
      "args" => %w[Hello World],
      "created_at" => 1234567890,
      "enqueued_at" => 1234567890
    }
  end

  before do
    allow(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor).to receive(:compress)
  end

  it 'yields block' do
    expect { |b| subject.call(worker, job, :test, &b) }.to yield_control.once
  end

  it 'calls the Compressor' do
    expect(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor).to receive(:decompress).with(job)

    subject.call(worker, job, :test) {}
  end
end

View File

@ -3,6 +3,21 @@
require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
let(:base_payload) do
{
"class" => "ARandomWorker",
"queue" => "a_worker",
"retry" => true,
"jid" => "d774900367dc8b2962b2479c",
"created_at" => 1234567890,
"enqueued_at" => 1234567890
}
end
def job_payload(args = {})
base_payload.merge('args' => args)
end
let(:worker_class) do
Class.new do
def self.name
@ -24,8 +39,8 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not log a warning message' do
expect(::Sidekiq.logger).not_to receive(:warn)
described_class.new(TestSizeLimiterWorker, {}, mode: 'track')
described_class.new(TestSizeLimiterWorker, {}, mode: 'raise')
described_class.new(TestSizeLimiterWorker, job_payload, mode: 'track')
described_class.new(TestSizeLimiterWorker, job_payload, mode: 'compress')
end
end
@ -33,7 +48,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to track mode and logs a warning message' do
expect(::Sidekiq.logger).to receive(:warn).with('Invalid Sidekiq size limiter mode: invalid. Fallback to track mode.')
validator = described_class.new(TestSizeLimiterWorker, {}, mode: 'invalid')
validator = described_class.new(TestSizeLimiterWorker, job_payload, mode: 'invalid')
expect(validator.mode).to eql('track')
end
@ -43,7 +58,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to track mode' do
expect(::Sidekiq.logger).not_to receive(:warn)
validator = described_class.new(TestSizeLimiterWorker, {})
validator = described_class.new(TestSizeLimiterWorker, job_payload)
expect(validator.mode).to eql('track')
end
@ -53,8 +68,8 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not log a warning message' do
expect(::Sidekiq.logger).not_to receive(:warn)
described_class.new(TestSizeLimiterWorker, {}, size_limit: 300)
described_class.new(TestSizeLimiterWorker, {}, size_limit: 0)
described_class.new(TestSizeLimiterWorker, job_payload, size_limit: 300)
described_class.new(TestSizeLimiterWorker, job_payload, size_limit: 0)
end
end
@ -62,7 +77,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to 0 and logs a warning message' do
expect(::Sidekiq.logger).to receive(:warn).with('Invalid Sidekiq size limiter limit: -1')
described_class.new(TestSizeLimiterWorker, {}, size_limit: -1)
described_class.new(TestSizeLimiterWorker, job_payload, size_limit: -1)
end
end
@ -70,15 +85,63 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'defaults to 0' do
expect(::Sidekiq.logger).not_to receive(:warn)
validator = described_class.new(TestSizeLimiterWorker, {})
validator = described_class.new(TestSizeLimiterWorker, job_payload)
expect(validator.size_limit).to be(0)
end
end
context 'when the compression threshold is valid' do
it 'does not log a warning message' do
expect(::Sidekiq.logger).not_to receive(:warn)
described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: 300)
described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: 1)
end
end
context 'when the compression threshold is negative' do
it 'logs a warning message' do
expect(::Sidekiq.logger).to receive(:warn).with('Invalid Sidekiq size limiter compression threshold: -1')
described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: -1)
end
it 'falls back to the default' do
validator = described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: -1)
expect(validator.compression_threshold).to be(100_000)
end
end
context 'when the compression threshold is zero' do
it 'logs a warning message' do
expect(::Sidekiq.logger).to receive(:warn).with('Invalid Sidekiq size limiter compression threshold: 0')
described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: 0)
end
it 'falls back to the default' do
validator = described_class.new(TestSizeLimiterWorker, job_payload, compression_threshold: 0)
expect(validator.compression_threshold).to be(100_000)
end
end
context 'when the compression threshold is empty' do
it 'defaults to 100_000' do
expect(::Sidekiq.logger).not_to receive(:warn)
validator = described_class.new(TestSizeLimiterWorker, job_payload)
expect(validator.compression_threshold).to be(100_000)
end
end
end
shared_examples 'validate limit job payload size' do
context 'in track mode' do
let(:compression_threshold) { nil }
let(:mode) { 'track' }
context 'when size limit negative' do
@ -87,11 +150,11 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not track jobs' do
expect(Gitlab::ErrorTracking).not_to receive(:track_exception)
validate.call(TestSizeLimiterWorker, { a: 'a' * 300 })
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
end
it 'does not raise exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
expect { validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300)) }.not_to raise_error
end
end
@ -101,11 +164,13 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not track jobs' do
expect(Gitlab::ErrorTracking).not_to receive(:track_exception)
validate.call(TestSizeLimiterWorker, { a: 'a' * 300 })
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
end
it 'does not raise exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
expect do
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
end.not_to raise_error
end
end
@ -117,11 +182,13 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
be_a(Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError)
)
validate.call(TestSizeLimiterWorker, { a: 'a' * 100 })
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 100))
end
it 'does not raise an exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
expect do
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
end.not_to raise_error
end
context 'when the worker has big_payload attribute' do
@ -132,13 +199,17 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not track jobs' do
expect(Gitlab::ErrorTracking).not_to receive(:track_exception)
validate.call(TestSizeLimiterWorker, { a: 'a' * 300 })
validate.call('TestSizeLimiterWorker', { a: 'a' * 300 })
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
validate.call('TestSizeLimiterWorker', job_payload(a: 'a' * 300))
end
it 'does not raise an exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
expect { validate.call('TestSizeLimiterWorker', { a: 'a' * 300 }) }.not_to raise_error
expect do
validate.call(TestSizeLimiterWorker, job_payload(a: 'a' * 300))
end.not_to raise_error
expect do
validate.call('TestSizeLimiterWorker', job_payload(a: 'a' * 300))
end.not_to raise_error
end
end
end
@ -149,63 +220,60 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
it 'does not track job' do
expect(Gitlab::ErrorTracking).not_to receive(:track_exception)
validate.call(TestSizeLimiterWorker, { a: 'a' })
validate.call(TestSizeLimiterWorker, job_payload(a: 'a'))
end
it 'does not raise an exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' }) }.not_to raise_error
expect { validate.call(TestSizeLimiterWorker, job_payload(a: 'a')) }.not_to raise_error
end
end
end
context 'in raise mode' do
let(:mode) { 'raise' }
context 'in compress mode' do
let(:mode) { 'compress' }
context 'when size limit is negative' do
let(:size_limit) { -1 }
it 'does not raise exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
end
end
context 'when size limit is 0' do
let(:size_limit) { 0 }
it 'does not raise exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
end
end
context 'when job size is bigger than size limit' do
let(:size_limit) { 50 }
it 'raises an exception' do
expect do
validate.call(TestSizeLimiterWorker, { a: 'a' * 300 })
end.to raise_error(
Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError,
/TestSizeLimiterWorker job exceeds payload size limit/i
)
end
context 'when the worker has big_payload attribute' do
before do
worker_class.big_payload!
end
it 'does not raise an exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' * 300 }) }.not_to raise_error
expect { validate.call('TestSizeLimiterWorker', { a: 'a' * 300 }) }.not_to raise_error
end
end
end
context 'when job size is less than size limit' do
context 'when job size is less than compression threshold' do
let(:size_limit) { 50 }
let(:compression_threshold) { 30 }
let(:job) { job_payload(a: 'a' * 10) }
it 'does not raise an exception' do
expect { validate.call(TestSizeLimiterWorker, { a: 'a' }) }.not_to raise_error
expect(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor).not_to receive(:compress)
expect { validate.call(TestSizeLimiterWorker, job_payload(a: 'a')) }.not_to raise_error
end
end
context 'when job size is bigger than compression threshold and less than size limit after compressed' do
let(:size_limit) { 50 }
let(:compression_threshold) { 30 }
let(:args) { { a: 'a' * 300 } }
let(:job) { job_payload(args) }
it 'does not raise an exception' do
expect(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor).to receive(:compress).with(
job, Sidekiq.dump_json(args)
).and_return('a' * 40)
expect do
validate.call(TestSizeLimiterWorker, job)
end.not_to raise_error
end
end
context 'when job size is bigger than compression threshold and bigger than size limit after compressed' do
let(:size_limit) { 50 }
let(:compression_threshold) { 30 }
let(:args) { { a: 'a' * 3000 } }
let(:job) { job_payload(args) }
it 'does not raise an exception' do
expect(::Gitlab::SidekiqMiddleware::SizeLimiter::Compressor).to receive(:compress).with(
job, Sidekiq.dump_json(args)
).and_return('a' * 60)
expect do
validate.call(TestSizeLimiterWorker, job)
end.to raise_error(Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError)
end
end
end
@ -218,6 +286,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
before do
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_MODE', mode)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES', size_limit)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES', compression_threshold)
end
it_behaves_like 'validate limit job payload size'
@ -226,14 +295,14 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
context 'when creating an instance with the related ENV variables' do
let(:validate) do
->(worker_clas, job) do
validator = described_class.new(worker_class, job, mode: mode, size_limit: size_limit)
validator.validate!
described_class.new(worker_class, job).validate!
end
end
before do
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_MODE', mode)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_LIMIT_BYTES', size_limit)
stub_env('GITLAB_SIDEKIQ_SIZE_LIMITER_COMPRESSION_THRESHOLD_BYTES', compression_threshold)
end
it_behaves_like 'validate limit job payload size'
@ -242,7 +311,10 @@ RSpec.describe Gitlab::SidekiqMiddleware::SizeLimiter::Validator do
context 'when creating an instance with mode and size limit' do
let(:validate) do
->(worker_clas, job) do
validator = described_class.new(worker_class, job, mode: mode, size_limit: size_limit)
validator = described_class.new(
worker_class, job,
mode: mode, size_limit: size_limit, compression_threshold: compression_threshold
)
validator.validate!
end
end

View File

@ -323,8 +323,6 @@ RSpec.describe Ci::Build do
describe '#enqueue' do
let(:build) { create(:ci_build, :created) }
subject { build.enqueue }
before do
allow(build).to receive(:any_unmet_prerequisites?).and_return(has_prerequisites)
allow(Ci::PrepareBuildService).to receive(:perform_async)
@ -334,28 +332,74 @@ RSpec.describe Ci::Build do
let(:has_prerequisites) { true }
it 'transitions to preparing' do
subject
build.enqueue
expect(build).to be_preparing
end
it 'does not push build to the queue' do
build.enqueue
expect(::Ci::PendingBuild.all.count).to be_zero
end
end
context 'build has no prerequisites' do
let(:has_prerequisites) { false }
it 'transitions to pending' do
subject
build.enqueue
expect(build).to be_pending
end
it 'pushes build to a queue' do
build.enqueue
expect(build.queuing_entry).to be_present
end
context 'when build status transition fails' do
before do
::Ci::Build.find(build.id).update_column(:lock_version, 100)
end
it 'does not push build to a queue' do
expect { build.enqueue! }
.to raise_error(ActiveRecord::StaleObjectError)
expect(build.queuing_entry).not_to be_present
end
end
context 'when there is a queuing entry already present' do
before do
::Ci::PendingBuild.create!(build: build, project: build.project)
end
it 'does not raise an error' do
expect { build.enqueue! }.not_to raise_error
expect(build.reload.queuing_entry).to be_present
end
end
context 'when both failure scenario happen at the same time' do
before do
::Ci::Build.find(build.id).update_column(:lock_version, 100)
::Ci::PendingBuild.create!(build: build, project: build.project)
end
it 'raises stale object error exception' do
expect { build.enqueue! }
.to raise_error(ActiveRecord::StaleObjectError)
end
end
end
end
describe '#enqueue_preparing' do
let(:build) { create(:ci_build, :preparing) }
subject { build.enqueue_preparing }
before do
allow(build).to receive(:any_unmet_prerequisites?).and_return(has_unmet_prerequisites)
end
@ -364,9 +408,10 @@ RSpec.describe Ci::Build do
let(:has_unmet_prerequisites) { false }
it 'transitions to pending' do
subject
build.enqueue_preparing
expect(build).to be_pending
expect(build.queuing_entry).to be_present
end
end
@ -374,9 +419,10 @@ RSpec.describe Ci::Build do
let(:has_unmet_prerequisites) { true }
it 'remains in preparing' do
subject
build.enqueue_preparing
expect(build).to be_preparing
expect(build.queuing_entry).not_to be_present
end
end
end
@ -405,6 +451,36 @@ RSpec.describe Ci::Build do
end
end
describe '#run' do
context 'when build has been just created' do
let(:build) { create(:ci_build, :created) }
it 'creates queuing entry and then removes it' do
build.enqueue!
expect(build.queuing_entry).to be_present
build.run!
expect(build.reload.queuing_entry).not_to be_present
end
end
context 'when build status transition fails' do
let(:build) { create(:ci_build, :pending) }
before do
::Ci::PendingBuild.create!(build: build, project: build.project)
::Ci::Build.find(build.id).update_column(:lock_version, 100)
end
it 'does not remove build from a queue' do
expect { build.run! }
.to raise_error(ActiveRecord::StaleObjectError)
expect(build.queuing_entry).to be_present
end
end
end
describe '#schedulable?' do
subject { build.schedulable? }

View File

@ -0,0 +1,33 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Ci::PendingBuild do
let_it_be(:project) { create(:project) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
let(:build) { create(:ci_build, :created, pipeline: pipeline) }
describe '.upsert_from_build!' do
context 'another pending entry does not exist' do
it 'creates a new pending entry' do
result = described_class.upsert_from_build!(build)
expect(result.rows.dig(0, 0)).to eq build.id
expect(build.reload.queuing_entry).to be_present
end
end
context 'when another queuing entry exists for given build' do
before do
described_class.create!(build: build, project: project)
end
it 'returns a build id as a result' do
result = described_class.upsert_from_build!(build)
expect(result.rows.dig(0, 0)).to eq build.id
end
end
end
end

View File

@ -2726,7 +2726,7 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
pipeline2.cancel_running
end
extra_update_queries = 3 # transition ... => :canceled
extra_update_queries = 4 # transition ... => :canceled, queue pop
extra_generic_commit_status_validation_queries = 2 # name_uniqueness_across_types
expect(control2.count).to eq(control1.count + extra_update_queries + extra_generic_commit_status_validation_queries)

View File

@ -59,7 +59,8 @@ RSpec.describe Ci::RetryBuildService do
metadata runner_session trace_chunks upstream_pipeline_id
artifacts_file artifacts_metadata artifacts_size commands
resource resource_group_id processed security_scans author
pipeline_id report_results pending_state pages_deployments].freeze
pipeline_id report_results pending_state pages_deployments
queuing_entry].freeze
shared_examples 'build duplication' do
let_it_be(:another_pipeline) { create(:ci_empty_pipeline, project: project) }

View File

@ -7,151 +7,249 @@ RSpec.describe Ci::UpdateBuildQueueService do
let(:build) { create(:ci_build, pipeline: pipeline) }
let(:pipeline) { create(:ci_pipeline, project: project) }
shared_examples 'refreshes runner' do
it 'ticks runner queue value' do
expect { subject.execute(build) }.to change { runner.ensure_runner_queue_value }
end
end
describe '#push' do
let(:transition) { double('transition') }
shared_examples 'does not refresh runner' do
it 'ticks runner queue value' do
expect { subject.execute(build) }.not_to change { runner.ensure_runner_queue_value }
before do
allow(transition).to receive(:to).and_return('pending')
allow(transition).to receive(:within_transaction).and_yield
end
end
shared_examples 'matching build' do
context 'when there is a online runner that can pick build' do
context 'when pending build can be created' do
it 'creates a new pending build in transaction' do
queued = subject.push(build, transition)
expect(queued).to eq build.id
end
it 'increments queue push metric' do
metrics = spy('metrics')
described_class.new(metrics).push(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:build_queue_push)
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:to).and_return('created')
expect { subject.push(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
context 'when duplicate entry exists' do
before do
runner.update!(contacted_at: 30.minutes.ago)
::Ci::PendingBuild.create!(build: build, project: project)
end
it_behaves_like 'refreshes runner'
it 'does nothing and returns build id' do
queued = subject.push(build, transition)
it 'avoids running redundant queries' do
expect(Ci::Runner).not_to receive(:owned_or_instance_wide)
expect(queued).to eq build.id
end
end
end
subject.execute(build)
describe '#pop' do
let(:transition) { double('transition') }
before do
allow(transition).to receive(:from).and_return('pending')
allow(transition).to receive(:within_transaction).and_yield
end
context 'when pending build exists' do
before do
Ci::PendingBuild.create!(build: build, project: project)
end
context 'when feature flag ci_reduce_queries_when_ticking_runner_queue is disabled' do
it 'removes pending build in a transaction' do
dequeued = subject.pop(build, transition)
expect(dequeued).to eq build.id
end
it 'increments queue pop metric' do
metrics = spy('metrics')
described_class.new(metrics).pop(build, transition)
expect(metrics)
.to have_received(:increment_queue_operation)
.with(:build_queue_pop)
end
end
context 'when pending build does not exist' do
it 'does nothing if there is no pending build to remove' do
dequeued = subject.pop(build, transition)
expect(dequeued).to be_nil
end
end
context 'when invalid transition is detected' do
it 'raises an error' do
allow(transition).to receive(:from).and_return('created')
expect { subject.pop(build, transition) }
.to raise_error(described_class::InvalidQueueTransition)
end
end
end
describe '#tick' do
shared_examples 'refreshes runner' do
it 'ticks runner queue value' do
expect { subject.tick(build) }.to change { runner.ensure_runner_queue_value }
end
end
shared_examples 'does not refresh runner' do
it 'ticks runner queue value' do
expect { subject.tick(build) }.not_to change { runner.ensure_runner_queue_value }
end
end
shared_examples 'matching build' do
context 'when there is a online runner that can pick build' do
before do
stub_feature_flags(ci_reduce_queries_when_ticking_runner_queue: false)
stub_feature_flags(ci_runners_short_circuit_assignable_for: false)
runner.update!(contacted_at: 30.minutes.ago)
end
it 'runs redundant queries using `owned_or_instance_wide` scope' do
expect(Ci::Runner).to receive(:owned_or_instance_wide).and_call_original
it_behaves_like 'refreshes runner'
subject.execute(build)
it 'avoids running redundant queries' do
expect(Ci::Runner).not_to receive(:owned_or_instance_wide)
subject.tick(build)
end
context 'when feature flag ci_reduce_queries_when_ticking_runner_queue is disabled' do
before do
stub_feature_flags(ci_reduce_queries_when_ticking_runner_queue: false)
stub_feature_flags(ci_runners_short_circuit_assignable_for: false)
end
it 'runs redundant queries using `owned_or_instance_wide` scope' do
expect(Ci::Runner).to receive(:owned_or_instance_wide).and_call_original
subject.tick(build)
end
end
end
end
end
shared_examples 'mismatching tags' do
context 'when there is no runner that can pick build due to tag mismatch' do
before do
build.tag_list = [:docker]
end
shared_examples 'mismatching tags' do
context 'when there is no runner that can pick build due to tag mismatch' do
before do
build.tag_list = [:docker]
end
it_behaves_like 'does not refresh runner'
end
end
shared_examples 'recent runner queue' do
context 'when there is runner with expired cache' do
before do
runner.update!(contacted_at: Ci::Runner.recent_queue_deadline)
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating specific runners' do
let(:runner) { create(:ci_runner, :project, projects: [project]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when the runner is assigned to another project' do
let(:another_project) { create(:project) }
let(:runner) { create(:ci_runner, :project, projects: [another_project]) }
it_behaves_like 'does not refresh runner'
end
end
context 'when updating shared runners' do
let(:runner) { create(:ci_runner, :instance) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.shared_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating group runners' do
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let(:runner) { create(:ci_runner, :group, groups: [group]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.group_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'avoids N+1 queries', :request_store do
let!(:build) { create(:ci_build, pipeline: pipeline, tag_list: %w[a b]) }
let!(:project_runner) { create(:ci_runner, :project, :online, projects: [project], tag_list: %w[a b c]) }
context 'when ci_preload_runner_tags and ci_reduce_queries_when_ticking_runner_queue are enabled' do
before do
stub_feature_flags(
ci_reduce_queries_when_ticking_runner_queue: true,
ci_preload_runner_tags: true
)
end
it 'does execute the same amount of queries regardless of number of runners' do
control_count = ActiveRecord::QueryRecorder.new { subject.execute(build) }.count
create_list(:ci_runner, 10, :project, :online, projects: [project], tag_list: %w[b c d])
expect { subject.execute(build) }.not_to exceed_all_query_limit(control_count)
it_behaves_like 'does not refresh runner'
end
end
context 'when ci_preload_runner_tags and ci_reduce_queries_when_ticking_runner_queue are disabled' do
before do
stub_feature_flags(
ci_reduce_queries_when_ticking_runner_queue: false,
ci_preload_runner_tags: false
)
shared_examples 'recent runner queue' do
context 'when there is runner with expired cache' do
before do
runner.update!(contacted_at: Ci::Runner.recent_queue_deadline)
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating specific runners' do
let(:runner) { create(:ci_runner, :project, projects: [project]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when the runner is assigned to another project' do
let(:another_project) { create(:project) }
let(:runner) { create(:ci_runner, :project, projects: [another_project]) }
it_behaves_like 'does not refresh runner'
end
end
context 'when updating shared runners' do
let(:runner) { create(:ci_runner, :instance) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.shared_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'when updating group runners' do
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let(:runner) { create(:ci_runner, :group, groups: [group]) }
it_behaves_like 'matching build'
it_behaves_like 'mismatching tags'
it_behaves_like 'recent runner queue'
context 'when there is no runner that can pick build due to being disabled on project' do
before do
build.project.group_runners_enabled = false
end
it_behaves_like 'does not refresh runner'
end
end
context 'avoids N+1 queries', :request_store do
let!(:build) { create(:ci_build, pipeline: pipeline, tag_list: %w[a b]) }
let!(:project_runner) { create(:ci_runner, :project, :online, projects: [project], tag_list: %w[a b c]) }
context 'when ci_preload_runner_tags and ci_reduce_queries_when_ticking_runner_queue are enabled' do
before do
stub_feature_flags(
ci_reduce_queries_when_ticking_runner_queue: true,
ci_preload_runner_tags: true
)
end
it 'does execute the same amount of queries regardless of number of runners' do
control_count = ActiveRecord::QueryRecorder.new { subject.tick(build) }.count
create_list(:ci_runner, 10, :project, :online, projects: [project], tag_list: %w[b c d])
expect { subject.tick(build) }.not_to exceed_all_query_limit(control_count)
end
end
it 'does execute more queries for more runners' do
control_count = ActiveRecord::QueryRecorder.new { subject.execute(build) }.count
context 'when ci_preload_runner_tags and ci_reduce_queries_when_ticking_runner_queue are disabled' do
before do
stub_feature_flags(
ci_reduce_queries_when_ticking_runner_queue: false,
ci_preload_runner_tags: false
)
end
create_list(:ci_runner, 10, :project, :online, projects: [project], tag_list: %w[b c d])
it 'does execute more queries for more runners' do
control_count = ActiveRecord::QueryRecorder.new { subject.tick(build) }.count
expect { subject.execute(build) }.to exceed_all_query_limit(control_count)
create_list(:ci_runner, 10, :project, :online, projects: [project], tag_list: %w[b c d])
expect { subject.tick(build) }.to exceed_all_query_limit(control_count)
end
end
end
end