Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-08-05 15:12:12 +00:00
parent f63850d9d6
commit 8ec882085e
71 changed files with 2136 additions and 996 deletions

View File

@ -292,6 +292,7 @@ Style/ClassAndModuleChildren:
- 'app/models/merge_request/metrics.rb'
- 'app/models/namespace/admin_note.rb'
- 'app/models/namespace/aggregation_schedule.rb'
- 'app/models/namespace/detail.rb'
- 'app/models/namespace/package_setting.rb'
- 'app/models/namespace/root_storage_statistics.rb'
- 'app/models/namespaces/sync_event.rb'

View File

@ -3,14 +3,11 @@ import { GlButtonGroup } from '@gitlab/ui';
import { BubbleMenu } from '@tiptap/vue-2';
import { BUBBLE_MENU_TRACKING_ACTION } from '../../constants';
import trackUIControl from '../../services/track_ui_control';
import Image from '../../extensions/image';
import Paragraph from '../../extensions/paragraph';
import Heading from '../../extensions/heading';
import Audio from '../../extensions/audio';
import Video from '../../extensions/video';
import Code from '../../extensions/code';
import CodeBlockHighlight from '../../extensions/code_block_highlight';
import Diagram from '../../extensions/diagram';
import Frontmatter from '../../extensions/frontmatter';
import ReferenceDefinition from '../../extensions/reference_definition';
import Image from '../../extensions/image';
import ToolbarButton from '../toolbar_button.vue';
export default {
@ -28,18 +25,13 @@ export default {
shouldShow: ({ editor, from, to }) => {
if (from === to) return false;
const exclude = [
Code.name,
CodeBlockHighlight.name,
Diagram.name,
Frontmatter.name,
Image.name,
Audio.name,
Video.name,
ReferenceDefinition.name,
];
const includes = [Paragraph.name, Heading.name];
const excludes = [Image.name, Audio.name, Video.name];
return !exclude.some((type) => editor.isActive(type));
return (
includes.some((type) => editor.isActive(type)) &&
!excludes.some((type) => editor.isActive(type))
);
},
},
};

View File

@ -33,8 +33,12 @@ export default {
this.$emit('execute', { contentType: listType });
},
execute(command, contentType) {
this.tiptapEditor.chain().focus()[command]().run();
execute(command, contentType, ...args) {
this.tiptapEditor
.chain()
.focus()
[command](...args)
.run();
this.$emit('execute', { contentType });
},
@ -67,5 +71,8 @@ export default {
<gl-dropdown-item @click="insert('diagram', { language: 'plantuml' })">
{{ __('PlantUML diagram') }}
</gl-dropdown-item>
<gl-dropdown-item @click="execute('insertTableOfContents', 'tableOfContents')">
{{ __('Table of contents') }}
</gl-dropdown-item>
</gl-dropdown>
</template>

View File

@ -0,0 +1,55 @@
<script>
import { debounce } from 'lodash';
import { NodeViewWrapper } from '@tiptap/vue-2';
import { DEFAULT_DEBOUNCE_AND_THROTTLE_MS } from '~/lib/utils/constants';
import { getHeadings } from '../../services/table_of_contents_utils';
import TableOfContentsHeading from './table_of_contents_heading.vue';
export default {
name: 'TableOfContentsWrapper',
components: {
NodeViewWrapper,
TableOfContentsHeading,
},
props: {
editor: {
type: Object,
required: true,
},
node: {
type: Object,
required: true,
},
},
data() {
return {
headings: [],
};
},
mounted() {
this.handleUpdate = debounce(this.handleUpdate, DEFAULT_DEBOUNCE_AND_THROTTLE_MS * 2);
this.editor.on('update', this.handleUpdate);
this.$nextTick(this.handleUpdate);
},
methods: {
handleUpdate() {
this.headings = getHeadings(this.editor);
},
},
};
</script>
<template>
<node-view-wrapper
as="ul"
class="table-of-contents gl-border-1 gl-border-solid gl-border-gray-100 gl-mb-5 gl-p-4!"
data-testid="table-of-contents"
>
{{ __('Table of contents') }}
<table-of-contents-heading
v-for="(heading, index) in headings"
:key="index"
:heading="heading"
/>
</node-view-wrapper>
</template>

View File

@ -0,0 +1,25 @@
<script>
export default {
name: 'TableOfContentsHeading',
props: {
heading: {
type: Object,
required: true,
},
},
};
</script>
<template>
<li>
<a v-if="heading.text" href="#" @click.prevent>
{{ heading.text }}
</a>
<ul v-if="heading.subHeadings.length">
<table-of-contents-heading
v-for="(child, index) in heading.subHeadings"
:key="index"
:heading="child"
/>
</ul>
</li>
</template>

View File

@ -1,6 +1,8 @@
import { Node, InputRule } from '@tiptap/core';
import { s__ } from '~/locale';
import { VueNodeViewRenderer } from '@tiptap/vue-2';
import { __ } from '~/locale';
import { PARSE_HTML_PRIORITY_HIGHEST } from '../constants';
import TableOfContentsWrapper from '../components/wrappers/table_of_contents.vue';
export default Node.create({
name: 'tableOfContents',
@ -25,9 +27,18 @@ export default Node.create({
class:
'table-of-contents gl-border-1 gl-border-solid gl-text-center gl-border-gray-100 gl-mb-5',
},
s__('ContentEditor|Table of Contents'),
__('Table of contents'),
];
},
addNodeView() {
return VueNodeViewRenderer(TableOfContentsWrapper);
},
addCommands() {
return {
insertTableOfContents: () => ({ commands }) => commands.insertContent({ type: this.name }),
};
},
addInputRules() {
const { type } = this;

View File

@ -0,0 +1,67 @@
export function fillEmpty(headings) {
for (let i = 0; i < headings.length; i += 1) {
let j = headings[i - 1]?.level || 0;
if (headings[i].level - j > 1) {
while (j < headings[i].level) {
headings.splice(i, 0, { level: j + 1, text: '' });
j += 1;
}
}
}
return headings;
}
const exitHeadingBranch = (heading, targetLevel) => {
let currentHeading = heading;
while (currentHeading.level > targetLevel) {
currentHeading = currentHeading.parent;
}
return currentHeading;
};
export function toTree(headings) {
fillEmpty(headings);
const tree = [];
let currentHeading;
for (let i = 0; i < headings.length; i += 1) {
const heading = headings[i];
if (heading.level === 1) {
const h = { ...heading, subHeadings: [] };
tree.push(h);
currentHeading = h;
} else if (heading.level > currentHeading.level) {
const h = { ...heading, subHeadings: [], parent: currentHeading };
currentHeading.subHeadings.push(h);
currentHeading = h;
} else if (heading.level <= currentHeading.level) {
currentHeading = exitHeadingBranch(currentHeading, heading.level - 1);
const h = { ...heading, subHeadings: [], parent: currentHeading };
(currentHeading?.subHeadings || headings).push(h);
currentHeading = h;
}
}
return tree;
}
export function getHeadings(editor) {
const headings = [];
editor.state.doc.descendants((node) => {
if (node.type.name !== 'heading') return false;
headings.push({
level: node.attrs.level,
text: node.textContent,
});
return true;
});
return toTree(headings);
}

View File

@ -50,6 +50,7 @@ class Namespace < ApplicationRecord
has_many :project_statistics
has_one :namespace_settings, inverse_of: :namespace, class_name: 'NamespaceSetting', autosave: true
has_one :ci_cd_settings, inverse_of: :namespace, class_name: 'NamespaceCiCdSetting', autosave: true
has_one :namespace_details, inverse_of: :namespace, class_name: 'Namespace::Detail', autosave: true
has_one :namespace_statistics
has_one :namespace_route, foreign_key: :namespace_id, autosave: false, inverse_of: :namespace, class_name: 'Route'
has_many :namespace_members, foreign_key: :member_namespace_id, inverse_of: :member_namespace, class_name: 'Member'
@ -127,6 +128,7 @@ class Namespace < ApplicationRecord
to: :namespace_settings, allow_nil: true
after_save :schedule_sync_event_worker, if: -> { saved_change_to_id? || saved_change_to_parent_id? }
after_save :reload_namespace_details
after_commit :refresh_access_of_projects_invited_groups, on: :update, if: -> { previous_changes.key?('share_with_group_lock') }
@ -676,6 +678,12 @@ class Namespace < ApplicationRecord
end
end
def reload_namespace_details
return unless !project_namespace? && (previous_changes.keys & %w(description description_html cached_markdown_version)).any? && namespace_details.present?
namespace_details.reset
end
def sync_share_with_group_lock_with_parent
if parent&.share_with_group_lock?
self.share_with_group_lock = true

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class Namespace::Detail < ApplicationRecord
belongs_to :namespace, inverse_of: :namespace_details
validates :namespace, presence: true
validates :description, length: { maximum: 255 }
self.primary_key = :namespace_id
end

View File

@ -131,6 +131,8 @@ class Project < ApplicationRecord
after_save :save_topics
after_save :reload_project_namespace_details
after_create -> { create_or_load_association(:project_feature) }
after_create -> { create_or_load_association(:ci_cd_settings) }
@ -3257,6 +3259,12 @@ class Project < ApplicationRecord
project_namespace.assign_attributes(attributes_to_sync)
end
def reload_project_namespace_details
return unless (previous_changes.keys & %w(description description_html cached_markdown_version)).any? && project_namespace.namespace_details.present?
project_namespace.namespace_details.reset
end
# SyncEvents are created by PG triggers (with the function `insert_projects_sync_event`)
def schedule_sync_event_worker
run_after_commit do

View File

@ -244,10 +244,10 @@ class Repository
end
end
def add_branch(user, branch_name, ref)
def add_branch(user, branch_name, ref, expire_cache: true)
branch = raw_repository.add_branch(branch_name, user: user, target: ref)
after_create_branch
after_create_branch(expire_cache: expire_cache)
branch
rescue Gitlab::Git::Repository::InvalidRef

View File

@ -25,6 +25,7 @@ module Branches
branches
.then { |branches| only_valid_branches(branches) }
.then { |branches| create_branches(branches) }
.then { |branches| expire_branches_cache(branches) }
return error(errors) if errors.present?
@ -54,7 +55,7 @@ module Branches
def create_branches(branches)
branches.filter_map do |branch_name, ref|
result = create_branch(branch_name, ref)
result = create_branch(branch_name, ref, expire_cache: false)
if result[:status] == :error
errors << result[:message]
@ -65,8 +66,14 @@ module Branches
end
end
def create_branch(branch_name, ref)
new_branch = repository.add_branch(current_user, branch_name, ref)
def expire_branches_cache(branches)
repository.expire_branches_cache if branches.present?
branches
end
def create_branch(branch_name, ref, expire_cache: true)
new_branch = repository.add_branch(current_user, branch_name, ref, expire_cache: expire_cache)
if new_branch
success(branch: new_branch)

View File

@ -14,7 +14,7 @@ module Ci
def execute
unless @user.present? && @user.can?(:assign_runner, @runner)
return ServiceResponse.error(message: 'user not allowed to assign runner')
return ServiceResponse.error(message: 'user not allowed to assign runner', http_status: :forbidden)
end
if @runner.assign_to(@project, @user)

View File

@ -6,7 +6,7 @@ module Ci
def execute(registration_token, attributes)
runner_type_attrs = extract_runner_type_attrs(registration_token)
return unless runner_type_attrs
return ServiceResponse.error(message: 'invalid token supplied', http_status: :forbidden) unless runner_type_attrs
runner = ::Ci::Runner.new(attributes.merge(runner_type_attrs))
@ -20,7 +20,7 @@ module Ci
end
end
runner
ServiceResponse.success(payload: { runner: runner })
end
private

View File

@ -13,9 +13,15 @@ module Ci
end
def execute
return false unless @user.present? && @user.can?(:assign_runner, @runner)
unless @user.present? && @user.can?(:assign_runner, @runner)
return ServiceResponse.error(message: 'user not allowed to assign runner')
end
@runner_project.destroy
if @runner_project.destroy
ServiceResponse.success
else
ServiceResponse.error(message: 'failed to destroy runner project')
end
end
private

View File

@ -0,0 +1,9 @@
---
table_name: namespace_details
classes:
- Namespace::Detail
feature_categories:
- subgroups
description: Used to store details for namespaces
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82958
milestone: '15.3'

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
class CreateNamespaceDetails < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
def up
with_lock_retries do
create_table :namespace_details, id: false do |t|
t.references :namespace, primary_key: true, null: false, default: nil, type: :bigint, index: false, foreign_key: { on_delete: :cascade } # rubocop:disable Layout/LineLength
t.timestamps_with_timezone null: true
t.integer :cached_markdown_version
t.text :description, limit: 255
t.text :description_html, limit: 255
end
end
end
def down
drop_table :namespace_details
end
end

View File

@ -0,0 +1,70 @@
# frozen_string_literal: true
class CreateSyncNamespaceDetailsTrigger < Gitlab::Database::Migration[2.0]
include Gitlab::Database::SchemaHelpers
UPDATE_TRIGGER_NAME = 'trigger_update_details_on_namespace_update'
INSERT_TRIGGER_NAME = 'trigger_update_details_on_namespace_insert'
FUNCTION_NAME = 'update_namespace_details_from_namespaces'
enable_lock_retries!
def up
create_trigger_function(FUNCTION_NAME, replace: true) do
<<~SQL
INSERT INTO
namespace_details (
description,
description_html,
cached_markdown_version,
updated_at,
created_at,
namespace_id
)
VALUES
(
NEW.description,
NEW.description_html,
NEW.cached_markdown_version,
NEW.updated_at,
NEW.updated_at,
NEW.id
) ON CONFLICT (namespace_id) DO
UPDATE
SET
description = NEW.description,
description_html = NEW.description_html,
cached_markdown_version = NEW.cached_markdown_version,
updated_at = NEW.updated_at
WHERE
namespace_details.namespace_id = NEW.id;RETURN NULL;
SQL
end
execute(<<~SQL)
CREATE TRIGGER #{UPDATE_TRIGGER_NAME}
AFTER UPDATE ON namespaces
FOR EACH ROW
WHEN (
NEW.type <> 'Project' AND (
OLD.description IS DISTINCT FROM NEW.description OR
OLD.description_html IS DISTINCT FROM NEW.description_html OR
OLD.cached_markdown_version IS DISTINCT FROM NEW.cached_markdown_version)
)
EXECUTE PROCEDURE #{FUNCTION_NAME}();
SQL
execute(<<~SQL)
CREATE TRIGGER #{INSERT_TRIGGER_NAME}
AFTER INSERT ON namespaces
FOR EACH ROW
WHEN (NEW.type <> 'Project')
EXECUTE PROCEDURE #{FUNCTION_NAME}();
SQL
end
def down
drop_trigger(:namespaces, UPDATE_TRIGGER_NAME)
drop_trigger(:namespaces, INSERT_TRIGGER_NAME)
drop_function(FUNCTION_NAME)
end
end

View File

@ -0,0 +1,68 @@
# frozen_string_literal: true
class CreateSyncProjectNamespaceDetailsTrigger < Gitlab::Database::Migration[2.0]
include Gitlab::Database::SchemaHelpers
UPDATE_TRIGGER_NAME = 'trigger_update_details_on_project_update'
INSERT_TRIGGER_NAME = 'trigger_update_details_on_project_insert'
FUNCTION_NAME = 'update_namespace_details_from_projects'
enable_lock_retries!
def up
create_trigger_function(FUNCTION_NAME, replace: true) do
<<~SQL
INSERT INTO
namespace_details (
description,
description_html,
cached_markdown_version,
updated_at,
created_at,
namespace_id
)
VALUES
(
NEW.description,
NEW.description_html,
NEW.cached_markdown_version,
NEW.updated_at,
NEW.updated_at,
NEW.project_namespace_id
) ON CONFLICT (namespace_id) DO
UPDATE
SET
description = NEW.description,
description_html = NEW.description_html,
cached_markdown_version = NEW.cached_markdown_version,
updated_at = NEW.updated_at
WHERE
namespace_details.namespace_id = NEW.project_namespace_id;RETURN NULL;
SQL
end
execute(<<~SQL)
CREATE TRIGGER #{UPDATE_TRIGGER_NAME}
AFTER UPDATE ON projects
FOR EACH ROW
WHEN (
OLD.description IS DISTINCT FROM NEW.description OR
OLD.description_html IS DISTINCT FROM NEW.description_html OR
OLD.cached_markdown_version IS DISTINCT FROM NEW.cached_markdown_version
)
EXECUTE PROCEDURE #{FUNCTION_NAME}();
SQL
execute(<<~SQL)
CREATE TRIGGER #{INSERT_TRIGGER_NAME}
AFTER INSERT ON projects
FOR EACH ROW
EXECUTE PROCEDURE #{FUNCTION_NAME}();
SQL
end
def down
drop_trigger(:projects, UPDATE_TRIGGER_NAME)
drop_trigger(:projects, INSERT_TRIGGER_NAME)
drop_function(FUNCTION_NAME)
end
end

View File

@ -0,0 +1 @@
c974e1a600323bac9b913e9e382384c302037ed6d1fc1df3b747471810293167

View File

@ -0,0 +1 @@
a931441890bd5d472f88dcef82bb42e3c8046a981788f2362a8deb89f4ac049a

View File

@ -0,0 +1 @@
f28bf2a6fe412342eef053b57cce14c0681d04f9e978e37bbd505f1efa36e92e

View File

@ -259,6 +259,74 @@ RETURN NULL;
END
$$;
CREATE FUNCTION update_namespace_details_from_namespaces() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
INSERT INTO
namespace_details (
description,
description_html,
cached_markdown_version,
updated_at,
created_at,
namespace_id
)
VALUES
(
NEW.description,
NEW.description_html,
NEW.cached_markdown_version,
NEW.updated_at,
NEW.updated_at,
NEW.id
) ON CONFLICT (namespace_id) DO
UPDATE
SET
description = NEW.description,
description_html = NEW.description_html,
cached_markdown_version = NEW.cached_markdown_version,
updated_at = NEW.updated_at
WHERE
namespace_details.namespace_id = NEW.id;RETURN NULL;
END
$$;
CREATE FUNCTION update_namespace_details_from_projects() RETURNS trigger
LANGUAGE plpgsql
AS $$
BEGIN
INSERT INTO
namespace_details (
description,
description_html,
cached_markdown_version,
updated_at,
created_at,
namespace_id
)
VALUES
(
NEW.description,
NEW.description_html,
NEW.cached_markdown_version,
NEW.updated_at,
NEW.updated_at,
NEW.project_namespace_id
) ON CONFLICT (namespace_id) DO
UPDATE
SET
description = NEW.description,
description_html = NEW.description_html,
cached_markdown_version = NEW.cached_markdown_version,
updated_at = NEW.updated_at
WHERE
namespace_details.namespace_id = NEW.project_namespace_id;RETURN NULL;
END
$$;
CREATE FUNCTION update_vulnerability_reads_from_vulnerability() RETURNS trigger
LANGUAGE plpgsql
AS $$
@ -17555,6 +17623,17 @@ CREATE TABLE namespace_ci_cd_settings (
allow_stale_runner_pruning boolean DEFAULT false NOT NULL
);
CREATE TABLE namespace_details (
namespace_id bigint NOT NULL,
created_at timestamp with time zone,
updated_at timestamp with time zone,
cached_markdown_version integer,
description text,
description_html text,
CONSTRAINT check_2df620eaf6 CHECK ((char_length(description_html) <= 255)),
CONSTRAINT check_2f563eec0f CHECK ((char_length(description) <= 255))
);
CREATE TABLE namespace_limits (
additional_purchased_storage_size bigint DEFAULT 0 NOT NULL,
additional_purchased_storage_ends_on date,
@ -25371,6 +25450,9 @@ ALTER TABLE ONLY namespace_bans
ALTER TABLE ONLY namespace_ci_cd_settings
ADD CONSTRAINT namespace_ci_cd_settings_pkey PRIMARY KEY (namespace_id);
ALTER TABLE ONLY namespace_details
ADD CONSTRAINT namespace_details_pkey PRIMARY KEY (namespace_id);
ALTER TABLE ONLY namespace_limits
ADD CONSTRAINT namespace_limits_pkey PRIMARY KEY (namespace_id);
@ -31743,6 +31825,14 @@ CREATE TRIGGER trigger_projects_parent_id_on_insert AFTER INSERT ON projects FOR
CREATE TRIGGER trigger_projects_parent_id_on_update AFTER UPDATE ON projects FOR EACH ROW WHEN ((old.namespace_id IS DISTINCT FROM new.namespace_id)) EXECUTE FUNCTION insert_projects_sync_event();
CREATE TRIGGER trigger_update_details_on_namespace_insert AFTER INSERT ON namespaces FOR EACH ROW WHEN (((new.type)::text <> 'Project'::text)) EXECUTE FUNCTION update_namespace_details_from_namespaces();
CREATE TRIGGER trigger_update_details_on_namespace_update AFTER UPDATE ON namespaces FOR EACH ROW WHEN ((((new.type)::text <> 'Project'::text) AND (((old.description)::text IS DISTINCT FROM (new.description)::text) OR (old.description_html IS DISTINCT FROM new.description_html) OR (old.cached_markdown_version IS DISTINCT FROM new.cached_markdown_version)))) EXECUTE FUNCTION update_namespace_details_from_namespaces();
CREATE TRIGGER trigger_update_details_on_project_insert AFTER INSERT ON projects FOR EACH ROW EXECUTE FUNCTION update_namespace_details_from_projects();
CREATE TRIGGER trigger_update_details_on_project_update AFTER UPDATE ON projects FOR EACH ROW WHEN (((old.description IS DISTINCT FROM new.description) OR (old.description_html IS DISTINCT FROM new.description_html) OR (old.cached_markdown_version IS DISTINCT FROM new.cached_markdown_version))) EXECUTE FUNCTION update_namespace_details_from_projects();
CREATE TRIGGER trigger_update_has_issues_on_vulnerability_issue_links_delete AFTER DELETE ON vulnerability_issue_links FOR EACH ROW EXECUTE FUNCTION unset_has_issues_on_vulnerability_reads();
CREATE TRIGGER trigger_update_has_issues_on_vulnerability_issue_links_update AFTER INSERT ON vulnerability_issue_links FOR EACH ROW EXECUTE FUNCTION set_has_issues_on_vulnerability_reads();
@ -33937,6 +34027,9 @@ ALTER TABLE ONLY boards_epic_board_positions
ALTER TABLE ONLY vulnerability_finding_links
ADD CONSTRAINT fk_rails_cbdfde27ce FOREIGN KEY (vulnerability_occurrence_id) REFERENCES vulnerability_occurrences(id) ON DELETE CASCADE;
ALTER TABLE ONLY namespace_details
ADD CONSTRAINT fk_rails_cc11a451f8 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY issues_self_managed_prometheus_alert_events
ADD CONSTRAINT fk_rails_cc5d88bbb0 FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE;

View File

@ -290,7 +290,7 @@ and GitLab features. For example, restrict the token by:
of specific users.
- Setting Vault time limits for TTL of the token as specified in [`token_explicit_max_ttl`](https://www.vaultproject.io/api-docs/auth/jwt#token_explicit_max_ttl),
where the token expires after authentication.
- Scoping the JWT to [GitLab projected branches](../../../user/project/protected_branches.md)
- Scoping the JWT to [GitLab protected branches](../../../user/project/protected_branches.md)
that are restricted to a subset of project users.
- Scoping the JWT to [GitLab projected tags](../../../user/project/protected_tags.md),
- Scoping the JWT to [GitLab protected tags](../../../user/project/protected_tags.md),
that are restricted to a subset of project users.

View File

@ -783,7 +783,7 @@ Enum values can be deprecated using the
### Defining GraphQL enums dynamically from Rails enums
If your GraphQL enum is backed by a [Rails enum](creating_enums.md), then consider
If your GraphQL enum is backed by a [Rails enum](database/creating_enums.md), then consider
using the Rails enum to dynamically define the GraphQL enum values. Doing so
binds the GraphQL enum values to the Rails enum definition, so if values are
ever added to the Rails enum then the GraphQL enum automatically reflects the change.
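As an illustration only, a dynamically defined enum could look like the following sketch; the enum class, its namespace, and the `::Ci::Pipeline.failure_reasons` Rails enum used here are assumptions, not code from the repository:
```ruby
module Types
  module Ci
    # Hypothetical GraphQL enum mirroring a Rails enum such as
    # `enum failure_reason: { unknown_failure: 0, config_error: 1 }`.
    class PipelineFailureReasonEnum < BaseEnum
      graphql_name 'PipelineFailureReason'
      # Iterating over the Rails enum keys keeps both definitions in sync:
      # values added to the Rails enum appear in GraphQL automatically.
      ::Ci::Pipeline.failure_reasons.each_key do |reason|
        value reason.to_s.upcase, value: reason.to_s, description: reason.to_s.humanize
      end
    end
  end
end
```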

View File

@ -1,154 +1,11 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: 'database/creating_enums.md'
remove_date: '2022-11-06'
---
# Creating enums
This document was moved to [another location](database/creating_enums.md).
When creating a new enum, it should use the database type `SMALLINT`.
The `SMALLINT` type size is 2 bytes, which is sufficient for an enum.
This would help to save space in the database.
To use this type, add `limit: 2` to the migration that creates the column.
Example:
```ruby
def change
add_column :ci_job_artifacts, :file_format, :integer, limit: 2
end
```
## All of the key/value pairs should be defined in FOSS
**Summary:** All enums needs to be defined in FOSS, if a model is also part of the FOSS.
```ruby
class Model < ApplicationRecord
enum platform: {
aws: 0,
gcp: 1 # EE-only
}
end
```
When you add a new key/value pair to a `enum` and if it's EE-specific, you might be
tempted to organize the `enum` as the following:
```ruby
# Define `failure_reason` enum in `Pipeline` model:
class Pipeline < ApplicationRecord
enum failure_reason: Enums::Pipeline.failure_reasons
end
```
```ruby
# Define key/value pairs that used in FOSS and EE:
module Enums
module Pipeline
def self.failure_reasons
{ unknown_failure: 0, config_error: 1 }
end
end
end
Enums::Pipeline.prepend_mod_with('Enums::Pipeline')
```
```ruby
# Define key/value pairs that used in EE only:
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 2)
end
end
end
end
```
This works as-is, however, it has a couple of downside that:
- Someone could define a key/value pair in EE that is **conflicted** with a value defined in FOSS.
For example, define `activity_limit_exceeded: 1` in `EE::Enums::Pipeline`.
- When it happens, the feature works totally different.
For example, we cannot figure out `failure_reason` is either `config_error` or `activity_limit_exceeded`.
- When it happens, we have to ship a database migration to fix the data integrity,
which might be impossible if you cannot recover the original value.
Also, you might observe a workaround for this concern by setting an offset in EE's values.
For example, this example sets `1000` as the offset:
```ruby
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 1_000, size_limit_exceeded: 1_001)
end
end
end
end
```
This looks working as a workaround, however, this approach has some downsides that:
- Features could move from EE to FOSS or vice versa. Therefore, the offset might be mixed between FOSS and EE in the future.
For example, when you move `activity_limit_exceeded` to FOSS, you see `{ unknown_failure: 0, config_error: 1, activity_limit_exceeded: 1_000 }`.
- The integer column for the `enum` is likely created [as `SMALLINT`](#creating-enums).
Therefore, you need to be careful of that the offset doesn't exceed the maximum value of 2 bytes integer.
As a conclusion, you should define all of the key/value pairs in FOSS.
For example, you can simply write the following code in the above case:
```ruby
class Pipeline < ApplicationRecord
enum failure_reason: {
unknown_failure: 0,
config_error: 1,
activity_limit_exceeded: 2
}
end
```
## Add new values in the gap
After merging some EE and FOSS enums, there might be a gap between the two groups of values:
```ruby
module Enums
module Ci
module CommitStatus
def self.failure_reasons
{
# ...
data_integrity_failure: 12,
forward_deployment_failure: 13,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
# ...
}
end
end
end
end
```
To add new values, you should fill the gap first.
In the example above add `14` instead of `1_003`:
```ruby
{
# ...
data_integrity_failure: 12,
forward_deployment_failure: 13,
a_new_value: 14,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
# ...
}
```
<!-- This redirect file can be deleted after <2022-11-06>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

View File

@ -368,7 +368,7 @@ A strategy to make the migration run faster is to schedule larger batches, and t
within the background migration to perform multiple statements.
The background migration helpers that queue multiple jobs such as
`queue_background_migration_jobs_by_range_at_intervals` use [`EachBatch`](../iterating_tables_in_batches.md).
`queue_background_migration_jobs_by_range_at_intervals` use [`EachBatch`](iterating_tables_in_batches.md).
The example above has batches of 1000, where each queued job takes two seconds. If the query has been optimized
to make the time for the delete statement within the [query performance guidelines](../query_performance.md),
1000 may be the largest number of records that can be deleted in a reasonable amount of time.

View File

@ -352,7 +352,7 @@ The default batching strategy provides an efficient way to iterate over primary
However, if you need to iterate over columns where values are not unique, you must use a
different batching strategy.
The `LooseIndexScanBatchingStrategy` batching strategy uses a special version of [`EachBatch`](../iterating_tables_in_batches.md#loose-index-scan-with-distinct_each_batch)
The `LooseIndexScanBatchingStrategy` batching strategy uses a special version of [`EachBatch`](iterating_tables_in_batches.md#loose-index-scan-with-distinct_each_batch)
to provide efficient and stable iteration over the distinct column values.
This example shows a batched background migration where the `issues.project_id` column is used as
@ -475,7 +475,7 @@ We can accomplish this by:
end
```
When applying a batching strategy, it is important to ensure the filter is properly covered by an index to optimize `EachBatch` performance. See [the `EachBatch` docs for more information](../iterating_tables_in_batches.md).
When applying a batching strategy, it is important to ensure the filter is properly covered by an index to optimize `EachBatch` performance. See [the `EachBatch` docs for more information](iterating_tables_in_batches.md).
## Testing

View File

@ -0,0 +1,154 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Creating enums
When creating a new enum, it should use the database type `SMALLINT`.
The `SMALLINT` type size is 2 bytes, which is sufficient for an enum.
This would help to save space in the database.
To use this type, add `limit: 2` to the migration that creates the column.
Example:
```ruby
def change
add_column :ci_job_artifacts, :file_format, :integer, limit: 2
end
```
## All of the key/value pairs should be defined in FOSS
**Summary:** All enums need to be defined in FOSS if the model is also part of FOSS.
```ruby
class Model < ApplicationRecord
enum platform: {
aws: 0,
gcp: 1 # EE-only
}
end
```
When you add a new key/value pair to an `enum` that is EE-specific, you might be
tempted to organize the `enum` as follows:
```ruby
# Define `failure_reason` enum in `Pipeline` model:
class Pipeline < ApplicationRecord
enum failure_reason: Enums::Pipeline.failure_reasons
end
```
```ruby
# Define key/value pairs that are used in FOSS and EE:
module Enums
module Pipeline
def self.failure_reasons
{ unknown_failure: 0, config_error: 1 }
end
end
end
Enums::Pipeline.prepend_mod_with('Enums::Pipeline')
```
```ruby
# Define key/value pairs that are used in EE only:
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 2)
end
end
end
end
```
This works as-is; however, it has a couple of downsides:
- Someone could define a key/value pair in EE that **conflicts** with a value defined in FOSS.
For example, define `activity_limit_exceeded: 1` in `EE::Enums::Pipeline`.
- When that happens, the feature behaves completely differently.
For example, we cannot tell whether `failure_reason` is `config_error` or `activity_limit_exceeded`.
- When that happens, we have to ship a database migration to fix the data integrity,
which might be impossible if you cannot recover the original value.
Also, you might observe a workaround for this concern by setting an offset in EE's values.
For example, this example sets `1000` as the offset:
```ruby
module EE
module Enums
module Pipeline
override :failure_reasons
def failure_reasons
super.merge(activity_limit_exceeded: 1_000, size_limit_exceeded: 1_001)
end
end
end
end
```
This seems to work as a workaround; however, this approach has some downsides:
- Features could move from EE to FOSS or vice versa. Therefore, the offset might be mixed between FOSS and EE in the future.
For example, when you move `activity_limit_exceeded` to FOSS, you see `{ unknown_failure: 0, config_error: 1, activity_limit_exceeded: 1_000 }`.
- The integer column for the `enum` is likely created [as `SMALLINT`](#creating-enums).
Therefore, you need to be careful that the offset doesn't exceed the maximum value of a 2-byte integer.
In conclusion, you should define all of the key/value pairs in FOSS.
For example, you can simply write the following code in the above case:
```ruby
class Pipeline < ApplicationRecord
enum failure_reason: {
unknown_failure: 0,
config_error: 1,
activity_limit_exceeded: 2
}
end
```
## Add new values in the gap
After merging some EE and FOSS enums, there might be a gap between the two groups of values:
```ruby
module Enums
module Ci
module CommitStatus
def self.failure_reasons
{
# ...
data_integrity_failure: 12,
forward_deployment_failure: 13,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
# ...
}
end
end
end
end
```
To add new values, you should fill the gap first.
In the example above add `14` instead of `1_003`:
```ruby
{
# ...
data_integrity_failure: 12,
forward_deployment_failure: 13,
a_new_value: 14,
insufficient_bridge_permissions: 1_001,
downstream_bridge_project_not_found: 1_002,
# ...
}
```

View File

@ -51,13 +51,13 @@ info: To determine the technical writer assigned to the Stage/Group associated w
- [Serializing data](../serializing_data.md)
- [Hash indexes](../hash_indexes.md)
- [Storing SHA1 hashes as binary](../sha1_as_binary.md)
- [Iterating tables in batches](../iterating_tables_in_batches.md)
- [Iterating tables in batches](iterating_tables_in_batches.md)
- [Insert into tables in batches](insert_into_tables_in_batches.md)
- [Ordering table columns](ordering_table_columns.md)
- [Verifying database capabilities](../verifying_database_capabilities.md)
- [Database Debugging and Troubleshooting](../database_debugging.md)
- [Query Count Limits](../query_count_limits.md)
- [Creating enums](../creating_enums.md)
- [Query Count Limits](query_count_limits.md)
- [Creating enums](creating_enums.md)
- [Client-side connection-pool](client_side_connection_pool.md)
- [Updating multiple values](setting_multiple_values.md)
- [Constraints naming conventions](constraint_naming_convention.md)

View File

@ -0,0 +1,598 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Iterating tables in batches
Rails provides a method called `in_batches` that can be used to iterate over
rows in batches. For example:
```ruby
User.in_batches(of: 10) do |relation|
relation.update_all(updated_at: Time.now)
end
```
Unfortunately, this method is implemented in a way that is not very efficient,
both query and memory usage wise.
To work around this you can include the `EachBatch` module into your models,
then use the `each_batch` class method. For example:
```ruby
class User < ActiveRecord::Base
include EachBatch
end
User.each_batch(of: 10) do |relation|
relation.update_all(updated_at: Time.now)
end
```
This produces queries such as:
```plaintext
User Load (0.7ms) SELECT "users"."id" FROM "users" WHERE ("users"."id" >= 41654) ORDER BY "users"."id" ASC LIMIT 1 OFFSET 1000
(0.7ms) SELECT COUNT(*) FROM "users" WHERE ("users"."id" >= 41654) AND ("users"."id" < 42687)
```
The API of this method is similar to `in_batches`, though it doesn't support
all of the arguments that `in_batches` supports. You should always use
`each_batch` _unless_ you have a specific need for `in_batches`.
## Iterating over non-unique columns
One should proceed with extra caution. When you iterate over an attribute that is not unique,
even with the applied max batch size, there is no guarantee that the resulting batches do not
surpass it. The following snippet demonstrates this situation: when one attempts to select
`Ci::Build` entries for users with `id` between `1` and `10,000`, the database returns
`1,215,178` matching rows.
```ruby
[ gstg ] production> Ci::Build.where(user_id: (1..10_000)).size
=> 1215178
```
This happens because the built relation is translated into the following query:
```ruby
[ gstg ] production> puts Ci::Build.where(user_id: (1..10_000)).to_sql
SELECT "ci_builds".* FROM "ci_builds" WHERE "ci_builds"."type" = 'Ci::Build' AND "ci_builds"."user_id" BETWEEN 1 AND 10000
=> nil
```
In queries that filter a non-unique column by range (`WHERE "ci_builds"."user_id" BETWEEN ? AND ?`),
even though the range size is limited to a certain threshold (`10,000` in the previous example), this
threshold does not translate to the size of the returned dataset. That happens because when taking
`n` possible values of an attribute, one can't tell for sure that the number of records that contain
them is less than `n`.
### Loose-index scan with `distinct_each_batch`
When iterating over a non-unique column is necessary, use the `distinct_each_batch` helper
method. The helper uses the [loose-index scan technique](https://wiki.postgresql.org/wiki/Loose_indexscan)
(skip-index scan) to skip duplicated values within a database index.
Example: iterating over distinct `author_id` in the Issue model
```ruby
Issue.distinct_each_batch(column: :author_id, of: 1000) do |relation|
users = User.where(id: relation.select(:author_id)).to_a
end
```
The technique provides stable performance between the batches regardless of the data distribution.
The `relation` object returns an ActiveRecord scope where only the given `column` is available.
Other columns are not loaded.
The underlying database queries use recursive CTEs, which adds extra overhead. We therefore advise using
smaller batch sizes than those used for a standard `each_batch` iteration.
## Column definition
`EachBatch` uses the primary key of the model by default for the iteration. This works in most
cases; however, in some cases you might want to use a different column for the iteration.
```ruby
Project.distinct.each_batch(column: :creator_id, of: 10) do |relation|
puts User.where(id: relation.select(:creator_id)).map(&:id)
end
```
The query above iterates over the project creators and prints them out without duplications.
NOTE:
If the column is not unique (no unique index definition), calling the `distinct` method on
the relation is necessary. Using a non-unique column without `distinct` may result in `each_batch`
falling into an endless loop, as described in the following
[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/285097).
## `EachBatch` in data migrations
When dealing with data migrations the preferred way to iterate over a large volume of data is using
`EachBatch`.
A special case of data migration is a [background migration](background_migrations.md#scheduling)
where the actual data modification is executed in a background job. The migration code that
determines the data ranges (slices) and schedules the background jobs uses `each_batch`.
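As a rough sketch, the scheduling part of such a migration might look like the following; the migration class, job name, table, and intervals are illustrative assumptions rather than an actual migration:
```ruby
# Illustrative post-deployment migration: the helper slices the table into id
# ranges with each_batch and queues one background job per range.
class ScheduleBackfillExample < Gitlab::Database::Migration[2.0]
  MIGRATION = 'BackfillExample' # hypothetical background migration job class
  DELAY_INTERVAL = 2.minutes
  BATCH_SIZE = 1_000

  disable_ddl_transaction!
  restrict_gitlab_migration gitlab_schema: :gitlab_main

  def up
    queue_background_migration_jobs_by_range_at_intervals(
      define_batchable_model('projects'),
      MIGRATION,
      DELAY_INTERVAL,
      batch_size: BATCH_SIZE
    )
  end

  def down
    # no-op: the queued jobs are expected to be idempotent
  end
end
```
Because the helper iterates with `each_batch` internally, scheduling reads only the `id` ranges rather than the full rows.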
## Efficient usage of `each_batch`
`EachBatch` helps to iterate over large tables. It's important to highlight that `EachBatch`
does not magically solve all iteration-related performance problems, and it might not help at
all in some scenarios. From the database point of view, correctly configured database indexes are
also necessary to make `EachBatch` perform well.
### Example 1: Simple iteration
Let's consider that we want to iterate over the `users` table and print the `User` records to the
standard output. The `users` table contains millions of records, thus running one query to fetch
the users likely times out.
![Users table overview](../img/each_batch_users_table_v13_7.png)
This is a simplified version of the `users` table which contains several rows. We have a few
smaller gaps in the `id` column to make the example a bit more realistic (a few records were
already deleted). Currently, we have one index on the `id` field.
Loading all users into memory (avoid):
```ruby
users = User.all
users.each { |user| puts user.inspect }
```
Use `each_batch`:
```ruby
# Note: for this example I picked 5 as the batch size, the default is 1_000
User.each_batch(of: 5) do |relation|
relation.each { |user| puts user.inspect }
end
```
#### How `each_batch` works
As the first step, it finds the lowest `id` (start `id`) in the table by executing the following
database query:
```sql
SELECT "users"."id" FROM "users" ORDER BY "users"."id" ASC LIMIT 1
```
![Reading the start ID value](../img/each_batch_users_table_iteration_1_v13_7.png)
Notice that the query only reads data from the index (`INDEX ONLY SCAN`), the table is not
accessed. Database indexes are sorted so taking out the first item is a very cheap operation.
The next step is to find the next `id` (end `id`) which should respect the batch size
configuration. In this example we used a batch size of 5. `EachBatch` uses the `OFFSET` clause
to get a "shifted" `id` value.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 1 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
```
![Reading the end ID value](../img/each_batch_users_table_iteration_2_v13_7.png)
Again, the query only looks into the index. The `OFFSET 5` takes out the sixth `id` value: this
query reads a maximum of six items from the index regardless of the table size or the iteration
count.
At this point, we know the `id` range for the first batch. Now it's time to construct the query
for the `relation` block.
```sql
SELECT "users".* FROM "users" WHERE "users"."id" >= 1 AND "users"."id" < 302
```
![Reading the rows from the `users` table](../img/each_batch_users_table_iteration_3_v13_7.png)
Notice the `<` sign. Previously six items were read from the index and in this query, the last
value is "excluded". The query looks at the index to get the location of the five `user`
rows on the disk and reads the rows from the table. The returned array is processed in Ruby.
The first iteration is done. For the next iteration, the last `id` value is reused from the
previous iteration in order to find out the next end `id` value.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 302 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
```
![Reading the second end ID value](../img/each_batch_users_table_iteration_4_v13_7.png)
Now we can easily construct the `users` query for the second iteration.
```sql
SELECT "users".* FROM "users" WHERE "users"."id" >= 302 AND "users"."id" < 353
```
![Reading the rows for the second iteration from the users table](../img/each_batch_users_table_iteration_5_v13_7.png)
### Example 2: Iteration with filters
Building on top of the previous example, we want to print users with zero sign-in count. We keep
track of the number of sign-ins in the `sign_in_count` column so we write the following code:
```ruby
users = User.where(sign_in_count: 0)
users.each_batch(of: 5) do |relation|
relation.each { |user| puts user.inspect }
end
```
`each_batch` produces the following SQL query for the start `id` value:
```sql
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
```
Selecting only the `id` column and ordering by `id` forces the database to use the
index on the `id` column (the primary key index); however, we also have an extra condition on the
`sign_in_count` column. The column is not part of the index, so the database needs to look into
the actual table to find the first matching row.
![Reading the index with extra filter](../img/each_batch_users_table_filter_v13_7.png)
NOTE:
The number of scanned rows depends on the data distribution in the table.
- Best case scenario: the first user has never logged in. The database reads only one row.
- Worst case scenario: all users have logged in at least once. The database reads all rows.
In this particular example, the database had to read 10 rows (regardless of our batch size setting)
to determine the first `id` value. In a "real-world" application it's hard to predict whether the
filtering causes problems or not. In the case of GitLab, verifying the data on a
production replica is a good start, but keep in mind that data distribution on GitLab.com can be
different from self-managed instances.
#### Improve filtering with `each_batch`
##### Specialized conditional index
```sql
CREATE INDEX index_on_users_never_logged_in ON users (id) WHERE sign_in_count = 0
```
This is what our table and the newly created index look like:
![Reading the specialized index](../img/each_batch_users_table_filtered_index_v13_7.png)
This index definition covers the conditions on the `id` and `sign_in_count` columns and thus makes the
`each_batch` queries very effective (similar to the simple iteration example).
It's rare for a user to have never signed in, so we anticipate a small index size. Including only the
`id` in the index definition also helps to keep the index size small.
##### Index on columns
Later on, we might want to iterate over the table filtering for different `sign_in_count` values; in
those cases we cannot use the previously suggested conditional index because the `WHERE` condition
does not match our new filter (`sign_in_count > 10`).
To address this problem, we have two options:
- Create another, conditional index to cover the new query.
- Replace the index with a more generalized configuration.
NOTE:
Having multiple indexes on the same table and on the same columns could be a performance bottleneck
when writing data.
Let's consider the following index (avoid):
```sql
CREATE INDEX index_on_users_never_logged_in ON users (id, sign_in_count)
```
The index definition starts with the `id` column, which makes the index very inefficient from a data
selectivity point of view.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
```
Executing the query above results in an `INDEX ONLY SCAN`. However, the query still needs to
iterate over an unknown number of entries in the index, and then find the first item where the
`sign_in_count` is `0`.
![Reading an ineffective index](../img/each_batch_users_table_bad_index_v13_7.png)
We can improve the query significantly by swapping the columns in the index definition (prefer).
```sql
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count, id)
```
![Reading a good index](../img/each_batch_users_table_good_index_v13_7.png)
The following index definition does not work well with `each_batch` (avoid).
```sql
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count)
```
Since `each_batch` builds range queries based on the `id` column, this index cannot be used
efficiently. The DB reads the rows from the table or uses a bitmap search where the primary
key index is also read.
##### "Slow" iteration
Slow iteration means that we use a good index configuration to iterate over the table and
apply filtering on the yielded relation.
```ruby
User.each_batch(of: 5) do |relation|
relation.where(sign_in_count: 0).each { |user| puts user.inspect }
end
```
The iteration uses the primary key index (on the `id` column) which makes it safe from statement
timeouts. The filter (`sign_in_count: 0`) is applied on the `relation` where the `id` is already
constrained (range). The number of rows is limited.
Slow iteration generally takes more time to finish. The iteration count is higher and
one iteration could yield fewer records than the batch size. Iterations may even yield
0 records. This is not an optimal solution; however, in some cases (especially when
dealing with large tables) this is the only viable option.
### Using Subqueries
Using subqueries in your `each_batch` query does not work well in most cases. Consider the following example:
```ruby
projects = Project.where(creator_id: Issue.where(confidential: true).select(:author_id))
projects.each_batch do |relation|
# do something
end
```
The iteration uses the `id` column of the `projects` table. The batching does not affect the
subquery. This means for each iteration, the subquery is executed by the database. This adds a
constant "load" on the query which often ends up in statement timeouts. We have an unknown number
of [confidential issues](../../user/project/issues/confidential_issues.md); the execution time
and the number of accessed database rows depend on the data distribution in the `issues` table.
NOTE:
Using subqueries works only when the subquery returns a small number of rows.
#### Improving Subqueries
When dealing with subqueries, a slow iteration approach could work: the filter on `creator_id`
can be part of the generated `relation` object.
```ruby
projects = Project.all
projects.each_batch do |relation|
relation.where(creator_id: Issue.where(confidential: true).select(:author_id))
end
```
If the query on the `issues` table itself is not performant enough, a nested loop could be
constructed. Try to avoid it when possible.
```ruby
projects = Project.all
projects.each_batch do |relation|
issues = Issue.where(confidential: true)
issues.each_batch do |issues_relation|
relation.where(creator_id: issues_relation.select(:author_id))
end
end
```
If we know that the `issues` table has many more rows than `projects`, it would make sense to flip
the queries, where the `issues` table is batched first.
### Using `JOIN` and `EXISTS`
When to use `JOINS`:
- When there's a 1:1 or 1:N relationship between the tables where we know that the joined record
(almost) always exists. This works well for "extension-like" tables:
- `projects` - `project_settings`
- `users` - `user_details`
- `users` - `user_statuses`
- `LEFT JOIN` works well in this case. Conditions on the joined table need to go to the yielded
relation so the iteration is not affected by the data distribution in the joined table.
Example:
```ruby
users = User.joins("LEFT JOIN personal_access_tokens on personal_access_tokens.user_id = users.id")
users.each_batch do |relation|
relation.where("personal_access_tokens.name = 'name'")
end
```
`EXISTS` queries should be added only to the inner `relation` of the `each_batch` query:
```ruby
User.each_batch do |relation|
relation.where("EXISTS (SELECT 1 FROM ...")
end
```
### Complex queries on the relation object
When the `relation` object has several extra conditions, the execution plans might become
"unstable".
Example:
```ruby
Issue.each_batch do |relation|
relation
.joins(:metrics)
.joins(:merge_requests_closing_issues)
.where("id IN (SELECT ...)")
.where(confidential: true)
end
```
Here, we expect that the `relation` query reads at most `BATCH_SIZE` records and then
filters down the results according to the provided queries. The planner might decide that
using a bitmap index lookup with the index on the `confidential` column is a better way to
execute the query. This can cause an unexpectedly high amount of rows to be read and the
query could time out.
Problem: we know for sure that the relation returns at most `BATCH_SIZE` records;
however, the planner does not know this.
Common table expression (CTE) trick to force the range query to execute first:
```ruby
Issue.each_batch(of: 1000) do |relation|
cte = Gitlab::SQL::CTE.new(:batched_relation, relation.limit(1000))
scope = cte
.apply_to(Issue.all)
.joins(:metrics)
.joins(:merge_requests_closing_issues)
.where("id IN (SELECT ...)")
.where(confidential: true)
puts scope.to_a
end
```
### `EachBatch` vs `BatchCount`
When adding new counters for Service Ping, the preferred way to count records is using the
`Gitlab::Database::BatchCount` class. The iteration logic implemented in `BatchCount`
has performance characteristics similar to `EachBatch`. Most of the tips and suggestions
mentioned above for improving `each_batch` queries apply to `BatchCount` as well.
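For reference, a `BatchCount`-based counter might look like the following minimal sketch; the relations and column are arbitrary examples:
```ruby
# Counts rows in id-based batches instead of one large COUNT(*) query.
Gitlab::Database::BatchCount.batch_count(User.active)

# Distinct counts over a non-unique column use batch_distinct_count.
Gitlab::Database::BatchCount.batch_distinct_count(Issue, :author_id)
```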
## Iterate with keyset pagination
There are a few special cases where iterating with `EachBatch` does not work. `EachBatch`
requires one distinct column (usually the primary key), which makes the iteration impossible
for timestamp columns and tables with composite primary keys.
Where `EachBatch` does not work, you can use
[keyset pagination](pagination_guidelines.md#keyset-pagination) to iterate over the
table or a range of rows. The scaling and performance characteristics are very similar to
`EachBatch`.
Examples:
- Iterate over the table in a specific order (timestamp columns) in combination with a tie-breaker
if the column used to sort by does not contain unique values.
- Iterate over the table with composite primary keys.
### Iterate over the issues in a project by creation date
You can use keyset pagination to iterate over any database column in a specific order (for example,
`created_at DESC`). To ensure consistent order of the returned records with the same values for
`created_at`, use a tie-breaker column with unique values (for example, `id`).
Assume you have the following index in the `issues` table:
```sql
"idx_issues_on_project_id_and_created_at_and_id" btree (project_id, created_at, id)
```
### Fetching records for further processing
The following snippet iterates over issue records within the project using the specified order
(`created_at, id`).
```ruby
scope = Issue.where(project_id: 278964).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map(&:id)
end
```
You can add extra filters to the query. This example only lists the issue IDs created in the last
30 days:
```ruby
scope = Issue.where(project_id: 278964).where('created_at > ?', 30.days.ago).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map(&:id)
end
```
### Updating records in the batch
For complex `ActiveRecord` queries, the `.update_all` method does not work well, because it
generates an incorrect `UPDATE` statement.
You can use raw SQL for updating records in batches:
```ruby
scope = Issue.where(project_id: 278964).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
ApplicationRecord.connection.execute("UPDATE issues SET updated_at=NOW() WHERE issues.id in (#{records.dup.reselect(:id).to_sql})")
end
```
NOTE:
To keep the iteration stable and predictable, avoid updating the columns in the `ORDER BY` clause.
### Iterate over the `merge_request_diff_commits` table
The `merge_request_diff_commits` table uses a composite primary key (`merge_request_diff_id,
relative_order`), which makes `EachBatch` impossible to use efficiently.
To paginate over the `merge_request_diff_commits` table, you can use the following snippet:
```ruby
# Custom order object configuration:
order = Gitlab::Pagination::Keyset::Order.build([
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
attribute_name: 'merge_request_diff_id',
order_expression: MergeRequestDiffCommit.arel_table[:merge_request_diff_id].asc,
nullable: :not_nullable,
distinct: false,
),
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
attribute_name: 'relative_order',
order_expression: MergeRequestDiffCommit.arel_table[:relative_order].asc,
nullable: :not_nullable,
distinct: false,
)
])
MergeRequestDiffCommit.include(FromUnion) # keyset pagination generates UNION queries
scope = MergeRequestDiffCommit.order(order)
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map { |record| [record.merge_request_diff_id, record.relative_order] }.inspect
end
```
### Order object configuration
Keyset pagination works well with simple `ActiveRecord` `order` scopes
([first example](#iterate-over-the-issues-in-a-project-by-creation-date)).
However, in special cases, you need to describe the columns in the `ORDER BY` clause (second example)
for the underlying keyset pagination library. When the `ORDER BY` configuration cannot be
automatically determined by the keyset pagination library, an error is raised.
The code comments of the
[`Gitlab::Pagination::Keyset::Order`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/pagination/keyset/order.rb)
and [`Gitlab::Pagination::Keyset::ColumnOrderDefinition`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/pagination/keyset/column_order_definition.rb)
classes give an overview of the possible options for configuring the `ORDER BY` clause. You can
also find a few code examples in the
[keyset pagination](keyset_pagination.md#complex-order-configuration) documentation.

View File

@ -246,7 +246,7 @@ where projects_with_ci_feature_usage.ci_feature = 'code_coverage'
```
The above example uses a text column for simplicity, but we should probably
use an [enum](../creating_enums.md) to save space.
use an [enum](creating_enums.md) to save space.
The downside of this new design is that this may need to be
updated (removed if the `ci_daily_build_group_report_results` is deleted).

View File

@ -0,0 +1,70 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Query Count Limits
Each controller or API endpoint is allowed to execute up to 100 SQL queries and
in test environments we raise an error when this threshold is exceeded.
## Solving Failing Tests
When a test fails because it executes more than 100 SQL queries there are two
solutions to this problem:
- Reduce the number of SQL queries that are executed.
- Disable query limiting for the controller or API endpoint.
You should only resort to disabling query limits when an existing controller or endpoint
is to blame, as in that case reducing the number of SQL queries can take a lot of
effort. Newly added controllers and endpoints are not allowed to execute more
than 100 SQL queries, and no exceptions are made for this rule. _If_ a large
number of SQL queries is necessary to perform certain work, it's best to have
this work performed by Sidekiq instead of doing it directly in a web request.
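A minimal sketch of that approach, with hypothetical names:

```ruby
# Hypothetical worker that performs the query-heavy work outside the request.
class ExpensiveRecalculationWorker
  include ApplicationWorker

  def perform(project_id)
    project = Project.find_by_id(project_id)
    return unless project

    # Run the many SQL queries here, outside the request/response cycle.
  end
end

# In the controller or endpoint, only enqueue the job:
ExpensiveRecalculationWorker.perform_async(project.id)
```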
## Disable query limiting
In the event that you _have_ to disable query limits for a controller, you must first
create an issue. This issue should (preferably in the title) mention the
controller or endpoint and include the appropriate labels (`database`,
`performance`, and at least a team-specific label such as `Discussion`).
After the issue has been created, you can disable query limits on the code in question. For
Rails controllers it's best to create a `before_action` hook that runs as early
as possible. The called method in turn should call
`Gitlab::QueryLimiting.disable!('issue URL here')`. For example:
```ruby
class MyController < ApplicationController
before_action :disable_query_limiting, only: [:show]
def index
# ...
end
def show
# ...
end
def disable_query_limiting
Gitlab::QueryLimiting.disable!('https://gitlab.com/gitlab-org/...')
end
end
```
By using a `before_action` you don't have to modify the controller method in
question, reducing the likelihood of merge conflicts.
For Grape API endpoints, there is unfortunately no reliable way of running a
hook before a specific endpoint. This means that you have to add the allowlist
call directly to the endpoint, like so:
```ruby
get '/projects/:id/foo' do
Gitlab::QueryLimiting.disable!('...')
# ...
end
```

View File

@ -223,6 +223,15 @@ Gitlab::EventStore.publish(
)
```
Events should be dispatched from the relevant Service class whenever possible. Some
exceptions exist where we may allow models to publish events, like in state machine transitions.
For example, instead of scheduling `Ci::BuildFinishedWorker`, which runs a collection of side effects,
we could publish a `Ci::BuildFinishedEvent` and let other domains react asynchronously.
`ActiveRecord` callbacks are too low-level to represent a domain event. They represent database
record changes rather than domain concepts. There might be cases where publishing from a model
would make sense, but we should consider those exceptions.
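For illustration, a hedged sketch of publishing such an event from a service (the service and the data shape are hypothetical):

```ruby
# Hypothetical service: publish a domain event instead of scheduling a
# specific worker with hard-coded side effects.
class Ci::FinishBuildService
  def execute(build)
    build.finish!

    Gitlab::EventStore.publish(
      Ci::BuildFinishedEvent.new(data: { build_id: build.id })
    )
  end
end
```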
## Create a subscriber
A subscriber is a Sidekiq worker that includes the `Gitlab::EventStore::Subscriber` module.
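A minimal sketch of such a worker, assuming the event carries a `build_id` in its data:

```ruby
# Illustrative subscriber: reacts asynchronously to the published event.
class Ci::BuildFinishedSubscriberWorker
  include ApplicationWorker
  include Gitlab::EventStore::Subscriber

  def handle_event(event)
    build = Ci::Build.find_by_id(event.data[:build_id])
    return unless build

    # React to the finished build here.
  end
end
```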
@ -320,7 +329,7 @@ it 'publishes a ProjectCreatedEvent with project id and namespace id' do
# The project ID will only be generated when the `create_project`
# is called in the expect block.
expected_data = { project_id: kind_of(Numeric), namespace_id: group_id }
expect { create_project(user, name: 'Project', path: 'project', namespace_id: group_id) }
.to publish_event(Projects::ProjectCreatedEvent)
.with(expected_data)

View File

@ -2001,7 +2001,11 @@ relative to `app/graphql/queries` folder: for example, if we need a
### Mocked client returns empty objects instead of mock response
If your unit test is failing because response contains empty objects instead of mock data, you would need to add `__typename` field to the mocked response. This happens because mocked client (unlike the real one) does not populate the response with typenames and in some cases we need to do it manually so the client is able to recognize a GraphQL type.
If your unit test is failing because the response contains empty objects instead of mock data, add
the `__typename` field to the mocked responses.
Alternatively, [GraphQL query fixtures](../testing_guide/frontend_testing.md#graphql-query-fixtures)
automatically add the `__typename` for you upon generation.
### Warning about losing cache data

View File

@ -1,598 +1,11 @@
---
stage: Data Stores
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: 'database/iterating_tables_in_batches.md'
remove_date: '2022-11-06'
---
# Iterating tables in batches
This document was moved to [another location](database/iterating_tables_in_batches.md).
Rails provides a method called `in_batches` that can be used to iterate over
rows in batches. For example:
```ruby
User.in_batches(of: 10) do |relation|
relation.update_all(updated_at: Time.now)
end
```
Unfortunately, this method is implemented in a way that is not very efficient,
in terms of both query and memory usage.
To work around this you can include the `EachBatch` module into your models,
then use the `each_batch` class method. For example:
```ruby
class User < ActiveRecord::Base
include EachBatch
end
User.each_batch(of: 10) do |relation|
relation.update_all(updated_at: Time.now)
end
```
This produces queries such as:
```plaintext
User Load (0.7ms) SELECT "users"."id" FROM "users" WHERE ("users"."id" >= 41654) ORDER BY "users"."id" ASC LIMIT 1 OFFSET 1000
(0.7ms) SELECT COUNT(*) FROM "users" WHERE ("users"."id" >= 41654) AND ("users"."id" < 42687)
```
The API of this method is similar to `in_batches`, though it doesn't support
all of the arguments that `in_batches` supports. You should always use
`each_batch` _unless_ you have a specific need for `in_batches`.
## Iterating over non-unique columns
One should proceed with extra caution. When you iterate over an attribute that is not unique,
even with the applied max batch size, there is no guarantee that the resulting batches do not
surpass it. The following snippet demonstrates this situation: when we attempt to select
`Ci::Build` entries for users with `id` between `1` and `10,000`, the database returns
`1,215,178` matching rows.
```ruby
[ gstg ] production> Ci::Build.where(user_id: (1..10_000)).size
=> 1215178
```
This happens because the built relation is translated into the following query:
```ruby
[ gstg ] production> puts Ci::Build.where(user_id: (1..10_000)).to_sql
SELECT "ci_builds".* FROM "ci_builds" WHERE "ci_builds"."type" = 'Ci::Build' AND "ci_builds"."user_id" BETWEEN 1 AND 10000
=> nil
```
For queries that filter a non-unique column by range (`WHERE "ci_builds"."user_id" BETWEEN ? AND ?`),
even though the range size is limited to a certain threshold (`10,000` in the previous example), this
threshold does not translate to the size of the returned dataset. That happens because when taking
`n` possible values of an attribute, one can't tell for sure that the number of records containing
them is less than `n`.
### Loose-index scan with `distinct_each_batch`
When iterating over a non-unique column is necessary, use the `distinct_each_batch` helper
method. The helper uses the [loose-index scan technique](https://wiki.postgresql.org/wiki/Loose_indexscan)
(skip-index scan) to skip duplicated values within a database index.
Example: iterating over distinct `author_id` in the Issue model
```ruby
Issue.distinct_each_batch(column: :author_id, of: 1000) do |relation|
users = User.where(id: relation.select(:author_id)).to_a
end
```
The technique provides stable performance between the batches regardless of the data distribution.
The yielded `relation` object is an ActiveRecord scope where only the given `column` is available.
Other columns are not loaded.
The underlying database queries use recursive CTEs, which adds extra overhead. We therefore advise using
smaller batch sizes than those used for a standard `each_batch` iteration.
## Column definition
`EachBatch` uses the primary key of the model by default for the iteration. This works in most
cases. However, in some cases you might want to use a different column for the iteration.
```ruby
Project.distinct.each_batch(column: :creator_id, of: 10) do |relation|
puts User.where(id: relation.select(:creator_id)).map(&:id)
end
```
The query above iterates over the project creators and prints them out without duplications.
NOTE:
In case the column is not unique (no unique index definition), calling the `distinct` method on
the relation is necessary. Using a non-unique column without `distinct` may result in `each_batch`
falling into an endless loop, as described in the following
[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/285097).
## `EachBatch` in data migrations
When dealing with data migrations, the preferred way to iterate over a large volume of data is using
`EachBatch`.
A special case of data migration is a [background migration](database/background_migrations.md#scheduling)
where the actual data modification is executed in a background job. The migration code that
determines the data ranges (slices) and schedules the background jobs uses `each_batch`.
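For illustration, a hedged sketch of such a scheduling migration (the migration class, job name, table, and batch size are all hypothetical):

```ruby
# Illustrative post-deployment migration: each_batch slices the table into id
# ranges and schedules one background job per range.
class ScheduleBackfillExample < Gitlab::Database::Migration[2.0]
  MIGRATION = 'BackfillExample'
  BATCH_SIZE = 10_000
  DELAY_INTERVAL = 2.minutes

  disable_ddl_transaction!

  # Model scoped to the migration, with EachBatch included.
  class Issue < MigrationRecord
    include EachBatch

    self.table_name = 'issues'
  end

  def up
    Issue.each_batch(of: BATCH_SIZE) do |relation, index|
      min_id, max_id = relation.pluck(Arel.sql('MIN(id)'), Arel.sql('MAX(id)')).first

      migrate_in(index * DELAY_INTERVAL, MIGRATION, [min_id, max_id])
    end
  end

  def down
    # no-op
  end
end
```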
## Efficient usage of `each_batch`
`EachBatch` helps to iterate over large tables. It's important to highlight that `EachBatch`
does not magically solve all iteration-related performance problems, and it might not help at
all in some scenarios. From the database point of view, correctly configured database indexes are
also necessary to make `EachBatch` perform well.
### Example 1: Simple iteration
Let's consider that we want to iterate over the `users` table and print the `User` records to the
standard output. The `users` table contains millions of records, thus running one query to fetch
the users likely times out.
![Users table overview](img/each_batch_users_table_v13_7.png)
This is a simplified version of the `users` table which contains several rows. We have a few
smaller gaps in the `id` column to make the example a bit more realistic (a few records were
already deleted). Currently, we have one index on the `id` field.
Loading all users into memory (avoid):
```ruby
users = User.all
users.each { |user| puts user.inspect }
```
Use `each_batch`:
```ruby
# Note: for this example I picked 5 as the batch size, the default is 1_000
User.each_batch(of: 5) do |relation|
relation.each { |user| puts user.inspect }
end
```
#### How `each_batch` works
As the first step, it finds the lowest `id` (start `id`) in the table by executing the following
database query:
```sql
SELECT "users"."id" FROM "users" ORDER BY "users"."id" ASC LIMIT 1
```
![Reading the start ID value](img/each_batch_users_table_iteration_1_v13_7.png)
Notice that the query only reads data from the index (`INDEX ONLY SCAN`); the table is not
accessed. Database indexes are sorted, so taking out the first item is a very cheap operation.
The next step is to find the next `id` (end `id`) which should respect the batch size
configuration. In this example we used a batch size of 5. `EachBatch` uses the `OFFSET` clause
to get a "shifted" `id` value.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 1 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
```
![Reading the end ID value](img/each_batch_users_table_iteration_2_v13_7.png)
Again, the query only looks into the index. The `OFFSET 5` takes out the sixth `id` value: this
query reads a maximum of six items from the index regardless of the table size or the iteration
count.
At this point, we know the `id` range for the first batch. Now it's time to construct the query
for the `relation` block.
```sql
SELECT "users".* FROM "users" WHERE "users"."id" >= 1 AND "users"."id" < 302
```
![Reading the rows from the `users` table](img/each_batch_users_table_iteration_3_v13_7.png)
Notice the `<` sign. Previously, six items were read from the index, and in this query, the last
value is "excluded". The query looks at the index to get the location of the five `user`
rows on the disk and reads the rows from the table. The returned array is processed in Ruby.
The first iteration is done. For the next iteration, the last `id` value is reused from the
previous iteration in order to find out the next end `id` value.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."id" >= 302 ORDER BY "users"."id" ASC LIMIT 1 OFFSET 5
```
![Reading the second end ID value](img/each_batch_users_table_iteration_4_v13_7.png)
Now we can easily construct the `users` query for the second iteration.
```sql
SELECT "users".* FROM "users" WHERE "users"."id" >= 302 AND "users"."id" < 353
```
![Reading the rows for the second iteration from the users table](img/each_batch_users_table_iteration_5_v13_7.png)
### Example 2: Iteration with filters
Building on top of the previous example, we want to print users with zero sign-in count. We keep
track of the number of sign-ins in the `sign_in_count` column so we write the following code:
```ruby
users = User.where(sign_in_count: 0)
users.each_batch(of: 5) do |relation|
relation.each { |user| puts user.inspect }
end
```
`each_batch` produces the following SQL query for the start `id` value:
```sql
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
```
Selecting only the `id` column and ordering by `id` forces the database to use the
index on the `id` column (the primary key index). However, we also have an extra condition on the
`sign_in_count` column. The column is not part of the index, so the database needs to look into
the actual table to find the first matching row.
![Reading the index with extra filter](img/each_batch_users_table_filter_v13_7.png)
NOTE:
The number of scanned rows depends on the data distribution in the table.
- Best case scenario: the first user has never logged in. The database reads only one row.
- Worst case scenario: all users have logged in at least once. The database reads all rows.
In this particular example, the database had to read 10 rows (regardless of our batch size setting)
to determine the first `id` value. In a "real-world" application it's hard to predict whether the
filtering causes problems or not. In the case of GitLab, verifying the data on a
production replica is a good start, but keep in mind that data distribution on GitLab.com can be
different from self-managed instances.
#### Improve filtering with `each_batch`
##### Specialized conditional index
```sql
CREATE INDEX index_on_users_never_logged_in ON users (id) WHERE sign_in_count = 0
```
This is what our table and the newly created index look like:
![Reading the specialized index](img/each_batch_users_table_filtered_index_v13_7.png)
This index definition covers the conditions on the `id` and `sign_in_count` columns and thus makes the
`each_batch` queries very effective (similar to the simple iteration example).
It's rare for a user to have never signed in, so we anticipate a small index size. Including only the
`id` in the index definition also helps to keep the index size small.
##### Index on columns
Later on, we might want to iterate over the table filtering for different `sign_in_count` values. In
those cases, we cannot use the previously suggested conditional index because the `WHERE` condition
does not match our new filter (`sign_in_count > 10`).
To address this problem, we have two options:
- Create another, conditional index to cover the new query.
- Replace the index with a more generalized configuration.
NOTE:
Having multiple indexes on the same table and on the same columns could be a performance bottleneck
when writing data.
Let's consider the following index (avoid):
```sql
CREATE INDEX index_on_users_never_logged_in ON users (id, sign_in_count)
```
The index definition starts with the `id` column, which makes the index very inefficient from a data
selectivity point of view.
```sql
SELECT "users"."id" FROM "users" WHERE "users"."sign_in_count" = 0 ORDER BY "users"."id" ASC LIMIT 1
```
Executing the query above results in an `INDEX ONLY SCAN`. However, the query still needs to
iterate over an unknown number of entries in the index, and then find the first item where the
`sign_in_count` is `0`.
![Reading an ineffective index](img/each_batch_users_table_bad_index_v13_7.png)
We can improve the query significantly by swapping the columns in the index definition (prefer).
```sql
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count, id)
```
![Reading a good index](img/each_batch_users_table_good_index_v13_7.png)
The following index definition does not work well with `each_batch` (avoid).
```sql
CREATE INDEX index_on_users_never_logged_in ON users (sign_in_count)
```
Since `each_batch` builds range queries based on the `id` column, this index cannot be used
efficiently. The DB reads the rows from the table or uses a bitmap search where the primary
key index is also read.
##### "Slow" iteration
Slow iteration means that we use a good index configuration to iterate over the table and
apply filtering on the yielded relation.
```ruby
User.each_batch(of: 5) do |relation|
relation.where(sign_in_count: 0).each { |user| puts user.inspect }
end
```
The iteration uses the primary key index (on the `id` column) which makes it safe from statement
timeouts. The filter (`sign_in_count: 0`) is applied on the `relation` where the `id` is already
constrained (range). The number of rows is limited.
Slow iteration generally takes more time to finish. The iteration count is higher and
one iteration could yield fewer records than the batch size. Iterations may even yield
0 records. This is not an optimal solution; however, in some cases (especially when
dealing with large tables) this is the only viable option.
### Using Subqueries
Using subqueries in your `each_batch` query does not work well in most cases. Consider the following example:
```ruby
projects = Project.where(creator_id: Issue.where(confidential: true).select(:author_id))
projects.each_batch do |relation|
# do something
end
```
The iteration uses the `id` column of the `projects` table. The batching does not affect the
subquery. This means for each iteration, the subquery is executed by the database. This adds a
constant "load" on the query which often ends up in statement timeouts. We have an unknown number
of [confidential issues](../user/project/issues/confidential_issues.md), the execution time
and the accessed database rows depend on the data distribution in the `issues` table.
NOTE:
Using subqueries works only when the subquery returns a small number of rows.
#### Improving Subqueries
When dealing with subqueries, a slow iteration approach could work: the filter on `creator_id`
can be part of the generated `relation` object.
```ruby
projects = Project.all
projects.each_batch do |relation|
relation.where(creator_id: Issue.where(confidential: true).select(:author_id))
end
```
If the query on the `issues` table itself is not performant enough, a nested loop could be
constructed. Try to avoid it when possible.
```ruby
projects = Project.all
projects.each_batch do |relation|
issues = Issue.where(confidential: true)
issues.each_batch do |issues_relation|
relation.where(creator_id: issues_relation.select(:author_id))
end
end
```
If we know that the `issues` table has many more rows than `projects`, it would make sense to flip
the queries, where the `issues` table is batched first.
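For example, one possible shape of the flipped iteration (a sketch, reusing the models from above):

```ruby
# Batch the larger issues table first; for each slice, resolve the matching
# projects from the already-constrained relation.
Issue.each_batch do |issues_relation|
  author_ids = issues_relation.where(confidential: true).select(:author_id)
  projects = Project.where(creator_id: author_ids)

  # do something with projects
end
```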
### Using `JOIN` and `EXISTS`
When to use `JOINS`:
- When there's a 1:1 or 1:N relationship between the tables where we know that the joined record
(almost) always exists. This works well for "extension-like" tables:
- `projects` - `project_settings`
- `users` - `user_details`
- `users` - `user_statuses`
- `LEFT JOIN` works well in this case. Conditions on the joined table need to go to the yielded
relation so the iteration is not affected by the data distribution in the joined table.
Example:
```ruby
users = User.joins("LEFT JOIN personal_access_tokens on personal_access_tokens.user_id = users.id")
users.each_batch do |relation|
relation.where("personal_access_tokens.name = 'name'")
end
```
`EXISTS` queries should be added only to the inner `relation` of the `each_batch` query:
```ruby
User.each_batch do |relation|
relation.where("EXISTS (SELECT 1 FROM ...")
end
```
### Complex queries on the relation object
When the `relation` object has several extra conditions, the execution plans might become
"unstable".
Example:
```ruby
Issue.each_batch do |relation|
relation
.joins(:metrics)
.joins(:merge_requests_closing_issues)
.where("id IN (SELECT ...)")
.where(confidential: true)
end
```
Here, we expect that the `relation` query reads at most `BATCH_SIZE` records and then
filters down the results according to the provided conditions. The planner might decide that
using a bitmap index lookup with the index on the `confidential` column is a better way to
execute the query. This can cause an unexpectedly high number of rows to be read and the
query could time out.
Problem: we know for sure that the relation returns at most `BATCH_SIZE` records;
however, the planner does not know this.
Common table expression (CTE) trick to force the range query to execute first:
```ruby
Issue.each_batch(of: 1000) do |relation|
cte = Gitlab::SQL::CTE.new(:batched_relation, relation.limit(1000))
scope = cte
.apply_to(Issue.all)
.joins(:metrics)
.joins(:merge_requests_closing_issues)
.where("id IN (SELECT ...)")
.where(confidential: true)
puts scope.to_a
end
```
### `EachBatch` vs `BatchCount`
When adding new counters for Service Ping, the preferred way to count records is using the
`Gitlab::Database::BatchCount` class. The iteration logic implemented in `BatchCount`
has performance characteristics similar to `EachBatch`. Most of the tips and suggestions
for improving `each_batch` above apply to `BatchCount` as well.
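For reference, a minimal sketch of both counting helpers (the relations are illustrative):

```ruby
# Service Ping counter using BatchCount (preferred for counters):
Gitlab::Database::BatchCount.batch_count(Issue.where(confidential: true))

# Distinct count over a non-unique column:
Gitlab::Database::BatchCount.batch_distinct_count(::Ci::Build, :user_id)
```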
## Iterate with keyset pagination
There are a few special cases where iterating with `EachBatch` does not work. `EachBatch`
requires one distinct column (usually the primary key), which makes the iteration impossible
for timestamp columns and tables with composite primary keys.
Where `EachBatch` does not work, you can use
[keyset pagination](database/pagination_guidelines.md#keyset-pagination) to iterate over the
table or a range of rows. The scaling and performance characteristics are very similar to
`EachBatch`.
Examples:
- Iterate over the table in a specific order (timestamp columns) in combination with a tie-breaker,
if the column used to sort by does not contain unique values.
- Iterate over the table with composite primary keys.
### Iterate over the issues in a project by creation date
You can use keyset pagination to iterate over any database column in a specific order (for example,
`created_at DESC`). To ensure consistent order of the returned records with the same values for
`created_at`, use a tie-breaker column with unique values (for example, `id`).
Assume you have the following index in the `issues` table:
```sql
"idx_issues_on_project_id_and_created_at_and_id" btree (project_id, created_at, id)
```
### Fetching records for further processing
The following snippet iterates over issue records within the project using the specified order
(`created_at, id`).
```ruby
scope = Issue.where(project_id: 278964).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map(&:id)
end
```
You can add extra filters to the query. This example only lists the issue IDs created in the last
30 days:
```ruby
scope = Issue.where(project_id: 278964).where('created_at > ?', 30.days.ago).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map(&:id)
end
```
### Updating records in the batch
For complex `ActiveRecord` queries, the `.update_all` method does not work well, because it
generates an incorrect `UPDATE` statement.
You can use raw SQL for updating records in batches:
```ruby
scope = Issue.where(project_id: 278964).order(:created_at, :id) # id is the tie-breaker
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
ApplicationRecord.connection.execute("UPDATE issues SET updated_at=NOW() WHERE issues.id in (#{records.dup.reselect(:id).to_sql})")
end
```
NOTE:
To keep the iteration stable and predictable, avoid updating the columns in the `ORDER BY` clause.
### Iterate over the `merge_request_diff_commits` table
The `merge_request_diff_commits` table uses a composite primary key (`merge_request_diff_id,
relative_order`), which makes `EachBatch` impossible to use efficiently.
To paginate over the `merge_request_diff_commits` table, you can use the following snippet:
```ruby
# Custom order object configuration:
order = Gitlab::Pagination::Keyset::Order.build([
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
attribute_name: 'merge_request_diff_id',
order_expression: MergeRequestDiffCommit.arel_table[:merge_request_diff_id].asc,
nullable: :not_nullable,
distinct: false,
),
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
attribute_name: 'relative_order',
order_expression: MergeRequestDiffCommit.arel_table[:relative_order].asc,
nullable: :not_nullable,
distinct: false,
)
])
MergeRequestDiffCommit.include(FromUnion) # keyset pagination generates UNION queries
scope = MergeRequestDiffCommit.order(order)
iterator = Gitlab::Pagination::Keyset::Iterator.new(scope: scope)
iterator.each_batch(of: 100) do |records|
puts records.map { |record| [record.merge_request_diff_id, record.relative_order] }.inspect
end
```
### Order object configuration
Keyset pagination works well with simple `ActiveRecord` `order` scopes
([first example](iterating_tables_in_batches.md#iterate-over-the-issues-in-a-project-by-creation-date).
However, in special cases, you need to describe the columns in the `ORDER BY` clause (second example)
for the underlying keyset pagination library. When the `ORDER BY` configuration cannot be
automatically determined by the keyset pagination library, an error is raised.
The code comments of the
[`Gitlab::Pagination::Keyset::Order`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/pagination/keyset/order.rb)
and [`Gitlab::Pagination::Keyset::ColumnOrderDefinition`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/pagination/keyset/column_order_definition.rb)
classes give an overview of the possible options for configuring the `ORDER BY` clause. You can
also find a few code examples in the
[keyset pagination](database/keyset_pagination.md#complex-order-configuration) documentation.
<!-- This redirect file can be deleted after <2022-11-06>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

View File

@ -193,7 +193,7 @@ costly, time-consuming query to the replicas.
## Use CTEs wisely
Read about [complex queries on the relation object](iterating_tables_in_batches.md#complex-queries-on-the-relation-object) for considerations on how to use CTEs. We have found in some situations that CTEs can become problematic in use (similar to the n+1 problem above). In particular, hierarchical recursive CTE queries such as the CTE in [AuthorizedProjectsWorker](https://gitlab.com/gitlab-org/gitlab/-/issues/325688) are very difficult to optimize and don't scale. We should avoid them when implementing new features that require any kind of hierarchical structure.
Read about [complex queries on the relation object](database/iterating_tables_in_batches.md#complex-queries-on-the-relation-object) for considerations on how to use CTEs. We have found in some situations that CTEs can become problematic in use (similar to the n+1 problem above). In particular, hierarchical recursive CTE queries such as the CTE in [AuthorizedProjectsWorker](https://gitlab.com/gitlab-org/gitlab/-/issues/325688) are very difficult to optimize and don't scale. We should avoid them when implementing new features that require any kind of hierarchical structure.
CTEs have been effectively used as an optimization fence in many simpler cases,
such as this [example](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/43242#note_61416277).

View File

@ -1,70 +1,11 @@
---
stage: none
group: unassigned
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
redirect_to: 'database/query_count_limits.md'
remove_date: '2022-11-06'
---
# Query Count Limits
This document was moved to [another location](database/query_count_limits.md).
Each controller or API endpoint is allowed to execute up to 100 SQL queries and
in test environments we raise an error when this threshold is exceeded.
## Solving Failing Tests
When a test fails because it executes more than 100 SQL queries there are two
solutions to this problem:
- Reduce the number of SQL queries that are executed.
- Disable query limiting for the controller or API endpoint.
You should only resort to disabling query limits when an existing controller or endpoint
is to blame as in this case reducing the number of SQL queries can take a lot of
effort. Newly added controllers and endpoints are not allowed to execute more
than 100 SQL queries and no exceptions are made for this rule. _If_ a large
number of SQL queries is necessary to perform certain work it's best to have
this work performed by Sidekiq instead of doing this directly in a web request.
## Disable query limiting
In the event that you _have_ to disable query limits for a controller, you must first
create an issue. This issue should (preferably in the title) mention the
controller or endpoint and include the appropriate labels (`database`,
`performance`, and at least a team specific label such as `Discussion`).
After the issue has been created, you can disable query limits on the code in question. For
Rails controllers it's best to create a `before_action` hook that runs as early
as possible. The called method in turn should call
`Gitlab::QueryLimiting.disable!('issue URL here')`. For example:
```ruby
class MyController < ApplicationController
before_action :disable_query_limiting, only: [:show]
def index
# ...
end
def show
# ...
end
def disable_query_limiting
Gitlab::QueryLimiting.disable!('https://gitlab.com/gitlab-org/...')
end
end
```
By using a `before_action` you don't have to modify the controller method in
question, reducing the likelihood of merge conflicts.
For Grape API endpoints there unfortunately is not a reliable way of running a
hook before a specific endpoint. This means that you have to add the allowlist
call directly into the endpoint like so:
```ruby
get '/projects/:id/foo' do
Gitlab::QueryLimiting.disable!('...')
# ...
end
```
<!-- This redirect file can be deleted after <2022-11-06>. -->
<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->

View File

@ -94,7 +94,7 @@ add_metric('CountUsersAssociatingMilestonesToReleasesMetric', time_frame: 'all')
```
WARNING:
Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../iterating_tables_in_batches.md) guide.
Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../database/iterating_tables_in_batches.md) guide.
Examples:

View File

@ -59,7 +59,7 @@ Arguments:
- `end`: custom end of the batch counting to avoid complex min calculations
WARNING:
Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../iterating_tables_in_batches.md) guide.
Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../database/iterating_tables_in_batches.md) guide.
Examples:

View File

@ -35,6 +35,10 @@ before we remove them.
|-----------------------|--------------------------|
| GitLab 15.0 or later | OpenSearch 1.x or later |
If your version of Elasticsearch or OpenSearch is incompatible, indexing pauses to prevent data loss, and
a message is logged in the
[`elasticsearch.log`](../../administration/logs/index.md#elasticsearchlog) file.
If you are using a compatible version and you get the message `Elasticsearch version not compatible` after connecting to OpenSearch, [unpause indexing](#unpause-indexing).
## System requirements

View File

@ -38,7 +38,8 @@ module API
attributes[:maintenance_note] ||= deprecated_note if deprecated_note
attributes[:active] = !attributes.delete(:paused) if attributes.include?(:paused)
@runner = ::Ci::Runners::RegisterRunnerService.new.execute(params[:token], attributes)
result = ::Ci::Runners::RegisterRunnerService.new.execute(params[:token], attributes)
@runner = result.success? ? result.payload[:runner] : nil
forbidden! unless @runner
if @runner.persisted?

View File

@ -3,31 +3,45 @@
# This specific template is located at:
# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/MATLAB.gitlab-ci.yml
# Use this template to run MATLAB and Simulink as part of your CI/CD pipeline. The template has three jobs:
# Use this template to run MATLAB and Simulink as part of your CI/CD pipeline. The template includes three jobs:
# - `command`: Run MATLAB scripts, functions, and statements.
# - `test`: Run tests authored using the MATLAB unit testing framework or Simulink Test.
# - `test_artifacts`: Run MATLAB and Simulink tests, and generate test and coverage artifacts.
#
# The jobs in the template use the `matlab -batch` syntax to start MATLAB. The `-batch` option is supported
# in MATLAB R2019a and later.
#
# You can copy and paste one or more jobs in this template into your `.gitlab-ci.yml` file.
# You should not add this template to an existing `.gitlab-ci.yml` file by using the `include:` keyword.
#
# - To run MATLAB and Simulink, MATLAB must be installed on the runner that will run the jobs.
# The runner will use the topmost MATLAB version on the system path.
# The build fails if the operating system cannot find MATLAB on the path.
# - The jobs in this template use the `matlab -batch` syntax to start MATLAB. The `-batch` option is supported
# in MATLAB R2019a and later.
# Your runner must use the Docker executor to run MATLAB within a container. The [MATLAB Container on Docker Hub][1]
# lets you run your build using MATLAB R2020b or a later release. If your build requires additional toolboxes, use a
# custom MATLAB container instead. For more information on how to create and use a custom MATLAB container,
# see [Create a Custom MATLAB Container][2].
#
# [1] https://www.mathworks.com/help/cloudcenter/ug/matlab-container-on-docker-hub.html
# [2] https://www.mathworks.com/help/cloudcenter/ug/create-a-custom-matlab-container.html
# The jobs in this template incorporate the contents of a hidden `.matlab_defaults` job. You need to
# configure this job before running the `command`, `test`, and `test_artifacts` jobs. To configure the job:
# - Specify the name of the MATLAB container image you want to use.
# - Set the `MLM_LICENSE_FILE` environment variable using the port number and DNS address for your network license manager.
#
.matlab_defaults:
image:
name: mathworks/matlab:latest # Replace the value with the name of the MATLAB container image you want to use
entrypoint: [""]
variables:
MLM_LICENSE_FILE: 27000@MyLicenseServer # Replace the value with the port number and DNS address for your network license manager
# The `command` job runs MATLAB scripts, functions, and statements. To use the job in your pipeline,
# substitute `mycommand` with the code you want to run.
#
command:
extends: .matlab_defaults
script: matlab -batch mycommand
# If the value of `mycommand` is the name of a MATLAB script or function, do not specify the file extension.
# For example, to run a script named `myscript.m` in the root of your repository, specify `mycommand` like this:
#
# "myscript"
#
# If you specify more than one script, function, or statement, use a comma or semicolon to separate them.
# For example, to run `myscript.m` in a folder named `myfolder` located in the root of the repository,
# you can specify `mycommand` like this:
@ -36,51 +50,51 @@ command:
#
# MATLAB exits with exit code 0 if the specified script, function, or statement executes successfully without
# error. Otherwise, MATLAB terminates with a nonzero exit code, which causes the job to fail. To have the
# job fail in certain conditions, use the [`assert`][1] or [`error`][2] functions.
# job fail in certain conditions, use the [`assert`][3] or [`error`][4] functions.
#
# [1] https://www.mathworks.com/help/matlab/ref/assert.html
# [2] https://www.mathworks.com/help/matlab/ref/error.html
# [3] https://www.mathworks.com/help/matlab/ref/assert.html
# [4] https://www.mathworks.com/help/matlab/ref/error.html
# The `test` job runs the MATLAB and Simulink tests in your project. It calls the [`runtests`][3] function
# to run the tests and then the [`assertSuccess`][4] method to fail the job if any of the tests fail.
# The `test` job runs the MATLAB and Simulink tests in your project. It calls the [`runtests`][5] function
# to run the tests and then the [`assertSuccess`][6] method to fail the job if any of the tests fail.
#
test:
extends: .matlab_defaults
script: matlab -batch "results = runtests('IncludeSubfolders',true), assertSuccess(results);"
# By default, the job includes any files in your [MATLAB Project][5] that have a `Test` label. If your repository
# By default, the job includes any files in your [MATLAB Project][7] that have a `Test` label. If your repository
# does not have a MATLAB project, then the job includes all tests in the root of your repository or in any of
# its subfolders.
#
# [3] https://www.mathworks.com/help/matlab/ref/runtests.html
# [4] https://www.mathworks.com/help/matlab/ref/matlab.unittest.testresult.assertsuccess.html
# [5] https://www.mathworks.com/help/matlab/projects.html
# [5] https://www.mathworks.com/help/matlab/ref/runtests.html
# [6] https://www.mathworks.com/help/matlab/ref/matlab.unittest.testresult.assertsuccess.html
# [7] https://www.mathworks.com/help/matlab/projects.html
# The `test_artifacts` job runs your tests and additionally generates test and coverage artifacts.
# It uses the plugin classes in the [`matlab.unittest.plugins`][6] package to generate a JUnit test results
# It uses the plugin classes in the [`matlab.unittest.plugins`][8] package to generate a JUnit test results
# report and a Cobertura code coverage report. Like the `test` job, this job runs all the tests in your
# project and fails the build if any of the tests fail.
#
test_artifacts:
extends: .matlab_defaults
script: |
matlab -batch "
import matlab.unittest.TestRunner
import matlab.unittest.Verbosity
import matlab.unittest.plugins.CodeCoveragePlugin
import matlab.unittest.plugins.XMLPlugin
import matlab.unittest.plugins.codecoverage.CoberturaFormat
suite = testsuite(pwd,'IncludeSubfolders',true);
[~,~] = mkdir('artifacts');
runner = TestRunner.withTextOutput('OutputDetail',Verbosity.Detailed);
runner.addPlugin(XMLPlugin.producingJUnitFormat('artifacts/results.xml'))
runner.addPlugin(CodeCoveragePlugin.forFolder(pwd,'IncludingSubfolders',true, ...
'Producing',CoberturaFormat('artifacts/cobertura.xml')))
results = runner.run(suite)
assertSuccess(results);"
cat <<- 'BLOCK' > runAllTests.m
import matlab.unittest.TestRunner
import matlab.unittest.Verbosity
import matlab.unittest.plugins.CodeCoveragePlugin
import matlab.unittest.plugins.XMLPlugin
import matlab.unittest.plugins.codecoverage.CoberturaFormat
suite = testsuite(pwd,'IncludeSubfolders',true);
[~,~] = mkdir('artifacts')
runner = TestRunner.withTextOutput('OutputDetail',Verbosity.Detailed);
runner.addPlugin(XMLPlugin.producingJUnitFormat('artifacts/results.xml'))
% Replace `pwd` with the location of the folder containing source code
runner.addPlugin(CodeCoveragePlugin.forFolder(pwd,'IncludingSubfolders',true, ...
'Producing',CoberturaFormat('artifacts/cobertura.xml')))
results = runner.run(suite)
assertSuccess(results);
BLOCK
matlab -batch runAllTests
artifacts:
reports:
junit: "./artifacts/results.xml"
@ -92,7 +106,7 @@ test_artifacts:
# You can modify the contents of the `test_artifacts` job depending on your goals. For more
# information on how to customize the test runner and generate various test and coverage artifacts,
# see [Generate Artifacts Using MATLAB Unit Test Plugins][7].
# see [Generate Artifacts Using MATLAB Unit Test Plugins][9].
#
# [6] https://www.mathworks.com/help/matlab/ref/matlab.unittest.plugins-package.html
# [7] https://www.mathworks.com/help/matlab/matlab_prog/generate-artifacts-using-matlab-unit-test-plugins.html
# [8] https://www.mathworks.com/help/matlab/ref/matlab.unittest.plugins-package.html
# [9] https://www.mathworks.com/help/matlab/matlab_prog/generate-artifacts-using-matlab-unit-test-plugins.html

View File

@ -332,6 +332,7 @@ namespace_package_settings: :gitlab_main
namespace_root_storage_statistics: :gitlab_main
namespace_ci_cd_settings: :gitlab_main
namespace_settings: :gitlab_main
namespace_details: :gitlab_main
namespaces: :gitlab_main
namespaces_sync_events: :gitlab_main
namespace_statistics: :gitlab_main

View File

@ -917,7 +917,7 @@ module Gitlab
def multi_action(
user, branch_name:, message:, actions:,
author_email: nil, author_name: nil,
start_branch_name: nil, start_sha: nil, start_repository: self,
start_branch_name: nil, start_sha: nil, start_repository: nil,
force: false)
wrapped_gitaly_errors do

View File

@ -412,7 +412,7 @@ module Gitlab
response = GitalyClient.call(@repository.storage, :operation_service,
:user_commit_files, req_enum, timeout: GitalyClient.long_timeout,
remote_storage: start_repository.storage)
remote_storage: start_repository&.storage)
if (pre_receive_error = response.pre_receive_error.presence)
raise Gitlab::Git::PreReceiveError, pre_receive_error
@ -535,7 +535,7 @@ module Gitlab
commit_author_name: encode_binary(author_name),
commit_author_email: encode_binary(author_email),
start_branch_name: encode_binary(start_branch_name),
start_repository: start_repository.gitaly_repository,
start_repository: start_repository&.gitaly_repository,
force: force,
start_sha: encode_binary(start_sha),
timestamp: Google::Protobuf::Timestamp.new(seconds: Time.now.utc.to_i)

View File

@ -10337,9 +10337,6 @@ msgstr ""
msgid "Content parsed with %{link}."
msgstr ""
msgid "ContentEditor|Table of Contents"
msgstr ""
msgid "ContentEditor|You have to provide a renderMarkdown function or a custom serializer"
msgstr ""

View File

@ -18,11 +18,11 @@ FactoryBot.define do
after(:build) do |runner, evaluator|
evaluator.projects.each do |proj|
runner.runner_projects << build(:ci_runner_project, project: proj)
runner.runner_projects << build(:ci_runner_project, runner: runner, project: proj)
end
evaluator.groups.each do |group|
runner.runner_namespaces << build(:ci_runner_namespace, namespace: group)
runner.runner_namespaces << build(:ci_runner_namespace, runner: runner, namespace: group)
end
end

View File

@ -37,16 +37,17 @@ describe('content_editor/components/toolbar_more_dropdown', () => {
});
describe.each`
name | contentType | command | params
${'Code block'} | ${'codeBlock'} | ${'setNode'} | ${['codeBlock']}
${'Details block'} | ${'details'} | ${'toggleList'} | ${['details', 'detailsContent']}
${'Bullet list'} | ${'bulletList'} | ${'toggleList'} | ${['bulletList', 'listItem']}
${'Ordered list'} | ${'orderedList'} | ${'toggleList'} | ${['orderedList', 'listItem']}
${'Task list'} | ${'taskList'} | ${'toggleList'} | ${['taskList', 'taskItem']}
${'Mermaid diagram'} | ${'diagram'} | ${'setNode'} | ${['diagram', { language: 'mermaid' }]}
${'PlantUML diagram'} | ${'diagram'} | ${'setNode'} | ${['diagram', { language: 'plantuml' }]}
${'Horizontal rule'} | ${'horizontalRule'} | ${'setHorizontalRule'} | ${[]}
`('when option $label is clicked', ({ name, command, contentType, params }) => {
name | contentType | command | params
${'Code block'} | ${'codeBlock'} | ${'setNode'} | ${['codeBlock']}
${'Details block'} | ${'details'} | ${'toggleList'} | ${['details', 'detailsContent']}
${'Bullet list'} | ${'bulletList'} | ${'toggleList'} | ${['bulletList', 'listItem']}
${'Ordered list'} | ${'orderedList'} | ${'toggleList'} | ${['orderedList', 'listItem']}
${'Task list'} | ${'taskList'} | ${'toggleList'} | ${['taskList', 'taskItem']}
${'Mermaid diagram'} | ${'diagram'} | ${'setNode'} | ${['diagram', { language: 'mermaid' }]}
${'PlantUML diagram'} | ${'diagram'} | ${'setNode'} | ${['diagram', { language: 'plantuml' }]}
${'Table of contents'} | ${'tableOfContents'} | ${'insertTableOfContents'} | ${[]}
${'Horizontal rule'} | ${'horizontalRule'} | ${'setHorizontalRule'} | ${[]}
`('when option $name is clicked', ({ name, command, contentType, params }) => {
let commands;
let btn;

View File

@ -0,0 +1,115 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`content/components/wrappers/table_of_contents collects all headings and renders a nested list of headings 1`] = `
<div
class="table-of-contents gl-border-1 gl-border-solid gl-border-gray-100 gl-mb-5 gl-p-4!"
data-testid="table-of-contents"
>
Table of contents
<li>
<a
href="#"
>
Heading 1
</a>
<ul>
<li>
<a
href="#"
>
Heading 1.1
</a>
<ul>
<li>
<a
href="#"
>
Heading 1.1.1
</a>
<!---->
</li>
</ul>
</li>
<li>
<a
href="#"
>
Heading 1.2
</a>
<ul>
<li>
<a
href="#"
>
Heading 1.2.1
</a>
<!---->
</li>
</ul>
</li>
<li>
<a
href="#"
>
Heading 1.3
</a>
<!---->
</li>
<li>
<a
href="#"
>
Heading 1.4
</a>
<ul>
<li>
<a
href="#"
>
Heading 1.4.1
</a>
<!---->
</li>
</ul>
</li>
</ul>
</li>
<li>
<a
href="#"
>
Heading 2
</a>
<!---->
</li>
</div>
`;

View File

@ -0,0 +1,84 @@
import { nextTick } from 'vue';
import { NodeViewWrapper } from '@tiptap/vue-2';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import { stubComponent } from 'helpers/stub_component';
import eventHubFactory from '~/helpers/event_hub_factory';
import Heading from '~/content_editor/extensions/heading';
import Diagram from '~/content_editor/extensions/diagram';
import TableOfContentsWrapper from '~/content_editor/components/wrappers/table_of_contents.vue';
import { createTestEditor, createDocBuilder, emitEditorEvent } from '../../test_utils';
describe('content/components/wrappers/table_of_contents', () => {
let wrapper;
let tiptapEditor;
let contentEditor;
let eventHub;
const buildEditor = () => {
tiptapEditor = createTestEditor({ extensions: [Heading, Diagram] });
contentEditor = { renderDiagram: jest.fn().mockResolvedValue('url/to/some/diagram') };
eventHub = eventHubFactory();
};
const createWrapper = async () => {
wrapper = mountExtended(TableOfContentsWrapper, {
propsData: {
editor: tiptapEditor,
node: {
attrs: {},
},
},
stubs: {
NodeViewWrapper: stubComponent(NodeViewWrapper),
},
provide: {
contentEditor,
tiptapEditor,
eventHub,
},
});
};
beforeEach(async () => {
buildEditor();
createWrapper();
const {
builders: { heading, doc },
} = createDocBuilder({
tiptapEditor,
names: {
heading: { nodeType: Heading.name },
},
});
const initialDoc = doc(
heading({ level: 1 }, 'Heading 1'),
heading({ level: 2 }, 'Heading 1.1'),
heading({ level: 3 }, 'Heading 1.1.1'),
heading({ level: 2 }, 'Heading 1.2'),
heading({ level: 3 }, 'Heading 1.2.1'),
heading({ level: 2 }, 'Heading 1.3'),
heading({ level: 2 }, 'Heading 1.4'),
heading({ level: 3 }, 'Heading 1.4.1'),
heading({ level: 1 }, 'Heading 2'),
);
tiptapEditor.commands.setContent(initialDoc.toJSON());
await emitEditorEvent({ event: 'update', tiptapEditor });
await nextTick();
});
afterEach(() => {
wrapper.destroy();
});
it('renders a node-view-wrapper as a ul element', () => {
expect(wrapper.findComponent(NodeViewWrapper).props().as).toBe('ul');
});
it('collects all headings and renders a nested list of headings', () => {
expect(wrapper.findComponent(NodeViewWrapper).element).toMatchSnapshot();
});
});

View File

@ -0,0 +1,96 @@
import Heading from '~/content_editor/extensions/heading';
import { toTree, getHeadings } from '~/content_editor/services/table_of_contents_utils';
import { createTestEditor, createDocBuilder } from '../test_utils';
describe('content_editor/services/table_of_content_utils', () => {
describe('toTree', () => {
it('should fills in gaps in heading levels and convert headings to a tree', () => {
expect(
toTree([
{ level: 3, text: '3' },
{ level: 2, text: '2' },
]),
).toEqual([
expect.objectContaining({
level: 1,
text: '',
subHeadings: [
expect.objectContaining({
level: 2,
text: '',
subHeadings: [expect.objectContaining({ level: 3, text: '3', subHeadings: [] })],
}),
expect.objectContaining({ level: 2, text: '2', subHeadings: [] }),
],
}),
]);
});
});
describe('getHeadings', () => {
const tiptapEditor = createTestEditor({
extensions: [Heading],
});
const {
builders: { heading, doc },
} = createDocBuilder({
tiptapEditor,
names: {
heading: { nodeType: Heading.name },
},
});
it('gets all headings as a tree in a tiptap document', () => {
const initialDoc = doc(
heading({ level: 1 }, 'Heading 1'),
heading({ level: 2 }, 'Heading 1.1'),
heading({ level: 3 }, 'Heading 1.1.1'),
heading({ level: 2 }, 'Heading 1.2'),
heading({ level: 3 }, 'Heading 1.2.1'),
heading({ level: 2 }, 'Heading 1.3'),
heading({ level: 2 }, 'Heading 1.4'),
heading({ level: 3 }, 'Heading 1.4.1'),
heading({ level: 1 }, 'Heading 2'),
);
tiptapEditor.commands.setContent(initialDoc.toJSON());
expect(getHeadings(tiptapEditor)).toEqual([
expect.objectContaining({
level: 1,
text: 'Heading 1',
subHeadings: [
expect.objectContaining({
level: 2,
text: 'Heading 1.1',
subHeadings: [
expect.objectContaining({ level: 3, text: 'Heading 1.1.1', subHeadings: [] }),
],
}),
expect.objectContaining({
level: 2,
text: 'Heading 1.2',
subHeadings: [
expect.objectContaining({ level: 3, text: 'Heading 1.2.1', subHeadings: [] }),
],
}),
expect.objectContaining({ level: 2, text: 'Heading 1.3', subHeadings: [] }),
expect.objectContaining({
level: 2,
text: 'Heading 1.4',
subHeadings: [
expect.objectContaining({ level: 3, text: 'Heading 1.4.1', subHeadings: [] }),
],
}),
],
}),
expect.objectContaining({
level: 1,
text: 'Heading 2',
subHeadings: [],
}),
]);
});
});
});

View File

@ -95,4 +95,35 @@ This reference tag is a mix of letters and numbers [^footnote].
expect(wrapper.find('pre').text()).toContain('[gitlab]: https://gitlab.com');
});
});
it('renders table of contents', async () => {
jest.useFakeTimers();
buildWrapper();
renderMarkdown.mockResolvedValue(`
<ul class="section-nav">
</ul>
<h1 dir="auto" data-sourcepos="3:1-3:11">
Heading 1
</h1>
<h2 dir="auto" data-sourcepos="5:1-5:12">
Heading 2
</h2>
`);
await contentEditorService.setSerializedContent(`
[TOC]
# Heading 1
## Heading 2
`);
await nextTick();
jest.runAllTimers();
expect(wrapper.findByTestId('table-of-contents').text()).toContain('Heading 1');
expect(wrapper.findByTestId('table-of-contents').text()).toContain('Heading 2');
});
});

View File

@ -0,0 +1,76 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
RSpec.describe CreateSyncNamespaceDetailsTrigger do
let(:migration) { described_class.new }
let(:namespaces) { table(:namespaces) }
let(:namespace_details) { table(:namespace_details) }
let!(:timestamp) { Time.new(2020, 01, 01).utc }
let(:synced_attributes) do
{
description: 'description',
description_html: '<p>description</p>',
cached_markdown_version: 1966080,
created_at: timestamp,
updated_at: timestamp
}
end
let(:other_attributes) do
{
name: 'name',
path: 'path'
}
end
let(:attributes) { other_attributes.merge(synced_attributes) }
describe '#up' do
before do
migrate!
end
describe 'INSERT trigger' do
it 'creates a namespace_detail record' do
expect do
namespaces.create!(attributes)
end.to change(namespace_details, :count).by(1)
end
it 'the created namespace_details record has matching attributes' do
namespaces.create!(attributes)
synced_namespace_details = namespace_details.last
expect(synced_namespace_details).to have_attributes(synced_attributes)
end
end
describe 'UPDATE trigger' do
let!(:namespace) { namespaces.create!(attributes) }
it 'updates the attribute in the synced namespace_details record' do
namespace.update!(description: 'new_description')
synced_namespace_details = namespace_details.last
expect(synced_namespace_details.description).to eq('new_description')
end
end
end
describe '#down' do
before do
migration.up
migration.down
end
it 'drops the trigger' do
expect do
namespaces.create!(attributes)
end.not_to change(namespace_details, :count)
end
end
end

View File

@ -0,0 +1,73 @@
# frozen_string_literal: true
require 'spec_helper'
require_migration!
RSpec.describe CreateSyncProjectNamespaceDetailsTrigger do
let(:migration) { described_class.new }
let(:projects) { table(:projects) }
let(:namespaces) { table(:namespaces) }
let(:namespace_details) { table(:namespace_details) }
let!(:timestamp) { Time.new(2020, 01, 01).utc }
let!(:project_namespace) { namespaces.create!(name: 'name', path: 'path') }
let!(:namespace) { namespaces.create!(name: 'group', path: 'group_path') }
let(:synced_attributes) do
{
description: 'description',
description_html: '<p>description</p>',
cached_markdown_version: 1966080,
updated_at: timestamp
}
end
let(:other_attributes) do
{
name: 'project_name',
project_namespace_id: project_namespace.id,
namespace_id: namespace.id
}
end
let(:attributes) { other_attributes.merge(synced_attributes) }
describe '#up' do
before do
migrate!
end
describe 'INSERT trigger' do
it 'the created namespace_details record has matching attributes' do
project = projects.create!(attributes)
synced_namespace_details = namespace_details.find_by(namespace_id: project.project_namespace_id)
expect(synced_namespace_details).to have_attributes(synced_attributes)
end
end
describe 'UPDATE trigger' do
let!(:project) { projects.create!(attributes) }
it 'updates the attribute in the synced namespace_details record' do
project.update!(description: 'new_description')
synced_namespace_details = namespace_details.find_by(namespace_id: project.project_namespace_id)
expect(synced_namespace_details.description).to eq('new_description')
end
end
end
describe '#down' do
before do
migration.up
migration.down
end
it 'drops the trigger' do
expect do
projects.create!(attributes)
end.not_to change(namespace_details, :count)
end
end
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Namespace::Detail, type: :model do
describe 'associations' do
it { is_expected.to belong_to :namespace }
end
describe 'validations' do
it { is_expected.to validate_presence_of(:namespace) }
end
context 'when namespace description changes' do
let(:namespace) { create(:namespace, description: "old") }
it 'changes namespace details description' do
expect { namespace.update!(description: "new") }
.to change { namespace.namespace_details.description }.from("old").to("new")
end
end
context 'when project description changes' do
let(:project) { create(:project, description: "old") }
it 'changes project namespace details description' do
expect { project.update!(description: "new") }
.to change { project.project_namespace.namespace_details.description }.from("old").to("new")
end
end
context 'when group description changes' do
let(:group) { create(:group, description: "old") }
it 'changes group namespace details description' do
expect { group.update!(description: "new") }
.to change { group.namespace_details.description }.from("old").to("new")
end
end
end

View File

@ -22,6 +22,7 @@ RSpec.describe Namespace do
it { is_expected.to have_one :root_storage_statistics }
it { is_expected.to have_one :aggregation_schedule }
it { is_expected.to have_one :namespace_settings }
it { is_expected.to have_one :namespace_details }
it { is_expected.to have_one(:namespace_statistics) }
it { is_expected.to have_many :custom_emoji }
it { is_expected.to have_one :package_setting_relation }

View File

@ -1469,6 +1469,20 @@ RSpec.describe Repository do
expect(repository.find_branch(branch_name)).to be_nil
end
end
it 'expires branches cache' do
expect(repository).to receive(:expire_branches_cache)
subject
end
context 'when expire_cache: false' do
it 'does not expire branches cache' do
expect(repository).not_to receive(:expire_branches_cache)
repository.add_branch(user, branch_name, target, expire_cache: false)
end
end
end
shared_examples 'asymmetric cached method' do |method|

View File

@ -16,7 +16,8 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
context 'when invalid token is provided' do
it 'returns 403 error' do
allow_next_instance_of(::Ci::Runners::RegisterRunnerService) do |service|
allow(service).to receive(:execute).and_return(nil)
allow(service).to receive(:execute)
.and_return(ServiceResponse.error(message: 'invalid token supplied', http_status: :forbidden))
end
post api('/runners'), params: { token: 'invalid' }
@ -58,7 +59,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
expect(service).to receive(:execute)
.once
.with('valid token', a_hash_including(expected_params))
.and_return(new_runner)
.and_return(ServiceResponse.success(payload: { runner: new_runner }))
end
end
@ -113,7 +114,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
.once
.with('valid token', a_hash_including('maintenance_note' => 'Some maintainer notes')
.and(excluding('maintainter_note' => anything)))
.and_return(new_runner)
.and_return(ServiceResponse.success(payload: { runner: new_runner }))
end
request
@ -139,7 +140,7 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
expect(service).to receive(:execute)
.once
.with('valid token', a_hash_including(expected_params))
.and_return(new_runner)
.and_return(ServiceResponse.success(payload: { runner: new_runner }))
end
request
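
The stubs above switch from returning a bare runner (or nil) to returning ServiceResponse objects. For reference, the interface the rewritten expectations rely on looks like this in use (return values shown as comments):

error = ServiceResponse.error(message: 'invalid token supplied', http_status: :forbidden)
error.error?        #=> true
error.message       #=> "invalid token supplied"
error.http_status   #=> :forbidden

success = ServiceResponse.success(payload: { runner: new_runner })
success.success?          #=> true
success.payload[:runner]  #=> new_runner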

View File

@ -47,7 +47,7 @@ RSpec.describe API::ProjectImport, :aggregate_failures do
it 'executes a limited number of queries' do
control_count = ActiveRecord::QueryRecorder.new { subject }.count
expect(control_count).to be <= 108
expect(control_count).to be <= 109
end
it 'schedules an import using a namespace' do

View File

@ -93,7 +93,12 @@ RSpec.describe Branches::CreateService, :use_clean_rails_redis_caching do
let(:branches) { { 'master' => 'master', '' => 'master', 'failed_branch' => 'master' } }
it 'returns all errors' do
allow(project.repository).to receive(:add_branch).with(user, 'failed_branch', 'master').and_return(false)
allow(project.repository).to receive(:add_branch).with(
user,
'failed_branch',
'master',
expire_cache: false
).and_return(false)
expect(subject[:status]).to eq(:error)
expect(subject[:message]).to match_array(
@ -117,6 +122,26 @@ RSpec.describe Branches::CreateService, :use_clean_rails_redis_caching do
expect(control.by_command(:sadd).count).to eq(1)
end
end
context 'without N+1 branch cache expiration' do
let(:branches) { { 'branch_1' => 'master', 'branch_2' => 'master', 'branch_3' => 'master' } }
it 'triggers branch cache expiration only once' do
expect(project.repository).to receive(:expire_branches_cache).once
subject
end
context 'when branches were not added' do
let(:branches) { { 'master' => 'master' } }
it 'does not trigger branch expiration' do
expect(project.repository).not_to receive(:expire_branches_cache)
subject
end
end
end
end
describe '#execute' do
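
The new examples pin down the caller-side pattern: create every branch with expire_cache: false, then expire the cache once, and only if at least one branch was actually added. A hedged sketch of that shape (variable names are illustrative, not taken from the service):

results = branches.map do |name, ref|
  project.repository.add_branch(user, name, ref, expire_cache: false)
end

# Expire once, and only when something was really created; add_branch
# returns a falsy value for branches that were not added.
project.repository.expire_branches_cache if results.any?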

View File

@ -6,6 +6,7 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
let(:registration_token) { 'abcdefg123456' }
let(:token) {}
let(:args) { {} }
let(:runner) { execute.payload[:runner] }
before do
stub_feature_flags(runner_registration_control: false)
@ -13,21 +14,25 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
stub_application_setting(valid_runner_registrars: ApplicationSetting::VALID_RUNNER_REGISTRAR_TYPES)
end
subject(:runner) { described_class.new.execute(token, args) }
subject(:execute) { described_class.new.execute(token, args) }
context 'when no token is provided' do
let(:token) { '' }
it 'returns nil' do
is_expected.to be_nil
it 'returns error response' do
expect(execute).to be_error
expect(execute.message).to eq 'invalid token supplied'
expect(execute.http_status).to eq :forbidden
end
end
context 'when invalid token is provided' do
let(:token) { 'invalid' }
it 'returns nil' do
is_expected.to be_nil
it 'returns error response' do
expect(execute).to be_error
expect(execute.message).to eq 'invalid token supplied'
expect(execute.http_status).to eq :forbidden
end
end
@ -36,12 +41,14 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
let(:token) { registration_token }
it 'creates runner with default values' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.persisted?).to be_truthy
expect(subject.run_untagged).to be true
expect(subject.active).to be true
expect(subject.token).not_to eq(registration_token)
expect(subject).to be_instance_type
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_truthy
expect(runner.run_untagged).to be true
expect(runner.active).to be true
expect(runner.token).not_to eq(registration_token)
expect(runner).to be_instance_type
end
context 'with non-default arguments' do
@ -67,25 +74,27 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'creates runner with specified values', :aggregate_failures do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.active).to eq args[:active]
expect(subject.locked).to eq args[:locked]
expect(subject.run_untagged).to eq args[:run_untagged]
expect(subject.tags).to contain_exactly(
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.active).to eq args[:active]
expect(runner.locked).to eq args[:locked]
expect(runner.run_untagged).to eq args[:run_untagged]
expect(runner.tags).to contain_exactly(
an_object_having_attributes(name: 'tag1'),
an_object_having_attributes(name: 'tag2')
)
expect(subject.access_level).to eq args[:access_level]
expect(subject.maximum_timeout).to eq args[:maximum_timeout]
expect(subject.name).to eq args[:name]
expect(subject.version).to eq args[:version]
expect(subject.revision).to eq args[:revision]
expect(subject.platform).to eq args[:platform]
expect(subject.architecture).to eq args[:architecture]
expect(subject.ip_address).to eq args[:ip_address]
expect(runner.access_level).to eq args[:access_level]
expect(runner.maximum_timeout).to eq args[:maximum_timeout]
expect(runner.name).to eq args[:name]
expect(runner.version).to eq args[:version]
expect(runner.revision).to eq args[:revision]
expect(runner.platform).to eq args[:platform]
expect(runner.architecture).to eq args[:architecture]
expect(runner.ip_address).to eq args[:ip_address]
expect(Ci::Runner.tagged_with('tag1')).to include(subject)
expect(Ci::Runner.tagged_with('tag2')).to include(subject)
expect(Ci::Runner.tagged_with('tag1')).to include(runner)
expect(Ci::Runner.tagged_with('tag2')).to include(runner)
end
end
@ -95,8 +104,10 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'creates runner with token expiration' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.token_expires_at).to eq(5.days.from_now)
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.token_expires_at).to eq(5.days.from_now)
end
end
end
@ -106,12 +117,14 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
let(:token) { project.runners_token }
it 'creates project runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(project.runners.size).to eq(1)
is_expected.to eq(project.runners.first)
expect(subject.token).not_to eq(registration_token)
expect(subject.token).not_to eq(project.runners_token)
expect(subject).to be_project_type
expect(runner).to eq(project.runners.first)
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(project.runners_token)
expect(runner).to be_project_type
end
context 'when it exceeds the application limits' do
@ -121,9 +134,13 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'does not create runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.persisted?).to be_falsey
expect(subject.errors.messages).to eq('runner_projects.base': ['Maximum number of ci registered project runners (1) exceeded'])
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_falsey
expect(runner.errors.messages).to eq(
'runner_projects.base': ['Maximum number of ci registered project runners (1) exceeded']
)
expect(project.runners.reload.size).to eq(1)
end
end
@ -135,8 +152,10 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'creates runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.errors).to be_empty
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(project.runners.reload.size).to eq(2)
expect(project.runners.recent.size).to eq(1)
end
@ -153,15 +172,18 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'returns 403 error' do
is_expected.to be_nil
expect(execute).to be_error
expect(execute.http_status).to eq :forbidden
end
end
context 'when feature flag is disabled' do
it 'registers the runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.errors).to be_empty
expect(subject.active).to be true
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(runner.active).to be true
end
end
end
@ -172,12 +194,14 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
let(:token) { group.runners_token }
it 'creates a group runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.errors).to be_empty
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(group.runners.reload.size).to eq(1)
expect(subject.token).not_to eq(registration_token)
expect(subject.token).not_to eq(group.runners_token)
expect(subject).to be_group_type
expect(runner.token).not_to eq(registration_token)
expect(runner.token).not_to eq(group.runners_token)
expect(runner).to be_group_type
end
context 'when it exceeds the application limits' do
@ -187,9 +211,13 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'does not create runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.persisted?).to be_falsey
expect(subject.errors.messages).to eq('runner_namespaces.base': ['Maximum number of ci registered group runners (1) exceeded'])
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.persisted?).to be_falsey
expect(runner.errors.messages).to eq(
'runner_namespaces.base': ['Maximum number of ci registered group runners (1) exceeded']
)
expect(group.runners.reload.size).to eq(1)
end
end
@ -202,8 +230,10 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
end
it 'creates runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.errors).to be_empty
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(group.runners.reload.size).to eq(3)
expect(group.runners.recent.size).to eq(1)
end
@ -219,16 +249,18 @@ RSpec.describe ::Ci::Runners::RegisterRunnerService, '#execute' do
stub_feature_flags(runner_registration_control: true)
end
it 'returns nil' do
is_expected.to be_nil
it 'returns error response' do
is_expected.to be_error
end
end
context 'when feature flag is disabled' do
it 'registers the runner' do
is_expected.to be_an_instance_of(::Ci::Runner)
expect(subject.errors).to be_empty
expect(subject.active).to be true
expect(execute).to be_success
expect(runner).to be_an_instance_of(::Ci::Runner)
expect(runner.errors).to be_empty
expect(runner.active).to be true
end
end
end
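
Taken together, the rewritten expectations describe a service that now wraps its result in ServiceResponse instead of returning a runner or nil. A compressed sketch of that contract, inferred from the spec alone (the helper name is hypothetical, and the real service contains more logic around registrars, limits, and token expiration):

def execute(registration_token, attributes)
  # attrs_for_token is a hypothetical helper standing in for the real token
  # lookup (instance, project, or group registration token).
  runner_attrs = attrs_for_token(registration_token)

  unless runner_attrs
    return ServiceResponse.error(message: 'invalid token supplied', http_status: :forbidden)
  end

  runner = ::Ci::Runner.create(attributes.merge(runner_attrs))

  # Per the "exceeds the application limits" examples, the response is still a
  # success even when the runner fails validation; callers inspect
  # runner.persisted? and runner.errors.
  ServiceResponse.success(payload: { runner: runner })
end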

View File

@ -15,7 +15,7 @@ RSpec.describe ::Ci::Runners::ResetRegistrationTokenService, '#execute' do
it 'does not reset registration token and returns error response' do
expect(scope).not_to receive(token_reset_method_name)
is_expected.to be_error
expect(execute).to be_error
end
end
@ -25,7 +25,7 @@ RSpec.describe ::Ci::Runners::ResetRegistrationTokenService, '#execute' do
it 'does not reset registration token and returns error response' do
expect(scope).not_to receive(token_reset_method_name)
is_expected.to be_error
expect(execute).to be_error
end
end
@ -37,7 +37,7 @@ RSpec.describe ::Ci::Runners::ResetRegistrationTokenService, '#execute' do
expect(scope).to receive(token_method_name).once.and_return("#{token_method_name} return value")
end
is_expected.to be_success
expect(execute).to be_success
expect(execute.payload[:new_registration_token]).to eq("#{token_method_name} return value")
end
end

View File

@ -3,21 +3,21 @@
require 'spec_helper'
RSpec.describe ::Ci::Runners::UnassignRunnerService, '#execute' do
subject(:service) { described_class.new(runner_project, user).execute }
let_it_be(:runner) { create(:ci_runner, :project, projects: [project]) }
let_it_be(:project) { create(:project) }
let_it_be(:runner) { create(:ci_runner, :project, projects: [project]) }
let(:runner_project) { runner.runner_projects.last }
subject(:execute) { described_class.new(runner_project, user).execute }
context 'without user' do
let(:user) { nil }
it 'does not destroy runner_project', :aggregate_failures do
expect(runner_project).not_to receive(:destroy)
expect { service }.not_to change { runner.runner_projects.count }.from(1)
expect { execute }.not_to change { runner.runner_projects.count }.from(1)
is_expected.to eq(false)
is_expected.to be_error
end
end
@ -27,17 +27,27 @@ RSpec.describe ::Ci::Runners::UnassignRunnerService, '#execute' do
it 'does not call destroy on runner_project' do
expect(runner).not_to receive(:destroy)
service
is_expected.to be_error
end
end
context 'with admin user', :enable_admin_mode do
let(:user) { create_default(:user, :admin) }
it 'destroys runner_project' do
expect(runner_project).to receive(:destroy).once
context 'with destroy returning false' do
it 'returns error response' do
expect(runner_project).to receive(:destroy).once.and_return(false)
service
is_expected.to be_error
end
end
context 'with destroy returning true' do
it 'returns success response' do
expect(runner_project).to receive(:destroy).once.and_return(true)
is_expected.to be_success
end
end
end
end
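
The rewritten examples replace the old true/false return with ServiceResponse, including the new destroy-failure branch. A minimal sketch consistent with those expectations; the allowed? predicate is a stand-in, since the spec only shows that a missing or non-admin user receives an error response:

def execute
  return ServiceResponse.error(message: 'user not allowed to manage project runners') unless allowed?

  if @runner_project.destroy
    ServiceResponse.success
  else
    ServiceResponse.error(message: 'failed to destroy runner project')
  end
end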

View File

@ -176,6 +176,15 @@ RSpec.describe Groups::CreateService, '#execute' do
end
end
describe 'creating a details record' do
let(:service) { described_class.new(user, group_params) }
it 'creates the details record connected to the group' do
group = subject
expect(group.namespace_details).to be_persisted
end
end
describe 'create service for the group' do
let(:service) { described_class.new(user, group_params) }
let(:created_group) { service.execute }

View File

@ -13,9 +13,8 @@ RSpec.shared_examples 'edits content using the content editor' do
expect(page).to have_css('[data-testid="formatting-bubble-menu"]')
end
it 'does not show a formatting bubble menu for code' do
find(content_editor_testid).send_keys 'This is a `code`'
find(content_editor_testid).send_keys [:shift, :left]
it 'does not show a formatting bubble menu for code blocks' do
find(content_editor_testid).send_keys '```js '
expect(page).not_to have_css('[data-testid="formatting-bubble-menu"]')
end

View File

@ -13,6 +13,8 @@ workflow:
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec

View File

@ -19,6 +19,8 @@ workflow:
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec

View File

@ -13,6 +13,8 @@ workflow:
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec

View File

@ -13,6 +13,8 @@ workflow:
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec

View File

@ -13,6 +13,8 @@ workflow:
- gem install bundler --no-document # Bundler is not installed with the image
- bundle config set --local path 'vendor' # Install dependencies into ./vendor/ruby
- bundle config set with 'development'
- bundle config set --local frozen 'true' # Disallow Gemfile.lock changes on CI
- bundle config # Show bundler configuration
- bundle install -j $(nproc)
script:
- bundle exec rspec