Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2022-06-21 00:08:43 +00:00
parent 991c66333d
commit 92ea86691a
78 changed files with 2730 additions and 485 deletions

View File

@ -323,7 +323,7 @@ gem 'thrift', '>= 0.14.0'
# I18n
gem 'ruby_parser', '~> 3.15', require: false
gem 'rails-i18n', '~> 6.0'
gem 'rails-i18n', '~> 7.0'
gem 'gettext_i18n_rails', '~> 1.8.0'
gem 'gettext_i18n_rails_js', '~> 1.3'
gem 'gettext', '~> 3.3', require: false, group: :development
@ -344,7 +344,7 @@ gem 'prometheus-client-mmap', '~> 0.15.0', require: 'prometheus/client'
gem 'warning', '~> 1.2.0'
group :development do
gem 'lefthook', '~> 1.0.0', require: false
gem 'lefthook', '~> 1.0.1', require: false
gem 'rubocop'
gem 'solargraph', '~> 0.44.3', require: false

View File

@ -724,7 +724,7 @@ GEM
rest-client (~> 2.0)
launchy (2.5.0)
addressable (~> 2.7)
lefthook (1.0.0)
lefthook (1.0.1)
letter_opener (1.7.0)
launchy (~> 2.2)
letter_opener_web (2.0.0)
@ -1030,9 +1030,9 @@ GEM
nokogiri (>= 1.6)
rails-html-sanitizer (1.4.2)
loofah (~> 2.3)
rails-i18n (6.0.0)
rails-i18n (7.0.3)
i18n (>= 0.7, < 2)
railties (>= 6.0.0, < 7)
railties (>= 6.0.0, < 8)
railties (6.1.4.7)
actionpack (= 6.1.4.7)
activesupport (= 6.1.4.7)
@ -1586,7 +1586,7 @@ DEPENDENCIES
knapsack (~> 1.21.1)
kramdown (~> 2.3.1)
kubeclient (~> 4.9.2)
lefthook (~> 1.0.0)
lefthook (~> 1.0.1)
letter_opener_web (~> 2.0.0)
licensee (~> 9.14.1)
lockbox (~> 0.6.2)
@ -1650,7 +1650,7 @@ DEPENDENCIES
rack-timeout (~> 0.6.0)
rails (~> 6.1.4.7)
rails-controller-testing
rails-i18n (~> 6.0)
rails-i18n (~> 7.0)
rainbow (~> 3.0)
rbtrace (~> 0.4)
rdoc (~> 6.3.2)

View File

@ -0,0 +1,56 @@
<script>
import { GlButton, GlIcon } from '@gitlab/ui';
import { SEVERITY_CLASSES, SEVERITY_ICONS } from '~/reports/codequality_report/constants';
export default {
components: { GlButton, GlIcon },
props: {
line: {
type: Number,
required: true,
},
codeQuality: {
type: Array,
required: true,
},
},
methods: {
severityClass(severity) {
return SEVERITY_CLASSES[severity] || SEVERITY_CLASSES.unknown;
},
severityIcon(severity) {
return SEVERITY_ICONS[severity] || SEVERITY_ICONS.unknown;
},
},
};
</script>
<template>
<div data-testid="diff-codequality" class="gl-relative">
<ul
class="gl-list-style-none gl-mb-0 gl-p-0 codequality-findings-list gl-border-top-1 gl-border-bottom-1 gl-bg-gray-10"
>
<li
v-for="finding in codeQuality"
:key="finding.description"
class="gl-pt-1 gl-pb-1 gl-pl-3 gl-border-solid gl-border-bottom-0 gl-border-right-0 gl-border-1 gl-border-gray-100"
>
<gl-icon
:size="12"
:name="severityIcon(finding.severity)"
:class="severityClass(finding.severity)"
class="codequality-severity-icon"
/>
{{ finding.description }}
</li>
</ul>
<gl-button
data-testid="diff-codequality-close"
category="tertiary"
size="small"
icon="close"
class="gl-absolute gl-right-2 gl-top-2"
@click="$emit('hideCodeQualityFindings', line)"
/>
</div>
</template>

View File

@ -274,6 +274,9 @@ export default {
v-if="$options.showCodequalityLeft(props)"
:codequality="props.line.left.codequality"
:file-path="props.filePath"
@showCodeQualityFindings="
listeners.toggleCodeQualityFindings(props.line.left.codequality[0].line)
"
/>
</div>
<div
@ -395,6 +398,9 @@ export default {
:codequality="props.line.right.codequality"
:file-path="props.filePath"
data-testid="codeQualityIcon"
@showCodeQualityFindings="
listeners.toggleCodeQualityFindings(props.line.right.codequality[0].line)
"
/>
</div>
<div

View File

@ -2,12 +2,14 @@
import { GlSafeHtmlDirective as SafeHtml } from '@gitlab/ui';
import { mapGetters, mapState, mapActions } from 'vuex';
import { IdState } from 'vendor/vue-virtual-scroller';
import glFeatureFlagsMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import DraftNote from '~/batch_comments/components/draft_note.vue';
import draftCommentsMixin from '~/diffs/mixins/draft_comments';
import { getCommentedLines } from '~/notes/components/multiline_comment_utils';
import { hide } from '~/tooltips';
import { pickDirection } from '../utils/diff_line';
import DiffCommentCell from './diff_comment_cell.vue';
import DiffCodeQuality from './diff_code_quality.vue';
import DiffExpansionCell from './diff_expansion_cell.vue';
import DiffRow from './diff_row.vue';
import { isHighlighted } from './diff_row_utils';
@ -17,12 +19,17 @@ export default {
DiffExpansionCell,
DiffRow,
DiffCommentCell,
DiffCodeQuality,
DraftNote,
},
directives: {
SafeHtml,
},
mixins: [draftCommentsMixin, IdState({ idProp: (vm) => vm.diffFile.file_hash })],
mixins: [
draftCommentsMixin,
IdState({ idProp: (vm) => vm.diffFile.file_hash }),
glFeatureFlagsMixin(),
],
props: {
diffFile: {
type: Object,
@ -43,6 +50,11 @@ export default {
default: false,
},
},
data() {
return {
codeQualityExpandedLines: [],
};
},
idState() {
return {
dragStart: null,
@ -84,6 +96,23 @@ export default {
}
this.idState.dragStart = line;
},
parseCodeQuality(line) {
return line.left?.codequality ?? line.right.codequality;
},
hideCodeQualityFindings(line) {
const index = this.codeQualityExpandedLines.indexOf(line);
if (index > -1) {
this.codeQualityExpandedLines.splice(index, 1);
}
},
toggleCodeQualityFindings(line) {
if (!this.codeQualityExpandedLines.includes(line)) {
this.codeQualityExpandedLines.push(line);
} else {
this.hideCodeQualityFindings(line);
}
},
onDragOver(line) {
if (line.chunk !== this.idState.dragStart.chunk) return;
@ -125,15 +154,16 @@ export default {
},
handleParallelLineMouseDown(e) {
const line = e.target.closest('.diff-td');
const table = line.closest('.diff-table');
if (line) {
const table = line.closest('.diff-table');
table.classList.remove('left-side-selected', 'right-side-selected');
const [lineClass] = ['left-side', 'right-side'].filter((name) =>
line.classList.contains(name),
);
table.classList.remove('left-side-selected', 'right-side-selected');
const [lineClass] = ['left-side', 'right-side'].filter((name) =>
line.classList.contains(name),
);
if (lineClass) {
table.classList.add(`${lineClass}-selected`);
if (lineClass) {
table.classList.add(`${lineClass}-selected`);
}
}
},
getCountBetweenIndex(index) {
@ -148,6 +178,9 @@ export default {
Number(this.diffLines[index - 1].left.new_line)
);
},
getCodeQualityLine(line) {
return this.parseCodeQuality(line)?.[0]?.line;
},
},
userColorScheme: window.gon.user_color_scheme,
};
@ -190,6 +223,7 @@ export default {
:coverage-loaded="coverageLoaded"
@showCommentForm="(code) => singleLineComment(code, line)"
@setHighlightedRow="setHighlightedRow"
@toggleCodeQualityFindings="toggleCodeQualityFindings"
@toggleLineDiscussions="
({ lineCode, expanded }) =>
toggleLineDiscussions({ lineCode, fileHash: diffFile.file_hash, expanded })
@ -198,6 +232,17 @@ export default {
@startdragging="onStartDragging"
@stopdragging="onStopDragging"
/>
<diff-code-quality
v-if="
glFeatures.refactorCodeQualityInlineFindings &&
codeQualityExpandedLines.includes(getCodeQualityLine(line))
"
:key="line.line_code"
:line="getCodeQualityLine(line)"
:code-quality="parseCodeQuality(line)"
@hideCodeQualityFindings="hideCodeQualityFindings"
/>
<div
v-if="line.renderCommentRow"
:key="`dcr-${line.line_code || index}`"

View File

@ -3,10 +3,12 @@ import { isNode, isDocument, isSeq, visit } from 'yaml';
import { capitalize } from 'lodash';
import TextWidget from '~/pipeline_wizard/components/widgets/text.vue';
import ListWidget from '~/pipeline_wizard/components/widgets/list.vue';
import ChecklistWidget from '~/pipeline_wizard/components/widgets/checklist.vue';
const widgets = {
TextWidget,
ListWidget,
ChecklistWidget,
};
function isNullOrUndefined(v) {
@ -30,8 +32,9 @@ export default {
},
target: {
type: String,
required: true,
required: false,
validator: (v) => /^\$.*/g.test(v),
default: null,
},
widget: {
type: String,
@ -48,6 +51,7 @@ export default {
},
computed: {
path() {
if (!this.target) return null;
let res;
visit(this.template, (seqKey, node, path) => {
if (node && node.value === this.target) {

View File

@ -31,10 +31,7 @@ export default {
inputs: {
type: Array,
required: true,
validator: (value) =>
value.every((i) => {
return i?.target && i?.widget;
}),
validator: (value) => value.every((i) => i?.widget),
},
template: {
type: null,
@ -131,7 +128,7 @@ export default {
:template="template"
:validate="validate"
:widget="input.widget"
class="gl-mb-2"
class="gl-mb-8"
v-bind="input"
@highlight="onHighlight"
@update:valid="(validationState) => onInputValidationStateChange(i, validationState)"

View File

@ -0,0 +1,80 @@
<script>
import { GlFormGroup, GlFormCheckbox, GlFormCheckboxGroup } from '@gitlab/ui';
import { uniqueId } from 'lodash';
const isValidItemDefinition = (value) => {
// The Item definition should either be a simple string
// or an object with at least a "title" property
return typeof value === 'string' || Boolean(value.text);
};
export default {
name: 'ChecklistWidget',
components: {
GlFormGroup,
GlFormCheckbox,
GlFormCheckboxGroup,
},
props: {
title: {
type: String,
required: false,
default: null,
},
items: {
type: Array,
required: false,
validator: (v) => v.every(isValidItemDefinition),
default: () => [],
},
validate: {
type: Boolean,
required: false,
default: false,
},
},
computed: {
checklistItems() {
return this.items.map((rawItem) => {
const id = rawItem.id || uniqueId();
return {
id,
text: rawItem.text || rawItem,
help: rawItem.help || null,
};
});
},
},
created() {
if (this.items.length > 0) {
this.$emit('update:valid', false);
}
},
methods: {
updateValidState(values) {
this.$emit(
'update:valid',
this.checklistItems.every((item) => values.includes(item.id)),
);
},
},
};
</script>
<template>
<gl-form-group #default="{ ariaDescribedby }" :label="title">
<gl-form-checkbox-group :aria-describedby="ariaDescribedby" @input="updateValidState">
<gl-form-checkbox
v-for="item in checklistItems"
:id="item.id"
:key="item.id"
:value="item.id"
>
{{ item.text }}
<template v-if="item.help" #help>
{{ item.help }}
</template>
</gl-form-checkbox>
</gl-form-checkbox-group>
</gl-form-group>
</template>

View File

@ -95,8 +95,14 @@
}
}
.commits-row + .commits-row {
border-top: 1px solid $white-normal;
.commits-row {
+ .commits-row {
border-top: 1px solid $white-normal;
}
+ .commits-empty {
display: none;
}
}
.text-expander {

View File

@ -4,7 +4,7 @@ class Projects::GoogleCloud::DeploymentsController < Projects::GoogleCloud::Base
before_action :validate_gcp_token!
def cloud_run
params = { token_in_session: token_in_session }
params = { google_oauth2_token: token_in_session }
enable_cloud_run_response = GoogleCloud::EnableCloudRunService
.new(project, current_user, params).execute

View File

@ -44,6 +44,7 @@ class Projects::MergeRequestsController < Projects::MergeRequests::ApplicationCo
push_frontend_feature_flag(:issue_assignees_widget, @project)
push_frontend_feature_flag(:realtime_labels, project)
push_frontend_feature_flag(:refactor_security_extension, @project)
push_frontend_feature_flag(:refactor_code_quality_inline_findings, project)
push_frontend_feature_flag(:mr_attention_requests, current_user)
push_frontend_feature_flag(:moved_mr_sidebar, project)
push_frontend_feature_flag(:paginated_mr_discussions, project)

View File

@ -11,6 +11,7 @@ module Projects
before_action :integration, only: [:edit, :update, :test]
before_action :default_integration, only: [:edit, :update]
before_action :web_hook_logs, only: [:edit, :update]
before_action -> { check_rate_limit!(:project_testing_integration, scope: [@project, current_user]) }, only: :test
respond_to :html
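The new `before_action` throttles integration test requests per project and user pair. For the `check_rate_limit!` call to take effect, the `:project_testing_integration` key has to be registered with the application rate limiter; the following sketch shows what such a registration might look like (the threshold and interval values are assumptions, not the actual limits shipped in this change):

```ruby
# Hypothetical entry in Gitlab::ApplicationRateLimiter.rate_limits.
# Illustrative values only; 1.minute relies on ActiveSupport being loaded.
def self.rate_limits
  {
    project_testing_integration: { threshold: 5, interval: 1.minute }
  }
end
```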

View File

@ -50,14 +50,6 @@ class StageEntity < Grape::Entity
stage.detailed_status(request.current_user)
end
def grouped_statuses
@grouped_statuses ||= stage.statuses.latest_ordered.group_by(&:status)
end
def grouped_retried_statuses
@grouped_retried_statuses ||= stage.statuses.retried_ordered.group_by(&:status)
end
def latest_statuses
Ci::HasStatus::ORDERED_STATUSES.flat_map do |ordered_status|
grouped_statuses.fetch(ordered_status, [])
@ -69,4 +61,18 @@ class StageEntity < Grape::Entity
grouped_retried_statuses.fetch(ordered_status, [])
end
end
def grouped_statuses
@grouped_statuses ||= preload_metadata(stage.statuses.latest_ordered).group_by(&:status)
end
def grouped_retried_statuses
@grouped_retried_statuses ||= preload_metadata(stage.statuses.retried_ordered).group_by(&:status)
end
def preload_metadata(statuses)
Preloaders::CommitStatusPreloader.new(statuses).execute([:metadata])
statuses
end
end
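The grouping itself is unchanged; the statuses are now run through `Preloaders::CommitStatusPreloader` first, so when the serialized statuses later read their `metadata`, the rows are already batch-loaded instead of triggering one query per status. A rough sketch of the effect (illustrative, not part of the changeset):

```ruby
statuses = stage.statuses.latest_ordered

# Without preloading, each access below fires its own metadata query (N+1):
# statuses.each { |status| status.metadata }

# With the preloader, the metadata rows are loaded in one batch up front:
Preloaders::CommitStatusPreloader.new(statuses).execute([:metadata])
statuses.each { |status| status.metadata } # no additional queries
```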

View File

@ -0,0 +1,64 @@
# frozen_string_literal: true
module GoogleCloud
class BaseService < ::BaseService
protected
def google_oauth2_token
@params[:google_oauth2_token]
end
def gcp_project_id
@params[:gcp_project_id]
end
def environment_name
@params[:environment_name]
end
def google_api_client
@google_api_client_instance ||= GoogleApi::CloudPlatform::Client.new(google_oauth2_token, nil)
end
def unique_gcp_project_ids
filter_params = { key: 'GCP_PROJECT_ID' }
::Ci::VariablesFinder.new(project, filter_params).execute.map(&:value).uniq
end
def group_vars_by_environment(keys)
filtered_vars = project.variables.filter { |variable| keys.include? variable.key }
filtered_vars.each_with_object({}) do |variable, grouped|
grouped[variable.environment_scope] ||= {}
grouped[variable.environment_scope][variable.key] = variable.value
end
end
def create_or_replace_project_vars(environment_scope, key, value, is_protected)
change_params = {
variable_params: {
key: key,
value: value,
environment_scope: environment_scope,
protected: is_protected
}
}
existing_variable = find_existing_variable(environment_scope, key)
if existing_variable
change_params[:action] = :update
change_params[:variable] = existing_variable
else
change_params[:action] = :create
end
::Ci::ChangeVariableService.new(container: project, current_user: current_user, params: change_params).execute
end
private
def find_existing_variable(environment_scope, key)
filter_params = { key: key, filter: { environment_scope: environment_scope } }
::Ci::VariablesFinder.new(project, filter_params).execute.first
end
end
end
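The remaining GoogleCloud services now inherit these helpers instead of redefining them. A hypothetical subclass (not part of this changeset) illustrates how the pieces compose; `success` and `error` are assumed to come from the `::BaseService` superclass:

```ruby
# Hypothetical subclass, for illustration only.
module GoogleCloud
  class ExampleService < ::GoogleCloud::BaseService
    def execute
      # unique_gcp_project_ids reads the GCP_PROJECT_ID CI variables.
      return error('No GCP projects found') if unique_gcp_project_ids.empty?

      # google_api_client is memoized and built from params[:google_oauth2_token].
      google_api_client

      # Idempotently create or update a project-level CI variable.
      create_or_replace_project_vars('*', 'GCP_PROJECT_ID', gcp_project_id, true)
      success
    end
  end
end
```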

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
module GoogleCloud
class CreateServiceAccountsService < :: BaseService
class CreateServiceAccountsService < ::GoogleCloud::BaseService
def execute
service_account = google_api_client.create_service_account(gcp_project_id, service_account_name, service_account_desc)
service_account_key = google_api_client.create_service_account_key(gcp_project_id, service_account.unique_id)
@ -23,22 +23,6 @@ module GoogleCloud
private
def google_oauth2_token
@params[:google_oauth2_token]
end
def gcp_project_id
@params[:gcp_project_id]
end
def environment_name
@params[:environment_name]
end
def google_api_client
@google_api_client_instance ||= GoogleApi::CloudPlatform::Client.new(google_oauth2_token, nil)
end
def service_accounts_service
GoogleCloud::ServiceAccountsService.new(project)
end

View File

@ -1,15 +1,13 @@
# frozen_string_literal: true
module GoogleCloud
class EnableCloudRunService < :: BaseService
class EnableCloudRunService < ::GoogleCloud::BaseService
def execute
gcp_project_ids = unique_gcp_project_ids
if gcp_project_ids.empty?
error("No GCP projects found. Configure a service account or GCP_PROJECT_ID ci variable.")
else
google_api_client = GoogleApi::CloudPlatform::Client.new(token_in_session, nil)
gcp_project_ids.each do |gcp_project_id|
google_api_client.enable_cloud_run(gcp_project_id)
google_api_client.enable_artifacts_registry(gcp_project_id)
@ -19,16 +17,5 @@ module GoogleCloud
success({ gcp_project_ids: gcp_project_ids })
end
end
private
def unique_gcp_project_ids
all_gcp_project_ids = project.variables.filter { |var| var.key == 'GCP_PROJECT_ID' }.map { |var| var.value }
all_gcp_project_ids.uniq
end
def token_in_session
@params[:token_in_session]
end
end
end

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
module GoogleCloud
class GcpRegionAddOrReplaceService < ::BaseService
class GcpRegionAddOrReplaceService < ::GoogleCloud::BaseService
def execute(environment, region)
gcp_region_key = Projects::GoogleCloudController::GCP_REGION_CI_VAR_KEY

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
module GoogleCloud
class GeneratePipelineService < :: BaseService
class GeneratePipelineService < ::GoogleCloud::BaseService
ACTION_DEPLOY_TO_CLOUD_RUN = 'DEPLOY_TO_CLOUD_RUN'
ACTION_DEPLOY_TO_CLOUD_STORAGE = 'DEPLOY_TO_CLOUD_STORAGE'

View File

@ -8,7 +8,7 @@ module GoogleCloud
##
# This service deals with GCP Service Accounts in GitLab
class ServiceAccountsService < ::BaseService
class ServiceAccountsService < ::GoogleCloud::BaseService
##
# Find GCP Service Accounts in a GitLab project
#
@ -17,7 +17,7 @@ module GoogleCloud
# aligning GitLab project and ref to GCP projects
def find_for_project
group_vars_by_ref.map do |environment_scope, value|
group_vars_by_environment(GCP_KEYS).map do |environment_scope, value|
{
ref: environment_scope,
gcp_project: value['GCP_PROJECT_ID'],
@ -28,50 +28,24 @@ module GoogleCloud
end
def add_for_project(ref, gcp_project_id, service_account, service_account_key, is_protected)
project_var_create_or_replace(
create_or_replace_project_vars(
ref,
'GCP_PROJECT_ID',
gcp_project_id,
is_protected
)
project_var_create_or_replace(
create_or_replace_project_vars(
ref,
'GCP_SERVICE_ACCOUNT',
service_account,
is_protected
)
project_var_create_or_replace(
create_or_replace_project_vars(
ref,
'GCP_SERVICE_ACCOUNT_KEY',
service_account_key,
is_protected
)
end
private
def group_vars_by_ref
filtered_vars = project.variables.filter { |variable| GCP_KEYS.include? variable.key }
filtered_vars.each_with_object({}) do |variable, grouped|
grouped[variable.environment_scope] ||= {}
grouped[variable.environment_scope][variable.key] = variable.value
end
end
def project_var_create_or_replace(environment_scope, key, value, is_protected)
change_params = { variable_params: { key: key, value: value, environment_scope: environment_scope, protected: is_protected } }
filter_params = { key: key, filter: { environment_scope: environment_scope } }
existing_variable = ::Ci::VariablesFinder.new(project, filter_params).execute.first
if existing_variable
change_params[:action] = :update
change_params[:variable] = existing_variable
else
change_params[:action] = :create
end
::Ci::ChangeVariableService.new(container: project, current_user: current_user, params: change_params).execute
end
end
end

View File

@ -1,8 +1,8 @@
---
name: vsa_reaggregation_worker
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/84171
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/357647
milestone: '14.10'
name: refactor_code_quality_inline_findings
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/88576
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/364198
milestone: '15.1'
type: development
group: group::optimize
default_enabled: true
group: group::static analysis
default_enabled: false
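Development feature flags like this one are checked at runtime and, as in the merge requests controller change above, pushed to the frontend with `push_frontend_feature_flag`. A minimal sketch of the backend check (illustrative only):

```ruby
# Hedged sketch: gate the new rendering path behind the flag.
if Feature.enabled?(:refactor_code_quality_inline_findings, project)
  # render the refactored inline code quality findings
end
```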

View File

@ -5,4 +5,4 @@ rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/345595
milestone: '14.5'
type: development
group: group::source code
default_enabled: false
default_enabled: true

View File

@ -14,6 +14,10 @@
- **RackSpace** Customers using RackSpace-based object storage need to migrate data to a different provider.
If your object storage provider does not support `background_upload`, please [migrate objects to a supported object storage provider](https://docs.gitlab.com/ee/administration/object_storage.html#migrate-objects-to-a-different-object-storage-provider).
Additionally, this breaks the use of [encrypted S3 buckets](https://docs.gitlab.com/ee/administration/object_storage.html#encrypted-s3-buckets) with the [storage-specific configuration form](https://docs.gitlab.com/ee/administration/object_storage.html#storage-specific-configuration).
If your S3 buckets have [SSE-S3 or SSE-KMS encryption enabled](https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html), please [migrate your configuration to use consolidated object storage form](https://docs.gitlab.com/ee/administration/object_storage.html#transition-to-consolidated-form) before upgrading to GitLab 15.0. Otherwise, you may start getting `ETag mismatch` errors during objects upload.
stage: Enablement
tiers: [Core, Premium, Ultimate]
issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/26600

View File

@ -149,7 +149,7 @@ To extract the HTML files of the Docs site:
docker cp gitlab-docs:/usr/share/nginx/html /srv/gitlab/
```
You will end up with a `/srv/gitlab/html/` directory that holds the documentation website.
You end up with a `/srv/gitlab/html/` directory that holds the documentation website.
1. Remove the container:

View File

@ -24,7 +24,7 @@ file system performance, see
Starting with GitLab version 14.0, support for NFS to store Git repository data is deprecated. Technical customer support and engineering support is available for the 14.x releases. Engineering is fixing bugs and security vulnerabilities consistent with our [release and maintenance policy](../policy/maintenance.md#security-releases).
Upon the release of GitLab 15.6 technical and engineering support for using NFS to store Git repository data will be officially at end-of-life. There will be no product changes or troubleshooting provided via Engineering, Security or Paid Support channels after the release date of 15.6, regardless of your GitLab version.
Upon the release of GitLab 15.6 technical and engineering support for using NFS to store Git repository data is officially at end-of-life. There are no product changes or troubleshooting provided via Engineering, Security or Paid Support channels after the release date of 15.6, regardless of your GitLab version.
Until the release of 15.6, for customers running 14.x releases, we continue to help with Git-related tickets from customers running one or more Gitaly servers with their data stored on NFS. Examples may include:
@ -268,9 +268,9 @@ version of a directory.
From the [Linux man page](https://linux.die.net/man/5/nfs), the important parts:
> If the nocto option is specified, the client uses a non-standard heuristic to determine when files on the server have changed.
> If the `nocto` option is specified, the client uses a non-standard heuristic to determine when files on the server have changed.
>
> Using the nocto option may improve performance for read-only mounts, but should be used only if the data on the server changes only occasionally.
> Using the `nocto` option may improve performance for read-only mounts, but should be used only if the data on the server changes only occasionally.
We have noticed this behavior in an issue about [refs not found after a push](https://gitlab.com/gitlab-org/gitlab/-/issues/326066),
where newly added loose refs can be seen as missing on a different client with a local dentry cache, as

View File

@ -173,7 +173,7 @@ ote_pid | tls
Some database changes have to be done directly, and not through PgBouncer.
Read more about the affected tasks: [database restores](../../raketasks/backup_restore.md#back-up-and-restore-for-installations-using-pgbouncer)
and [GitLab upgrades](../../update/zero_downtime.md#use-postgresql-ha).
and [GitLab upgrades](../../update/zero_downtime.md#postgresql).
1. To find the primary node, run the following on a database node:

View File

@ -162,7 +162,7 @@ Be aware of the following specific call outs:
### Praefect PostgreSQL
It's worth noting that at this time [Praefect requires its own database server](../gitaly/praefect.md#postgresql) and
that to achieve full High Availability a third-party PostgreSQL database solution will be required.
that to achieve full High Availability a third-party PostgreSQL database solution is required.
We hope to offer a built-in solution for these restrictions in the future, but in the meantime a non-HA PostgreSQL server
can be set up via Omnibus GitLab, which the above specs reflect. Refer to the following issues for more information: [`omnibus-gitlab#5919`](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5919) & [`gitaly#3398`](https://gitlab.com/gitlab-org/gitaly/-/issues/3398).
@ -232,7 +232,7 @@ The following list includes descriptions of each server and its assigned IP:
## Configure the external load balancer
In a multi-node GitLab configuration, you'll need a load balancer to route
In a multi-node GitLab configuration, you need a load balancer to route
traffic to the application servers. The specifics on which load balancer to use
or its exact configuration is beyond the scope of GitLab documentation. We assume
that if you're managing multi-node systems like GitLab, you already have a load
@ -245,7 +245,7 @@ This architecture has been tested and validated with [HAProxy](https://www.hapro
as the load balancer. Although other load balancers with similar feature sets
could also be used, those load balancers have not been validated.
The next question is how you will handle SSL in your environment.
The next question is how you handle SSL in your environment.
There are several different options:
- [The application node terminates SSL](#application-node-terminates-ssl).
@ -257,8 +257,8 @@ There are several different options:
### Application node terminates SSL
Configure your load balancer to pass connections on port 443 as `TCP` rather
than `HTTP(S)` protocol. This will pass the connection to the application node's
NGINX service untouched. NGINX will have the SSL certificate and listen on port 443.
than `HTTP(S)` protocol. This passes the connection to the application node's
NGINX service untouched. NGINX has the SSL certificate and listens on port 443.
See the [NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
@ -266,10 +266,10 @@ for details on managing SSL certificates and configuring NGINX.
### Load balancer terminates SSL without backend SSL
Configure your load balancer to use the `HTTP(S)` protocol rather than `TCP`.
The load balancer will then be responsible for managing SSL certificates and
The load balancer is then responsible for managing SSL certificates and
terminating SSL.
Since communication between the load balancer and GitLab will not be secure,
Since communication between the load balancer and GitLab is not secure,
there is some additional configuration needed. See the
[NGINX proxied SSL documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#supporting-proxied-ssl)
for details.
@ -277,12 +277,12 @@ for details.
### Load balancer terminates SSL with backend SSL
Configure your load balancers to use the 'HTTP(S)' protocol rather than 'TCP'.
The load balancers will be responsible for managing SSL certificates that
end users will see.
The load balancers are responsible for managing SSL certificates that
end users see.
Traffic will also be secure between the load balancers and NGINX in this
Traffic is also secure between the load balancers and NGINX in this
scenario. There is no need to add configuration for proxied SSL since the
connection will be secure all the way. However, configuration will need to be
connection is secure all the way. However, configuration needs to be
added to GitLab to configure SSL certificates. See
[NGINX HTTPS documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https)
for details on managing SSL certificates and configuring NGINX.
@ -292,7 +292,7 @@ for details on managing SSL certificates and configuring NGINX.
Ensure the external load balancer only routes to working services with built
in monitoring endpoints. The [readiness checks](../../user/admin_area/monitoring/health_check.md)
all require [additional configuration](../monitoring/ip_whitelist.md)
on the nodes being checked, otherwise, the external load balancer will not be able to
on the nodes being checked; otherwise, the external load balancer is not able to
connect.
### Ports
@ -311,11 +311,11 @@ The basic ports to be used are shown in the table below.
to pass through the `Connection` and `Upgrade` hop-by-hop headers. See the
[web terminal](../integration/terminal.md) integration guide for
more details.
- (*2*): When using HTTPS protocol for port 443, you will need to add an SSL
- (*2*): When using HTTPS protocol for port 443, you need to add an SSL
certificate to the load balancers. If you wish to terminate SSL at the
GitLab application server instead, use TCP protocol.
If you're using GitLab Pages with custom domain support you will need some
If you're using GitLab Pages with custom domain support, you need some
additional port configurations.
GitLab Pages requires a separate virtual IP address. Configure DNS to point the
`pages_external_url` from `/etc/gitlab/gitlab.rb` at the new virtual IP address. See the
@ -337,7 +337,7 @@ GitLab Pages requires a separate virtual IP address. Configure DNS to point the
Some organizations have policies against opening SSH port 22. In this case,
it may be helpful to configure an alternate SSH hostname that allows users
to use SSH on port 443. An alternate SSH hostname will require a new virtual IP address
to use SSH on port 443. An alternate SSH hostname requires a new virtual IP address
compared to the other GitLab HTTP configuration above.
Configure DNS for an alternate SSH hostname such as `altssh.gitlab.example.com`.
@ -359,7 +359,7 @@ such as connections to [PgBouncer](#configure-pgbouncer) and [Praefect](#configu
It's a separate node from the External Load Balancer and shouldn't have any access externally.
The following IP will be used as an example:
The following IP is used as an example:
- `10.6.0.40`: Internal Load Balancer
@ -433,8 +433,8 @@ You are highly encouraged to read the [Redis Sentinel](https://redis.io/topics/s
before configuring Redis with GitLab to fully understand the topology and
architecture.
In this section, you'll be guided through configuring an external Redis instance
to be used with GitLab. The following IPs will be used as an example:
In this section, you are guided through configuring an external Redis instance
to be used with GitLab. The following IPs are used as an example:
- `10.6.0.61`: Redis Primary
- `10.6.0.62`: Redis Replica 1
@ -442,7 +442,7 @@ to be used with GitLab. The following IPs will be used as an example:
### Provide your own Redis instance
Managed Redis from cloud providers such as AWS ElastiCache will work. If these
Managed Redis from cloud providers such as AWS ElastiCache works. If these
services support high availability, be sure it is **not** the Redis Cluster type.
Redis version 5.0 or higher is required, as this is what ships with
@ -451,7 +451,7 @@ do not support an optional count argument to SPOP which is now required for
[Merge Trains](../../ci/pipelines/merge_trains.md).
Note the Redis node's IP address or hostname, port, and password (if required).
These will be necessary when configuring the
These are necessary when configuring the
[GitLab application servers](#configure-gitlab-rails) later.
### Standalone Redis using Omnibus GitLab
@ -617,8 +617,8 @@ You can specify multiple roles, like sentinel and Redis, as:
[roles](https://docs.gitlab.com/omnibus/roles/).
These values don't have to be changed again in `/etc/gitlab/gitlab.rb` after
a failover, as the nodes will be managed by the [Sentinels](#configure-consul-and-sentinel), and even after a
`gitlab-ctl reconfigure`, they will get their configuration restored by
a failover, as the nodes are managed by the [Sentinels](#configure-consul-and-sentinel), and even after a
`gitlab-ctl reconfigure`, they get their configuration restored by
the same Sentinels.
Advanced [configuration options](https://docs.gitlab.com/omnibus/settings/redis.html)
@ -633,7 +633,7 @@ are supported and can be added if needed.
## Configure Consul and Sentinel
Now that the Redis servers are all set up, let's configure the Sentinel
servers. The following IPs will be used as an example:
servers. The following IPs are used as an example:
- `10.6.0.11`: Consul/Sentinel 1
- `10.6.0.12`: Consul/Sentinel 2
@ -647,7 +647,7 @@ clients to report `NOAUTH Authentication required.`.
To configure the Sentinel:
1. SSH in to the server that will host Consul/Sentinel.
1. SSH in to the server that hosts Consul/Sentinel.
1. [Download and install](https://about.gitlab.com/install/) the Omnibus GitLab
package of your choice. Be sure to both follow _only_ installation steps 1 and 2
on the page, and to select the correct Omnibus GitLab package, with the same version
@ -776,7 +776,7 @@ run: sentinel: (pid 30098) 76832s; run: log: (pid 29704) 76850s
## Configure PostgreSQL
In this section, you'll be guided through configuring a highly available PostgreSQL
In this section, you are guided through configuring a highly available PostgreSQL
cluster to be used with GitLab.
### Provide your own PostgreSQL instance
@ -813,7 +813,7 @@ replication and failover requires:
A local PgBouncer service to be configured on each PostgreSQL node. Note that this is separate from the main PgBouncer cluster that tracks the primary.
The following IPs will be used as an example:
The following IPs are used as an example:
- `10.6.0.31`: PostgreSQL primary
- `10.6.0.32`: PostgreSQL secondary 1
@ -828,8 +828,8 @@ in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL nodes
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you use the default
username of `gitlab` (recommended). The command requests a password
and confirmation. Use the value that is output by this command in the next
step as the value of `<postgresql_password_hash>`:
@ -837,8 +837,8 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab
```
1. Generate a password hash for the PgBouncer username/password pair. This assumes you will use the default
username of `pgbouncer` (recommended). The command will request a password
1. Generate a password hash for the PgBouncer username/password pair. This assumes you use the default
username of `pgbouncer` (recommended). The command requests a password
and confirmation. Use the value that is output by this command in the next
step as the value of `<pgbouncer_password_hash>`:
@ -846,8 +846,8 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 pgbouncer
```
1. Generate a password hash for the PostgreSQL replication username/password pair. This assumes you will use the default
username of `gitlab_replicator` (recommended). The command will request a password
1. Generate a password hash for the PostgreSQL replication username/password pair. This assumes you use the default
username of `gitlab_replicator` (recommended). The command requests a password
and a confirmation. Use the value that is output by this command in the next step
as the value of `<postgresql_replication_password_hash>`:
@ -855,8 +855,8 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab_replicator
```
1. Generate a password hash for the Consul database username/password pair. This assumes you will use the default
username of `gitlab-consul` (recommended). The command will request a password
1. Generate a password hash for the Consul database username/password pair. This assumes you use the default
username of `gitlab-consul` (recommended). The command requests a password
and confirmation. Use the value that is output by this command in the next
step as the value of `<consul_password_hash>`:
@ -935,7 +935,7 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
PostgreSQL, with Patroni managing its failover, will default to use `pg_rewind` by default to handle conflicts.
PostgreSQL, with Patroni managing its failover, defaults to using `pg_rewind` to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
@ -987,7 +987,7 @@ If the 'State' column for any node doesn't say "running", check the
Now that the PostgreSQL servers are all set up, let's configure PgBouncer
for tracking and handling reads/writes to the primary database.
The following IPs will be used as an example:
The following IPs are used as an example:
- `10.6.0.21`: PgBouncer 1
- `10.6.0.22`: PgBouncer 2
@ -1111,9 +1111,9 @@ The recommended cluster setup includes the following components:
- 1 Praefect PostgreSQL node: Database server for Praefect. A third-party solution
is required for Praefect database connections to be made highly available.
- 1 load balancer: A load balancer is required for Praefect. The
[internal load balancer](#configure-the-internal-load-balancer) will be used.
[internal load balancer](#configure-the-internal-load-balancer) is used.
This section will detail how to configure the recommended standard setup in order.
This section details how to configure the recommended standard setup in order.
For more advanced setups refer to the [standalone Gitaly Cluster documentation](../gitaly/praefect.md).
### Configure Praefect PostgreSQL
@ -1125,7 +1125,7 @@ A built-in solution is being [worked on](https://gitlab.com/gitlab-org/omnibus-g
#### Praefect non-HA PostgreSQL standalone using Omnibus GitLab
The following IPs will be used as an example:
The following IPs are used as an example:
- `10.6.0.141`: Praefect PostgreSQL
@ -1137,8 +1137,8 @@ in the second step, do not supply the `EXTERNAL_URL` value.
1. SSH in to the Praefect PostgreSQL node.
1. Create a strong password to be used for the Praefect PostgreSQL user. Take note of this password as `<praefect_postgresql_password>`.
1. Generate the password hash for the Praefect PostgreSQL username/password pair. This assumes you will use the default
username of `praefect` (recommended). The command will request the password `<praefect_postgresql_password>`
1. Generate the password hash for the Praefect PostgreSQL username/password pair. This assumes you use the default
username of `praefect` (recommended). The command requests the password `<praefect_postgresql_password>`
and confirmation. Use the value that is output by this command in the next
step as the value of `<praefect_postgresql_password_hash>`:
@ -1221,7 +1221,7 @@ Once the database is set up, follow the [post configuration](#praefect-postgresq
#### Praefect PostgreSQL post-configuration
After the Praefect PostgreSQL server has been set up, you'll then need to configure the user and database for Praefect to use.
After the Praefect PostgreSQL server has been set up, you then need to configure the user and database for Praefect to use.
We recommend the user be named `praefect` and the database `praefect_production`, and these can be configured as standard in PostgreSQL.
The password for the user is the same as the one you configured earlier as `<praefect_postgresql_password>`.
@ -1274,12 +1274,12 @@ Praefect requires several secret tokens to secure communications across the Clus
Gitaly Cluster nodes are configured in Praefect via a `virtual storage`. Each storage contains
the details of each Gitaly node that makes up the cluster. Each storage is also given a name
and this name is used in several areas of the configuration. In this guide, the name of the storage will be
and this name is used in several areas of the configuration. In this guide, the name of the storage is
`default`. Also, this guide is geared towards new installs; if upgrading an existing environment
to use Gitaly Cluster, you may need to use a different name.
Refer to the [Praefect documentation](../gitaly/praefect.md#praefect) for more information.
The following IPs will be used as an example:
The following IPs are used as an example:
- `10.6.0.131`: Praefect 1
- `10.6.0.132`: Praefect 2
@ -1429,7 +1429,7 @@ For configuring Gitaly you should note the following:
- `git_data_dirs` should be configured to reflect the storage path for the specific Gitaly node
- `auth_token` should be the same as `praefect_internal_token`
The following IPs will be used as an example:
The following IPs are used as an example:
- `10.6.0.91`: Gitaly 1
- `10.6.0.92`: Gitaly 2
@ -1811,7 +1811,7 @@ On each node perform the following:
```
1. Specify the necessary NFS mounts in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
The exact contents of `/etc/fstab` depend on how you chose
to configure your NFS server. See the [NFS documentation](../nfs.md)
for examples and the various options.
@ -1827,9 +1827,9 @@ On each node perform the following:
on the page.
1. Create or edit `/etc/gitlab/gitlab.rb` and use the following configuration.
To maintain uniformity of links across nodes, the `external_url`
on the application server should point to the external URL that users will use
on the application server should point to the external URL that users use
to access GitLab. This would be the URL of the [external load balancer](#configure-the-external-load-balancer)
which will route traffic to the GitLab application server:
which routes traffic to the GitLab application server:
```ruby
external_url 'https://gitlab.example.com'
@ -1999,7 +1999,7 @@ On each node perform the following:
When you specify `https` in the `external_url`, as in the previous example,
GitLab expects that the SSL certificates are in `/etc/gitlab/ssl/`. If the
certificates aren't present, NGINX will fail to start. For more information, see
certificates aren't present, NGINX fails to start. For more information, see
the [NGINX documentation](https://docs.gitlab.com/omnibus/settings/nginx.html#enable-https).
### GitLab Rails post-configuration

View File

@ -296,7 +296,7 @@ The following table details the cost to run the different reference architecture
NOTE:
The following lists are non exhaustive. Generally, other cloud providers not listed
here will likely work with the same specs, but this hasn't been validated.
here likely work with the same specs, but this hasn't been validated.
Additionally, when it comes to other cloud provider services not listed here,
it's advised to be cautious as each implementation can be notably different
and should be tested thoroughly before production use.
@ -389,7 +389,7 @@ most complex:
As you implement these components, begin with a single server and then do
backups. Only after completing the first server should you proceed to the next.
Also, not implementing extra servers for GitLab doesn't necessarily mean that you'll have
Also, not implementing extra servers for GitLab doesn't necessarily mean that you have
more downtime. Depending on your needs and experience level, single servers can
have more actual perceived uptime for your users.
@ -410,7 +410,7 @@ is the least complex to setup. This provides a point-in-time recovery of a prede
> - Required domain knowledge: HAProxy, shared storage, distributed systems
This requires separating out GitLab into multiple application nodes with an added
[load balancer](../load_balancer.md). The load balancer will distribute traffic
[load balancer](../load_balancer.md). The load balancer distributes traffic
across GitLab application nodes. Meanwhile, each application node connects to a
shared file server and database systems on the back end. This way, if one of the
application servers fails, the workflow is not interrupted.
@ -434,7 +434,7 @@ to any of the [available reference architectures](#available-reference-architect
GitLab supports [zero-downtime upgrades](../../update/zero_downtime.md).
Single GitLab nodes can be updated with only a [few minutes of downtime](../../update/index.md#upgrade-based-on-installation-method).
To avoid this, we recommend separating GitLab into several application nodes.
As long as at least one of each component is online and capable of handling the instance's usage load, your team's productivity will not be interrupted during the update.
As long as at least one of each component is online and capable of handling the instance's usage load, your team's productivity is not interrupted during the update.
### Automated database failover **(PREMIUM SELF)**
@ -459,8 +459,8 @@ that can also be promoted in case of disaster.
## Deviating from the suggested reference architectures
As a general guideline, the further away you move from the Reference Architectures,
the harder it will be get support for it. With any deviation, you're introducing
a layer of complexity that will add challenges to finding out where potential
the harder it is to get support for it. With any deviation, you're introducing
a layer of complexity that adds challenges to finding out where potential
issues might lie.
The reference architectures use the official GitLab Linux packages (Omnibus
@ -474,7 +474,7 @@ However, it is still an additional layer and may still add some support complexi
Other technologies, like [Docker swarm](https://docs.docker.com/engine/swarm/)
are not officially supported, but can be implemented at your own risk. In that
case, GitLab Support will not be able to help you.
case, GitLab Support is not able to help you.
## Supported modifications for lower user count HA reference architectures

View File

@ -24,7 +24,7 @@ files must be provided:
- Only RSA keys are supported.
Optionally, you can also provide a bundle of CA certs (PEM-encoded) to be
included on each signature. This will typically be an intermediate CA.
included on each signature. This is typically an intermediate CA.
WARNING:
Be mindful of the access levels for your private keys and visibility to

View File

@ -113,3 +113,95 @@ Adding a user:
SSO settings:
![OneLogin SSO settings](img/OneLogin-SSOsettings.png)
## SAML response example
When a user signs in using SAML, GitLab receives a SAML response. The SAML response can be found in `production.log` as a base64-encoded message. Locate the response by
searching for `SAMLResponse`. The decoded SAML response is in XML format. For example:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<saml2p:Response xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:xs="http://www.w3.org/2001/XMLSchema" Destination="https://gitlabexample/-/saml/callback" ID="id4898983630840142426821432" InResponseTo="_c65e4c88-9425-4472-b42c-37f4186ac0ee" IssueInstant="2022-05-30T21:30:35.696Z" Version="2.0">
<saml2:Issuer xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://www.okta.com/exk2y6j57o1Pdr2lI8qh7</saml2:Issuer>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/>
<ds:Reference URI="#id4898983630840142426821432">
<ds:Transforms>
<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
<ec:InclusiveNamespaces xmlns:ec="http://www.w3.org/2001/10/xml-exc-c14n#" PrefixList="xs"/>
</ds:Transform>
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/>
<ds:DigestValue>neiQvv9d3OgS4GZW8Nptp4JhjpKs3GCefibn+vmRgk4=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>dMsQX8ivi...HMuKGhyLRvabGU6CuPrf7==</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>MIIDq...cptGr3vN9TQ==</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
<saml2p:Status xmlns:saml2p="urn:oasis:names:tc:SAML:2.0:protocol">
<saml2p:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</saml2p:Status>
<saml2:Assertion xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:xs="http://www.w3.org/2001/XMLSchema" ID="id489" IssueInstant="2022-05-30T21:30:35.696Z" Version="2.0">
<saml2:Issuer xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://www.okta.com/exk2y6j57o1Pdr2lI8qh7</saml2:Issuer>
<ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
<ds:SignedInfo>
<ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/>
<ds:Reference URI="#id48989836309833801859473359">
<ds:Transforms>
<ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
<ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#">
<ec:InclusiveNamespaces xmlns:ec="http://www.w3.org/2001/10/xml-exc-c14n#" PrefixList="xs"/>
</ds:Transform>
</ds:Transforms>
<ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/>
<ds:DigestValue>MaIsoi8hbT9gsi/mNZsz449mUuAcuEWY0q3bc4asOQs=</ds:DigestValue>
</ds:Reference>
</ds:SignedInfo>
<ds:SignatureValue>dMsQX8ivi...HMuKGhyLRvabGU6CuPrf7==</ds:SignatureValue>
<ds:KeyInfo>
<ds:X509Data>
<ds:X509Certificate>MIIDq...cptGr3vN9TQ==</ds:X509Certificate>
</ds:X509Data>
</ds:KeyInfo>
</ds:Signature>
<saml2:Subject xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">
<saml2:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent">useremail@domain.com</saml2:NameID>
<saml2:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml2:SubjectConfirmationData InResponseTo="_c65e4c88-9425-4472-b42c-37f4186ac0ee" NotOnOrAfter="2022-05-30T21:35:35.696Z" Recipient="https://gitlab.example.com/-/saml/callback"/>
</saml2:SubjectConfirmation>
</saml2:Subject>
<saml2:Conditions xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" NotBefore="2022-05-30T21:25:35.696Z" NotOnOrAfter="2022-05-30T21:35:35.696Z">
<saml2:AudienceRestriction>
<saml2:Audience>https://gitlab.example.com/</saml2:Audience>
</saml2:AudienceRestriction>
</saml2:Conditions>
<saml2:AuthnStatement xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion" AuthnInstant="2022-05-30T21:30:35.696Z" SessionIndex="_c65e4c88-9425-4472-b42c-37f4186ac0ee">
<saml2:AuthnContext>
<saml2:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</saml2:AuthnContextClassRef>
</saml2:AuthnContext>
</saml2:AuthnStatement>
<saml2:AttributeStatement xmlns:saml2="urn:oasis:names:tc:SAML:2.0:assertion">
<saml2:Attribute Name="email" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string">useremail@domain.com</saml2:AttributeValue>
</saml2:Attribute>
<saml2:Attribute Name="firtname" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string">John</saml2:AttributeValue>
</saml2:Attribute>
<saml2:Attribute Name="lastname" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string">Doe</saml2:AttributeValue>
</saml2:Attribute>
<saml2:Attribute Name="Groups" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified">
<saml2:AttributeValue xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string">Super-awesome-group</saml2:AttributeValue>
</saml2:Attribute>
</saml2:AttributeStatement>
</saml2:Assertion>
</saml2p:Response>
```
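Because the logged `SAMLResponse` value is base64-encoded, it must be decoded before it looks like the XML above. A minimal Ruby sketch using only the standard library (the file name is a placeholder for wherever you pasted the copied value):

```ruby
require 'base64'

# saml_response.b64 is a placeholder file containing the raw SAMLResponse
# value copied out of production.log.
encoded = File.read('saml_response.b64')
puts Base64.decode64(encoded)
```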

View File

@ -24,7 +24,7 @@ The following API resources are available in the project context:
| Resource | Available endpoints |
|:------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [Access requests](access_requests.md) | `/projects/:id/access_requests` (also available for groups) |
| [Access tokens](project_access_tokens.md) | `/projects/:id/access_tokens` (also available for groups) |
| [Access tokens](project_access_tokens.md) | `/projects/:id/access_tokens` (also available for groups) |
| [Agents](cluster_agents.md) | `/projects/:id/cluster_agents` |
| [Award emoji](award_emoji.md) | `/projects/:id/issues/.../award_emoji`, `/projects/:id/merge_requests/.../award_emoji`, `/projects/:id/snippets/.../award_emoji` |
| [Branches](branches.md) | `/projects/:id/repository/branches/`, `/projects/:id/repository/merged_branches` |
@ -57,6 +57,7 @@ The following API resources are available in the project context:
| [Merge request approvals](merge_request_approvals.md) **(PREMIUM)** | `/projects/:id/approvals`, `/projects/:id/merge_requests/.../approvals` |
| [Merge requests](merge_requests.md) | `/projects/:id/merge_requests` (also available for groups and standalone) |
| [Merge trains](merge_trains.md) | `/projects/:id/merge_trains` |
| [Metadata](metadata.md) | `/metadata` |
| [Notes](notes.md) (comments) | `/projects/:id/issues/.../notes`, `/projects/:id/snippets/.../notes`, `/projects/:id/merge_requests/.../notes` (also available for groups) |
| [Notification settings](notification_settings.md) | `/projects/:id/notification_settings` (also available for groups and standalone) |
| [Packages](packages.md) | `/projects/:id/packages` |
@ -70,7 +71,7 @@ The following API resources are available in the project context:
| [Project milestones](milestones.md) | `/projects/:id/milestones` |
| [Project snippets](project_snippets.md) | `/projects/:id/snippets` |
| [Project templates](project_templates.md) | `/projects/:id/templates` |
| [Project vulnerabilities](project_vulnerabilities.md) **(ULTIMATE)** | `/projects/:id/vulnerabilities` |
| [Project vulnerabilities](project_vulnerabilities.md) **(ULTIMATE)** | `/projects/:id/vulnerabilities` |
| [Project wikis](wikis.md) | `/projects/:id/wikis` |
| [Project-level variables](project_level_variables.md) | `/projects/:id/variables` |
| [Projects](projects.md) including setting Webhooks | `/projects`, `/projects/:id/hooks` (also available for users) |

doc/api/metadata.md (new file)
View File

@ -0,0 +1,46 @@
---
stage: Ecosystem
group: Integrations
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Metadata API **(FREE)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/357032) in GitLab 15.1.
Retrieve metadata information for this GitLab instance.
```plaintext
GET /metadata
```
Response body attributes:
| Attribute | Type | Description |
|:------------------|:---------------|:-----------------------------------------------------------------------------------------|
| `version` | string | Version of the GitLab instance. |
| `revision` | string | Revision of the GitLab instance. |
| `kas` | object | Metadata about the GitLab agent server for Kubernetes (KAS). |
| `kas.enabled` | boolean | Indicates whether KAS is enabled. |
| `kas.externalUrl` | string or null | URL used by the agents to communicate with KAS. It's `null` if `kas.enabled` is `false`. |
| `kas.version` | string or null | Version of KAS. It's `null` if `kas.enabled` is `false`. |
Example request:
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/metadata"
```
Example response:
```json
{
"version": "15.0-pre",
"revision": "c401a659d0c",
"kas": {
"enabled": true,
"externalUrl": "grpc://gitlab.example.com:8150",
"version": "15.0.0"
}
}
```
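The endpoint can equally be called from any HTTP client. A minimal Ruby sketch using only the standard library (the host and the `GITLAB_TOKEN` environment variable are placeholders):

```ruby
require 'net/http'
require 'json'

uri = URI('https://gitlab.example.com/api/v4/metadata')
request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = ENV.fetch('GITLAB_TOKEN') # placeholder token source

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end

metadata = JSON.parse(response.body)
puts metadata['version'] # => "15.0-pre" in the example above
```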

View File

@ -1,5 +1,7 @@
openapi: 3.0.0
tags:
- name: metadata
description: Metadata of the GitLab instance
- name: version
description: Version
- name: access_requests
@ -39,6 +41,10 @@ components:
name: Private-Token
paths:
# METADATA
/v4/metadata:
$ref: 'v4/metadata.yaml'
# VERSION
/v4/version:
$ref: 'v4/version.yaml'
@ -49,7 +55,7 @@ paths:
/v4/projects/{id}/access_requests/{user_id}/approve:
$ref: 'v4/access_requests.yaml#/accessRequestsProjectsApprove'
/v4/projects/{id}/access_requests/{user_id}:
$ref: 'v4/access_requests.yaml#/accessRequestsProjectsDeny'
@ -59,7 +65,7 @@ paths:
/v4/groups/{id}/access_requests/{user_id}/approve:
$ref: 'v4/access_requests.yaml#/accessRequestsGroupsApprove'
/v4/groups/{id}/access_requests/{user_id}:
$ref: 'v4/access_requests.yaml#/accessRequestsGroupsDeny'
@ -68,4 +74,4 @@ paths:
$ref: 'v4/access_tokens.yaml#/accessTokens'
/v4/projects/{id}/access_tokens/{token_id}:
$ref: 'v4/access_tokens.yaml#/accessTokensRevoke'
$ref: 'v4/access_tokens.yaml#/accessTokensRevoke'

View File

@ -0,0 +1,43 @@
# Markdown documentation: https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/api/metadata.md
get:
tags:
- metadata
summary: "Retrieve metadata information for this GitLab instance."
operationId: "getMetadata"
responses:
"401":
description: "unauthorized operation"
"200":
description: "successful operation"
content:
"application/json":
schema:
title: "MetadataResponse"
type: "object"
properties:
version:
type: "string"
revision:
type: "string"
kas:
type: "object"
properties:
enabled:
type: "boolean"
externalUrl:
type: "string"
nullable: true
version:
type: "string"
nullable: true
examples:
Example:
value:
version: "15.0-pre"
revision: "c401a659d0c"
kas:
enabled: true
externalUrl: "grpc://gitlab.example.com:8150"
version: "15.0.0"

View File

@ -547,25 +547,29 @@ You can use [protected branches](../../user/project/protected_branches.md) to mo
### Types of manual jobs
Manual jobs can be either optional or blocking:
Manual jobs can be either optional or blocking.
- **Optional**: The default setting for manual jobs.
- They have [`allow_failure: true`](../yaml/index.md#allow_failure) by default.
- The status does not contribute to the overall pipeline status. A pipeline can
succeed even if all of its manual jobs fail.
- **Blocking**: An optional setting for manual jobs.
- Add `allow_failure: false` to the job configuration.
- The pipeline stops at the stage where the job is defined. To let the pipeline
continue running, [run the manual job](#run-a-manual-job).
- Merge requests in projects with [merge when pipeline succeeds](../../user/project/merge_requests/merge_when_pipeline_succeeds.md)
enabled can't be merged with a blocked pipeline. Blocked pipelines show a status
of **blocked**.
In optional manual jobs:
- [`allow_failure`](../yaml/index.md#allow_failure) is `true`, which is the default
setting for jobs that have `when: manual` and no [`rules`](../yaml/index.md#rules),
or `when: manual` defined outside of `rules`.
- The status does not contribute to the overall pipeline status. A pipeline can
succeed even if all of its manual jobs fail.
In blocking manual jobs:
- `allow_failure` is `false`, which is the default setting for jobs that have `when: manual`
defined inside [`rules`](../yaml/index.md#rules).
- The pipeline stops at the stage where the job is defined. To let the pipeline
continue running, [run the manual job](#run-a-manual-job).
- Merge requests in projects with [merge when pipeline succeeds](../../user/project/merge_requests/merge_when_pipeline_succeeds.md)
enabled can't be merged with a blocked pipeline.
- The pipeline shows a status of **blocked**.
### Run a manual job
To run a manual job, you must have permission to merge to the assigned branch.
To run a manual job:
To run a manual job, you must have permission to merge to the assigned branch:
1. Go to the pipeline, job, [environment](../environments/index.md#configure-manual-deployments),
or deployment view.

View File

@ -20,27 +20,24 @@ WHERE user_id = 2;
Here we are filtering by the `user_id` column and as such a developer may decide
to index this column.
While in certain cases indexing columns using the above approach may make sense
it can actually have a negative impact. Whenever you write data to a table any
existing indexes need to be updated. The more indexes there are the slower this
can potentially become. Indexes can also take up quite some disk space depending
While in certain cases indexing columns using the above approach may make sense,
it can actually have a negative impact. Whenever you write data to a table, any
existing indexes must also be updated. The more indexes there are, the slower this
can potentially become. Indexes can also take up significant disk space, depending
on the amount of data indexed and the index type. For example, PostgreSQL offers
"GIN" indexes which can be used to index certain data types that can not be
indexed by regular B-tree indexes. These indexes however generally take up more
`GIN` indexes which can be used to index certain data types that cannot be
indexed by regular B-tree indexes. These indexes, however, generally take up more
data and are slower to update compared to B-tree indexes.
Because of all this one should not blindly add a new index for every column used
to filter data by. Instead one should ask themselves the following questions:
Because of all this, it's important to make the following considerations
when adding a new index:
1. Can you write your query in such a way that it re-uses as many existing indexes
as possible?
1. Is the data large enough that using an index is actually
faster than just iterating over the rows in the table?
1. Do the new queries re-use as many existing indexes as possible?
1. Is there enough data that using an index is faster than iterating over
rows in the table?
1. Is the overhead of maintaining the index worth the reduction in query
timings?
We explore every question in detail below.
## Re-using Queries
The first step is to make sure your query re-uses as many existing indexes as
@ -59,10 +56,8 @@ unindexed. In reality the query may perform just fine given the index on
`user_id` can filter out enough rows.
The best way to determine if indexes are re-used is to run your query using
`EXPLAIN ANALYZE`. Depending on any extra tables that may be joined and
other columns being used for filtering you may find an extra index is not going
to make much (if any) difference. On the other hand you may determine that the
index _may_ make a difference.
`EXPLAIN ANALYZE`. Depending on the joined tables and the columns being used for filtering,
you may find an extra index doesn't make much, if any, difference.
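For example, from the Rails console (a sketch; `Email` stands in for whichever model backs the table you are querying):

```ruby
# Plain EXPLAIN through ActiveRecord; the query is planned but not executed.
puts Email.where(user_id: 2).explain

# EXPLAIN ANALYZE executes the query, so run it against representative data.
ActiveRecord::Base.connection
  .execute('EXPLAIN ANALYZE SELECT * FROM emails WHERE user_id = 2')
  .each { |row| puts row['QUERY PLAN'] }
```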
In short:
@ -73,28 +68,24 @@ In short:
## Data Size
A database may decide not to use an index despite it existing in case a regular
sequence scan (= simply iterating over all existing rows) is faster. This is
especially the case for small tables.
A database may not use an index when a regular sequence scan
(iterating over all rows) is faster, especially for small tables.
If a table is expected to grow in size and you expect your query has to filter
out a lot of rows you may want to consider adding an index. If the table size is
very small (for example, fewer than `1,000` records) or any existing indexes filter out
enough rows you may _not_ want to add a new index.
Consider adding an index if a table is expected to grow, and your query has to filter a lot of rows.
You may _not_ want to add an index if the table size is small (<`1,000` records),
or if existing indexes already filter out enough rows.
## Maintenance Overhead
Indexes have to be updated on every table write. In case of PostgreSQL _all_
Indexes have to be updated on every table write. In the case of PostgreSQL, _all_
existing indexes are updated whenever data is written to a table. As a
result of this having many indexes on the same table slows down writes.
result, having many indexes on the same table slows down writes. It's therefore important
to balance query performance with the overhead of maintaining an extra index.
Because of this one should ask themselves: is the reduction in query performance
worth the overhead of maintaining an extra index?
If adding an index reduces SELECT timings by 5 milliseconds but increases
INSERT/UPDATE/DELETE timings by 10 milliseconds then the index may not be worth
it. On the other hand, if SELECT timings are reduced but INSERT/UPDATE/DELETE
timings are not affected you may want to add the index after all.
Let's say that adding an index reduces SELECT timings by 5 milliseconds but increases
INSERT/UPDATE/DELETE timings by 10 milliseconds. In this case, the new index may not be worth
it. A new index is more valuable when SELECT timings are reduced and INSERT/UPDATE/DELETE
timings are unaffected.
## Finding Unused Indexes
@ -111,26 +102,25 @@ ORDER BY pg_relation_size(indexrelname::regclass) desc;
```
This query outputs a list containing all indexes that are never used and sorts
them by indexes sizes in descending order. This query can be useful to
determine if any previously indexes are useful after all. More information on
them by index sizes in descending order. This query helps in
determining whether existing indexes are still required. More information on
the meaning of the various columns can be found at
<https://www.postgresql.org/docs/current/monitoring-stats.html>.
Because the output of this query relies on the actual usage of your database it
may be affected by factors such as (but not limited to):
Because the query output relies on the actual usage of your database, it
may be affected by factors such as:
- Certain queries never being executed, thus not being able to use certain
indexes.
- Certain tables having little data, resulting in PostgreSQL using sequence
scans instead of index scans.
In other words, this data is only reliable for a frequently used database with
plenty of data and with as many GitLab features enabled (and being used) as
possible.
This data is only reliable for a frequently used database with
plenty of data, and using as many GitLab features as possible.
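As a sketch, a minimal variant of that query can be run from the Rails console (the full query appears earlier in this section):

```ruby
sql = <<~SQL
  SELECT relname, indexrelname, idx_scan
  FROM pg_stat_user_indexes
  WHERE idx_scan = 0
  ORDER BY pg_relation_size(indexrelname::regclass) DESC;
SQL

ActiveRecord::Base.connection.execute(sql).each do |row|
  puts "#{row['relname']}.#{row['indexrelname']} (scans: #{row['idx_scan']})"
end
```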
## Requirements for naming indexes
Indexes with complex definitions need to be explicitly named rather than
Indexes with complex definitions must be explicitly named rather than
relying on the implicit naming behavior of migration methods. In short,
that means you **must** provide an explicit name argument for an index
created with one or more of the following options:
@ -172,7 +162,7 @@ end
Creation of the second index would fail, because Rails would generate
the same name for both indexes.
This is further complicated by the behavior of the `index_exists?` method.
This naming issue is further complicated by the behavior of the `index_exists?` method.
It considers only the table name, column names, and uniqueness specification
of the index when making a comparison. Consider:
@ -188,7 +178,7 @@ The call to `index_exists?` returns true if **any** index exists on
`:my_table` and `:my_column`, and index creation is bypassed.
The `add_concurrent_index` helper is a requirement for creating indexes
on populated tables. Since it cannot be used inside a transactional
on populated tables. Because it cannot be used inside a transactional
migration, it has a built-in check that detects if the index already
exists. In the event a match is found, index creation is skipped.
Without an explicit name argument, Rails can return a false positive
@ -201,14 +191,15 @@ chance of error is greatly reduced.
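To avoid that false positive, always pass an explicit `name:`. A sketch of such a migration, reusing the `:my_table`/`:my_column` example above (the index name and condition are illustrative):

```ruby
class AddPartialIndexToMyTable < Gitlab::Database::Migration[2.0]
  disable_ddl_transaction!

  INDEX_NAME = 'index_my_table_on_my_column_without_deleted'

  def up
    add_concurrent_index :my_table, :my_column, where: 'deleted_at IS NULL', name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :my_table, INDEX_NAME
  end
end
```

With an explicit name, the existence check can match the exact index instead of any index on the same columns.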
There may be times when an index is only needed temporarily.
For example, in a migration, a column of a table might be conditionally
updated. To query which columns need to be updated within the
[query performance guidelines](query_performance.md), an index is needed that would otherwise
not be used.
updated. To query which columns must be updated in the
[query performance guidelines](query_performance.md), an index is needed
that would otherwise not be used.
In these cases, a temporary index should be considered. To specify a
In these cases, consider a temporary index. To specify a
temporary index (a sketch follows the list):
1. Prefix the index name with `tmp_` and follow the [naming conventions](database/constraint_naming_convention.md) and [requirements for naming indexes](#requirements-for-naming-indexes) for the rest of the name.
1. Prefix the index name with `tmp_` and follow the [naming conventions](database/constraint_naming_convention.md)
and [requirements for naming indexes](#requirements-for-naming-indexes) for the rest of the name.
1. Create a follow-up issue to remove the index in the next (or future) milestone.
1. Add a comment in the migration mentioning the removal issue.
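A sketch of such a migration; the table, column, and condition are illustrative:

```ruby
class AddTmpIndexToIssuesOnId < Gitlab::Database::Migration[2.0]
  disable_ddl_transaction!

  INDEX_NAME = 'tmp_index_issues_on_id_where_confidential'

  def up
    # Temporary index to support the backfill; removal tracked in <link to follow-up issue>.
    add_concurrent_index :issues, :id, where: 'confidential IS TRUE', name: INDEX_NAME
  end

  def down
    remove_concurrent_index_by_name :issues, INDEX_NAME
  end
end
```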
@ -237,10 +228,10 @@ on GitLab.com, the deployment process is blocked waiting for index
creation to finish.
To limit impact on GitLab.com, a process exists to create indexes
asynchronously during weekend hours. Due to generally lower levels of
traffic and lack of regular deployments, this process allows the
creation of indexes to proceed with a lower level of risk. The below
sections describe the steps required to use these features:
asynchronously during weekend hours. Due to generally lower traffic and fewer deployments,
index creation can proceed at a lower level of risk.
### Schedule index creation for a low-impact time
1. [Schedule the index to be created](#schedule-the-index-to-be-created) (a sketch follows this list).
1. [Verify the MR was deployed and the index exists in production](#verify-the-mr-was-deployed-and-the-index-exists-in-production).
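For the first step, a sketch of the scheduling migration, assuming the `prepare_async_index` helper; the table, column, and name are illustrative:

```ruby
class PrepareIndexOnMyTableMyColumn < Gitlab::Database::Migration[2.0]
  INDEX_NAME = 'index_my_table_on_my_column'

  def up
    prepare_async_index :my_table, :my_column, name: INDEX_NAME
  end

  def down
    unprepare_async_index :my_table, :my_column, name: INDEX_NAME
  end
end
```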
@ -291,12 +282,10 @@ migration as expected for other installations. The below block
demonstrates how to create the second migration for the previous
asynchronous example.
WARNING:
The responsibility lies on the individual writing the migrations to verify
the index exists in production before merging a second migration that
adds the index using `add_concurrent_index`. If the second migration is
deployed and the index has not yet been created, the index is created
synchronously when the second migration executes.
WARNING:
Verify that the index exists in production before merging a second migration with `add_concurrent_index`.
If the second migration is deployed before the index has been created,
the index is created synchronously when the second migration executes.
```ruby
# in db/post_migrate/

View File

@ -227,3 +227,21 @@ Use as `widget: list`. This inserts a `list` in the YAML file.
| `invalidFeedback` | **{dotted-circle}** No | string | Help text displayed when the pattern validation fails. |
| `default` | **{dotted-circle}** No | list | The default value for the list |
| `id` | **{dotted-circle}** No | string | The input field ID is usually autogenerated but can be overridden by providing this property. |
#### Checklist
Use as `widget: checklist`. This inserts a list of checkboxes that need to
be checked before proceeding to the next step.
| Name | Required | Type | Description |
|---------|------------------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `title` | **{dotted-circle}** No | string | A title above the checklist items. |
| `items` | **{dotted-circle}** No | list | A list of items that need to be checked. Each item corresponds to one checkbox, and can be a string or [checklist item](#checklist-item). |
##### Checklist Item
| Name | Required | Type | Description |
|--------|------------------------|---------|-----------------------------------------|
| `text` | **{check-circle}** Yes | string | The text displayed for the checklist item. |
| `help` | **{dotted-circle}** No | string | Help text explaining the item. |
| `id` | **{dotted-circle}** No | string | The input field ID is usually autogenerated but can be overridden by providing this property. |

View File

@ -64,18 +64,14 @@ emails = Email.where(user_id: 1) # returns emails for the deleted user
Add a `NOT VALID` foreign key constraint to the table, which enforces consistency on the record changes.
[Using the `with_lock_retries` helper method is advised when performing operations on high-traffic tables](../migration_style_guide.md#when-to-use-the-helper-method).
In this case, if either the table or the foreign table is high-traffic, use the helper method.
In the example above, you'd still be able to update records in the `emails` table. However, when you try to update `user_id` with a non-existent value, the constraint causes a database error.
Migration file for adding `NOT VALID` foreign key:
```ruby
class AddNotValidForeignKeyToEmailsUser < Gitlab::Database::Migration[1.0]
class AddNotValidForeignKeyToEmailsUser < Gitlab::Database::Migration[2.0]
def up
# safe to use: it requires short lock on the table since we don't validate the foreign key
add_foreign_key :emails, :users, on_delete: :cascade, validate: false
add_concurrent_foreign_key :emails, :users, on_delete: :cascade, validate: false
end
def down
@ -84,8 +80,14 @@ class AddNotValidForeignKeyToEmailsUser < Gitlab::Database::Migration[1.0]
end
```
Adding a foreign key without validating it is a fast operation. It only requires a
short lock on the table before being able to enforce the constraint on new data.
We do still want to enable lock retries for high traffic and large tables.
`add_concurrent_foreign_key` does this for us, and also checks if the foreign key already exists.
WARNING:
Avoid using the `add_foreign_key` constraint more than once per migration file, unless the source and target tables are identical.
Avoid using `add_foreign_key` or `add_concurrent_foreign_key` constraints more than
once per migration file, unless the source and target tables are identical.
#### Data migration to fix existing records
@ -98,7 +100,7 @@ In case the data volume is higher (>1000 records), it's better to create a backg
Example for cleaning up records in the `emails` table in a database migration:
```ruby
class RemoveRecordsWithoutUserFromEmailsTable < Gitlab::Database::Migration[1.0]
class RemoveRecordsWithoutUserFromEmailsTable < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
class Email < ActiveRecord::Base
@ -121,6 +123,7 @@ end
### Validate the foreign key
Validating the foreign key scans the whole table and makes sure that each relation is correct.
Fortunately, this does not lock the source table (`users`) while running.
NOTE:
When using [background migrations](background_migrations.md), foreign key validation should happen in the next GitLab release.
@ -130,7 +133,7 @@ Migration file for validating the foreign key:
```ruby
# frozen_string_literal: true
class ValidateForeignKeyOnEmailUsers < Gitlab::Database::Migration[1.0]
class ValidateForeignKeyOnEmailUsers < Gitlab::Database::Migration[2.0]
def up
validate_foreign_key :emails, :user_id
end

View File

@ -28,9 +28,80 @@ Guide](migration_style_guide.md) for more information.
Keep in mind that you can only safely add foreign keys to existing tables after
you have removed any orphaned rows. The method `add_concurrent_foreign_key`
does not take care of this so you need to do so manually. See
does not take care of this, so you must do so manually. See
[adding foreign key constraint to an existing column](database/add_foreign_key_to_existing_column.md).
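A minimal sketch of such a cleanup, reusing the `emails`/`users` example from the linked page; the class name and batch size are illustrative:

```ruby
class RemoveOrphanedEmailRecords < Gitlab::Database::Migration[2.0]
  disable_ddl_transaction!

  class Email < ActiveRecord::Base
    include EachBatch

    self.table_name = 'emails'
  end

  def up
    Email.each_batch(of: 1_000) do |batch|
      # Delete rows whose user no longer exists before adding the foreign key.
      batch.where('NOT EXISTS (SELECT 1 FROM users WHERE users.id = emails.user_id)').delete_all
    end
  end

  def down
    # no-op: deleted orphaned rows cannot be restored
  end
end
```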
## Updating Foreign Keys In Migrations
Sometimes a foreign key constraint must be changed, preserving the column
but updating the constraint condition. For example, moving from
`ON DELETE CASCADE` to `ON DELETE SET NULL` or vice-versa.
PostgreSQL does not prevent you from adding overlapping foreign keys. It
honors the most recently added constraint. This allows us to replace foreign keys without
ever losing foreign key protection on a column.
To replace a foreign key:
1. [Add the new foreign key without validation](database/add_foreign_key_to_existing_column.md#prevent-invalid-records)
The new foreign key must be given a different constraint name, so that it can
be added before the old one is removed.
```ruby
class ReplaceFkOnPackagesPackagesProjectId < Gitlab::Database::Migration[2.0]
disable_ddl_transaction!
NEW_CONSTRAINT_NAME = 'fk_new'
def up
add_concurrent_foreign_key(:packages_packages, :projects, column: :project_id, on_delete: :nullify, validate: false, name: NEW_CONSTRAINT_NAME)
end
def down
with_lock_retries do
remove_foreign_key_if_exists(:packages_packages, column: :project_id, on_delete: :nullify, name: NEW_CONSTRAINT_NAME)
end
end
end
```
1. [Validate the new foreign key](database/add_foreign_key_to_existing_column.md#validate-the-foreign-key)
```ruby
class ValidateFkNew < Gitlab::Database::Migration[2.0]
NEW_CONSTRAINT_NAME = 'fk_new'
# foreign key added in <link to MR or path to migration adding new FK>
def up
validate_foreign_key(:packages_packages, name: NEW_CONSTRAINT_NAME)
end
def down
# no-op
end
end
```
1. Remove the old foreign key:
```ruby
class RemoveFkOld < Gitlab::Database::Migration[2.0]
OLD_CONSTRAINT_NAME = 'fk_old'
# new foreign key added in <link to MR or path to migration adding new FK>
# and validated in <link to MR or path to migration validating new FK>
def up
remove_foreign_key_if_exists(:packages_packages, column: :project_id, on_delete: :cascade, name: OLD_CONSTRAINT_NAME)
end
def down
# Validation is skipped here, so if rolled back, this will need to be revalidated in a separate migration
add_concurrent_foreign_key(:packages_packages, :projects, column: :project_id, on_delete: :cascade, validate: false, name: OLD_CONSTRAINT_NAME)
end
end
```
## Cascading Deletes
Every foreign key must define an `ON DELETE` clause, and in 99% of the cases

View File

@ -69,11 +69,11 @@ serve as input to automated conformance tests. It is
> This document attempts to specify Markdown syntax unambiguously. It contains many
> examples with side-by-side Markdown and HTML. These examples are intended to double as conformance tests.
The HTML-rendered versions of the specifications:
Here are the HTML-rendered versions of the specifications:
- [GitLab Flavored Markdown (GLFM) specification](https://gitlab.com/gitlab-org/gitlab/-/blob/master/glfm_specification/output/spec.html), which extends the:
- [GitHub Flavored Markdown (GFM) specification](https://github.github.com/gfm/), which extends the:
- [CommonMark specification](https://spec.commonmark.org/0.30/)
- [GitHub Flavored Markdown (GFM) specification](https://github.github.com/gfm/) (rendered from the [source `spec.txt` for GFM specification](https://github.com/github/cmark-gfm/blob/master/test/spec.txt)), which extends the:
- [CommonMark specification](https://spec.commonmark.org/0.30/) (rendered from the [source `spec.txt` for CommonMark specification](https://github.com/commonmark/commonmark-spec/blob/master/spec.txt))
NOTE:
The creation of the

View File

@ -133,6 +133,41 @@ During import, the tarball is cached in your configured `shared_path` directory.
disk has enough free space to accommodate both the cached tarball and the unpacked
project files on disk.
##### Import is successful, but with a `Total number of not imported relations: XX` message, and issues are not created during the import
If you receive a `Total number of not imported relations: XX` message, and issues
aren't created during the import, check [exceptions_json.log](../administration/logs.md#exceptions_jsonlog).
You might see an error like `N is out of range for ActiveModel::Type::Integer with limit 4 bytes`,
where `N` is the integer exceeding the 4-byte integer limit. If that's the case, you
are likely hitting the issue with rebalancing of the `relative_position` field of issues.
The feature flag that enables automatic rebalancing is enabled on GitLab.com.
We intend to enable it by default on self-managed instances when the issue
[Rebalance issues FF rollout](https://gitlab.com/gitlab-org/gitlab/-/issues/343368)
is resolved.
If the feature is not enabled by default on your GitLab version, run the following
commands in the [Rails console](../administration/operations/rails_console.md) as
a workaround. Replace `ID` with the ID of the project you were trying to import:
```ruby
# Check if the feature is enabled on your instance. If it is, rebalance should work automatically on your instance
Feature.enabled?(:rebalance_issues, Project.find(ID).root_namespace)
# Check the current maximum value of relative_position
Issue.where(project_id: Project.find(ID).root_namespace.all_projects).maximum(:relative_position)
# Enable the `rebalance_issues` feature and check that it was successfully enabled
Feature.enable(:rebalance_issues, Project.find(ID).root_namespace)
Feature.enabled?(:rebalance_issues, Project.find(ID).root_namespace)
# Run the rebalancing process and check if the maximum value of relative_position has changed
Issues::RelativePositionRebalancingService.new(Project.find(ID).root_namespace.all_projects).execute
Issue.where(project_id: Project.find(ID).root_namespace.all_projects).maximum(:relative_position)
```
Repeat the import attempt after that and check if the issues are imported successfully.
### Importing via the Rails console
The last option is to import a project using a Rails console:

View File

@ -470,6 +470,7 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
- If you run external PostgreSQL, particularly AWS RDS,
[check you have a PostgreSQL bug fix](#postgresql-segmentation-fault-issue)
to avoid the database crashing.
- The use of encrypted S3 buckets with storage-specific configuration is no longer supported after [removing support for using `background_upload`](removals.md#background-upload-for-object-storage).
### 14.10.0
@ -747,7 +748,7 @@ for how to proceed.
- [`geo_job_artifact_deleted_events`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/66763)
- [`push_event_payloads`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67299)
- `ci_job_artifacts`:
- [Finalize job_id conversion to `bigint` for `ci_job_artifacts`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67774)
- [Finalize `job_id` conversion to `bigint` for `ci_job_artifacts`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67774)
- [Finalize `ci_job_artifacts` conversion to `bigint`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65601)
If the migrations are executed as part of a no-downtime deployment, there's a risk of failure due to lock conflicts with the application logic, resulting in lock timeout or deadlocks. In each case, these migrations are safe to re-run until successful:

View File

@ -15,7 +15,7 @@ Converting from the same version of CE to EE is not explicitly necessary, and an
you are upgrading the same version (for example, CE 12.1 to EE 12.1), which is **recommended**.
WARNING:
When updating to EE from CE, avoid reverting back to CE if you plan on going to EE again in the
When updating to EE from CE, avoid reverting back to CE if you plan to go to EE again in the
future. Reverting back to CE can cause
[database issues](index.md#500-error-when-accessing-project--settings--repository)
that may require Support intervention.
@ -31,7 +31,7 @@ The steps can be summed up to:
```
The output should be similar to: `Installed: 13.0.4-ce.0`. In that case,
the equivalent Enterprise Edition version will be: `13.0.4-ee.0`. Write this
the equivalent Enterprise Edition version is: `13.0.4-ee.0`. Write this
value down.
**For CentOS/RHEL**
@ -41,7 +41,7 @@ The steps can be summed up to:
```
The output should be similar to: `gitlab-ce-13.0.4-ce.0.el8.x86_64`. In that
case, the equivalent Enterprise Edition version will be:
case, the equivalent Enterprise Edition version is:
`gitlab-ee-13.0.4-ee.0.el8.x86_64`. Write this value down.
1. Add the `gitlab-ee` [Apt or Yum repository](https://packages.gitlab.com/gitlab/gitlab-ee/install):
@ -58,13 +58,13 @@ The steps can be summed up to:
curl --silent "https://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh" | sudo bash
```
The above command will find your OS version and automatically set up the
The above command finds your OS version and automatically sets up the
repository. If you are not comfortable installing the repository through a
piped script, you can first
[check its contents](https://packages.gitlab.com/gitlab/gitlab-ee/install).
1. Next, install the `gitlab-ee` package. Note that this will automatically
uninstall the `gitlab-ce` package on your GitLab server. `reconfigure`
1. Next, install the `gitlab-ee` package. Note that this automatically
uninstalls the `gitlab-ce` package on your GitLab server. `reconfigure`
Omnibus right after the `gitlab-ee` package is installed. **Make sure that you
install the exact same GitLab version**:

View File

@ -74,6 +74,10 @@ This impacts a small subset of object storage providers, including but not limit
If your object storage provider does not support `background_upload`, please [migrate objects to a supported object storage provider](https://docs.gitlab.com/ee/administration/object_storage.html#migrate-objects-to-a-different-object-storage-provider).
Additionally, this also breaks the use of [encrypted S3 buckets](https://docs.gitlab.com/ee/administration/object_storage.html#encrypted-s3-buckets) with [storage-specific configuration form](https://docs.gitlab.com/ee/administration/object_storage.html#storage-specific-configuration).
If your S3 buckets have [SSE-S3 or SSE-KMS encryption enabled](https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html), please [migrate your configuration to use consolidated object storage form](https://docs.gitlab.com/ee/administration/object_storage.html#transition-to-consolidated-form) before upgrading to GitLab 15.0. Otherwise, you may start getting `ETag mismatch` errors during objects upload.
### Container Network and Host Security
WARNING:

View File

@ -41,7 +41,7 @@ cd /home/git/gitlab
sudo -u git -H bundle exec rake gitlab:backup:create RAILS_ENV=production
```
For installations using MySQL, this may require granting "LOCK TABLES"
For installations using MySQL, this may require granting `LOCK TABLES`
privileges to the GitLab user on the database version.
### 1. Stop server

View File

@ -218,7 +218,7 @@ sudo yum install gitlab-ee
## Upgrade Redis HA (using Sentinel) **(PREMIUM SELF)**
Follow [the zero downtime instructions](zero_downtime.md#use-redis-ha-using-sentinel)
Follow [the zero downtime instructions](zero_downtime.md#redis-ha-using-sentinel)
for upgrading your Redis HA cluster.
## Upgrade the Rails nodes (Puma / Sidekiq)

View File

@ -27,8 +27,8 @@ If you meet all the requirements above, follow these instructions in order. Ther
| Deployment type | Description |
| --------------------------------------------------------------- | ------------------------------------------------ |
| [Gitaly or Gitaly Cluster](#gitaly-or-gitaly-cluster) | GitLab CE/EE using HA architecture for Gitaly or Gitaly Cluster |
| [Multi-node / PostgreSQL HA](#use-postgresql-ha) | GitLab CE/EE using HA architecture for PostgreSQL |
| [Multi-node / Redis HA](#use-redis-ha-using-sentinel) | GitLab CE/EE using HA architecture for Redis |
| [Multi-node / PostgreSQL HA](#postgresql) | GitLab CE/EE using HA architecture for PostgreSQL |
| [Multi-node / Redis HA](#redis-ha-using-sentinel) | GitLab CE/EE using HA architecture for Redis |
| [Geo](#geo-deployment) | GitLab EE with Geo enabled |
| [Multi-node / HA with Geo](#multi-node--ha-deployment-with-geo) | GitLab CE/EE on multiple nodes |
@ -260,7 +260,7 @@ node first and run database migrations.
sudo gitlab-ctl reconfigure
```
### Use PostgreSQL HA
### PostgreSQL
Pick a node to be the `Deploy Node`. It can be any application node, but it must be the same
node throughout the process.
@ -277,7 +277,7 @@ node throughout the process.
- To prevent `reconfigure` from automatically running database migrations, ensure that `gitlab_rails['auto_migrate'] = false` is set in `/etc/gitlab/gitlab.rb`.
**Gitaly only nodes**
**Postgres only nodes**
- Update the GitLab package
@ -385,7 +385,7 @@ sure you remove `/etc/gitlab/skip-auto-reconfigure` and revert
setting `gitlab_rails['auto_migrate'] = false` in
`/etc/gitlab/gitlab.rb` after you've completed these steps.
### Use Redis HA (using Sentinel) **(PREMIUM SELF)**
### Redis HA (using Sentinel) **(PREMIUM SELF)**
Package upgrades may involve version updates to the bundled Redis service. On
instances using [Redis for scaling](../administration/redis/index.md),

View File

@ -407,6 +407,11 @@ The API fuzzing behavior can be changed through CI/CD variables.
From GitLab 13.12 and later, the default API fuzzing configuration file is `.gitlab/gitlab-api-fuzzing-config.yml`. In GitLab 14.0 and later, API fuzzing configuration files must be in your repository's
`.gitlab` directory instead of your repository's root.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
### Authentication
Authentication is handled by providing the authentication token as a header or cookie. You can

View File

@ -231,7 +231,12 @@ between GitLab Dependency Scanning and Container Scanning for more details on wh
#### Available CI/CD variables
You can [configure](#customizing-the-container-scanning-settings) analyzers by using the following CI/CD variables:
You can [configure](#customizing-the-container-scanning-settings) analyzers by using the following CI/CD variables.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
| CI/CD Variable | Default | Description | Scanner |
| ------------------------------ | ------------- | ----------- | ------------ |

View File

@ -113,6 +113,11 @@ job. If you include these keys in your own job, you must copy their original con
Use the following variables to configure coverage-guided fuzz testing in your CI/CD pipeline.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results, including
a large number of false positives.
| CI/CD variable | Description |
|---------------------------|---------------------------------------------------------------------------------|
| `COVFUZZ_ADDITIONAL_ARGS` | Arguments passed to `gitlab-cov-fuzz`. Used to customize the behavior of the underlying fuzzing engine. Read the fuzzing engine's documentation for a complete list of arguments. |

View File

@ -622,6 +622,11 @@ To enable Mutual TLS:
These CI/CD variables are specific to DAST. They can be used to customize the behavior of DAST to your requirements.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
| CI/CD variable | Type | Description |
|:-------------------------------------------------|:--------------|:------------------------------|
| `DAST_ADVERTISE_SCAN` | boolean | Set to `true` to add a `Via` header to every request sent, advertising that the request was sent as part of a GitLab DAST scan. [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/334947) in GitLab 14.1. |

View File

@ -587,6 +587,11 @@ gemnasium-dependency_scanning:
Dependency scanning can be [configured](#customizing-the-dependency-scanning-settings)
using environment variables.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
#### Configuring dependency scanning
The following variables allow configuration of global dependency scanning settings.

View File

@ -114,6 +114,11 @@ While you cannot directly customize Auto DevOps, you can [include the Auto DevOp
To enable all GitLab security scanning tools, with the option of customizing settings, add the
GitLab CI/CD templates to your `.gitlab-ci.yml` file.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
To enable Static Application Security Testing, Dependency Scanning, License Scanning, and Secret
Detection, add:

View File

@ -837,6 +837,11 @@ spotbugs-sast:
SAST can be configured using the [`variables`](../../../ci/yaml/index.md#variables) parameter in
`.gitlab-ci.yml`.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
The following example includes the SAST template to override the `SAST_GOSEC_LEVEL`
variable to `2`. The template is [evaluated before](../../../ci/yaml/index.md#include) the pipeline
configuration, so the last mention of the variable takes precedence.

View File

@ -157,6 +157,11 @@ The Secret Detection scan settings can be changed through [CI/CD variables](#ava
by using the
[`variables`](../../../ci/yaml/index.md#variables) parameter in `.gitlab-ci.yml`.
WARNING:
All customization of GitLab security scanning tools should be tested in a merge request before
merging these changes to the default branch. Failure to do so can give unexpected results,
including a large number of false positives.
To override a job definition (for example, to change properties like `variables` or `dependencies`),
declare a job with the same name as the secret detection job to override. Place this new job after the template
inclusion and specify any additional keys under it.

View File

@ -186,3 +186,24 @@ Alternatively, you can mount the certificate file at a different location and sp
This error occurs when the project where you keep your manifests is not public. To fix it, make sure your project is public or your manifest files
are stored in the repository where the agent is configured.
## Failed to perform vulnerability scan on workload: Service account not found
```json
{
"level": "error",
"time": "2022-06-17T15:15:02.665Z",
"msg": "Failed to perform vulnerability scan on workload",
"mod_name": "starboard_vulnerability",
"error": "getting service account by name: gitlab-agent/gitlab-agent: serviceaccounts \"gitlab-agent\" not found"
}
```
The GitLab agent for Kubernetes has been able to run [vulnerability scans](vulnerabilities.md) since GitLab 15.0. However, the agent
cannot detect the service account name. Refer to [issue 361972](https://gitlab.com/gitlab-org/gitlab/-/issues/361972) for more
information. As a workaround, you can pass the `--set serviceAccount.name=gitlab-agent` parameter
to the Helm command when [installing the agent](install/#install-the-agent-in-the-cluster), or manually create a service account.
```shell
kubectl create serviceaccount gitlab-agent -n gitlab-agent
```

View File

@ -63,7 +63,7 @@ level according to the available options:
- `debug`
The log level defaults to `info`. You can change it by using a top-level `observability`
section in the configuration file, for example:
section in the [agent configuration file](install/index.md#configure-your-agent), for example setting the level to `debug`:
```yaml
observability:
@ -71,6 +71,14 @@ observability:
level: debug
```
Commit the configuration changes and inspect the agent service logs:
```shell
kubectl logs -f -l=app=gitlab-agent -n gitlab-agent
```
For more information about debugging, see [troubleshooting documentation](troubleshooting.md).
## Reset the agent token
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/327152) in GitLab 14.9.

View File

@ -10,5 +10,5 @@
skip_running_conformance_static_tests: false # NOT YET SUPPORTED
skip_running_conformance_wysiwyg_tests: false # NOT YET SUPPORTED
skip_running_snapshot_static_html_tests: false # NOT YET SUPPORTED
skip_running_snapshot_wysiwyg_html_tests: false # NOT YET SUPPORTED
skip_running_snapshot_prosemirror_json_tests: false # NOT YET SUPPORTED
skip_running_snapshot_wysiwyg_html_tests: false
skip_running_snapshot_prosemirror_json_tests: false

View File

@ -242,6 +242,7 @@ module API
mount ::API::MergeRequestApprovals
mount ::API::MergeRequestDiffs
mount ::API::MergeRequests
mount ::API::Metadata
mount ::API::Metrics::Dashboard::Annotations
mount ::API::Metrics::UserStarredDashboards
mount ::API::Namespaces

39
lib/api/metadata.rb Normal file
View File

@ -0,0 +1,39 @@
# frozen_string_literal: true
module API
class Metadata < ::API::Base
helpers ::API::Helpers::GraphqlHelpers
include APIGuard
allow_access_with_scope :read_user, if: -> (request) { request.get? || request.head? }
before { authenticate! }
feature_category :not_owned # rubocop:todo Gitlab/AvoidFeatureCategoryNotOwned
METADATA_QUERY = <<~EOF
{
metadata {
version
revision
kas {
enabled
externalUrl
version
}
}
}
EOF
desc 'Get the metadata information of the GitLab instance.' do
detail 'This feature was introduced in GitLab 15.1.'
end
get '/metadata' do
run_graphql!(
query: METADATA_QUERY,
context: { current_user: current_user },
transform: ->(result) { result.dig('data', 'metadata') }
)
end
end
end

View File

@ -45,7 +45,8 @@ module Gitlab
search_rate_limit_unauthenticated: { threshold: -> { application_settings.search_rate_limit_unauthenticated }, interval: 1.minute },
gitlab_shell_operation: { threshold: 600, interval: 1.minute },
pipelines_create: { threshold: -> { application_settings.pipeline_limit_per_project_user_sha }, interval: 1.minute },
temporary_email_failure: { threshold: 50, interval: 1.day }
temporary_email_failure: { threshold: 50, interval: 1.day },
project_testing_integration: { threshold: 5, interval: 1.minute }
}.freeze
end
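
For context, a sketch of how a caller typically guards an action with this limit; the scope shown is an assumption:

```ruby
if Gitlab::ApplicationRateLimiter.throttled?(:project_testing_integration, scope: [current_user, project])
  render plain: _('This endpoint has been requested too many times. Try again later.'),
         status: :too_many_requests
else
  # run the integration test
end
```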

View File

@ -1337,6 +1337,11 @@ msgstr ""
msgid "0 bytes"
msgstr ""
msgid "1 Code quality finding"
msgid_plural "%d Code quality findings"
msgstr[0] ""
msgstr[1] ""
msgid "1 Day"
msgid_plural "%d Days"
msgstr[0] ""

View File

@ -28,6 +28,7 @@ Disallow: /-/ide/
Disallow: /-/experiment
# Restrict allowed routes to avoid very ugly search results
Allow: /users/sign_in
Allow: /users/sign_up
Allow: /users/*/snippets
# Generic resource routes like new, edit, raw

View File

@ -1,117 +1,7 @@
import fs from 'fs';
import { DOMSerializer } from 'prosemirror-model';
import jsYaml from 'js-yaml';
// TODO: DRY up duplication with spec/frontend/content_editor/services/markdown_serializer_spec.js
// See https://gitlab.com/groups/gitlab-org/-/epics/7719#plan
import Blockquote from '~/content_editor/extensions/blockquote';
import Bold from '~/content_editor/extensions/bold';
import BulletList from '~/content_editor/extensions/bullet_list';
import Code from '~/content_editor/extensions/code';
import CodeBlockHighlight from '~/content_editor/extensions/code_block_highlight';
import DescriptionItem from '~/content_editor/extensions/description_item';
import DescriptionList from '~/content_editor/extensions/description_list';
import Details from '~/content_editor/extensions/details';
import DetailsContent from '~/content_editor/extensions/details_content';
import Division from '~/content_editor/extensions/division';
import Emoji from '~/content_editor/extensions/emoji';
import Figure from '~/content_editor/extensions/figure';
import FigureCaption from '~/content_editor/extensions/figure_caption';
import FootnoteDefinition from '~/content_editor/extensions/footnote_definition';
import FootnoteReference from '~/content_editor/extensions/footnote_reference';
import FootnotesSection from '~/content_editor/extensions/footnotes_section';
import HardBreak from '~/content_editor/extensions/hard_break';
import Heading from '~/content_editor/extensions/heading';
import HorizontalRule from '~/content_editor/extensions/horizontal_rule';
import Image from '~/content_editor/extensions/image';
import InlineDiff from '~/content_editor/extensions/inline_diff';
import Italic from '~/content_editor/extensions/italic';
import Link from '~/content_editor/extensions/link';
import ListItem from '~/content_editor/extensions/list_item';
import OrderedList from '~/content_editor/extensions/ordered_list';
import Strike from '~/content_editor/extensions/strike';
import Table from '~/content_editor/extensions/table';
import TableCell from '~/content_editor/extensions/table_cell';
import TableHeader from '~/content_editor/extensions/table_header';
import TableRow from '~/content_editor/extensions/table_row';
import TaskItem from '~/content_editor/extensions/task_item';
import TaskList from '~/content_editor/extensions/task_list';
import createMarkdownDeserializer from '~/content_editor/services/remark_markdown_deserializer';
import { createTestEditor } from 'jest/content_editor/test_utils';
import { setTestTimeout } from 'jest/__helpers__/timeout';
const tiptapEditor = createTestEditor({
extensions: [
Blockquote,
Bold,
BulletList,
Code,
CodeBlockHighlight,
DescriptionItem,
DescriptionList,
Details,
DetailsContent,
Division,
Emoji,
FootnoteDefinition,
FootnoteReference,
FootnotesSection,
Figure,
FigureCaption,
HardBreak,
Heading,
HorizontalRule,
Image,
InlineDiff,
Italic,
Link,
ListItem,
OrderedList,
Strike,
Table,
TableCell,
TableHeader,
TableRow,
TaskItem,
TaskList,
],
});
async function renderMarkdownToHTMLAndJSON(markdown, schema, deserializer) {
let prosemirrorDocument;
try {
const { document } = await deserializer.deserialize({ schema, content: markdown });
prosemirrorDocument = document;
} catch (e) {
const errorMsg = `Error - check implementation:\n${e.message}`;
return {
html: errorMsg,
json: errorMsg,
};
}
const documentFragment = DOMSerializer.fromSchema(schema).serializeFragment(
prosemirrorDocument.content,
);
const htmlString = documentFragment.firstChild.outerHTML;
const json = prosemirrorDocument.toJSON();
const jsonString = JSON.stringify(json, null, 2);
return { html: htmlString, json: jsonString };
}
function renderHtmlAndJsonForAllExamples(markdownExamples) {
const { schema } = tiptapEditor;
const deserializer = createMarkdownDeserializer();
const exampleNames = Object.keys(markdownExamples);
return exampleNames.reduce(async (promisedExamples, exampleName) => {
const markdown = markdownExamples[exampleName];
const htmlAndJson = await renderMarkdownToHTMLAndJSON(markdown, schema, deserializer);
const examples = await promisedExamples;
examples[exampleName] = htmlAndJson;
return examples;
}, Promise.resolve({}));
}
import { renderHtmlAndJsonForAllExamples } from 'jest/content_editor/render_html_and_json_for_all_examples';
/* eslint-disable no-undef */
jest.mock('~/emoji');

View File

@ -138,7 +138,7 @@ RSpec.describe Projects::Settings::IntegrationsController do
end
end
context 'when unsuccessful' do
context 'when unsuccessful', :clean_gitlab_redis_rate_limiting do
it 'returns an error response when the integration test fails' do
stub_request(:get, 'http://example.com/rest/api/2/serverInfo')
.to_return(status: 404)
@ -184,6 +184,26 @@ RSpec.describe Projects::Settings::IntegrationsController do
end
end
end
context 'when the endpoint receives requests above the limit', :freeze_time, :clean_gitlab_redis_rate_limiting do
before do
allow(Gitlab::ApplicationRateLimiter).to receive(:rate_limits)
.and_return(project_testing_integration: { threshold: 1, interval: 1.minute })
end
it 'prevents making test requests' do
stub_jira_integration_test
expect_next_instance_of(::Integrations::Test::ProjectService) do |service|
expect(service).to receive(:execute).and_return(http_status: 200)
end
2.times { post :test, params: project_params(service: integration_params) }
expect(response.body).to eq(_('This endpoint has been requested too many times. Try again later.'))
expect(response).to have_gitlab_http_status(:too_many_requests)
end
end
end
describe 'PUT #update' do

View File

@ -0,0 +1,26 @@
{
"type": "object",
"required": [
"version",
"revision",
"kas"
],
"properties": {
"version": { "type": "string" },
"revision": { "type": "string" },
"kas": {
"type": "object",
"required": [
"enabled",
"externalUrl",
"version"
],
"properties": {
"enabled": { "type": "boolean" },
"externalUrl": { "type": ["string", "null"] },
"version": { "type": ["string", "null"] }
}
}
},
"additionalProperties": false
}
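
A sketch of how such a fixture schema is typically exercised from a request spec, assuming GitLab's `match_response_schema` matcher and that the fixture lives under `spec/fixtures/api/schemas/`:

```ruby
it 'returns instance metadata' do
  get api('/metadata', personal_access_token: personal_access_token)

  expect(response).to have_gitlab_http_status(:ok)
  # Path is relative to spec/fixtures/api/schemas/ (an assumption).
  expect(response).to match_response_schema('metadata')
end
```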

View File

@ -1492,7 +1492,7 @@
</td></tr></table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
Cannot read properties of undefined (reading 'className')
04_06__leaf_blocks__html_blocks__002:
canonical: |
<table>
@ -1513,8 +1513,9 @@
</table>
<p data-sourcepos="9:1-9:5" dir="auto">okay.</p>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><td colspan="1" rowspan="1"><p>
hi
</p></td></tr></tbody></table>
04_06__leaf_blocks__html_blocks__003:
canonical: |2
<div>
@ -1627,8 +1628,9 @@
foo
</td></tr></table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><td colspan="1" rowspan="1"><p>
foo
</p></td></tr></tbody></table>
04_06__leaf_blocks__html_blocks__014:
canonical: |
<div></div>
@ -1850,7 +1852,7 @@
<p data-sourcepos="2:1-2:5" dir="auto"><em>baz</em></p>
wysiwyg: |-
Error - check implementation:
Cannot destructure property 'className' of 'hastNode.properties' as it is undefined.
Cannot read properties of undefined (reading 'wrapper')
04_06__leaf_blocks__html_blocks__030:
canonical: |
<script>
@ -1889,7 +1891,7 @@
<p data-sourcepos="6:1-6:4" dir="auto">okay</p>
wysiwyg: |-
Error - check implementation:
Cannot destructure property 'className' of 'hastNode.properties' as it is undefined.
Cannot read properties of undefined (reading 'wrapper')
04_06__leaf_blocks__html_blocks__033:
canonical: |
<!DOCTYPE html>
@ -1911,7 +1913,7 @@
}
]]>
<p>okay</p>
static: |2-
static: |-
&lt;![CDATA[
function matchwo(a,b)
{
@ -2038,8 +2040,9 @@
</tr>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><td colspan="1" rowspan="1"><p>
Hi
</p></td></tr></tbody></table>
04_06__leaf_blocks__html_blocks__043:
canonical: |
<table>
@ -2062,8 +2065,9 @@
</tr>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<pre class="content-editor-code-block undefined code highlight"><code>&lt;td&gt;
Hi
&lt;/td&gt;</code></pre>
04_07__leaf_blocks__link_reference_definitions__001:
canonical: |
<p><a href="/url" title="title">foo</a></p>
@ -2448,8 +2452,7 @@
</tbody>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>foo</p></th><th colspan="1" rowspan="1"><p>bar</p></th></tr><tr><td colspan="1" rowspan="1"><p>baz</p></td><td colspan="1" rowspan="1"><p>bim</p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__002:
canonical: |
<table>
@ -2482,8 +2485,7 @@
</tbody>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>abc</p></th><th colspan="1" rowspan="1"><p>defghi</p></th></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p>baz</p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__003:
canonical: |
<table>
@ -2518,8 +2520,7 @@
</tbody>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>f|oo</p></th></tr><tr><td colspan="1" rowspan="1"><p>b <code>|</code> az</p></td></tr><tr><td colspan="1" rowspan="1"><p>b <strong>|</strong> im</p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__004:
canonical: |
<table>
@ -2558,8 +2559,7 @@
<p data-sourcepos="4:3-4:5">bar</p>
</blockquote>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>abc</p></th><th colspan="1" rowspan="1"><p>def</p></th></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p>baz</p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__005:
canonical: |
<table>
@ -2602,8 +2602,7 @@
</table>
<p data-sourcepos="6:1-6:3" dir="auto">bar</p>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>abc</p></th><th colspan="1" rowspan="1"><p>def</p></th></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p>baz</p></td></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p></p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__006:
canonical: |
<p>| abc | def |
@ -2657,8 +2656,7 @@
</tbody>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>abc</p></th><th colspan="1" rowspan="1"><p>def</p></th></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p></p></td></tr><tr><td colspan="1" rowspan="1"><p>bar</p></td><td colspan="1" rowspan="1"><p>baz</p></td></tr></tbody></table>
04_10__leaf_blocks__tables_extension__008:
canonical: |
<table>
@ -2679,8 +2677,7 @@
</thead>
</table>
wysiwyg: |-
Error - check implementation:
Hast node of type "table" not supported by this converter. Please, provide an specification.
<table><tbody><tr><th colspan="1" rowspan="1"><p>abc</p></th><th colspan="1" rowspan="1"><p>def</p></th></tr></tbody></table>
05_01__container_blocks__block_quotes__001:
canonical: |
<blockquote>
@ -4032,8 +4029,8 @@
baz</li>
</ul>
wysiwyg: |-
<ul bullet="*"><li><p></p><h1>Foo</h1></li><li><p></p><h2>Bar
baz</h2></li></ul>
<ul bullet="*"><li><p></p><h1>Foo</h1></li><li><p></p><h2>Bar</h2><p>
baz</p></li></ul>
05_02__container_blocks__list_items__motivation__task_list_items_extension__lists__049:
canonical: |
<ul>
@ -4080,7 +4077,8 @@
<li data-sourcepos="9:1-9:5">baz</li>
</ul>
wysiwyg: |-
<ul bullet="*"><li><p>baz</p></li></ul>
Error - check implementation:
Cannot read properties of undefined (reading 'start')
05_02__container_blocks__list_items__motivation__task_list_items_extension__lists__050:
canonical: |
<ol>
@ -6970,8 +6968,7 @@
static: |-
<p data-sourcepos="1:1-1:23" dir="auto">&lt;<a href="mailto:foo+@bar.example.com">foo+@bar.example.com</a>&gt;</p>
wysiwyg: |-
Error - check implementation:
Cannot read properties of undefined (reading 'end')
<p>&lt;<a target="_blank" rel="noopener noreferrer nofollow" href="mailto:foo+@bar.example.com">foo+@bar.example.com</a>&gt;</p>
06_09__inlines__autolinks__014:
canonical: |
<p>&lt;&gt;</p>
@ -7466,14 +7463,13 @@
07_01__gitlab_specific_markdown__footnotes__001:
canonical: ""
static: |-
<p data-sourcepos="1:1-1:27" dir="auto">footnote reference tag <sup class="footnote-ref"><a href="#fn-1-2118" id="fnref-1-2118" data-footnote-ref>1</a></sup></p>
<p data-sourcepos="1:1-1:27" dir="auto">footnote reference tag <sup class="footnote-ref"><a href="#fn-1-5616" id="fnref-1-5616" data-footnote-ref>1</a></sup></p>
<section data-footnotes class="footnotes">
<ol>
<li id="fn-1-2118">
<p data-sourcepos="3:7-3:19">footnote text <a href="#fnref-1-2118" data-footnote-backref aria-label="Back to content" class="footnote-backref"><gl-emoji title="leftwards arrow with hook" data-name="leftwards_arrow_with_hook" data-unicode-version="1.1">↩</gl-emoji></a></p>
<li id="fn-1-5616">
<p data-sourcepos="3:7-3:19">footnote text <a href="#fnref-1-5616" data-footnote-backref aria-label="Back to content" class="footnote-backref"><gl-emoji title="leftwards arrow with hook" data-name="leftwards_arrow_with_hook" data-unicode-version="1.1">↩</gl-emoji></a></p>
</li>
</ol>
</section>
wysiwyg: |-
Error - check implementation:
Hast node of type "sup" not supported by this converter. Please, provide an specification.
<p>footnote reference tag <sup identifier="1">1</sup></p>

File diff suppressed because it is too large Load Diff

View File

@ -2,3 +2,4 @@ export * from './to_have_sprite_icon';
export * from './to_have_tracking_attributes';
export * from './to_match_interpolated_text';
export * from './to_validate_json_schema';
export * from './to_match_expected_for_markdown';

View File

@ -0,0 +1,60 @@
export function toMatchExpectedForMarkdown(
received,
deserializationTarget,
name,
markdown,
errMsg,
expected,
) {
const options = {
comment: `Markdown deserialization to ${deserializationTarget}`,
isNot: this.isNot,
promise: this.promise,
};
const EXPECTED_LABEL = 'Expected';
const RECEIVED_LABEL = 'Received';
const isExpand = (expand) => expand !== false;
const forMarkdownName = `for Markdown example '${name}':\n${markdown}`;
const matcherName = `toMatchExpected${
deserializationTarget === 'HTML' ? 'Html' : 'Json'
}ForMarkdown`;
let pass;
// If both expected and received are deserialization errors, force pass = true,
// because the actual error messages can vary across environments and cause
// false failures (e.g. due to jest '--coverage' being passed in CI).
const errMsgRegExp = new RegExp(errMsg);
if (errMsgRegExp.test(expected) && errMsgRegExp.test(received)) {
pass = true;
} else {
pass = received === expected;
}
const message = pass
? () =>
// eslint-disable-next-line prefer-template
this.utils.matcherHint(matcherName, undefined, undefined, options) +
'\n\n' +
`Expected ${deserializationTarget} to NOT match:\n${expected}\n\n${forMarkdownName}`
: () => {
return (
// eslint-disable-next-line prefer-template
this.utils.matcherHint(matcherName, undefined, undefined, options) +
'\n\n' +
this.utils.printDiffOrStringify(
expected,
received,
EXPECTED_LABEL,
RECEIVED_LABEL,
isExpand(this.expand),
) +
`\n\n${forMarkdownName}`
);
};
return { actual: received, expected, message, name: matcherName, pass };
}
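For context, a custom matcher like this only takes effect once it is registered with Jest. A minimal sketch of that wiring, assuming the matchers index above is pulled into a Jest setup file (the import path and setup location are assumptions, not part of this commit):
// Hypothetical Jest setup wiring; the module path is an assumption.
import * as customMatchers from 'test_helpers/matchers';
expect.extend(customMatchers);
// A spec can then assert, for example:
// expect(actualHtml).toMatchExpectedForMarkdown('HTML', name, markdown, errMsg, expectedHtml);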

View File

@@ -0,0 +1,23 @@
import path from 'path';
import { describeMarkdownSnapshots } from 'jest/content_editor/markdown_snapshot_spec_helper';
jest.mock('~/emoji');
const glfmSpecificationDir = path.join(__dirname, '..', '..', '..', 'glfm_specification');
const glfmExampleSnapshotsDir = path.join(
__dirname,
'..',
'..',
'fixtures',
'glfm',
'example_snapshots',
);
// See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#markdown-snapshot-testing
// for documentation on this spec.
describeMarkdownSnapshots(
'CE markdown snapshots in ContentEditor',
glfmSpecificationDir,
glfmExampleSnapshotsDir,
);

View File

@@ -0,0 +1,105 @@
// See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#markdown-snapshot-testing
// for documentation on this spec.
import fs from 'fs';
import path from 'path';
import jsYaml from 'js-yaml';
import { pick } from 'lodash';
import {
IMPLEMENTATION_ERROR_MSG,
renderHtmlAndJsonForAllExamples,
} from './render_html_and_json_for_all_examples';
const filterExamples = (examples) => {
const focusedMarkdownExamples = process.env.FOCUSED_MARKDOWN_EXAMPLES?.split(',') || [];
if (!focusedMarkdownExamples.length) {
return examples;
}
return pick(examples, focusedMarkdownExamples);
};
const loadExamples = (dir, fileName) => {
const yaml = fs.readFileSync(path.join(dir, fileName));
const examples = jsYaml.safeLoad(yaml, {});
return filterExamples(examples);
};
// eslint-disable-next-line jest/no-export
export const describeMarkdownSnapshots = (
description,
glfmSpecificationDir,
glfmExampleSnapshotsDir,
) => {
let actualHtmlAndJsonExamples;
let skipRunningSnapshotWysiwygHtmlTests;
let skipRunningSnapshotProsemirrorJsonTests;
const exampleStatuses = loadExamples(
path.join(glfmSpecificationDir, 'input', 'gitlab_flavored_markdown'),
'glfm_example_status.yml',
);
const markdownExamples = loadExamples(glfmExampleSnapshotsDir, 'markdown.yml');
const expectedHtmlExamples = loadExamples(glfmExampleSnapshotsDir, 'html.yml');
const expectedProseMirrorJsonExamples = loadExamples(
glfmExampleSnapshotsDir,
'prosemirror_json.yml',
);
beforeAll(async () => {
return renderHtmlAndJsonForAllExamples(markdownExamples).then((examples) => {
actualHtmlAndJsonExamples = examples;
});
});
describe(description, () => {
const exampleNames = Object.keys(markdownExamples);
describe.each(exampleNames)('%s', (name) => {
const exampleNamePrefix = 'verifies conversion of GLFM to';
skipRunningSnapshotWysiwygHtmlTests =
exampleStatuses[name]?.skip_running_snapshot_wysiwyg_html_tests;
skipRunningSnapshotProsemirrorJsonTests =
exampleStatuses[name]?.skip_running_snapshot_prosemirror_json_tests;
const markdown = markdownExamples[name];
if (skipRunningSnapshotWysiwygHtmlTests) {
it.todo(`${exampleNamePrefix} HTML: ${skipRunningSnapshotWysiwygHtmlTests}`);
} else {
it(`${exampleNamePrefix} HTML`, async () => {
const expectedHtml = expectedHtmlExamples[name].wysiwyg;
const { html: actualHtml } = actualHtmlAndJsonExamples[name];
// noinspection JSUnresolvedFunction (required to avoid RubyMine type inspection warning, because custom matchers auto-imported via Jest test setup are not automatically resolved - see https://youtrack.jetbrains.com/issue/WEB-42350/matcher-for-jest-is-not-recognized-but-it-is-runable)
expect(actualHtml).toMatchExpectedForMarkdown(
'HTML',
name,
markdown,
IMPLEMENTATION_ERROR_MSG,
expectedHtml,
);
});
}
if (skipRunningSnapshotProsemirrorJsonTests) {
it.todo(
`${exampleNamePrefix} ProseMirror JSON: ${skipRunningSnapshotProsemirrorJsonTests}`,
);
} else {
it(`${exampleNamePrefix} ProseMirror JSON`, async () => {
const expectedJson = expectedProseMirrorJsonExamples[name];
const { json: actualJson } = actualHtmlAndJsonExamples[name];
// noinspection JSUnresolvedFunction
expect(actualJson).toMatchExpectedForMarkdown(
'JSON',
name,
markdown,
IMPLEMENTATION_ERROR_MSG,
expectedJson,
);
});
}
});
});
};
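One practical detail: filterExamples() above honors the FOCUSED_MARKDOWN_EXAMPLES environment variable, so a snapshot run can be narrowed to named examples. A hedged sketch of such an invocation (the variable is read by the helper; the exact jest command line is an assumption):
// FOCUSED_MARKDOWN_EXAMPLES=06_09__inlines__autolinks__014 \
//   yarn jest spec/frontend/content_editor/markdown_snapshot_spec.js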

View File

@@ -0,0 +1,113 @@
import { DOMSerializer } from 'prosemirror-model';
// TODO: DRY up duplication with spec/frontend/content_editor/services/markdown_serializer_spec.js
// See https://gitlab.com/groups/gitlab-org/-/epics/7719#plan
import Blockquote from '~/content_editor/extensions/blockquote';
import Bold from '~/content_editor/extensions/bold';
import BulletList from '~/content_editor/extensions/bullet_list';
import Code from '~/content_editor/extensions/code';
import CodeBlockHighlight from '~/content_editor/extensions/code_block_highlight';
import DescriptionItem from '~/content_editor/extensions/description_item';
import DescriptionList from '~/content_editor/extensions/description_list';
import Details from '~/content_editor/extensions/details';
import DetailsContent from '~/content_editor/extensions/details_content';
import Division from '~/content_editor/extensions/division';
import Emoji from '~/content_editor/extensions/emoji';
import Figure from '~/content_editor/extensions/figure';
import FigureCaption from '~/content_editor/extensions/figure_caption';
import FootnoteDefinition from '~/content_editor/extensions/footnote_definition';
import FootnoteReference from '~/content_editor/extensions/footnote_reference';
import FootnotesSection from '~/content_editor/extensions/footnotes_section';
import HardBreak from '~/content_editor/extensions/hard_break';
import Heading from '~/content_editor/extensions/heading';
import HorizontalRule from '~/content_editor/extensions/horizontal_rule';
import Image from '~/content_editor/extensions/image';
import InlineDiff from '~/content_editor/extensions/inline_diff';
import Italic from '~/content_editor/extensions/italic';
import Link from '~/content_editor/extensions/link';
import ListItem from '~/content_editor/extensions/list_item';
import OrderedList from '~/content_editor/extensions/ordered_list';
import Strike from '~/content_editor/extensions/strike';
import Table from '~/content_editor/extensions/table';
import TableCell from '~/content_editor/extensions/table_cell';
import TableHeader from '~/content_editor/extensions/table_header';
import TableRow from '~/content_editor/extensions/table_row';
import TaskItem from '~/content_editor/extensions/task_item';
import TaskList from '~/content_editor/extensions/task_list';
import createMarkdownDeserializer from '~/content_editor/services/remark_markdown_deserializer';
import { createTestEditor } from 'jest/content_editor/test_utils';
const tiptapEditor = createTestEditor({
extensions: [
Blockquote,
Bold,
BulletList,
Code,
CodeBlockHighlight,
DescriptionItem,
DescriptionList,
Details,
DetailsContent,
Division,
Emoji,
FootnoteDefinition,
FootnoteReference,
FootnotesSection,
Figure,
FigureCaption,
HardBreak,
Heading,
HorizontalRule,
Image,
InlineDiff,
Italic,
Link,
ListItem,
OrderedList,
Strike,
Table,
TableCell,
TableHeader,
TableRow,
TaskItem,
TaskList,
],
});
export const IMPLEMENTATION_ERROR_MSG = 'Error - check implementation';
async function renderMarkdownToHTMLAndJSON(markdown, schema, deserializer) {
let prosemirrorDocument;
try {
const { document } = await deserializer.deserialize({ schema, content: markdown });
prosemirrorDocument = document;
} catch (e) {
const errorMsg = `${IMPLEMENTATION_ERROR_MSG}:\n${e.message}`;
return {
html: errorMsg,
json: errorMsg,
};
}
const documentFragment = DOMSerializer.fromSchema(schema).serializeFragment(
prosemirrorDocument.content,
);
const htmlString = documentFragment.firstChild.outerHTML;
const json = prosemirrorDocument.toJSON();
const jsonString = JSON.stringify(json, null, 2);
return { html: htmlString, json: jsonString };
}
export function renderHtmlAndJsonForAllExamples(markdownExamples) {
const { schema } = tiptapEditor;
const deserializer = createMarkdownDeserializer();
const exampleNames = Object.keys(markdownExamples);
return exampleNames.reduce(async (promisedExamples, exampleName) => {
const markdown = markdownExamples[exampleName];
const htmlAndJson = await renderMarkdownToHTMLAndJSON(markdown, schema, deserializer);
const examples = await promisedExamples;
examples[exampleName] = htmlAndJson;
return examples;
}, Promise.resolve({}));
}
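A note on the reduce idiom above: reduce invokes the async callback for every example synchronously, so each deserialization starts eagerly; awaiting the accumulator only serializes the order in which results are merged. A standalone sketch of the same pattern:
// Standalone sketch of the async-reduce pattern used above: work starts
// eagerly for every item, while results are merged in input order.
async function collectInOrder(names, render) {
  return names.reduce(async (promisedAcc, name) => {
    const result = await render(name); // may run before earlier merges finish
    const acc = await promisedAcc; // enforces merge order, not start order
    acc[name] = result;
    return acc;
  }, Promise.resolve({}));
}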

View File

@@ -0,0 +1,66 @@
import { GlIcon } from '@gitlab/ui';
import { mountExtended, shallowMountExtended } from 'helpers/vue_test_utils_helper';
import DiffCodeQuality from '~/diffs/components/diff_code_quality.vue';
import { SEVERITY_CLASSES, SEVERITY_ICONS } from '~/reports/codequality_report/constants';
import { multipleFindingsArr } from '../mock_data/diff_code_quality';
let wrapper;
const findIcon = () => wrapper.findComponent(GlIcon);
describe('DiffCodeQuality', () => {
afterEach(() => {
wrapper.destroy();
});
const createWrapper = (codeQuality, mountFunction = mountExtended) => {
return mountFunction(DiffCodeQuality, {
propsData: {
expandedLines: [],
line: 1,
codeQuality,
},
});
};
it('hides details and emits a hideCodeQualityFindings event on close click', async () => {
wrapper = createWrapper(multipleFindingsArr);
expect(wrapper.findByTestId('diff-codequality').exists()).toBe(true);
await wrapper.findByTestId('diff-codequality-close').trigger('click');
expect(wrapper.emitted('hideCodeQualityFindings').length).toBe(1);
expect(wrapper.emitted().hideCodeQualityFindings[0][0]).toBe(wrapper.props('line'));
});
it('renders the correct number of list items for the codequality array, with their descriptions', async () => {
wrapper = createWrapper(multipleFindingsArr);
const listItems = wrapper.findAll('li');
expect(wrapper.findAll('li').length).toBe(3);
listItems.wrappers.map((e, i) => {
return expect(e.text()).toEqual(multipleFindingsArr[i].description);
});
});
it.each`
severity
${'info'}
${'minor'}
${'major'}
${'critical'}
${'blocker'}
${'unknown'}
`('shows icon for $severity degradation', ({ severity }) => {
wrapper = createWrapper([{ severity }], shallowMountExtended);
expect(findIcon().exists()).toBe(true);
expect(findIcon().attributes()).toMatchObject({
class: `codequality-severity-icon ${SEVERITY_CLASSES[severity]}`,
name: SEVERITY_ICONS[severity],
size: '12',
});
});
});

View File

@@ -1,7 +1,9 @@
import { shallowMount } from '@vue/test-utils';
import Vue from 'vue';
import Vue, { nextTick } from 'vue';
import Vuex from 'vuex';
import DiffView from '~/diffs/components/diff_view.vue';
import DiffCodeQuality from '~/diffs/components/diff_code_quality.vue';
import { diffCodeQuality } from '../mock_data/diff_code_quality';
describe('DiffView', () => {
const DiffExpansionCell = { template: `<div/>` };
@@ -12,7 +14,7 @@ describe('DiffView', () => {
const setSelectedCommentPosition = jest.fn();
const getDiffRow = (wrapper) => wrapper.findComponent(DiffRow).vm;
const createWrapper = (props) => {
const createWrapper = (props, provide = {}) => {
Vue.use(Vuex);
const batchComments = {
@@ -46,9 +48,33 @@
...props,
};
const stubs = { DiffExpansionCell, DiffRow, DiffCommentCell, DraftNote };
return shallowMount(DiffView, { propsData, store, stubs });
return shallowMount(DiffView, { propsData, store, stubs, provide });
};
it('does not render a codeQuality diff view when there is no finding', () => {
const wrapper = createWrapper();
expect(wrapper.findComponent(DiffCodeQuality).exists()).toBe(false);
});
it('renders a codeQuality diff view with the correct props when there is a finding and the refactorCodeQualityInlineFindings flag is true', async () => {
const wrapper = createWrapper(diffCodeQuality, {
glFeatures: { refactorCodeQualityInlineFindings: true },
});
wrapper.findComponent(DiffRow).vm.$emit('toggleCodeQualityFindings', 2);
await nextTick();
expect(wrapper.findComponent(DiffCodeQuality).exists()).toBe(true);
expect(wrapper.findComponent(DiffCodeQuality).props().codeQuality.length).not.toBe(0);
});
it('does not render a codeQuality diff view when there is a finding and the refactorCodeQualityInlineFindings flag is false', async () => {
const wrapper = createWrapper(diffCodeQuality, {
glFeatures: { refactorCodeQualityInlineFindings: false },
});
wrapper.findComponent(DiffRow).vm.$emit('toggleCodeQualityFindings', 2);
await nextTick();
expect(wrapper.findComponent(DiffCodeQuality).exists()).toBe(false);
});
it.each`
type | side | container | sides | total
${'parallel'} | ${'left'} | ${'.old'} | ${{ left: { lineDraft: {}, renderDiscussion: true }, right: { lineDraft: {}, renderDiscussion: true } }} | ${2}

View File

@@ -0,0 +1,62 @@
export const multipleFindingsArr = [
{
severity: 'minor',
description: 'Unexpected Debugger Statement.',
line: 2,
},
{
severity: 'major',
description:
'Function `aVeryLongFunction` has 52 lines of code (exceeds 25 allowed). Consider refactoring.',
line: 3,
},
{
severity: 'minor',
description: 'Arrow function has too many statements (52). Maximum allowed is 30.',
line: 3,
},
];
export const multipleFindings = {
filePath: 'index.js',
codequality: multipleFindingsArr,
};
export const singularFinding = {
filePath: 'index.js',
codequality: [multipleFindingsArr[0]],
};
export const diffCodeQuality = {
diffFile: { file_hash: '123' },
diffLines: [
{
left: {
type: 'old',
old_line: 1,
new_line: null,
codequality: [],
lineDraft: {},
},
},
{
left: {
type: null,
old_line: 2,
new_line: 1,
codequality: [],
lineDraft: {},
},
},
{
left: {
type: 'new',
old_line: null,
new_line: 2,
codequality: [multipleFindingsArr[0]],
lineDraft: {},
},
},
],
};
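// (The single finding above sits on new_line 2 — the same line number the
// DiffView spec emits via toggleCodeQualityFindings when exercising this
// mock data.)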

View File

@@ -0,0 +1,110 @@
import { GlFormCheckbox, GlFormCheckboxGroup } from '@gitlab/ui';
import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import ChecklistWidget from '~/pipeline_wizard/components/widgets/checklist.vue';
describe('Pipeline Wizard - Checklist Widget', () => {
let wrapper;
const props = {
title: 'Foobar',
items: [
'foo bar baz', // simple, text-only content
{
text: 'abc',
help: 'def',
},
],
};
const getLastUpdateValidEvent = () => {
const eventArray = wrapper.emitted('update:valid');
return eventArray[eventArray.length - 1];
};
const findItem = (atIndex = 0) => wrapper.findAllComponents(GlFormCheckbox).at(atIndex);
const getGlFormCheckboxGroup = () => wrapper.getComponent(GlFormCheckboxGroup);
// The item ids *can* be passed inside props.items, but are usually
// autogenerated by lodash.uniqueId() inside the component. To obtain the
// values the component expects to be emitted in GlFormCheckboxGroup's
// `v-model`, we therefore read back what is actually passed to each
// GlFormCheckbox.
const getAllItemIds = () => props.items.map((_, i) => findItem(i).attributes().value);
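// (For reference: lodash's uniqueId() draws from a process-wide counter —
// e.g. uniqueId('item-') might yield 'item-1', then 'item-2' — so the
// generated values can't be predicted and are read back from the rendered
// checkboxes instead; the 'item-' prefix here is illustrative only.)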
const createComponent = (mountFn = shallowMountExtended) => {
wrapper = mountFn(ChecklistWidget, {
propsData: {
...props,
},
});
};
afterEach(() => {
wrapper.destroy();
});
it('creates the component', () => {
createComponent();
expect(wrapper.exists()).toBe(true);
});
it('displays the item', () => {
createComponent();
expect(findItem().exists()).toBe(true);
});
it("displays the item's text", () => {
createComponent();
expect(findItem().text()).toBe(props.items[0]);
});
it('displays an item with a help text', () => {
createComponent();
const { text, help } = props.items[1];
const itemWrapper = findItem(1);
const itemText = itemWrapper.text();
// Unfortunately there is no wrapper.slots() accessor in vue_test_utils.
// To make sure the help text is being passed to the correct slot, we need to
// access the slot internally.
// This selector accesses the text of the first slot named "help" in itemWrapper
const helpText = itemWrapper.vm.$slots?.help[0]?.text?.trim();
expect(itemText).toBe(text);
expect(helpText).toBe(help);
});
it("emits a 'update:valid' event after all boxes have been checked", async () => {
createComponent();
// initially, `valid` should be false
expect(wrapper.emitted('update:valid')).toEqual([[false]]);
const values = getAllItemIds();
// this mocks checking all the boxes
getGlFormCheckboxGroup().vm.$emit('input', values);
await nextTick();
expect(wrapper.emitted('update:valid')).toEqual([[false], [true]]);
});
it("emits an 'update:valid' event with false after a box has been unchecked", async () => {
createComponent();
// initially, `valid` should be false
expect(wrapper.emitted('update:valid')).toEqual([[false]]);
// checking all the boxes first
const values = getAllItemIds();
getGlFormCheckboxGroup().vm.$emit('input', values);
await nextTick();
// ensure the test later doesn't just pass because it doesn't emit
// `true` to begin with
expect(getLastUpdateValidEvent()).toEqual([true]);
// Now we're unchecking the last box.
values.pop();
getGlFormCheckboxGroup().vm.$emit('input', values);
await nextTick();
expect(getLastUpdateValidEvent()).toEqual([false]);
});
});

View File

@@ -0,0 +1,94 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe API::Metadata do
shared_examples_for 'GET /metadata' do
context 'when unauthenticated' do
it 'returns authentication error' do
get api('/metadata')
expect(response).to have_gitlab_http_status(:unauthorized)
end
end
context 'when authenticated as user' do
let(:user) { create(:user) }
it 'returns the metadata information' do
get api('/metadata', user)
expect_metadata
end
end
context 'when authenticated with token' do
let(:personal_access_token) { create(:personal_access_token, scopes: scopes) }
context 'with api scope' do
let(:scopes) { %i(api) }
it 'returns the metadata information' do
get api('/metadata', personal_access_token: personal_access_token)
expect_metadata
end
it 'returns "200" response on head requests' do
head api('/metadata', personal_access_token: personal_access_token)
expect(response).to have_gitlab_http_status(:ok)
end
end
context 'with read_user scope' do
let(:scopes) { %i(read_user) }
it 'returns the metadata information' do
get api('/metadata', personal_access_token: personal_access_token)
expect_metadata
end
it 'returns "200" response on head requests' do
head api('/metadata', personal_access_token: personal_access_token)
expect(response).to have_gitlab_http_status(:ok)
end
end
context 'with neither api nor read_user scope' do
let(:scopes) { %i(read_repository) }
it 'returns authorization error' do
get api('/metadata', personal_access_token: personal_access_token)
expect(response).to have_gitlab_http_status(:forbidden)
end
end
end
def expect_metadata
aggregate_failures("testing response") do
expect(response).to have_gitlab_http_status(:ok)
expect(response).to match_response_schema('public_api/v4/metadata')
end
end
end
context 'with graphql enabled' do
before do
stub_feature_flags(graphql: true)
end
include_examples 'GET /metadata'
end
context 'with graphql disabled' do
before do
stub_feature_flags(graphql: false)
end
include_examples 'GET /metadata'
end
end

View File

@@ -0,0 +1,67 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Projects::PipelinesController do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
before_all do
create(:ci_build, pipeline: pipeline, stage: 'build')
create(:ci_bridge, pipeline: pipeline, stage: 'build')
create(:generic_commit_status, pipeline: pipeline, stage: 'build')
project.add_developer(user)
end
before do
login_as(user)
end
describe "GET stages.json" do
it 'does not execute N+1 queries' do
request_build_stage
control_count = ActiveRecord::QueryRecorder.new do
request_build_stage
end.count
create(:ci_build, pipeline: pipeline, stage: 'build')
expect { request_build_stage }.not_to exceed_query_limit(control_count)
expect(response).to have_gitlab_http_status(:ok)
end
context 'with retried builds' do
it 'does not execute N+1 queries' do
create(:ci_build, :retried, :failed, pipeline: pipeline, stage: 'build')
request_build_stage(retried: true)
control_count = ActiveRecord::QueryRecorder.new do
request_build_stage(retried: true)
end.count
create(:ci_build, :retried, :failed, pipeline: pipeline, stage: 'build')
expect { request_build_stage(retried: true) }.not_to exceed_query_limit(control_count)
expect(response).to have_gitlab_http_status(:ok)
end
end
def request_build_stage(params = {})
get stage_namespace_project_pipeline_path(
params.merge(
namespace_id: project.namespace.to_param,
project_id: project.to_param,
id: pipeline.id,
stage: 'build',
format: :json
)
)
end
end
end