Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-12-14 12:13:33 +00:00
parent a58667ab4b
commit 79a7da2537
100 changed files with 662 additions and 348 deletions

View File

@@ -488,6 +488,11 @@ rspec:coverage:
- rspec unit pg12 minimal
- rspec integration pg12 minimal
- rspec system pg12 minimal
# FOSS/EE decomposed jobs
- rspec migration pg12 decomposed
- rspec unit pg12 decomposed
- rspec integration pg12 decomposed
- rspec system pg12 decomposed
# EE jobs
- rspec-ee migration pg12
- rspec-ee unit pg12
@@ -498,6 +503,11 @@ rspec:coverage:
- rspec-ee unit pg12 minimal
- rspec-ee integration pg12 minimal
- rspec-ee system pg12 minimal
# EE decomposed jobs
- rspec-ee migration pg12 decomposed
- rspec-ee unit pg12 decomposed
- rspec-ee integration pg12 decomposed
- rspec-ee system pg12 decomposed
# Geo jobs
- rspec-ee unit pg12 geo
- rspec-ee integration pg12 geo

View File

@@ -289,17 +289,6 @@ Performance/DeleteSuffix:
- 'ee/app/models/geo/upload_registry.rb'
- 'ee/app/workers/geo/file_download_dispatch_worker/attachment_job_finder.rb'
# Offense count: 13
# Cop supports --auto-correct.
Performance/Detect:
Exclude:
- 'ee/spec/controllers/projects/dependencies_controller_spec.rb'
- 'ee/spec/requests/api/dependencies_spec.rb'
- 'qa/qa/runtime/feature.rb'
- 'spec/lib/gitlab/git/tree_spec.rb'
- 'spec/lib/gitlab/import_export/project/tree_restorer_spec.rb'
- 'spec/models/event_spec.rb'
# Offense count: 121
Performance/MethodObjectAsBlock:
Enabled: false
@@ -827,11 +816,6 @@ Style/RescueModifier:
Style/SingleArgumentDig:
Enabled: false
# Offense count: 45
# Cop supports --auto-correct.
Style/SlicingWithRange:
Enabled: false
# Offense count: 63
# Configuration parameters: AllowModifier.
Style/SoleNestedConditional:

View File

@@ -1 +1 @@
125f0fc0e49db4dd46f2e905f34178ce880dd79e
0e0aeb5ca4488903f41acd392911bd89d1ad3d6d

View File

@@ -314,9 +314,8 @@ export default {
variables: { importRequests },
});
} catch (error) {
const message = error?.networkError?.response?.data?.error ?? i18n.ERROR_IMPORT;
createFlash({
message,
message: i18n.ERROR_IMPORT,
captureError: true,
error,
});

View File

@@ -32,72 +32,84 @@ export default {
fullPath() {
return this.group.importTarget.targetNamespace.fullPath || s__('BulkImport|No parent');
},
invalidNameValidationMessage() {
return getInvalidNameValidationMessage(this.group.importTarget);
validationMessage() {
return (
this.group.progress?.message || getInvalidNameValidationMessage(this.group.importTarget)
);
},
validNameState() {
// bootstrap-vue requires null for the "indifferent" state; returning true
// would highlight the field in green as though it passed validation
return this.group.flags.isInvalid && this.group.flags.isAvailableForImport ? false : null;
},
},
};
</script>
<template>
<div class="gl-display-flex gl-align-items-stretch">
<import-group-dropdown
#default="{ namespaces }"
:text="fullPath"
:disabled="!group.flags.isAvailableForImport"
:namespaces="availableNamespaces"
toggle-class="gl-rounded-top-right-none! gl-rounded-bottom-right-none!"
class="gl-h-7 gl-flex-grow-1"
data-qa-selector="target_namespace_selector_dropdown"
>
<gl-dropdown-item @click="$emit('update-target-namespace', { fullPath: '', id: null })">{{
s__('BulkImport|No parent')
}}</gl-dropdown-item>
<template v-if="namespaces.length">
<gl-dropdown-divider />
<gl-dropdown-section-header>
{{ s__('BulkImport|Existing groups') }}
</gl-dropdown-section-header>
<gl-dropdown-item
v-for="ns in namespaces"
:key="ns.fullPath"
data-qa-selector="target_group_dropdown_item"
:data-qa-group-name="ns.fullPath"
@click="$emit('update-target-namespace', ns)"
>
{{ ns.fullPath }}
</gl-dropdown-item>
</template>
</import-group-dropdown>
<div
class="gl-h-7 gl-px-3 gl-display-flex gl-align-items-center gl-border-solid gl-border-0 gl-border-t-1 gl-border-b-1 gl-bg-gray-10"
:class="{
'gl-text-gray-400 gl-border-gray-100': !group.flags.isAvailableForImport,
'gl-border-gray-200': group.flags.isAvailableForImport,
}"
>
/
</div>
<div class="gl-flex-grow-1">
<gl-form-input
class="gl-rounded-top-left-none gl-rounded-bottom-left-none"
:class="{
'gl-inset-border-1-gray-200!': group.flags.isAvailableForImport,
'gl-inset-border-1-gray-100!': !group.flags.isAvailableForImport,
'is-invalid': group.flags.isInvalid && group.flags.isAvailableForImport,
}"
debounce="500"
<div>
<div class="gl-display-flex gl-align-items-stretch">
<import-group-dropdown
#default="{ namespaces }"
:text="fullPath"
:disabled="!group.flags.isAvailableForImport"
:value="group.importTarget.newName"
:aria-label="__('New name')"
@input="$emit('update-new-name', $event)"
/>
<p
v-if="group.flags.isAvailableForImport && group.flags.isInvalid"
class="gl-text-red-500 gl-m-0 gl-mt-2"
:namespaces="availableNamespaces"
toggle-class="gl-rounded-top-right-none! gl-rounded-bottom-right-none!"
class="gl-h-7 gl-flex-grow-1"
data-qa-selector="target_namespace_selector_dropdown"
>
{{ invalidNameValidationMessage }}
</p>
<gl-dropdown-item @click="$emit('update-target-namespace', { fullPath: '', id: null })">{{
s__('BulkImport|No parent')
}}</gl-dropdown-item>
<template v-if="namespaces.length">
<gl-dropdown-divider />
<gl-dropdown-section-header>
{{ s__('BulkImport|Existing groups') }}
</gl-dropdown-section-header>
<gl-dropdown-item
v-for="ns in namespaces"
:key="ns.fullPath"
data-qa-selector="target_group_dropdown_item"
:data-qa-group-name="ns.fullPath"
@click="$emit('update-target-namespace', ns)"
>
{{ ns.fullPath }}
</gl-dropdown-item>
</template>
</import-group-dropdown>
<div
class="gl-h-7 gl-px-3 gl-display-flex gl-align-items-center gl-border-solid gl-border-0 gl-border-t-1 gl-border-b-1 gl-bg-gray-10"
:class="{
'gl-text-gray-400 gl-border-gray-100': !group.flags.isAvailableForImport,
'gl-border-gray-200': group.flags.isAvailableForImport,
}"
>
/
</div>
<div class="gl-flex-grow-1">
<gl-form-input
class="gl-rounded-top-left-none gl-rounded-bottom-left-none"
:class="{
'gl-inset-border-1-gray-200!':
group.flags.isAvailableForImport && !group.flags.isInvalid,
'gl-inset-border-1-gray-100!':
!group.flags.isAvailableForImport && !group.flags.isInvalid,
}"
debounce="500"
:disabled="!group.flags.isAvailableForImport"
:value="group.importTarget.newName"
:aria-label="__('New name')"
:state="validNameState"
@input="$emit('update-new-name', $event)"
/>
</div>
</div>
<div
v-if="group.flags.isAvailableForImport && (group.flags.isInvalid || validationMessage)"
class="gl-text-red-500 gl-m-0 gl-mt-2"
role="alert"
>
{{ validationMessage }}
</div>
</div>
</template>

View File

@@ -142,9 +142,7 @@ export function createResolvers({ endpoints }) {
};
});
const {
data: { id: jobId },
} = await axios.post(endpoints.createBulkImport, {
const { data: originalResponse } = await axios.post(endpoints.createBulkImport, {
bulk_import: importOperations.map((op) => ({
source_type: 'group_entity',
source_full_path: op.group.fullPath,
@@ -153,15 +151,21 @@
})),
});
return importOperations.map((op) => {
const responses = Array.isArray(originalResponse)
? originalResponse
: [{ success: true, id: originalResponse.id }];
return importOperations.map((op, idx) => {
const response = responses[idx];
const lastImportTarget = {
targetNamespace: op.targetNamespace,
newName: op.newName,
};
const progress = {
id: jobId,
status: STATUSES.CREATED,
id: response.id || `local-${Date.now()}-${idx}`,
status: response.success ? STATUSES.CREATED : STATUSES.FAILED,
message: response.message || null,
};
localStorageCache.set(op.group.webUrl, { progress, lastImportTarget });

View File

@@ -1,4 +1,5 @@
fragment BulkImportSourceGroupProgress on ClientBulkImportProgress {
id
status
message
}

View File

@@ -9,6 +9,7 @@ mutation importGroups($importRequests: [ImportGroupInput!]!) {
progress {
id
status
message
}
}
}

View File

@@ -22,7 +22,14 @@ export class LocalStorageCache {
loadCacheFromStorage() {
try {
return JSON.parse(this.storage.getItem(KEY)) ?? {};
const storage = JSON.parse(this.storage.getItem(KEY)) ?? {};
Object.values(storage).forEach((entry) => {
if (entry.progress && !('message' in entry.progress)) {
// eslint-disable-next-line no-param-reassign
entry.progress.message = '';
}
});
return storage;
} catch {
return {};
}

View File

@@ -16,6 +16,7 @@ type ClientBulkImportSourceGroupConnection {
type ClientBulkImportProgress {
id: ID!
status: String!
message: String
}
type ClientBulkImportValidationError {

View File

@@ -1,4 +1,5 @@
import { s__, __ } from '~/locale';
import { helpPagePath } from '~/helpers/help_page_helper';
export {
DELETE_PACKAGE_TRACKING_ACTION,
@@ -136,3 +137,8 @@ export const PACKAGE_TYPES = [
s__('PackageRegistry|Debian'),
s__('PackageRegistry|Helm'),
];
// links
export const EMPTY_LIST_HELP_URL = helpPagePath('user/packages/package_registry/index');
export const PACKAGE_HELP_URL = helpPagePath('user/packages/index');

View File

@@ -0,0 +1,30 @@
import Vue from 'vue';
import Translate from '~/vue_shared/translate';
import { apolloProvider } from '~/packages_and_registries/package_registry/graphql/index';
import PackageRegistry from '~/packages_and_registries/package_registry/pages/index.vue';
import createRouter from './router';
Vue.use(Translate);
export default () => {
const el = document.getElementById('js-vue-packages-list');
const { endpoint, resourceId, fullPath, pageType, emptyListIllustration } = el.dataset;
const router = createRouter(endpoint);
const isGroupPage = pageType === 'groups';
return new Vue({
el,
router,
apolloProvider,
provide: {
resourceId,
fullPath,
emptyListIllustration,
isGroupPage,
},
render(createElement) {
return createElement(PackageRegistry);
},
});
};

View File

@@ -0,0 +1,5 @@
<template>
<div>
<router-view />
</div>
</template>

View File

@@ -1,24 +0,0 @@
import Vue from 'vue';
import Translate from '~/vue_shared/translate';
import { apolloProvider } from '~/packages_and_registries/package_registry/graphql/index';
import PackagesListApp from '../components/list/app.vue';
Vue.use(Translate);
export default () => {
const el = document.getElementById('js-vue-packages-list');
const isGroupPage = el.dataset.pageType === 'groups';
return new Vue({
el,
apolloProvider,
provide: {
...el.dataset,
isGroupPage,
},
render(createElement) {
return createElement(PackagesListApp);
},
});
};

View File

@@ -9,13 +9,15 @@ import {
GROUP_RESOURCE_TYPE,
GRAPHQL_PAGE_SIZE,
DELETE_PACKAGE_SUCCESS_MESSAGE,
EMPTY_LIST_HELP_URL,
PACKAGE_HELP_URL,
} from '~/packages_and_registries/package_registry/constants';
import getPackagesQuery from '~/packages_and_registries/package_registry/graphql/queries/get_packages.query.graphql';
import DeletePackage from '~/packages_and_registries/package_registry/components/functional/delete_package.vue';
import PackageTitle from './package_title.vue';
import PackageSearch from './package_search.vue';
import PackageList from './packages_list.vue';
import PackageTitle from '~/packages_and_registries/package_registry/components/list/package_title.vue';
import PackageSearch from '~/packages_and_registries/package_registry/components/list/package_search.vue';
import PackageList from '~/packages_and_registries/package_registry/components/list/packages_list.vue';
export default {
components: {
@@ -27,13 +29,7 @@ export default {
PackageSearch,
DeletePackage,
},
inject: [
'packageHelpUrl',
'emptyListIllustration',
'emptyListHelpUrl',
'isGroupPage',
'fullPath',
],
inject: ['emptyListIllustration', 'isGroupPage', 'fullPath'],
data() {
return {
packages: {},
@@ -156,12 +152,16 @@ export default {
'PackageRegistry|Learn how to %{noPackagesLinkStart}publish and share your packages%{noPackagesLinkEnd} with GitLab.',
),
},
links: {
EMPTY_LIST_HELP_URL,
PACKAGE_HELP_URL,
},
};
</script>
<template>
<div>
<package-title :help-url="packageHelpUrl" :count="packagesCount" />
<package-title :help-url="$options.links.PACKAGE_HELP_URL" :count="packagesCount" />
<package-search @update="handleSearchUpdate" />
<delete-package
@@ -185,7 +185,9 @@ export default {
<gl-sprintf v-if="hasFilters" :message="$options.i18n.widenFilters" />
<gl-sprintf v-else :message="$options.i18n.noResultsText">
<template #noPackagesLink="{ content }">
<gl-link :href="emptyListHelpUrl" target="_blank">{{ content }}</gl-link>
<gl-link :href="$options.links.EMPTY_LIST_HELP_URL" target="_blank">{{
content
}}</gl-link>
</template>
</gl-sprintf>
</template>

View File

@@ -0,0 +1,21 @@
import Vue from 'vue';
import VueRouter from 'vue-router';
import List from '~/packages_and_registries/package_registry/pages/list.vue';
Vue.use(VueRouter);
export default function createRouter(base) {
const router = new VueRouter({
base,
mode: 'history',
routes: [
{
name: 'list',
path: '/',
component: List,
},
],
});
return router;
}

View File

@@ -1,3 +1,3 @@
import packageList from '~/packages_and_registries/package_registry/pages/list';
import packageApp from '~/packages_and_registries/package_registry/index';
packageList();
packageApp();

View File

@@ -1,3 +1,3 @@
import packageList from '~/packages_and_registries/package_registry/pages/list';
import packageApp from '~/packages_and_registries/package_registry/index';
packageList();
packageApp();

View File

@@ -165,6 +165,6 @@ export default {
<template>
<div>
<div class="flash-container js-suggestions-flash"></div>
<div v-show="isRendered" ref="container" v-safe-html="noteHtml" class="md"></div>
<div v-show="isRendered" ref="container" v-safe-html="noteHtml" class="md suggestions"></div>
</div>
</template>

View File

@@ -139,6 +139,10 @@
font-family: $monospace-font !important;
}
.suggestions.md > .markdown-code-block {
@include gl-static;
}
.md-suggestion-header {
height: $suggestion-header-height;
display: flex;

View File

@@ -40,13 +40,9 @@ class Import::BulkImportsController < ApplicationController
end
def create
response = ::BulkImports::CreateService.new(current_user, create_params, credentials).execute
responses = create_params.map { |entry| ::BulkImports::CreateService.new(current_user, entry, credentials).execute }
if response.success?
render json: response.payload.to_json(only: [:id])
else
render json: { error: response.message }, status: response.http_status
end
render json: responses.map { |response| { success: response.success?, id: response.payload[:id], message: response.message } }
end
def realtime_changes
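The reworked `create` action runs `CreateService` once per entry and always renders an array with one result per requested import, instead of a single job ID or an HTTP error. A hypothetical rendered payload for two requests, one accepted and one rejected (all values are placeholders):

```ruby
# Sketch of the JSON the new action renders; ids and message are made up.
[
  { success: true,  id: 42,  message: nil },
  { success: false, id: nil, message: 'Validation failed' }
].to_json
```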

View File

@@ -99,7 +99,7 @@ class Projects::BlobController < Projects::ApplicationController
@content = params[:content]
@blob.load_all_data!
diffy = Diffy::Diff.new(@blob.data, @content, diff: '-U 3', include_diff_info: true)
diff_lines = diffy.diff.scan(/.*\n/)[2..-1]
diff_lines = diffy.diff.scan(/.*\n/)[2..]
diff_lines = Gitlab::Diff::Parser.new.parse(diff_lines).to_a
@diff_lines = Gitlab::Diff::Highlight.new(diff_lines, repository: @repository).highlight
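Several hunks in this commit swap trailing `[n..-1]` slices for Ruby 2.6+ endless ranges (`[n..]`), matching the removal of the `Style/SlicingWithRange` exclusion from `.rubocop_todo.yml` above. A minimal, standalone sketch of the equivalence (plain Ruby, nothing GitLab-specific assumed):

```ruby
lines = "a\nb\nc\nd\n".scan(/.*\n/)

lines[2..-1] # => ["c\n", "d\n"] -- open-ended slice, pre-2.6 spelling
lines[2..]   # => ["c\n", "d\n"] -- endless range, same result

# A whole-array slice such as traversal_ids[0..-1] only produces a copy,
# which is why one hunk below drops it and passes the array directly.
```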

View File

@@ -29,7 +29,7 @@ module Resolvers
job_types: security_report_types
).execute
else
pipeline.statuses
pipeline.statuses_order_id_desc
end
end
end

View File

@@ -18,7 +18,7 @@ module Resolvers
def preloads
{
jobs: { statuses: [:needs] },
jobs: { statuses_order_id_desc: [:needs] },
upstream: [:triggered_by_pipeline],
downstream: [:triggered_pipelines]
}

View File

@@ -283,7 +283,7 @@ module DiffHelper
return path unless path.size > max && max > 3
"...#{path[-(max - 3)..-1]}"
"...#{path[-(max - 3)..]}"
end
def code_navigation_path(diffs)

View File

@@ -38,17 +38,6 @@ module PackagesHelper
"#{Gitlab.config.gitlab.host}/#{group_id}"
end
def packages_list_data(type, resource)
{
resource_id: resource.id,
full_path: resource.full_path,
page_type: type,
empty_list_help_url: help_page_path('user/packages/package_registry/index'),
empty_list_illustration: image_path('illustrations/no-packages.svg'),
package_help_url: help_page_path('user/packages/index')
}
end
def track_package_event(event_name, scope, **args)
::Packages::CreateEventService.new(nil, current_user, event_name: event_name, scope: scope).execute
category = args.delete(:category) || self.class.name

View File

@@ -63,6 +63,7 @@ module Ci
has_many :statuses, class_name: 'CommitStatus', foreign_key: :commit_id, inverse_of: :pipeline
has_many :latest_statuses_ordered_by_stage, -> { latest.order(:stage_idx, :stage) }, class_name: 'CommitStatus', foreign_key: :commit_id, inverse_of: :pipeline
has_many :latest_statuses, -> { latest }, class_name: 'CommitStatus', foreign_key: :commit_id, inverse_of: :pipeline
has_many :statuses_order_id_desc, -> { order_id_desc }, class_name: 'CommitStatus', foreign_key: :commit_id
has_many :processables, class_name: 'Ci::Processable', foreign_key: :commit_id, inverse_of: :pipeline
has_many :bridges, class_name: 'Ci::Bridge', foreign_key: :commit_id, inverse_of: :pipeline
has_many :builds, foreign_key: :commit_id, inverse_of: :pipeline
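The new `statuses_order_id_desc` association backs the two resolver changes above: it returns the same `CommitStatus` rows as `statuses`, but with an explicit descending-ID order rather than whatever order the database happens to return. A hedged sketch of the two equivalent call sites (assuming `order_id_desc` is the usual `order(id: :desc)` scope):

```ruby
pipeline.statuses.order(id: :desc) # explicit ordering at every call site
pipeline.statuses_order_id_desc    # same rows through the new association
```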

View File

@@ -11,7 +11,7 @@ module TokenAuthenticatableStrategies
# The pattern of the token is "#{DYNAMIC_NONCE_IDENTIFIER}#{token}#{iv_of_12_characters}"
if token.start_with?(DYNAMIC_NONCE_IDENTIFIER) && token.size > NONCE_SIZE + DYNAMIC_NONCE_IDENTIFIER.size
token_to_decrypt = token[1...-NONCE_SIZE]
iv = token[-NONCE_SIZE..-1]
iv = token[-NONCE_SIZE..]
Gitlab::CryptoHelper.aes256_gcm_decrypt(token_to_decrypt, nonce: iv)
else
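Per the comment above, the token layout is `"#{DYNAMIC_NONCE_IDENTIFIER}#{token}#{iv_of_12_characters}"`, so the two slices peel off the ciphertext and the trailing IV. A standalone sketch of that slicing, assuming a one-character identifier and the documented 12-character nonce:

```ruby
NONCE_SIZE = 12
token = 'd' + 'ciphertext-part' + 'abcdefghijkl' # identifier + payload + 12-char IV

token[1...-NONCE_SIZE] # => "ciphertext-part" -- everything between identifier and IV
token[-NONCE_SIZE..]   # => "abcdefghijkl"    -- the IV, via an endless range
```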

View File

@@ -31,10 +31,6 @@ class ProjectHook < WebHook
_('Webhooks')
end
def web_hooks_disable_failed?
Feature.enabled?(:web_hooks_disable_failed, project)
end
override :rate_limit
def rate_limit
project.actual_limits.limit_for(:web_hook_calls)
@@ -44,6 +40,13 @@ class ProjectHook < WebHook
def application_context
super.merge(project: project)
end
private
override :web_hooks_disable_failed?
def web_hooks_disable_failed?
Feature.enabled?(:web_hooks_disable_failed, project)
end
end
ProjectHook.prepend_mod_with('ProjectHook')

View File

@@ -34,9 +34,19 @@ class WebHook < ApplicationRecord
end
def executable?
return true unless web_hooks_disable_failed?
!temporarily_disabled? && !permanently_disabled?
end
recent_failures <= FAILURE_THRESHOLD && (disabled_until.nil? || disabled_until < Time.current)
def temporarily_disabled?
return false unless web_hooks_disable_failed?
disabled_until.present? && disabled_until >= Time.current
end
def permanently_disabled?
return false unless web_hooks_disable_failed?
recent_failures > FAILURE_THRESHOLD
end
# rubocop: disable CodeReuse/ServiceClass
@@ -69,6 +79,8 @@ end
end
def disable!
return if permanently_disabled?
update_attribute(:recent_failures, FAILURE_THRESHOLD + 1)
end
@@ -80,7 +92,7 @@ end
end
def backoff!
return if backoff_count >= MAX_FAILURES && disabled_until && disabled_until > Time.current
return if permanently_disabled? || (backoff_count >= MAX_FAILURES && temporarily_disabled?)
assign_attributes(disabled_until: next_backoff.from_now, backoff_count: backoff_count.succ.clamp(0, MAX_FAILURES))
save(validate: false)
@@ -93,7 +105,19 @@
save(validate: false)
end
# Overridden in ProjectHook and GroupHook, other webhooks are not rate-limited.
# @return [Boolean] Whether or not the WebHook is currently throttled.
def rate_limited?
return false unless rate_limit
Gitlab::ApplicationRateLimiter.peek(
:web_hook_calls,
scope: [self],
threshold: rate_limit
)
end
# Threshold for the rate-limit.
# Overridden in ProjectHook and GroupHook, other WebHooks are not rate-limited.
def rate_limit
nil
end
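Taken together, the new predicates split the old inline check in two: a hook is executable only when it is neither inside a backoff window (`temporarily_disabled?`) nor past the failure threshold (`permanently_disabled?`). A minimal sketch of the resulting logic, ignoring the `web_hooks_disable_failed` feature-flag guard and assuming `FAILURE_THRESHOLD = 3` for illustration (the real constant lives on the model):

```ruby
FAILURE_THRESHOLD = 3 # assumed value, for illustration only

def permanently_disabled?(recent_failures)
  recent_failures > FAILURE_THRESHOLD
end

def temporarily_disabled?(disabled_until, now: Time.now)
  !disabled_until.nil? && disabled_until >= now
end

def executable?(recent_failures, disabled_until)
  !temporarily_disabled?(disabled_until) && !permanently_disabled?(recent_failures)
end

executable?(0, nil)           # => true  -- healthy hook
executable?(4, nil)           # => false -- over the failure threshold
executable?(1, Time.now + 60) # => false -- still backing off
```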

View File

@@ -204,7 +204,7 @@ module Namespaces
end
if bottom
skope = skope.where(id: bottom.traversal_ids[0..-1])
skope = skope.where(id: bottom.traversal_ids)
end
# The original `with_depth` attribute in ObjectHierarchy increments as you

View File

@@ -37,7 +37,7 @@ module Serverless
'a1',
serverless_domain_cluster.uuid[2..-3],
'f2',
serverless_domain_cluster.uuid[-2..-1]
serverless_domain_cluster.uuid[-2..]
].join
end
end

View File

@@ -338,7 +338,7 @@ class WikiPage
current_dirname = File.dirname(title)
if persisted?
return title[1..-1] if current_dirname == '/'
return title[1..] if current_dirname == '/'
return File.join([directory.presence, title].compact) if current_dirname == '.'
end

View File

@@ -59,7 +59,7 @@ module BulkImports
)
bulk_import.create_configuration!(credentials.slice(:url, :access_token))
params.each do |entity|
Array.wrap(params).each do |entity|
BulkImports::Entity.create!(
bulk_import: bulk_import,
source_type: entity[:source_type],
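`Array.wrap` is what lets the service keep accepting the old single-hash `params` while the controller above now sends an array per request. A quick illustration using ActiveSupport's `Array.wrap`:

```ruby
require 'active_support/core_ext/array/wrap'

Array.wrap(source_full_path: 'group')       # => [{:source_full_path=>"group"}]
Array.wrap([{ source_full_path: 'group' }]) # => [{:source_full_path=>"group"}]
Array.wrap(nil)                             # => []
```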

View File

@@ -38,7 +38,7 @@ class CohortsService
{
registration_month: registration_month,
activity_months: activity_months[1..-1],
activity_months: activity_months[1..],
total: activity_months.first[:total],
inactive: inactive
}

View File

@@ -9,7 +9,7 @@ class LfsObjectUploader < GitlabUploader
alias_method :upload, :model
def filename
model.oid[4..-1]
model.oid[4..]
end
def store_dir

View File

@@ -3,4 +3,8 @@
.row
.col-12
#js-vue-packages-list{ data: packages_list_data('groups', @group) }
#js-vue-packages-list{ data: { resource_id: @group.id,
full_path: @group.full_path,
endpoint: group_packages_path(@group),
page_type: 'groups',
empty_list_illustration: image_path('illustrations/no-packages.svg'), } }

View File

@@ -3,4 +3,8 @@
.row
.col-12
#js-vue-packages-list{ data: packages_list_data('projects', @project) }
#js-vue-packages-list{ data: { resource_id: @project.id,
full_path: @project.full_path,
endpoint: project_packages_path(@project),
page_type: 'projects',
empty_list_illustration: image_path('illustrations/no-packages.svg'), } }

View File

@@ -108,7 +108,7 @@ module WikiCloth
"<span class=\"error\">#{I18n.t('template loop detected', :tree => debug_tree)}</span>"
else
key = params[0].to_s.strip
key_options = params[1..-1].collect { |p| p.is_a?(Hash) ? { :name => p[:name].strip, :value => p[:value].strip } : p.strip }
key_options = params[1..].collect { |p| p.is_a?(Hash) ? { :name => p[:name].strip, :value => p[:value].strip } : p.strip }
key_options ||= []
key_digest = Digest::MD5.hexdigest(key_options.to_a.sort {|x,y| (x.is_a?(Hash) ? x[:name] : x) <=> (y.is_a?(Hash) ? y[:name] : y) }.inspect)

View File

@@ -37,7 +37,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -486,8 +486,9 @@ cluster to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed Relational
Database Service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1249,6 +1250,15 @@ There are many third-party solutions for PostgreSQL HA. The solution selected mu
- A static IP for all connections that doesn't change on failover.
- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
NOTE:
With a third-party setup, it's possible to colocate Praefect's database on the same server as
the main [GitLab](#provide-your-own-postgresql-instance) database as a convenience, unless
you are using Geo, where separate database instances are required to handle replication correctly.
In this setup, the specs of the main database shouldn't need to change, as the impact should be
minimal.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
Examples of the above could include [Google's Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) or [Amazon RDS](https://aws.amazon.com/rds/).
Once the database is set up, follow the [post configuration](#praefect-postgresql-post-configuration).
@@ -2234,7 +2244,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -37,7 +37,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -489,8 +489,9 @@ cluster to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed Relational
Database Service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1255,7 +1256,14 @@ There are many third-party solutions for PostgreSQL HA. The solution selected mu
- A static IP for all connections that doesn't change on failover.
- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
Examples of the above could include [Google's Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) or [Amazon RDS](https://aws.amazon.com/rds/).
NOTE:
With a third-party setup, it's possible to colocate Praefect's database on the same server as
the main [GitLab](#provide-your-own-postgresql-instance) database as a convenience, unless
you are using Geo, where separate database instances are required to handle replication correctly.
In this setup, the specs of the main database shouldn't need to change, as the impact should be
minimal.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
Once the database is set up, follow the [post configuration](#praefect-postgresql-post-configuration).
@@ -2234,7 +2242,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -30,7 +30,7 @@ For a full list of reference architectures, see
| NFS server (optional, not recommended) | 1 | 4 vCPU, 3.6 GB memory | `n1-highcpu-4` | `c5.xlarge` | `F4s v2` |
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run as reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run as reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -233,8 +233,9 @@ to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed relational
database service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1028,7 +1029,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
<!-- markdownlint-enable MD029 -->

View File

@@ -46,7 +46,7 @@ For a full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -777,8 +777,9 @@ cluster to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed Relational
Database Service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1201,7 +1202,14 @@ There are many third-party solutions for PostgreSQL HA. The solution selected mu
- A static IP for all connections that doesn't change on failover.
- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
Examples of the above could include [Google's Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) or [Amazon RDS](https://aws.amazon.com/rds/).
NOTE:
With a third-party setup, it's possible to colocate Praefect's database on the same server as
the main [GitLab](#provide-your-own-postgresql-instance) database as a convenience, unless
you are using Geo, where separate database instances are required to handle replication correctly.
In this setup, the specs of the main database shouldn't need to change, as the impact should be
minimal.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
Once the database is set up, follow the [post configuration](#praefect-postgresql-post-configuration).
@@ -2198,7 +2206,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -37,7 +37,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -495,8 +495,9 @@ cluster to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed Relational
Database Service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1262,6 +1263,15 @@ There are many third-party solutions for PostgreSQL HA. The solution selected mu
- A static IP for all connections that doesn't change on failover.
- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
NOTE:
With a third-party setup, it's possible to colocate Praefect's database on the same server as
the main [GitLab](#provide-your-own-postgresql-instance) database as a convenience unless
you are using Geo, where separate database instances are required for handling replication correctly.
In this setup, the specs of the main database setup shouldn't need to be changed as the impact should be
minimal.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
Examples of the above could include [Google's Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) or [Amazon RDS](https://aws.amazon.com/rds/).
Once the database is set up, follow the [post configuration](#praefect-postgresql-post-configuration).
@@ -2248,7 +2258,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -43,7 +43,7 @@ costly-to-operate environment by using the
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -769,8 +769,9 @@ cluster to be used with GitLab.
### Provide your own PostgreSQL instance
If you're hosting GitLab on a cloud provider, you can optionally use a
managed service for PostgreSQL. For example, AWS offers a managed Relational
Database Service (RDS) that runs PostgreSQL.
managed service for PostgreSQL.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
If you use a cloud-managed service, or provide your own PostgreSQL:
@@ -1194,7 +1195,14 @@ There are many third-party solutions for PostgreSQL HA. The solution selected mu
- A static IP for all connections that doesn't change on failover.
- [`LISTEN`](https://www.postgresql.org/docs/12/sql-listen.html) SQL functionality must be supported.
Examples of the above could include [Google's Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) or [Amazon RDS](https://aws.amazon.com/rds/).
NOTE:
With a third-party setup, it's possible to colocate Praefect's database on the same server as
the main [GitLab](#provide-your-own-postgresql-instance) database as a convenience, unless
you are using Geo, where separate database instances are required to handle replication correctly.
In this setup, the specs of the main database shouldn't need to change, as the impact should be
minimal.
A reputable provider or solution should be used for this. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61).
Once the database is set up, follow the [post configuration](#praefect-postgresql-post-configuration).
@@ -2169,7 +2177,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. [Google Cloud SQL](https://cloud.google.com/sql/docs/postgres/high-availability#normal) and [Amazon RDS](https://aws.amazon.com/rds/) are known to work, however Azure Database for PostgreSQL is **not recommended** due to [performance issues](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61). Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -15,6 +15,8 @@ Read more on [pagination](index.md#pagination).
## List project pipelines
> `iid` in response [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/342223) in GitLab 14.6.
List pipelines in a project. Child pipelines are not included in the results,
but you can [get a child pipeline](pipelines.md#get-a-single-pipeline) individually.
@@ -74,6 +76,8 @@ Example of response
## Get a single pipeline
> `iid` in response [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/342223) in GitLab 14.6.
Get one pipeline from a project.
You can also get a single [child pipeline](../ci/pipelines/parent_child_pipelines.md).
@@ -267,6 +271,8 @@ Sample response:
## Create a new pipeline
> `iid` in response [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/342223) in GitLab 14.6.
```plaintext
POST /projects/:id/pipeline
```
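As a quick reference, here is a sketch of calling this endpoint from Ruby's standard library; the host, project ID, ref, and token are all placeholders:

```ruby
require 'net/http'
require 'json'

uri = URI('https://gitlab.example.com/api/v4/projects/42/pipeline?ref=main')
request = Net::HTTP::Post.new(uri)
request['PRIVATE-TOKEN'] = '<your_access_token>'

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) do |http|
  http.request(request)
end

# `iid` is the field this change adds to pipeline responses.
puts JSON.parse(response.body).values_at('id', 'iid').inspect
```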
@@ -316,6 +322,8 @@ Example of response
## Retry jobs in a pipeline
> `iid` in response [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/342223) in GitLab 14.6.
```plaintext
POST /projects/:id/pipelines/:pipeline_id/retry
```

View File

@@ -58,8 +58,7 @@ it was originally composed for, if it is helpful to any of our audiences, we can
include it.
- If you use an image that has a separate source file (for example, a vector or
diagram format), link the image to the source file so that it may be reused or
updated by anyone.
diagram format), link the image to the source file so that anyone can update or reuse it.
- Do not copy and paste content from other sources unless it is a limited
quotation with the source cited. Typically it is better to either rephrase
relevant information in your own words or link out to the other source.

View File

@@ -4,31 +4,39 @@ group: Integrations
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Sign into GitLab with (almost) any OAuth2 provider **(FREE SELF)**
# Generic OAuth2 provider **(FREE SELF)**
The `omniauth-oauth2-generic` gem allows Single Sign-On between GitLab and your own OAuth2 provider
(or any OAuth2 provider compatible with this gem)
The `omniauth-oauth2-generic` gem allows single sign-on (SSO) between GitLab
and your OAuth2 provider (or any OAuth2 provider compatible with this gem).
This strategy is designed to allow configuration of the simple OmniAuth SSO process outlined below:
This strategy allows for the configuration of this OmniAuth SSO process:
1. Strategy directs client to your authorization URL (**configurable**), with specified ID and key
1. OAuth provider handles authentication of request, user, and (optionally) authorization to access user's profile
1. OAuth provider directs client back to GitLab where Strategy handles retrieval of access token
1. Strategy requests user information from a **configurable** "user profile" URL (using the access token)
1. Strategy parses user information from the response, using a **configurable** format
1. GitLab finds or creates the returned user and logs them in
1. Strategy directs the client to your authorization URL (**configurable**), with
the specified ID and key.
1. The OAuth2 provider handles authentication of the request, user, and (optionally)
authorization to access user's profile.
1. The OAuth2 provider directs the client back to GitLab where Strategy handles
the retrieval of the access token.
1. Strategy requests user information from a **configurable** "user profile"
URL (using the access token).
1. Strategy parses user information from the response, using a **configurable**
format.
1. GitLab finds or creates the returned user and signs them in.
## Limitations of this Strategy
## Limitations of this strategy
- It can only be used for Single Sign on, and doesn't provide any other access granted by any OAuth provider
(importing projects or users, etc)
- It only supports the Authorization Grant flow (most common for client-server applications, like GitLab)
- It is not able to fetch user information from more than one URL
- It has not been tested with user information formats other than JSON
- It can only be used for single sign-on, and doesn't provide any other access
granted by any OAuth2 provider (like importing projects or users).
- It supports only the Authorization Grant flow (most common for client-server
applications, like GitLab).
- It can't fetch user information from more than one URL.
- It hasn't been tested with user information formats other than JSON.
## Configuration Instructions
## Configure the OAuth2 provider
1. Register your application in the OAuth2 provider you wish to authenticate with.
To configure the provider:
1. Register your application in the OAuth2 provider you want to authenticate with.
The redirect URI you provide when registering the application should be:
@ -36,13 +44,13 @@ This strategy is designed to allow configuration of the simple OmniAuth SSO proc
http://your-gitlab.host.com/users/auth/oauth2_generic/callback
```
1. You should now be able to get a Client ID and Client Secret.
Where this shows up differs for each provider.
This may also be called Application ID and Secret
You should now be able to get a Client ID and Client Secret. Where this
appears differs for each provider. This may also be called Application ID
and Secret.
1. On your GitLab server, open the configuration file.
1. On your GitLab server, open the appropriate configuration file.
For Omnibus package:
For Omnibus GitLab:
```shell
sudo editor /etc/gitlab/gitlab.rb
@ -55,9 +63,10 @@ This strategy is designed to allow configuration of the simple OmniAuth SSO proc
sudo -u git -H editor config/gitlab.yml
```
1. See [Configure initial settings](omniauth.md#configure-initial-settings) for initial settings
1. See [Configure initial settings](omniauth.md#configure-initial-settings) for
initial settings.
1. Add the provider-specific configuration for your provider, for example:
1. Add the provider-specific configuration for your provider. For example:
```ruby
gitlab_rails['omniauth_providers'] = [
@ -92,11 +101,13 @@ This strategy is designed to allow configuration of the simple OmniAuth SSO proc
For more information about these settings, see [the gem's README](https://gitlab.com/satorix/omniauth-oauth2-generic#gitlab-config-example).
1. Save the configuration file
1. Save the configuration file.
1. Restart GitLab for the changes to take effect
1. [Restart](../administration/restart_gitlab.md#installations-from-source)
GitLab for the changes to take effect.
On the sign in page there should now be a new button below the regular sign in form.
Click the button to begin your provider's authentication process. This directs
the browser to your OAuth2 Provider's authentication page. If everything goes well
the user is returned to your GitLab instance and is signed in.
On the sign-in page there should now be a new button below the regular sign-in
form. Select the button to begin your provider's authentication process. This
directs the browser to your OAuth2 provider's authentication page. If
everything goes well, you are returned to your GitLab instance and are
signed in.
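For reference, a fuller sketch of a provider entry for the Omnibus GitLab configuration step above. The URLs, paths, and attribute names are placeholders that must be adapted to your provider; see the gem's README for the authoritative option list:

```ruby
gitlab_rails['omniauth_providers'] = [
  {
    name: 'oauth2_generic',
    app_id: '<your_client_id>',         # placeholder
    app_secret: '<your_client_secret>', # placeholder
    args: {
      client_options: {
        site: 'https://provider.example.com', # placeholder provider host
        user_info_url: '/oauth2/v1/userinfo',
        authorize_url: '/oauth2/v1/authorize',
        token_url: '/oauth2/v1/token'
      },
      user_response_structure: {
        root_path: [],    # where the user object lives in the response
        id_path: ['sub'], # placeholder; path to the unique user ID
        attributes: { email: 'email', name: 'name' }
      },
      strategy_class: 'OmniAuth::Strategies::OAuth2Generic'
    }
  }
]
```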

View File

@ -4,7 +4,7 @@ module API
module Entities
module Ci
class PipelineBasic < Grape::Entity
expose :id, :project_id, :sha, :ref, :status, :source
expose :id, :iid, :project_id, :sha, :ref, :status, :source
expose :created_at, :updated_at
expose :web_url do |pipeline, _options|

View File

@ -39,7 +39,7 @@ module Banzai
code[:class] = INLINE_CLASSES
code[STYLE_ATTRIBUTE] = 'inline'
closing.content = closing.content[1..-1]
closing.content = closing.content[1..]
opening.content = opening.content[0..-2]
end
end

View File

@ -174,7 +174,7 @@ module Banzai
def build_relative_path(path, request_path)
return request_path if path.empty?
return path unless request_path
return path[1..-1] if path.start_with?('/')
return path[1..] if path.start_with?('/')
parts = request_path.split('/')

View File

@ -113,7 +113,7 @@ module ExtractsRef
best_match = valid_refs.max_by(&:length)
# Partition the string into the ref and the path, ignoring the empty first value
id.partition(best_match)[1..-1]
id.partition(best_match)[1..]
end
def use_first_path_segment?(ref)

View File

@ -51,7 +51,7 @@ module Flowdock
end
def body
content = @commit[:message][first_line.size..-1]
content = @commit[:message][first_line.size..]
content.strip! if content
"<pre>#{content}</pre>" unless content.empty?
end

View File

@ -48,10 +48,10 @@ module Gitlab
# follows it will produce a nil. For example:
#
# "\n".split("\n") # => []
# "\n".split("\n")[1..-1] # => nil
# "\n".split("\n")[1..] # => nil
#
# To work around this we only "join" if we're given an Array.
if (converted = output.split("\n")[1..-1])
if (converted = output.split("\n")[1..])
converted.join("\n")
else
''
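The `[1..]` form used throughout these hunks is Ruby 2.6's endless range, equivalent to `[1..-1]`; the edge case described in the comment above is unchanged by the rewrite:

```ruby
s = "a\nb\nc"
s[2..-1] # => "b\nc"
s[2..]   # => "b\nc" (endless range, Ruby 2.6+)

# The split quirk behaves identically either way:
"\n".split("\n")       # => []
"\n".split("\n")[1..]  # => nil
```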

View File

@ -51,7 +51,7 @@ module Gitlab
def replace_with!(other)
instance_variables.each do |ivar|
instance_variable_set(ivar, other.public_send(ivar.to_s[1..-1])) # rubocop:disable GitlabSecurity/PublicSend
instance_variable_set(ivar, other.public_send(ivar.to_s[1..])) # rubocop:disable GitlabSecurity/PublicSend
end
end

View File

@ -36,7 +36,7 @@ module Gitlab
end
def strip_diff_frontmatter(diff_content)
diff_content.scan(/.*\n/)[2..-1]&.join('') if diff_content.present?
diff_content.scan(/.*\n/)[2..]&.join('') if diff_content.present?
end
def blobs_with_transformed_diffs

View File

@ -6,8 +6,8 @@ module Gitlab
attr_accessor :old_line, :new_line, :offset
def initialize(old_line, new_line, offset: 0)
@old_line = old_line[offset..-1]
@new_line = new_line[offset..-1]
@old_line = old_line[offset..]
@new_line = new_line[offset..]
@offset = offset
end

View File

@ -57,7 +57,7 @@ module Gitlab
def unfold_reference(reference, match, target_parent)
before = @text[0...match.begin(0)]
after = @text[match.end(0)..-1]
after = @text[match.end(0)..]
referable = find_referable(reference)
return reference unless referable

View File

@ -48,7 +48,7 @@ module Gitlab
raw_keys.each_with_object({}) do |raw_key, grouped_subkeys|
primary_subkey_id = raw_key.primary_subkey.keyid
grouped_subkeys[primary_subkey_id] = raw_key.subkeys[1..-1].map do |s|
grouped_subkeys[primary_subkey_id] = raw_key.subkeys[1..].map do |s|
{ keyid: s.keyid, fingerprint: s.fingerprint }
end
end

View File

@ -44,7 +44,7 @@ module Gitlab
next unless match
input = match.split(':')[1..-1].join
input = match.split(':')[1..].join
next if input.empty?
filter[:negated] = match.start_with?("-")

View File

@ -70,7 +70,7 @@ module Gitlab
next if total_duration <= MINIMUM_DURATION
stats[1..-1].each_with_index do |data, index|
stats[1..].each_with_index do |data, index|
next unless source_lines[index]
duration = microsec_to_millisec(data[0])

View File

@ -99,7 +99,7 @@ module Gitlab
start = prev = positions[0]
range = MarkerRange.new(start, prev, mode: mode)
positions[1..-1].each do |pos|
positions[1..].each do |pos|
if pos == prev + 1
range = MarkerRange.new(start, pos, mode: mode)
prev = pos

View File

@ -16,7 +16,7 @@ module Gitlab
# we remove the leading `//` and add `descendant-or-self::`
# in order to ensure we're searching from this node and all
# descendants.
xpath.map { |t| "descendant-or-self::#{t[2..-1]}" }.join('|')
xpath.map { |t| "descendant-or-self::#{t[2..]}" }.join('|')
end
end
end

View File

@ -184,7 +184,7 @@ class GroupSeeder
group = Group.find(group_id)
@resource_count.times do |i|
_, project_path = PROJECT_URL.split('/')[-2..-1]
_, project_path = PROJECT_URL.split('/')[-2..]
project_path.gsub!('.git', '')

View File

@ -168,6 +168,41 @@ module QA
)
end
# Object comparison
#
# @param [QA::Resource::MergeRequest] other
# @return [Boolean]
def ==(other)
other.is_a?(MergeRequest) && comparable_mr == other.comparable_mr
end
# Override inspect for a better rspec failure diff output
#
# @return [String]
def inspect
JSON.pretty_generate(comparable_mr)
end
protected
# Return subset of fields for comparing merge requests
#
# @return [Hash]
def comparable_mr
reload! if api_response.nil?
api_resource.except(
:id,
:web_url,
:project_id,
:source_project_id,
:target_project_id,
# these can differ depending on user fetching mr
:subscribed,
:first_contribution
).merge({ references: api_resource[:references].except(:full) })
end
private
def transform_api_resource(api_resource)
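A hypothetical spec usage that these overrides enable; `source_mr` and `target_mr` are placeholder resources:

```ruby
it 'creates an equivalent merge request on the target instance' do
  # Compares via #comparable_mr; on failure, rspec prints the #inspect JSON diff.
  expect(target_mr).to eq(source_mr)
end
```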

View File

@ -46,7 +46,7 @@ module RuboCop
end
def arguments(node)
node.children[2..-1]
node.children[2..]
end
end
end

View File

@ -46,7 +46,7 @@ module RuboCop
end
def arguments(node)
node.children[2..-1]
node.children[2..]
end
end
end

View File

@ -215,9 +215,13 @@ RSpec.describe Import::BulkImportsController do
let(:pat) { "fake-pat" }
let(:bulk_import_params) do
[{ "source_type" => "group_entity",
"source_full_path" => "full_path",
"destination_name" => "destination_name",
"destination_namespace" => "root" }]
"source_full_path" => "full_path",
"destination_name" => "destination_name",
"destination_namespace" => "root" },
{ "source_type" => "group_entity2",
"source_full_path" => "full_path2",
"destination_name" => "destination_name2",
"destination_namespace" => "root" }]
end
before do
@ -225,29 +229,23 @@ RSpec.describe Import::BulkImportsController do
session[:bulk_import_gitlab_url] = instance_url
end
it 'executes BulkImports::CreatetService' do
it 'executes BulkImports::CreateService' do
error_response = ServiceResponse.error(message: 'Record invalid', http_status: :unprocessable_entity)
expect_next_instance_of(
::BulkImports::CreateService, user, bulk_import_params, { url: instance_url, access_token: pat }) do |service|
::BulkImports::CreateService, user, bulk_import_params[0], { url: instance_url, access_token: pat }) do |service|
allow(service).to receive(:execute).and_return(ServiceResponse.success(payload: bulk_import))
end
post :create, params: { bulk_import: bulk_import_params }
expect(response).to have_gitlab_http_status(:ok)
expect(response.body).to eq({ id: bulk_import.id }.to_json)
end
it 'returns error when validation fails' do
error_response = ServiceResponse.error(message: 'Record invalid', http_status: :unprocessable_entity)
expect_next_instance_of(
::BulkImports::CreateService, user, bulk_import_params, { url: instance_url, access_token: pat }) do |service|
::BulkImports::CreateService, user, bulk_import_params[1], { url: instance_url, access_token: pat }) do |service|
allow(service).to receive(:execute).and_return(error_response)
end
post :create, params: { bulk_import: bulk_import_params }
expect(response).to have_gitlab_http_status(:unprocessable_entity)
expect(response.body).to eq({ error: 'Record invalid' }.to_json)
expect(response).to have_gitlab_http_status(:ok)
expect(json_response).to eq([{ "success" => true, "id" => bulk_import.id, "message" => nil },
{ "success" => false, "id" => nil, "message" => "Record invalid" }])
end
end
end

View File

@ -128,7 +128,7 @@ RSpec.describe Projects::Serverless::FunctionsController do
expect(json_response["functions"]).to all(
include(
'url' => "https://#{function_name}-#{serverless_domain_cluster.uuid[0..1]}a1#{serverless_domain_cluster.uuid[2..-3]}f2#{serverless_domain_cluster.uuid[-2..-1]}#{"%x" % environment.id}-#{environment.slug}.#{serverless_domain_cluster.domain}"
'url' => "https://#{function_name}-#{serverless_domain_cluster.uuid[0..1]}a1#{serverless_domain_cluster.uuid[2..-3]}f2#{serverless_domain_cluster.uuid[-2..]}#{"%x" % environment.id}-#{environment.slug}.#{serverless_domain_cluster.domain}"
)
)
end
@ -166,7 +166,7 @@ RSpec.describe Projects::Serverless::FunctionsController do
expect(response).to have_gitlab_http_status(:ok)
expect(json_response).to include(
'url' => "https://#{function_name}-#{serverless_domain_cluster.uuid[0..1]}a1#{serverless_domain_cluster.uuid[2..-3]}f2#{serverless_domain_cluster.uuid[-2..-1]}#{"%x" % environment.id}-#{environment.slug}.#{serverless_domain_cluster.domain}"
'url' => "https://#{function_name}-#{serverless_domain_cluster.uuid[0..1]}a1#{serverless_domain_cluster.uuid[2..-3]}f2#{serverless_domain_cluster.uuid[-2..]}#{"%x" % environment.id}-#{environment.slug}.#{serverless_domain_cluster.domain}"
)
end

View File

@ -20,7 +20,7 @@ RSpec.describe 'User views an open merge request' do
# Work around a weird Capybara behavior where calling `parent` on a node
# returns the whole document, not the node's actual parent element
expect(find(:xpath, "#{node.path}/..").text).to eq(merge_request.description[2..-1])
expect(find(:xpath, "#{node.path}/..").text).to eq(merge_request.description[2..])
expect(page).to have_content(merge_request.title)
end

View File

@ -80,7 +80,7 @@ RSpec.describe 'Issue prioritization' do
expect(issue_titles[0..1]).to contain_exactly('issue_5', 'issue_8')
expect(issue_titles[2..4]).to contain_exactly('issue_1', 'issue_3', 'issue_7')
expect(issue_titles[5..-1]).to eq(%w(issue_2 issue_4 issue_6))
expect(issue_titles[5..]).to eq(%w(issue_2 issue_4 issue_6))
end
end
end

View File

@ -71,7 +71,7 @@ RSpec.describe Packages::Nuget::PackageFinder do
end
context 'with prefix wildcard' do
let(:package_name) { "%#{package1.name[3..-1]}" }
let(:package_name) { "%#{package1.name[3..]}" }
it { is_expected.to match_array([package1, package2]) }
end

View File

@ -14,6 +14,7 @@
"type": ["object", "null"],
"properties": {
"id": { "type": "integer" },
"iid": { "type": "integer" },
"project_id": { "type": "integer" },
"sha": { "type": "string" },
"ref": { "type": "string" },

View File

@ -123,13 +123,22 @@ describe('import target cell', () => {
});
describe('when entity is available for import', () => {
const FAKE_PROGRESS_MESSAGE = 'progress message';
beforeEach(() => {
group = generateFakeTableEntry({ id: 1, flags: { isAvailableForImport: true } });
group = generateFakeTableEntry({
id: 1,
flags: { isAvailableForImport: true },
progress: { message: FAKE_PROGRESS_MESSAGE },
});
createComponent({ group });
});
it('renders namespace dropdown as enabled', () => {
expect(findNamespaceDropdown().attributes('disabled')).toBe(undefined);
});
it('renders progress message as error if it exists', () => {
expect(wrapper.find('[role=alert]').text()).toBe(FAKE_PROGRESS_MESSAGE);
});
});
});

View File

@ -163,12 +163,14 @@ describe('Bulk import resolvers', () => {
});
describe('mutations', () => {
beforeEach(() => {
axiosMockAdapter.onPost(FAKE_ENDPOINTS.createBulkImport).reply(httpStatus.OK, { id: 1 });
});
beforeEach(() => {});
describe('importGroup', () => {
it('sets import status to CREATED when request completes', async () => {
it('sets import status to CREATED for successful groups when request completes', async () => {
axiosMockAdapter
.onPost(FAKE_ENDPOINTS.createBulkImport)
.reply(httpStatus.OK, [{ success: true, id: 1 }]);
await client.mutate({
mutation: importGroupsMutation,
variables: {
@ -185,9 +187,57 @@ describe('Bulk import resolvers', () => {
await axios.waitForAll();
expect(results[0].progress.status).toBe(STATUSES.CREATED);
});
it('sets import status to CREATED for successful groups when request completes with legacy response', async () => {
axiosMockAdapter.onPost(FAKE_ENDPOINTS.createBulkImport).reply(httpStatus.OK, { id: 1 });
await client.mutate({
mutation: importGroupsMutation,
variables: {
importRequests: [
{
sourceGroupId: statusEndpointFixture.importable_data[0].id,
newName: 'test',
targetNamespace: 'root',
},
],
},
});
await axios.waitForAll();
expect(results[0].progress.status).toBe(STATUSES.CREATED);
});
it('sets import status to FAILED and sets progress message for failed groups when request completes', async () => {
const FAKE_ERROR_MESSAGE = 'foo';
axiosMockAdapter
.onPost(FAKE_ENDPOINTS.createBulkImport)
.reply(httpStatus.OK, [{ success: false, id: 1, message: FAKE_ERROR_MESSAGE }]);
await client.mutate({
mutation: importGroupsMutation,
variables: {
importRequests: [
{
sourceGroupId: statusEndpointFixture.importable_data[0].id,
newName: 'test',
targetNamespace: 'root',
},
],
},
});
await axios.waitForAll();
expect(results[0].progress.status).toBe(STATUSES.FAILED);
expect(results[0].progress.message).toBe(FAKE_ERROR_MESSAGE);
});
});
it('updateImportStatus updates status', async () => {
axiosMockAdapter
.onPost(FAKE_ENDPOINTS.createBulkImport)
.reply(httpStatus.OK, [{ success: true, id: 1 }]);
const NEW_STATUS = 'dummy';
await client.mutate({
mutation: importGroupsMutation,
@ -216,6 +266,7 @@ describe('Bulk import resolvers', () => {
expect(statusInResponse).toStrictEqual({
__typename: clientTypenames.BulkImportProgress,
id,
message: null,
status: NEW_STATUS,
});
});

View File

@ -1,7 +1,7 @@
import { STATUSES } from '~/import_entities/constants';
import { clientTypenames } from '~/import_entities/import_groups/graphql/client_factory';
export const generateFakeEntry = ({ id, status, ...rest }) => ({
export const generateFakeEntry = ({ id, status, message, ...rest }) => ({
__typename: clientTypenames.BulkImportSourceGroup,
webUrl: `https://fake.host/${id}`,
fullPath: `fake_group_${id}`,
@ -18,6 +18,7 @@ export const generateFakeEntry = ({ id, status, ...rest }) => ({
: {
id,
status,
message: message || '',
},
...rest,
});

View File

@ -4,7 +4,7 @@ exports[`PackagesListApp renders 1`] = `
<div>
<package-title-stub
count="2"
helpurl="packageHelpUrl"
helpurl="/help/user/packages/index"
/>
<package-search-stub />
@ -49,7 +49,7 @@ exports[`PackagesListApp renders 1`] = `
<b-link-stub
class="gl-link"
event="click"
href="emptyListHelpUrl"
href="/help/user/packages/package_registry/index"
routertag="a"
target="_blank"
>

View File

@ -6,7 +6,7 @@ import { nextTick } from 'vue';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import createMockApollo from 'helpers/mock_apollo_helper';
import waitForPromises from 'helpers/wait_for_promises';
import PackageListApp from '~/packages_and_registries/package_registry/components/list/app.vue';
import ListPage from '~/packages_and_registries/package_registry/pages/list.vue';
import PackageTitle from '~/packages_and_registries/package_registry/components/list/package_title.vue';
import PackageSearch from '~/packages_and_registries/package_registry/components/list/package_search.vue';
import OriginalPackageList from '~/packages_and_registries/package_registry/components/list/packages_list.vue';
@ -16,11 +16,13 @@ import {
PROJECT_RESOURCE_TYPE,
GROUP_RESOURCE_TYPE,
GRAPHQL_PAGE_SIZE,
EMPTY_LIST_HELP_URL,
PACKAGE_HELP_URL,
} from '~/packages_and_registries/package_registry/constants';
import getPackagesQuery from '~/packages_and_registries/package_registry/graphql/queries/get_packages.query.graphql';
import { packagesListQuery, packageData, pagination } from '../../mock_data';
import { packagesListQuery, packageData, pagination } from '../mock_data';
jest.mock('~/lib/utils/common_utils');
jest.mock('~/flash');
@ -32,9 +34,7 @@ describe('PackagesListApp', () => {
let apolloProvider;
const defaultProvide = {
packageHelpUrl: 'packageHelpUrl',
emptyListIllustration: 'emptyListIllustration',
emptyListHelpUrl: 'emptyListHelpUrl',
isGroupPage: true,
fullPath: 'gitlab-org',
};
@ -66,7 +66,7 @@ describe('PackagesListApp', () => {
const requestHandlers = [[getPackagesQuery, resolver]];
apolloProvider = createMockApollo(requestHandlers);
wrapper = shallowMountExtended(PackageListApp, {
wrapper = shallowMountExtended(ListPage, {
localVue,
apolloProvider,
provide,
@ -113,7 +113,10 @@ describe('PackagesListApp', () => {
await waitForFirstRequest();
expect(findPackageTitle().exists()).toBe(true);
expect(findPackageTitle().props('count')).toBe(2);
expect(findPackageTitle().props()).toMatchObject({
count: 2,
helpUrl: PACKAGE_HELP_URL,
});
});
describe('search component', () => {
@ -213,12 +216,12 @@ describe('PackagesListApp', () => {
it('generate the correct empty list link', () => {
const link = findListComponent().findComponent(GlLink);
expect(link.attributes('href')).toBe(defaultProvide.emptyListHelpUrl);
expect(link.attributes('href')).toBe(EMPTY_LIST_HELP_URL);
expect(link.text()).toBe('publish and share your packages');
});
it('includes the right content on the default tab', () => {
expect(findEmptyState().text()).toContain(PackageListApp.i18n.emptyPageTitle);
expect(findEmptyState().text()).toContain(ListPage.i18n.emptyPageTitle);
});
});
@ -234,8 +237,8 @@ describe('PackagesListApp', () => {
});
it('should show specific empty message', () => {
expect(findEmptyState().text()).toContain(PackageListApp.i18n.noResultsTitle);
expect(findEmptyState().text()).toContain(PackageListApp.i18n.widenFilters);
expect(findEmptyState().text()).toContain(ListPage.i18n.noResultsTitle);
expect(findEmptyState().text()).toContain(ListPage.i18n.widenFilters);
});
});

View File

@ -260,34 +260,4 @@ RSpec.describe PackagesHelper do
end
end
end
describe '#packages_list_data' do
let_it_be(:resource) { project }
let_it_be(:type) { 'project' }
let(:expected_result) do
{
resource_id: resource.id,
full_path: resource.full_path,
page_type: type
}
end
subject(:result) { helper.packages_list_data(type, resource) }
context 'at a project level' do
it 'populates presenter data' do
expect(result).to match(hash_including(expected_result))
end
end
context 'at a group level' do
let_it_be(:resource) { create(:group) }
let_it_be(:type) { 'group' }
it 'populates presenter data' do
expect(result).to match(hash_including(expected_result))
end
end
end
end

View File

@ -43,7 +43,7 @@ RSpec.describe Gitlab::Git::Tree, :seed_helper do
end
describe '#dir?' do
let(:dir) { entries.select(&:dir?).first }
let(:dir) { entries.find(&:dir?) }
it { expect(dir).to be_kind_of Gitlab::Git::Tree }
it { expect(dir.id).to eq('3c122d2b7830eca25235131070602575cf8b41a1') }
@ -134,7 +134,7 @@ RSpec.describe Gitlab::Git::Tree, :seed_helper do
end
describe '#file?' do
let(:file) { entries.select(&:file?).first }
let(:file) { entries.find(&:file?) }
it { expect(file).to be_kind_of Gitlab::Git::Tree }
it { expect(file.id).to eq('dfaa3f97ca337e20154a98ac9d0be76ddd1fcc82') }
@ -143,21 +143,21 @@ RSpec.describe Gitlab::Git::Tree, :seed_helper do
end
describe '#readme?' do
let(:file) { entries.select(&:readme?).first }
let(:file) { entries.find(&:readme?) }
it { expect(file).to be_kind_of Gitlab::Git::Tree }
it { expect(file.name).to eq('README.md') }
end
describe '#contributing?' do
let(:file) { entries.select(&:contributing?).first }
let(:file) { entries.find(&:contributing?) }
it { expect(file).to be_kind_of Gitlab::Git::Tree }
it { expect(file.name).to eq('CONTRIBUTING.md') }
end
describe '#submodule?' do
let(:submodule) { entries.select(&:submodule?).first }
let(:submodule) { entries.find(&:submodule?) }
it { expect(submodule).to be_kind_of Gitlab::Git::Tree }
it { expect(submodule.id).to eq('79bceae69cb5750d6567b223597999bfa91cb3b9') }

View File

@ -43,10 +43,10 @@ RSpec.describe Gitlab::GitalyClient::ConflictFilesStitcher do
messages = [
double(files: [double(header: header_1), double(header: nil, content: content_1[0..5])]),
double(files: [double(header: nil, content: content_1[6..-1])]),
double(files: [double(header: nil, content: content_1[6..])]),
double(files: [double(header: header_2)]),
double(files: [double(header: nil, content: content_2[0..5]), double(header: nil, content: content_2[6..10])]),
double(files: [double(header: nil, content: content_2[11..-1])])
double(files: [double(header: nil, content: content_2[11..])])
]
conflict_files = described_class.new(messages, target_repository.gitaly_repository).to_a

View File

@ -41,7 +41,7 @@ RSpec.describe Gitlab::GitalyClient::DiffStitcher do
msg_2.raw_patch_data = diff_2.patch[0..100]
msg_2.end_of_patch = false
msg_3 = OpenStruct.new(raw_patch_data: diff_2.patch[101..-1], end_of_patch: true)
msg_3 = OpenStruct.new(raw_patch_data: diff_2.patch[101..], end_of_patch: true)
msg_4 = OpenStruct.new(diff_3.to_h.except(:patch))
msg_4.raw_patch_data = diff_3.patch

View File

@ -192,7 +192,7 @@ RSpec.describe Gitlab::Gpg::InvalidGpgSignatureUpdater do
project: project,
commit_sha: commit_sha,
gpg_key: nil,
gpg_key_primary_keyid: GpgHelpers::User3.subkey_fingerprints.last[24..-1],
gpg_key_primary_keyid: GpgHelpers::User3.subkey_fingerprints.last[24..],
verification_status: 'unknown_key'
end

View File

@ -98,7 +98,7 @@ RSpec.describe Gitlab::Graphql::Pagination::Keyset::Connection do
let(:nodes) { Project.all.order(Gitlab::Pagination::Keyset::Order.build([column_order_id_desc])) }
it 'returns the correct nodes' do
expect(subject.sliced_nodes).to contain_exactly(*projects[2..-1])
expect(subject.sliced_nodes).to contain_exactly(*projects[2..])
end
end
end
@ -107,7 +107,7 @@ RSpec.describe Gitlab::Graphql::Pagination::Keyset::Connection do
let(:arguments) { { after: encoded_cursor(projects[1]) } }
it 'only returns the project before the selected one' do
expect(subject.sliced_nodes).to contain_exactly(*projects[2..-1])
expect(subject.sliced_nodes).to contain_exactly(*projects[2..])
end
context 'when the sort order is descending' do

View File

@ -120,7 +120,7 @@ RSpec.describe Gitlab::Graphql::Pagination::Keyset::Connection do
let(:nodes) { Project.all.order(id: :desc) }
it 'returns the correct nodes' do
expect(subject.sliced_nodes).to contain_exactly(*projects[2..-1])
expect(subject.sliced_nodes).to contain_exactly(*projects[2..])
end
end
end
@ -129,7 +129,7 @@ RSpec.describe Gitlab::Graphql::Pagination::Keyset::Connection do
let(:arguments) { { after: encoded_cursor(projects[1]) } }
it 'only returns the project before the selected one' do
expect(subject.sliced_nodes).to contain_exactly(*projects[2..-1])
expect(subject.sliced_nodes).to contain_exactly(*projects[2..])
end
context 'when the sort order is descending' do

View File

@ -224,6 +224,7 @@ ci_pipelines:
- ci_ref
- stages
- statuses
- statuses_order_id_desc
- latest_statuses_ordered_by_stage
- builds
- bridges

View File

@ -3,7 +3,7 @@
require 'spec_helper'
def match_mr1_note(content_regex)
MergeRequest.find_by(title: 'MR1').notes.select { |n| n.note.match(/#{content_regex}/)}.first
MergeRequest.find_by(title: 'MR1').notes.find { |n| n.note.match(/#{content_regex}/) }
end
RSpec.describe Gitlab::ImportExport::Project::TreeRestorer do
@ -75,7 +75,7 @@ RSpec.describe Gitlab::ImportExport::Project::TreeRestorer do
context 'for an Issue' do
it 'does not import note_html' do
note_content = 'Quo reprehenderit aliquam qui dicta impedit cupiditate eligendi'
issue_note = Issue.find_by(description: 'Aliquam enim illo et possimus.').notes.select { |n| n.note.match(/#{note_content}/)}.first
issue_note = Issue.find_by(description: 'Aliquam enim illo et possimus.').notes.find { |n| n.note.match(/#{note_content}/) }
expect(issue_note.note_html).to match(/#{note_content}/)
end
@ -552,7 +552,7 @@ RSpec.describe Gitlab::ImportExport::Project::TreeRestorer do
it 'issue system note metadata restored successfully' do
note_content = 'created merge request !1 to address this issue'
note = project.issues.first.notes.select { |n| n.note.match(/#{note_content}/)}.first
note = project.issues.first.notes.find { |n| n.note.match(/#{note_content}/) }
expect(note.noteable_type).to eq('Issue')
expect(note.system).to eq(true)
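The pattern behind these spec edits, in isolation: `find` stops at the first match, while `select { ... }.first` filters the whole collection before taking one element:

```ruby
notes = %w[alpha beta beater]
notes.select { |n| n.match?(/bea/) }.first # => "beta", after scanning every element
notes.find   { |n| n.match?(/bea/) }       # => "beta", stops at the first hit
```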

View File

@ -40,7 +40,7 @@ RSpec.describe Gitlab::MultiCollectionPaginator do
end
it 'fills the last page with elements from the second collection' do
expected_collection = all_groups[-2..-1]
expected_collection = all_groups[-2..]
expect(paginator.paginate(3)).to eq(expected_collection)
end

View File

@ -127,7 +127,7 @@ RSpec.describe Gitlab::Pagination::Keyset::Order do
end
it do
expect(subject).to eq(expected.reverse[1..-1]) # removing one item because we used it to calculate cursor data for the "last" page in subject
expect(subject).to eq(expected.reverse[1..]) # removing one item because we used it to calculate cursor data for the "last" page in subject
end
end
end

View File

@ -78,7 +78,7 @@ RSpec.describe Gitlab::RackAttack, :aggregate_failures do
it 'configures tracks and throttles with a selected set of dry-runs' do
dry_run_throttles = throttles.each_key.first(2)
regular_throttles = throttles.keys[2..-1]
regular_throttles = throttles.keys[2..]
stub_env('GITLAB_THROTTLE_DRY_RUN', dry_run_throttles.join(','))
described_class.configure(fake_rack_attack)

View File

@ -44,20 +44,23 @@ RSpec.describe Gitlab::Redis::Sessions do
describe '#store' do
subject { described_class.store(namespace: described_class::SESSION_NAMESPACE) }
context 'when redis.sessions configuration is provided' do
context 'when redis.sessions configuration is NOT provided' do
it 'instantiates ::Redis instance' do
expect(described_class).to receive(:config_fallback?).and_return(true)
expect(subject).to be_instance_of(::Redis::Store)
end
end
context 'when redis.sessions configuration is not provided' do
context 'when redis.sessions configuration is provided' do
before do
allow(described_class).to receive(:config_fallback?).and_return(false)
end
it 'instantiates an instance of MultiStore' do
expect(described_class).to receive(:config_fallback?).and_return(false)
expect(subject).to be_instance_of(::Gitlab::Redis::MultiStore)
end
end
it_behaves_like 'multi store feature flags', :use_primary_and_secondary_stores_for_sessions, :use_primary_store_as_default_for_sessions
it_behaves_like 'multi store feature flags', :use_primary_and_secondary_stores_for_sessions, :use_primary_store_as_default_for_sessions
end
end
end

View File

@ -2506,7 +2506,7 @@ RSpec.describe Ci::Build do
it { is_expected.to start_with(project.web_url[0..6]) }
it { is_expected.to include(build.token) }
it { is_expected.to include('gitlab-ci-token') }
it { is_expected.to include(project.web_url[7..-1]) }
it { is_expected.to include(project.web_url[7..]) }
end
context 'when token is empty' do

View File

@ -28,6 +28,7 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
it { is_expected.to have_many(:trigger_requests) }
it { is_expected.to have_many(:variables) }
it { is_expected.to have_many(:builds) }
it { is_expected.to have_many(:statuses_order_id_desc) }
it { is_expected.to have_many(:bridges) }
it { is_expected.to have_many(:job_artifacts).through(:builds) }
it { is_expected.to have_many(:auto_canceled_pipelines) }
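A sketch of the association shape this spec implies for `Ci::Pipeline`; the exact options in the real model may differ:

```ruby
# Assumed shape, inside app/models/ci/pipeline.rb:
has_many :statuses_order_id_desc,
         -> { order(id: :desc) },
         class_name: 'CommitStatus',
         foreign_key: :commit_id,
         inverse_of: :pipeline
```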

View File

@ -706,7 +706,7 @@ RSpec.describe Event do
describe '.for_wiki_meta' do
it 'finds events for a given wiki page metadata object' do
event = events.select(&:wiki_page?).first
event = events.find(&:wiki_page?)
expect(described_class.for_wiki_meta(event.target)).to contain_exactly(event)
end

View File

@ -330,6 +330,20 @@ RSpec.describe WebHook do
expect { hook.backoff! }.to change(hook, :backoff_count).by(1)
end
context 'when the hook is permanently disabled' do
before do
allow(hook).to receive(:permanently_disabled?).and_return(true)
end
it 'does not set disabled_until' do
expect { hook.backoff! }.not_to change(hook, :disabled_until)
end
it 'does not increment the backoff count' do
expect { hook.backoff! }.not_to change(hook, :backoff_count)
end
end
context 'when we have backed off MAX_FAILURES times' do
before do
stub_const("#{described_class}::MAX_FAILURES", 5)
@ -392,4 +406,77 @@ RSpec.describe WebHook do
end
end
end
describe '#temporarily_disabled?' do
it 'is false when not temporarily disabled' do
expect(hook).not_to be_temporarily_disabled
end
context 'when hook has been told to back off' do
before do
hook.backoff!
end
it 'is true' do
expect(hook).to be_temporarily_disabled
end
it 'is false when `web_hooks_disable_failed` flag is disabled' do
stub_feature_flags(web_hooks_disable_failed: false)
expect(hook).not_to be_temporarily_disabled
end
end
end
describe '#permanently_disabled?' do
it 'is false when not disabled' do
expect(hook).not_to be_permanently_disabled
end
context 'when hook has been disabled' do
before do
hook.disable!
end
it 'is true' do
expect(hook).to be_permanently_disabled
end
it 'is false when `web_hooks_disable_failed` flag is disabled' do
stub_feature_flags(web_hooks_disable_failed: false)
expect(hook).not_to be_permanently_disabled
end
end
end
describe '#rate_limited?' do
context 'when there are rate limits' do
before do
allow(hook).to receive(:rate_limit).and_return(3)
end
it 'is false when hook has not been rate limited' do
expect(Gitlab::ApplicationRateLimiter).to receive(:peek).and_return(false)
expect(hook).not_to be_rate_limited
end
it 'is true when hook has been rate limited' do
expect(Gitlab::ApplicationRateLimiter).to receive(:peek).and_return(true)
expect(hook).to be_rate_limited
end
end
context 'when there are no rate limits' do
before do
allow(hook).to receive(:rate_limit).and_return(nil)
end
it 'does not call Gitlab::ApplicationRateLimiter, and is false' do
expect(Gitlab::ApplicationRateLimiter).not_to receive(:peek)
expect(hook).not_to be_rate_limited
end
end
end
end

View File

@ -33,6 +33,7 @@ RSpec.describe API::Ci::Pipelines do
expect(json_response).to be_an Array
expect(json_response.first['sha']).to match(/\A\h{40}\z/)
expect(json_response.first['id']).to eq pipeline.id
expect(json_response.first['iid']).to eq pipeline.iid
expect(json_response.first['web_url']).to be_present
end
@ -40,7 +41,7 @@ RSpec.describe API::Ci::Pipelines do
it 'includes pipeline source' do
get api("/projects/#{project.id}/pipelines", user)
expect(json_response.first.keys).to contain_exactly(*%w[id project_id sha ref status web_url created_at updated_at source])
expect(json_response.first.keys).to contain_exactly(*%w[id iid project_id sha ref status web_url created_at updated_at source])
end
end

View File

@ -57,7 +57,7 @@ RSpec.describe SystemNotes::CommitService do
end
context 'with multiple existing commits' do
let(:old_commits) { noteable.commits[3..-1] }
let(:old_commits) { noteable.commits[3..] }
context 'with oldrev' do
let(:oldrev) { noteable.commits[2].id }

View File

@ -138,7 +138,7 @@ module GpgHelpers
end
def primary_keyid
fingerprint[-16..-1]
fingerprint[-16..]
end
def fingerprint
@ -281,7 +281,7 @@ module GpgHelpers
end
def primary_keyid2
fingerprint2[-16..-1]
fingerprint2[-16..]
end
def fingerprint2
@ -374,7 +374,7 @@ module GpgHelpers
end
def primary_keyid
fingerprint[-16..-1]
fingerprint[-16..]
end
def fingerprint
@ -776,7 +776,7 @@ module GpgHelpers
end
def primary_keyid
fingerprint[-16..-1]
fingerprint[-16..]
end
def fingerprint

View File

@ -23,7 +23,7 @@ module MemoryUsageHelper
output, status = Gitlab::Popen.popen(%w(free -m))
abort "`free -m` return code is #{status}: #{output}" unless status == 0
result = output.split("\n")[1].split(" ")[1..-1]
result = output.split("\n")[1].split(" ")[1..]
attrs = %i(m_total m_used m_free m_shared m_buffers_cache m_available).freeze
attrs.zip(result).to_h
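The parsing step in isolation: take the second row of `free -m` output, drop the row label, and zip the remaining numbers against the field names:

```ruby
output = "      total  used  free  shared  buff/cache  available\n" \
         "Mem:   7977  3015   512     330        4449       4257\n"

result = output.split("\n")[1].split(" ")[1..]
attrs  = %i(m_total m_used m_free m_shared m_buffers_cache m_available)
attrs.zip(result).to_h
# => {:m_total=>"7977", :m_used=>"3015", :m_free=>"512", ...}
```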

View File

@ -8,10 +8,16 @@ RSpec.shared_examples "redis_new_instance_shared_examples" do |name, fallback_cl
let(:fallback_config_file) { nil }
before do
fallback_class.remove_instance_variable(:@_raw_config) rescue nil
allow(fallback_class).to receive(:config_file_name).and_return(fallback_config_file)
end
include_examples "redis_shared_examples"
after do
fallback_class.remove_instance_variable(:@_raw_config) rescue nil
end
it_behaves_like "redis_shared_examples"
describe '.config_file_name' do
subject { described_class.config_file_name }