Add latest changes from gitlab-org/gitlab@master

commit 71d34aac9a (parent 90726a8ccc)
159 changed files with 968 additions and 459 deletions
@@ -108,8 +108,6 @@ rules:
 message: 'Migrate to GlSkeletonLoader, or import GlDeprecatedSkeletonLoading.'
 # See https://gitlab.com/gitlab-org/gitlab/-/issues/360551
 vue/multi-word-component-names: off
-unicorn/prefer-dom-node-dataset:
-  - error
 overrides:
 - files:
 - '{,ee/,jh/}spec/frontend*/**/*'
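Most of the frontend hunks below swap `HTMLElement.dataset` access for the equivalent `*Attribute` calls (which is why the `unicorn/prefer-dom-node-dataset` rule is dropped above). A minimal sketch of that mapping may help when reading them; it assumes a browser DOM, and the `data-kroki-processed` name is only an example borrowed from the first affected file:

```javascript
// Illustrative sketch of the dataset <-> attribute equivalence used throughout
// the changes below. The element and data-* names are examples only.
const el = document.createElement('div');

// Writing: `el.dataset.krokiProcessed = 'true'` sets the same underlying
// `data-kroki-processed` attribute as the call below.
el.setAttribute('data-kroki-processed', 'true');

// Reading: camelCase dataset keys map to kebab-case attribute names.
console.log(el.dataset.krokiProcessed === el.getAttribute('data-kroki-processed')); // true

// Presence check and removal pair up the same way.
console.log(el.hasAttribute('data-kroki-processed')); // true, like Object.hasOwn(el.dataset, 'krokiProcessed')
el.removeAttribute('data-kroki-processed'); // equivalent to `delete el.dataset.krokiProcessed`
```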
@@ -1,5 +1,5 @@
 .qa-job-base:
-image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images:debian-bullseye-ruby-2.7-bundler-2.3-git-2.33-lfs-2.9-chrome-99-docker-20.10.14-gcloud-383-kubectl-1.23
+image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/debian-bullseye-ruby-2.7:bundler-2.3-git-2.33-chrome-99
 extends:
 - .default-retry
 - .qa-cache
@@ -28,7 +28,7 @@ include:
 - .qa-cache
 - .test_variables
 - .bundler_variables
-image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images:debian-bullseye-ruby-2.7-bundler-2.3-git-2.33-lfs-2.9-chrome-99-docker-20.10.14-gcloud-383-kubectl-1.23
+image: ${REGISTRY_HOST}/${REGISTRY_GROUP}/gitlab-build-images/debian-bullseye-ruby-2.7:bundler-2.3-git-2.33-lfs-2.9-chrome-99-docker-20.10.14-gcloud-383-kubectl-1.23
 stage: qa
 needs:
 - review-deploy
@@ -50,7 +50,6 @@ include:
 --tag ~orchestrated \
 --tag ~transient \
 --tag ~skip_signup_disabled \
 --tag ~requires_git_protocol_v2 \
 --force-color \
 --order random \
 --format documentation \
@@ -90,7 +90,6 @@ Layout/SpaceInsideParens:
 - 'ee/spec/lib/gitlab/auth/otp/session_enforcer_spec.rb'
 - 'ee/spec/lib/gitlab/auth/smartcard/ldap_certificate_spec.rb'
 - 'ee/spec/lib/gitlab/auth_spec.rb'
 - 'ee/spec/lib/gitlab/ci/templates/cluster_image_scanning_gitlab_ci_yaml_spec.rb'
 - 'ee/spec/lib/gitlab/ci/templates/coverage_fuzzing_gitlab_ci_yaml_spec.rb'
 - 'ee/spec/lib/gitlab/ci/templates/dast_api_gitlab_ci_yaml_spec.rb'
 - 'ee/spec/lib/gitlab/ci/templates/dast_api_latest_gitlab_ci_yaml_spec.rb'
@@ -1 +1 @@
-14.10.0
+15.0.0
@@ -1 +1 @@
-14.1.1
+14.2.0
@@ -55,8 +55,8 @@ export function renderKroki(krokiImages) {

 // A single Kroki image is processed multiple times for some reason,
 // so this condition ensures we only create one alert per Kroki image
-if (!Object.hasOwn(parent.dataset, 'krokiProcessed')) {
-parent.dataset.krokiProcessed = 'true';
+if (!parent.hasAttribute('data-kroki-processed')) {
+parent.setAttribute('data-kroki-processed', 'true');
 parent.after(createAlert(krokiImage));
 }
 });
@@ -110,7 +110,7 @@ class SafeMathRenderer {

 try {
 displayContainer.innerHTML = this.katex.renderToString(text, {
-displayMode: el.dataset.mathStyle === 'display',
+displayMode: el.getAttribute('data-math-style') === 'display',
 throwOnError: true,
 maxSize: 20,
 maxExpand: 20,
@@ -143,7 +143,7 @@ class SafeMathRenderer {
 this.elements.forEach((el) => {
 const placeholder = document.createElement('span');
 placeholder.style.display = 'none';
-placeholder.dataset.mathStyle = el.dataset.mathStyle;
+placeholder.setAttribute('data-math-style', el.getAttribute('data-math-style'));
 placeholder.textContent = el.textContent;
 el.parentNode.replaceChild(placeholder, el);
 this.queue.push(placeholder);
@@ -9,11 +9,10 @@ const updateLineNumbersOnBlobPermalinks = (linksToUpdate) => {

 [].concat(Array.prototype.slice.call(linksToUpdate)).forEach((permalinkButton) => {
 const baseHref =
-permalinkButton.dataset.originalHref ||
+permalinkButton.getAttribute('data-original-href') ||
 (() => {
 const href = permalinkButton.getAttribute('href');
-// eslint-disable-next-line no-param-reassign
-permalinkButton.dataset.originalHref = href;
+permalinkButton.setAttribute('data-original-href', href);
 return href;
 })();
 permalinkButton.setAttribute('href', `${baseHref}${hashUrlString}`);
@@ -132,7 +132,7 @@ export default {
 <gl-button
 ref="goToPipelines"
 :href="goToPipelinesPath"
-variant="success"
+variant="confirm"
 :data-track-property="humanAccess"
 :data-track-value="$options.goToTrackValuePipelines"
 :data-track-action="$options.trackEvent"
@@ -36,19 +36,19 @@ const loadRichBlobViewer = (type) => {

 const loadViewer = (viewerParam) => {
 const viewer = viewerParam;
-const { url } = viewer.dataset;
+const url = viewer.getAttribute('data-url');

-if (!url || viewer.dataset.loaded || viewer.dataset.loading) {
+if (!url || viewer.getAttribute('data-loaded') || viewer.getAttribute('data-loading')) {
 return Promise.resolve(viewer);
 }

-viewer.dataset.loading = 'true';
+viewer.setAttribute('data-loading', 'true');

 return axios.get(url).then(({ data }) => {
 viewer.innerHTML = data.html;

 window.requestIdleCallback(() => {
-delete viewer.dataset.loading;
+viewer.removeAttribute('data-loading');
 });

 return viewer;
@@ -108,7 +108,7 @@ export class BlobViewer {

 switchToInitialViewer() {
 const initialViewer = this.$fileHolder[0].querySelector('.blob-viewer:not(.hidden)');
-let initialViewerName = initialViewer.dataset.type;
+let initialViewerName = initialViewer.getAttribute('data-type');

 if (this.switcher && window.location.hash.indexOf('#L') === 0) {
 initialViewerName = 'simple';
@@ -138,12 +138,12 @@ export class BlobViewer {

 e.preventDefault();

-this.switchToViewer(target.dataset.viewer);
+this.switchToViewer(target.getAttribute('data-viewer'));
 }

 toggleCopyButtonState() {
 if (!this.copySourceBtn) return;
-if (this.simpleViewer.dataset.loaded) {
+if (this.simpleViewer.getAttribute('data-loaded')) {
 this.copySourceBtnTooltip.setAttribute('title', __('Copy file contents'));
 this.copySourceBtn.classList.remove('disabled');
 } else if (this.activeViewer === this.simpleViewer) {
@@ -199,8 +199,7 @@ export class BlobViewer {
 this.$fileHolder.trigger('highlight:line');
 handleLocationHash();

-// eslint-disable-next-line no-param-reassign
-viewer.dataset.loaded = 'true';
+viewer.setAttribute('data-loaded', 'true');
 this.toggleCopyButtonState();
 eventHub.$emit('showBlobInteractionZones', viewer.dataset.path);
 });
@@ -5,7 +5,7 @@ export const addTooltipToEl = (el) => {

 if (textEl && textEl.scrollWidth > textEl.offsetWidth) {
 el.setAttribute('title', el.textContent);
-el.dataset.container = 'body';
+el.setAttribute('data-container', 'body');
 el.classList.add('has-tooltip');
 }
 };
@@ -32,8 +32,8 @@ export const addInteractionClass = ({ path, d, wrapTextNodes }) => {
 });

 if (el && !isTextNode(el)) {
-el.dataset.charIndex = d.start_char;
-el.dataset.lineIndex = d.start_line;
+el.setAttribute('data-char-index', d.start_char);
+el.setAttribute('data-line-index', d.start_line);
 el.classList.add('cursor-pointer', 'code-navigation', 'js-code-navigation');
 el.closest('.line').classList.add('code-navigation-line');
 }
@@ -107,10 +107,10 @@ function createLink(data, selected, options, index) {
 }

 if (options.trackSuggestionClickedLabel) {
-link.dataset.trackAction = 'click_text';
-link.dataset.trackLabel = options.trackSuggestionClickedLabel;
-link.dataset.trackValue = index;
-link.dataset.trackProperty = slugify(data.category || 'no-category');
+link.setAttribute('data-track-action', 'click_text');
+link.setAttribute('data-track-label', options.trackSuggestionClickedLabel);
+link.setAttribute('data-track-value', index);
+link.setAttribute('data-track-property', slugify(data.category || 'no-category'));
 }

 link.classList.toggle('is-active', selected);
@@ -26,7 +26,7 @@ export default class Diff {
 FilesCommentButton.init($diffFile);

 const firstFile = $('.files').first().get(0);
-const canCreateNote = firstFile && Object.hasOwn(firstFile.dataset, 'canCreateNote');
+const canCreateNote = firstFile && firstFile.hasAttribute('data-can-create-note');
 $diffFile.each((index, file) => initImageDiffHelper.initImageDiff(file, canCreateNote));

 if (!isBound) {
@@ -197,10 +197,10 @@ export default class AvailableDropdownMappings {
 }

 getGroupId() {
-return this.filteredSearchInput.dataset.groupId || '';
+return this.filteredSearchInput.getAttribute('data-group-id') || '';
 }

 getProjectId() {
-return this.filteredSearchInput.dataset.projectId || '';
+return this.filteredSearchInput.getAttribute('data-project-id') || '';
 }
 }
@@ -25,9 +25,9 @@ export default class DropdownHint extends FilteredSearchDropdown {
 const { selected } = e.detail;

 if (selected.tagName === 'LI') {
-if (Object.hasOwn(selected.dataset, 'value')) {
+if (selected.hasAttribute('data-value')) {
 this.dismissDropdown();
-} else if (selected.dataset.action === 'submit') {
+} else if (selected.getAttribute('data-action') === 'submit') {
 this.dismissDropdown();
 this.dispatchFormSubmitEvent();
 } else {
@@ -23,7 +23,7 @@ export default class DropdownOperator extends FilteredSearchDropdown {
 const { selected } = e.detail;

 if (selected.tagName === 'LI') {
-if (Object.hasOwn(selected.dataset, 'value')) {
+if (selected.hasAttribute('data-value')) {
 const name = FilteredSearchVisualTokens.getLastTokenPartial();
 const operator = selected.dataset.value;

@@ -31,11 +31,11 @@ export default class DropdownUser extends DropdownAjaxFilter {
 }

 getGroupId() {
-return this.input.dataset.groupId;
+return this.input.getAttribute('data-group-id');
 }

 getProjectId() {
-return this.input.dataset.projectId;
+return this.input.getAttribute('data-project-id');
 }

 projectOrGroupId() {
@@ -87,7 +87,6 @@ export default class DropdownUtils {
 }

 static setDataValueIfSelected(filter, operator, selected) {
-// eslint-disable-next-line unicorn/prefer-dom-node-dataset
 const dataValue = selected.getAttribute('data-value');

 if (dataValue) {
@@ -97,7 +96,6 @@ export default class DropdownUtils {
 tokenValue: dataValue,
 clicked: true,
 options: {
-// eslint-disable-next-line unicorn/prefer-dom-node-dataset
 capitalizeTokenValue: selected.hasAttribute('data-capitalize'),
 },
 });
@@ -165,8 +165,8 @@ class DropDown {
 images.forEach((image) => {
 const img = image;

-img.src = img.dataset.src;
-delete img.dataset.src;
+img.src = img.getAttribute('data-src');
+img.removeAttribute('data-src');
 });
 }
 }
@@ -814,7 +814,7 @@ export default class FilteredSearchManager {
 getUsernameParams() {
 const usernamesById = {};
 try {
-const attribute = this.filteredSearchInput.dataset.usernameParams;
+const attribute = this.filteredSearchInput.getAttribute('data-username-params');
 JSON.parse(attribute).forEach((user) => {
 usernamesById[user.id] = user.username;
 });
@@ -6,7 +6,7 @@ export function setPositionDataAttribute(el, options) {

 const positionObject = { ...JSON.parse(position), x, y, width, height };

-el.dataset.position = JSON.stringify(positionObject);
+el.setAttribute('data-position', JSON.stringify(positionObject));
 }

 export function updateDiscussionAvatarBadgeNumber(discussionEl, newBadgeNumber) {
@@ -81,7 +81,10 @@ export default class CreateMergeRequestDropdown {
 this.init();

 if (isConfidentialIssue()) {
-this.createMergeRequestButton.dataset.dropdownTrigger = '#create-merge-request-dropdown';
+this.createMergeRequestButton.setAttribute(
+'data-dropdown-trigger',
+'#create-merge-request-dropdown',
+);
 initConfidentialMergeRequest();
 }
 }
@@ -270,7 +270,7 @@ export default {
 },
 setActiveTask(el) {
 const { parentElement } = el;
-const lineNumbers = parentElement.dataset.sourcepos.match(/\b\d+(?=:)/g);
+const lineNumbers = parentElement.getAttribute('data-sourcepos').match(/\b\d+(?=:)/g);
 this.activeTask = {
 title: parentElement.innerText,
 lineNumberStart: lineNumbers[0],
@@ -127,7 +127,7 @@ export default class LazyLoader {

 // Loading Images which are in the current viewport or close to them
 this.lazyImages = this.lazyImages.filter((selectedImage) => {
-if (selectedImage.dataset.src) {
+if (selectedImage.getAttribute('data-src')) {
 const imgBoundRect = selectedImage.getBoundingClientRect();
 const imgTop = scrollTop + imgBoundRect.top;
 const imgBound = imgTop + imgBoundRect.height;
@@ -156,17 +156,16 @@ export default class LazyLoader {
 }

 static loadImage(img) {
-if (img.dataset.src) {
+if (img.getAttribute('data-src')) {
 img.setAttribute('loading', 'lazy');
-let imgUrl = img.dataset.src;
+let imgUrl = img.getAttribute('data-src');
 // Only adding width + height for avatars for now
 if (imgUrl.indexOf('/avatar/') > -1 && imgUrl.indexOf('?') === -1) {
 const targetWidth = img.getAttribute('width') || img.width;
 imgUrl += `?width=${targetWidth}`;
 }
 img.setAttribute('src', imgUrl);
-// eslint-disable-next-line no-param-reassign
-delete img.dataset.src;
+img.removeAttribute('data-src');
 img.classList.remove('lazy');
 img.classList.add('js-lazy-loaded');
 img.classList.add('qa-js-lazy-loaded');
@@ -52,7 +52,7 @@ export function confirmAction(
 export function confirmViaGlModal(message, element) {
 const primaryBtnConfig = {};

-const { confirmBtnVariant } = element.dataset;
+const confirmBtnVariant = element.getAttribute('data-confirm-btn-variant');

 if (confirmBtnVariant) {
 primaryBtnConfig.primaryBtnVariant = confirmBtnVariant;
@@ -41,7 +41,7 @@ export default {
 const dropdownToggle = this.$refs.glDropdown.$el.querySelector('.dropdown-toggle');

 if (dropdownToggle) {
-dropdownToggle.dataset.qaSelector = 'access_level_dropdown';
+dropdownToggle.setAttribute('data-qa-selector', 'access_level_dropdown');
 }
 },
 methods: {
@@ -1,6 +1,6 @@
 function onSidebarLinkClick() {
 const setDataTrackAction = (element, action) => {
-element.dataset.trackAction = action;
+element.setAttribute('data-track-action', action);
 };

 const setDataTrackExtra = (element, value) => {
@@ -12,10 +12,10 @@ function onSidebarLinkClick() {
 ? SIDEBAR_COLLAPSED
 : SIDEBAR_EXPANDED;

-element.dataset.trackExtra = JSON.stringify({
-sidebar_display: sidebarCollapsed,
-menu_display: value,
-});
+element.setAttribute(
+'data-track-extra',
+JSON.stringify({ sidebar_display: sidebarCollapsed, menu_display: value }),
+);
 };

 const EXPANDED = 'Expanded';
@@ -298,7 +298,7 @@ export default class ActivityCalendar {
 .querySelector(this.activitiesContainer)
 .querySelectorAll('.js-localtime')
 .forEach((el) => {
-el.setAttribute('title', formatDate(el.dataset.datetime));
+el.setAttribute('title', formatDate(el.getAttribute('data-datetime')));
 });
 })
 .catch(() =>
@@ -24,7 +24,7 @@ export default {
 directives: {
 GlTooltip: GlTooltipDirective,
 },
-inject: ['ciConfigPath'],
+inject: ['ciConfigPath', 'includesHelpPagePath'],
 props: {
 includes: {
 type: Array,
@@ -61,7 +61,14 @@ export default {
 <span data-testid="current-config-filename">{{ ciConfigPath }}</span>
 </span>
 </div>
-<gl-alert v-if="showTip" variant="tip" :title="$options.i18n.tipTitle" @dismiss="dismissTip">
+<gl-alert
+v-if="showTip"
+variant="tip"
+:title="$options.i18n.tipTitle"
+:secondary-button-text="$options.i18n.tipBtn"
+:secondary-button-link="includesHelpPagePath"
+@dismiss="dismissTip"
+>
 {{ $options.i18n.tipDescription }}
 </gl-alert>
 <div class="gl-overflow-y-auto">
@@ -1,6 +1,6 @@
 <script>
-import { GlPopover, GlOutsideDirective as Outside } from '@gitlab/ui';
-import { s__, __ } from '~/locale';
+import { GlLink, GlPopover, GlOutsideDirective as Outside, GlSprintf } from '@gitlab/ui';
+import { s__ } from '~/locale';
 import { FILE_TREE_POPOVER_DISMISSED_KEY } from '../../constants';

 export default {
@@ -8,13 +8,15 @@ export default {
 directives: { Outside },
 i18n: {
 description: s__(
-'pipelineEditorWalkthrough|You can use the file tree to view your pipeline configuration files.',
+'pipelineEditorWalkthrough|You can use the file tree to view your pipeline configuration files. %{linkStart}Learn more%{linkEnd}',
 ),
-learnMore: __('Learn more'),
 },
 components: {
+GlLink,
 GlPopover,
+GlSprintf,
 },
+inject: ['includesHelpPagePath'],
 data() {
 return {
 showPopover: false,
@@ -46,8 +48,12 @@ export default {
 data-qa-selector="file_tree_popover"
 @close-button-clicked="dismissPermanently"
 >
-<div v-outside="closePopover" class="gl-display-flex gl-flex-direction-column">
-<p class="gl-font-base">{{ $options.i18n.description }}</p>
+<div v-outside="closePopover" class="gl-font-base gl-mb-3">
+<gl-sprintf :message="$options.i18n.description">
+<template #link="{ content }">
+<gl-link :href="includesHelpPagePath" target="_blank">{{ content }}</gl-link>
+</template>
+</gl-sprintf>
 </div>
 </gl-popover>
 </template>
@@ -30,6 +30,7 @@ export const initPipelineEditor = (selector = '#js-pipeline-editor') => {
 defaultBranch,
 emptyStateIllustrationPath,
 helpPaths,
+includesHelpPagePath,
 lintHelpPagePath,
 lintUnavailableHelpPagePath,
 needsHelpPagePath,
@@ -118,6 +119,7 @@ export const initPipelineEditor = (selector = '#js-pipeline-editor') => {
 defaultBranch,
 emptyStateIllustrationPath,
 helpPaths,
+includesHelpPagePath,
 lintHelpPagePath,
 lintUnavailableHelpPagePath,
 needsHelpPagePath,
@@ -57,7 +57,7 @@ export default {

 if (authorParam) {
 commitsSearchInput.setAttribute('disabled', true);
-commitsSearchInput.dataset.toggle = 'tooltip';
+commitsSearchInput.setAttribute('data-toggle', 'tooltip');
 commitsSearchInput.setAttribute('title', tooltipMessage);
 this.currentAuthor = authorParam;
 }
@@ -119,7 +119,7 @@ function mountAssigneesComponentDeprecated(mediator) {
 issuableIid: String(iid),
 projectPath: fullPath,
 field: el.dataset.field,
-signedIn: Object.hasOwn(el.dataset, 'signedIn'),
+signedIn: el.hasAttribute('data-signed-in'),
 issuableType:
 isInIssuePage() || isInIncidentPage() || isInDesignPage()
 ? IssuableType.Issue
@@ -149,7 +149,7 @@ function mountAssigneesComponent() {
 },
 provide: {
 canUpdate: editable,
-directlyInviteMembers: Object.hasOwn(el.dataset, 'directlyInviteMembers'),
+directlyInviteMembers: el.hasAttribute('data-directly-invite-members'),
 },
 render: (createElement) =>
 createElement('sidebar-assignees-widget', {
@@ -39,7 +39,7 @@ export default () => {
 props: {
 emptyStateImage,
 projectPath,
-terraformAdmin: Object.hasOwn(el.dataset, 'terraformAdmin'),
+terraformAdmin: el.hasAttribute('data-terraform-admin'),
 },
 });
 },
@@ -33,7 +33,7 @@ export default {
 this.fetchFreshItems();

 const body = document.querySelector('body');
-const { namespaceId } = body.dataset;
+const namespaceId = body.getAttribute('data-namespace-id');

 this.track('click_whats_new_drawer', { label: 'namespace_id', value: namespaceId });
 },
@@ -1,6 +1,6 @@
 export const STORAGE_KEY = 'display-whats-new-notification';

-export const getVersionDigest = (appEl) => appEl.dataset.versionDigest;
+export const getVersionDigest = (appEl) => appEl.getAttribute('data-version-digest');

 export const setNotification = (appEl) => {
 const versionDigest = getVersionDigest(appEl);
@@ -68,7 +68,7 @@ class Admin::SessionsController < ApplicationController

 def valid_otp_attempt?(user)
 otp_validation_result =
-::Users::ValidateOtpService.new(user).execute(user_params[:otp_attempt])
+::Users::ValidateManualOtpService.new(user).execute(user_params[:otp_attempt])
 valid_otp_attempt = otp_validation_result[:status] == :success

 return valid_otp_attempt if Gitlab::Database.read_only?
@@ -44,7 +44,7 @@ class Profiles::TwoFactorAuthsController < Profiles::ApplicationController

 def create
 otp_validation_result =
-::Users::ValidateOtpService.new(current_user).execute(params[:pin_code])
+::Users::ValidateManualOtpService.new(current_user).execute(params[:pin_code])

 if otp_validation_result[:status] == :success
 ActiveSession.destroy_all_but_current(current_user, session)
@@ -271,7 +271,7 @@ class SessionsController < Devise::SessionsController

 def valid_otp_attempt?(user)
 otp_validation_result =
-::Users::ValidateOtpService.new(user).execute(user_params[:otp_attempt])
+::Users::ValidateManualOtpService.new(user).execute(user_params[:otp_attempt])
 return true if otp_validation_result[:status] == :success

 user.invalidate_otp_backup_code!(user_params[:otp_attempt])
@@ -21,6 +21,7 @@ module Ci
 "default-branch" => project.default_branch_or_main,
 "empty-state-illustration-path" => image_path('illustrations/empty-state/empty-dag-md.svg'),
 "initial-branch-name" => initial_branch,
+"includes-help-page-path" => help_page_path('ci/yaml/includes'),
 "lint-help-page-path" => help_page_path('ci/lint', anchor: 'check-cicd-syntax'),
 "lint-unavailable-help-page-path" => help_page_path('ci/pipeline_editor/index', anchor: 'configuration-validation-currently-not-available-message'),
 "needs-help-page-path" => help_page_path('ci/yaml/index', anchor: 'needs'),
@@ -119,7 +119,7 @@ module AlertManagement
 end
 end

-def self.find_ongoing_alert(project, fingerprint)
+def self.find_unresolved_alert(project, fingerprint)
 for_fingerprint(project, fingerprint).not_resolved.take
 end

@@ -49,7 +49,8 @@ module Users
 storage_enforcement_banner_fourth_enforcement_threshold: 46,
 attention_requests_top_nav: 47,
 attention_requests_side_nav: 48,
-minute_limit_banner: 49
+minute_limit_banner: 49,
+preview_user_over_limit_free_plan_alert: 50 # EE-only
 }

 validates :feature_name,
@@ -162,8 +162,9 @@ module AlertManagement
 end

 def filter_duplicate
-# Only need to check if changing to an open status
-return unless params[:status_event] && AlertManagement::Alert.open_status?(status)
+# Only need to check if changing to a not-resolved status
+return if params[:status_event].blank? || params[:status_event] == :resolve
+return unless alert.resolved?

 param_errors << unresolved_alert_error if duplicate_alert?
 end
@@ -171,24 +172,23 @@ module AlertManagement
 def duplicate_alert?
 return if alert.fingerprint.blank?

-open_alerts.any? && open_alerts.exclude?(alert)
+unresolved_alert.present?
 end

-def open_alerts
-strong_memoize(:open_alerts) do
-AlertManagement::Alert.for_fingerprint(project, alert.fingerprint).open
+def unresolved_alert
+strong_memoize(:unresolved_alert) do
+AlertManagement::Alert.find_unresolved_alert(project, alert.fingerprint)
 end
 end

 def unresolved_alert_error
 _('An %{link_start}alert%{link_end} with the same fingerprint is already open. ' \
 'To change the status of this alert, resolve the linked alert.'
-) % open_alert_url_params
+) % unresolved_alert_url_params
 end

-def open_alert_url_params
-open_alert = open_alerts.first
-alert_path = Gitlab::Routing.url_helpers.details_project_alert_management_path(project, open_alert)
+def unresolved_alert_url_params
+alert_path = Gitlab::Routing.url_helpers.details_project_alert_management_path(project, unresolved_alert)

 {
 link_start: '<a href="%{url}">'.html_safe % { url: alert_path },
@@ -104,7 +104,7 @@ module AlertManagement
 def find_existing_alert
 return unless incoming_payload.gitlab_fingerprint

-AlertManagement::Alert.find_ongoing_alert(project, incoming_payload.gitlab_fingerprint)
+AlertManagement::Alert.find_unresolved_alert(project, incoming_payload.gitlab_fingerprint)
 end

 def build_new_alert
@@ -1,13 +1,13 @@
 # frozen_string_literal: true

 module Users
-class ValidateOtpService < BaseService
+class ValidateManualOtpService < BaseService
 include ::Gitlab::Auth::Otp::Fortinet

 def initialize(current_user)
 @current_user = current_user
 @strategy = if forti_authenticator_enabled?(current_user)
-::Gitlab::Auth::Otp::Strategies::FortiAuthenticator.new(current_user)
+::Gitlab::Auth::Otp::Strategies::FortiAuthenticator::ManualOtp.new(current_user)
 elsif forti_token_cloud_enabled?(current_user)
 ::Gitlab::Auth::Otp::Strategies::FortiTokenCloud.new(current_user)
 else
@@ -19,7 +19,7 @@ module Users
 strategy.validate(otp_code)
 rescue StandardError => ex
 Gitlab::ErrorTracking.log_exception(ex)
-error(message: ex.message)
+error(ex.message)
 end

 private
app/services/users/validate_push_otp_service.rb (new file, 25 lines)
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Users
+class ValidatePushOtpService < BaseService
+include ::Gitlab::Auth::Otp::Fortinet
+
+def initialize(current_user)
+@current_user = current_user
+@strategy = if forti_authenticator_enabled?(current_user)
+::Gitlab::Auth::Otp::Strategies::FortiAuthenticator::PushOtp.new(current_user)
+end
+end
+
+def execute
+strategy.validate
+rescue StandardError => ex
+Gitlab::ErrorTracking.log_exception(ex)
+error(ex.message)
+end
+
+private
+
+attr_reader :strategy
+end
+end
@@ -0,0 +1,8 @@
+---
+name: namespace_storage_limit_bypass_date_check
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/86794
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/361785
+milestone: '15.0'
+type: development
+group: group::utilization
+default_enabled: false
@@ -6,7 +6,9 @@ product_stage: ''
 product_group: ''
 product_category: ''
 value_type: number
-status: active
+status: removed
+milestone_removed: '15.0'
+removed_by_url: 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87000'
 milestone: '14.3'
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69204
 time_frame: 28d
@@ -6,7 +6,9 @@ product_stage: ''
 product_group: ''
 product_category: ''
 value_type: number
-status: active
+status: removed
+milestone_removed: '15.0'
+removed_by_url: 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87000'
 milestone: '14.3'
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69204
 time_frame: 28d
@@ -6,7 +6,9 @@ product_stage: ''
 product_group: ''
 product_category: ''
 value_type: number
-status: active
+status: removed
+milestone_removed: '15.0'
+removed_by_url: 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87000'
 milestone: '14.3'
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69204
 time_frame: 7d
@@ -6,7 +6,9 @@ product_stage: ''
 product_group: ''
 product_category: ''
 value_type: number
-status: active
+status: removed
+milestone_removed: '15.0'
+removed_by_url: 'https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87000'
 milestone: '14.3'
 introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69204
 time_frame: 7d
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+class ScheduleExpireOAuthTokens < Gitlab::Database::Migration[2.0]
+MIGRATION = 'ExpireOAuthTokens'
+INTERVAL = 2.minutes.freeze
+
+disable_ddl_transaction!
+
+restrict_gitlab_migration gitlab_schema: :gitlab_main
+
+def up
+queue_batched_background_migration(
+MIGRATION,
+:oauth_access_tokens,
+:id,
+job_interval: INTERVAL
+)
+end
+
+def down
+Gitlab::Database::BackgroundMigration::BatchedMigration
+.for_configuration(MIGRATION, :oauth_access_tokens, :id, [])
+.delete_all
+end
+end
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+class UpdateIndexOnAlertsToExcludeNullFingerprints < Gitlab::Database::Migration[2.0]
+disable_ddl_transaction!
+
+OLD_INDEX_NAME = 'index_partial_am_alerts_on_project_id_and_fingerprint'
+NEW_INDEX_NAME = 'index_unresolved_alerts_on_project_id_and_fingerprint'
+
+def up
+add_concurrent_index :alert_management_alerts,
+[:project_id, :fingerprint],
+where: "fingerprint IS NOT NULL and status <> 2",
+name: NEW_INDEX_NAME,
+unique: true
+
+remove_concurrent_index_by_name :alert_management_alerts, OLD_INDEX_NAME
+end
+
+def down
+add_concurrent_index :alert_management_alerts,
+[:project_id, :fingerprint],
+where: "status <> 2",
+name: OLD_INDEX_NAME,
+unique: true
+
+remove_concurrent_index_by_name :alert_management_alerts, NEW_INDEX_NAME
+end
+end
db/schema_migrations/20220428133724 (new file, 1 line)
@@ -0,0 +1 @@
+9daf43ece9531540c942ade3939bf86f332701af8a9a0e50bd8ef3b90322baa2
db/schema_migrations/20220505174658 (new file, 1 line)
@@ -0,0 +1 @@
+57dd9ed105c0380b660e8cc450848b8662bf6a41e47cbfac1bcc061934cbc24c
@@ -28679,8 +28679,6 @@ CREATE INDEX index_pages_domains_on_verified_at_and_enabled_until ON pages_domai

 CREATE INDEX index_pages_domains_on_wildcard ON pages_domains USING btree (wildcard);

-CREATE UNIQUE INDEX index_partial_am_alerts_on_project_id_and_fingerprint ON alert_management_alerts USING btree (project_id, fingerprint) WHERE (status <> 2);
-
 CREATE INDEX index_partial_ci_builds_on_user_id_name_parser_features ON ci_builds USING btree (user_id, name) WHERE (((type)::text = 'Ci::Build'::text) AND ((name)::text = ANY (ARRAY[('container_scanning'::character varying)::text, ('dast'::character varying)::text, ('dependency_scanning'::character varying)::text, ('license_management'::character varying)::text, ('license_scanning'::character varying)::text, ('sast'::character varying)::text, ('coverage_fuzzing'::character varying)::text, ('secret_detection'::character varying)::text])));

 CREATE INDEX index_pat_on_user_id_and_expires_at ON personal_access_tokens USING btree (user_id, expires_at);
@@ -29367,6 +29365,8 @@ CREATE INDEX index_unit_test_failures_failed_at ON ci_unit_test_failures USING b

 CREATE UNIQUE INDEX index_unit_test_failures_unique_columns ON ci_unit_test_failures USING btree (unit_test_id, failed_at DESC, build_id);

+CREATE UNIQUE INDEX index_unresolved_alerts_on_project_id_and_fingerprint ON alert_management_alerts USING btree (project_id, fingerprint) WHERE ((fingerprint IS NOT NULL) AND (status <> 2));
+
 CREATE UNIQUE INDEX index_upcoming_reconciliations_on_namespace_id ON upcoming_reconciliations USING btree (namespace_id);

 CREATE INDEX index_upload_states_failed_verification ON upload_states USING btree (verification_retry_at NULLS FIRST) WHERE (verification_state = 3);
@@ -11,4 +11,4 @@ level: warning
 scope: raw
 ignorecase: true
 raw:
-- '\w*\(s\)(?<!http\(s\))'
+- '\b\w+\(s\)(?<!http\(s\))'
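The regex tightening above is easier to see with a quick check: `\w*` can match the empty string, so the old pattern also flagged a bare `(s)` with no word attached, while `\b\w+\(s\)` requires a word directly before the parenthesis. A small sketch follows; Vale evaluates the pattern with its own engine, and JavaScript `RegExp` is used here purely for illustration:

```javascript
// Illustrative only: shows what each Vale pattern would match.
const oldRule = /\w*\(s\)(?<!http\(s\))/;
const newRule = /\b\w+\(s\)(?<!http\(s\))/;

console.log(oldRule.test('choose the option(s) you need')); // true
console.log(newRule.test('choose the option(s) you need')); // true

console.log(oldRule.test('a bare (s) with no word attached')); // true  (\w* matches empty)
console.log(newRule.test('a bare (s) with no word attached')); // false (\b\w+ needs a word)

console.log(newRule.test('use http(s) for the protocol')); // false (lookbehind excludes http(s))
```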
@@ -44,7 +44,7 @@ The following table outlines failure modes and mitigation paths for the product
 | ----------- | -------------------------- | ----------------------------- | ---------------------------------- | ----- |
 | Single Gitaly Node | Downtime - Must restore from backup | Downtime - Must restore from Backup | Downtime - Must wait for outage to end | |
 | Single Gitaly Node + Geo Secondary | Downtime - Must restore from backup, can perform a manual failover to secondary | Downtime - Must restore from Backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
-| Sharded Gitaly Install | Partial Downtime - Only repos on impacted node affected, must restore from backup | Partial Downtime - Only repos on impacted node affected, must restore from backup | Downtime - Must wait for outage to end | |
-| Sharded Gitaly Install + Geo Secondary | Partial Downtime - Only repos on impacted node affected, must restore from backup, could perform manual failover to secondary for impacted repos | Partial Downtime - Only repos on impacted node affected, must restore from backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
+| Sharded Gitaly Install | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Partial Downtime - Only repositories on impacted node affected, must restore from backup | Downtime - Must wait for outage to end | |
+| Sharded Gitaly Install + Geo Secondary | Partial Downtime - Only repositories on impacted node affected, must restore from backup, could perform manual failover to secondary for impacted repositories | Partial Downtime - Only repositories on impacted node affected, must restore from backup, errors could have propagated to secondary | Manual intervention - failover to Geo secondary | |
 | Gitaly Cluster Install* | No Downtime - will swap repository primary to another node after 10 seconds | N/A - All writes are voted on by multiple Gitaly Cluster nodes | Downtime - Must wait for outage to end | Snapshot backups for Gitaly Cluster nodes not supported at this time |
 | Gitaly Cluster Install* + Geo Secondary | No Downtime - will swap repository primary to another node after 10 seconds | N/A - All writes are voted on by multiple Gitaly Cluster nodes | Manual intervention - failover to Geo secondary | Snapshot backups for Gitaly Cluster nodes not supported at this time |
@@ -643,7 +643,7 @@ avoid a split-brain situation where writes can occur in two different GitLab
 instances, complicating recovery efforts. So to prepare for the failover, you
 must disable the **primary** site:

-- If you have access to the **primary** Kubernetes cluster, connect to it and disable the GitLab webservice and Sidekiq pods:
+- If you have access to the **primary** Kubernetes cluster, connect to it and disable the GitLab `webservice` and `Sidekiq` pods:

 ```shell
 kubectl --namespace gitlab scale deploy gitlab-geo-webservice-default --replicas=0
@@ -119,7 +119,7 @@ NOTE:
 [NFS](../../nfs.md) can be used in place of Gitaly but is not
 recommended.

-### Step 2: Configure Postgres streaming replication
+### Step 2: Configure PostgreSQL streaming replication

 Follow the [Geo database replication instructions](../setup/database.md).

@@ -261,7 +261,7 @@ nodes connect to the databases.
 NOTE:
 Make sure that current node's IP is listed in
 `postgresql['md5_auth_cidr_addresses']` setting of the read-replica database to
-allow Rails on this node to connect to Postgres.
+allow Rails on this node to connect to PostgreSQL.

 After making these changes [Reconfigure GitLab](../../restart_gitlab.md#omnibus-gitlab-reconfigure) so the changes take effect.

@@ -833,7 +833,7 @@ especially for large repositories.

 Control groups (cgroups) in Linux allow limits to be imposed on how much memory and CPU can be consumed.
 See the [`cgroups` Linux man page](https://man7.org/linux/man-pages/man7/cgroups.7.html) for more information.
-cgroups can be useful for protecting the system against resource exhaustion because of overcomsumption of memory and CPU.
+cgroups can be useful for protecting the system against resource exhaustion because of overconsumption of memory and CPU.

 Gitaly has built-in cgroups control. When configured, Gitaly assigns Git
 processes to a cgroup based on the repository the Git command is operating in.
@@ -915,7 +915,7 @@ gitaly['cgroups_repositories_cpu_shares'] => 512
 when Gitaly starts.
 - `cgroups_memory_bytes` is the total memory limit that is imposed collectively on all
 Git processes that Gitaly spawns. 0 implies no limit.
-- `cgroups_cpu_shares` is the cpu limit that is imposed collectively on all Git
+- `cgroups_cpu_shares` is the CPU limit that is imposed collectively on all Git
 processes that Gitaly spawns. 0 implies no limit. The maximum is 1024 shares,
 which represents 100% of CPU.
 - `cgroups_repositories_count` is the number of cgroups in the cgroups pool. Each time a new Git
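The share arithmetic the docs hunk above describes is simple enough to sketch; the 512 value mirrors the example setting shown in this hunk and is otherwise hypothetical:

```javascript
// cgroups CPU shares are relative to a maximum of 1024, which represents 100% of CPU.
const maxShares = 1024;
const repoShares = 512; // e.g. gitaly['cgroups_repositories_cpu_shares'] => 512

console.log(`${(repoShares / maxShares) * 100}% of CPU`); // "50% of CPU"
```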
@@ -14,7 +14,7 @@ These packages are produced by the GitLab CI process, as found in the [Omnibus

 ## GnuPG Public Keys

-All packages are signed with [GnuPG](https://www.gnupg.org/), in a method appropriate for their format. The key used to sign these packages can be found on [pgp.mit.edu](https://pgp.mit.edu) at [0x3cfcf9baf27eab47](https://pgp.mit.edu/pks/lookup?op=vindex&search=0x3CFCF9BAF27EAB47)
+All packages are signed with [GnuPG](https://www.gnupg.org/), in a method appropriate for their format. The key used to sign these packages can be found on [MIT PGP Public Key Server](https://pgp.mit.edu) at [0x3cfcf9baf27eab47](https://pgp.mit.edu/pks/lookup?op=vindex&search=0x3CFCF9BAF27EAB47)

 ## Verifying Signatures

@@ -219,12 +219,12 @@ the database. Each of the listed services below use the following formula to def

 - `puma` : `max_threads + headroom` (default `14`)
 - `max_threads` is configured via: `gitlab['puma']['max_threads']` (default: `4`)
-- `headroom` can be configured via `DB_POOL_HEADROOM` env variable (default to `10`)
+- `headroom` can be configured via `DB_POOL_HEADROOM` environment variable (default to `10`)
 - `sidekiq` : `max_concurrency + 1 + headroom` (default: `61`)
 - `max_concurrency` is configured via: `sidekiq['max_concurrency']` (default: `50`)
-- `headroom` can be configured via `DB_POOL_HEADROOM` env variable (default to `10`)
+- `headroom` can be configured via `DB_POOL_HEADROOM` environment variable (default to `10`)
 - `geo-logcursor`: `1+headroom` (default: `11`)
-- `headroom` can be configured via `DB_POOL_HEADROOM` env variable (default to `10`)
+- `headroom` can be configured via `DB_POOL_HEADROOM` environment variable (default to `10`)

 To calculate the `default_pool_size`, multiply the number of instances of `puma`, `sidekiq` and `geo-logcursor` by the
 number of connections each can consume as per listed above. The total will be the suggested `default_pool_size`.
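The `default_pool_size` arithmetic described in the documentation hunk above can be sketched with the documented defaults; the instance counts below are hypothetical and should be replaced with your own topology:

```javascript
// Per-service connection formulas with their documented defaults.
const headroom = 10; // DB_POOL_HEADROOM default

const perPuma = 4 + headroom;          // max_threads + headroom = 14
const perSidekiq = 50 + 1 + headroom;  // max_concurrency + 1 + headroom = 61
const perGeoLogcursor = 1 + headroom;  // 1 + headroom = 11

// Hypothetical example: 3 Puma nodes, 2 Sidekiq nodes, 1 geo-logcursor instance.
const defaultPoolSize = 3 * perPuma + 2 * perSidekiq + 1 * perGeoLogcursor;
console.log(defaultPoolSize); // 3*14 + 2*61 + 11 = 175
```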
@@ -2311,7 +2311,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -62,7 +62,7 @@ monitor .[#7FFFD4,norank]--> redis
 @enduml
 ```

-The diagram above shows that while GitLab can be installed on a single server, it is internally composed of multiple services. As a GitLab instance is scaled, each of these services are broken out and independently scaled according to the demands placed on them. In some cases PaaS can be leveraged for some services (e.g. Cloud Object Storage for some file systems). For the sake of redundancy some of the services become clusters of nodes storing the same data. In a horizontal configuration of GitLab there are various ancillary services required to coordinate clusters or discover of resources (e.g. PgBouncer for Postgres connection management, Consul for Prometheus end point discovery).
+The diagram above shows that while GitLab can be installed on a single server, it is internally composed of multiple services. As a GitLab instance is scaled, each of these services are broken out and independently scaled according to the demands placed on them. In some cases PaaS can be leveraged for some services (e.g. Cloud Object Storage for some file systems). For the sake of redundancy some of the services become clusters of nodes storing the same data. In a horizontal configuration of GitLab there are various ancillary services required to coordinate clusters or discover of resources (e.g. PgBouncer for PostgreSQL connection management, Consul for Prometheus end point discovery).

 ## Requirements

@@ -2309,7 +2309,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -1014,7 +1014,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -2269,7 +2269,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -2325,7 +2325,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -2244,7 +2244,7 @@ future with further specific cloud provider details.

 - For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
 [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
-- Nodes configuration is shown as it is forced to ensure pod vcpu / memory ratios and avoid scaling during **performance testing**.
+- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
 - In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.

 Next are the backend components that run on static compute VMs via Omnibus (or External PaaS
@@ -19322,6 +19322,7 @@ Name of the feature that the callout is for.
 | <a id="usercalloutfeaturenameenumpersonal_access_token_expiry"></a>`PERSONAL_ACCESS_TOKEN_EXPIRY` | Callout feature name for personal_access_token_expiry. |
 | <a id="usercalloutfeaturenameenumpipeline_needs_banner"></a>`PIPELINE_NEEDS_BANNER` | Callout feature name for pipeline_needs_banner. |
 | <a id="usercalloutfeaturenameenumpipeline_needs_hover_tip"></a>`PIPELINE_NEEDS_HOVER_TIP` | Callout feature name for pipeline_needs_hover_tip. |
+| <a id="usercalloutfeaturenameenumpreview_user_over_limit_free_plan_alert"></a>`PREVIEW_USER_OVER_LIMIT_FREE_PLAN_ALERT` | Callout feature name for preview_user_over_limit_free_plan_alert. |
 | <a id="usercalloutfeaturenameenumprofile_personal_access_token_expiry"></a>`PROFILE_PERSONAL_ACCESS_TOKEN_EXPIRY` | Callout feature name for profile_personal_access_token_expiry. |
 | <a id="usercalloutfeaturenameenumregistration_enabled_callout"></a>`REGISTRATION_ENABLED_CALLOUT` | Callout feature name for registration_enabled_callout. |
 | <a id="usercalloutfeaturenameenumsecurity_configuration_devops_alert"></a>`SECURITY_CONFIGURATION_DEVOPS_ALERT` | Callout feature name for security_configuration_devops_alert. |
@@ -68,7 +68,7 @@ GET /projects/:id/wikis/:slug
 | Attribute | Type | Required | Description |
 | --------- | ------- | -------- | --------------------- |
 | `id` | integer/string | yes | The ID or [URL-encoded path of the project](index.md#namespaced-path-encoding) |
-| `slug` | string | yes | URLencoded slug (a unique string) of the wiki page, such as `dir%2Fpage_name` |
+| `slug` | string | yes | URL encoded slug (a unique string) of the wiki page, such as `dir%2Fpage_name` |
 | `render_html` | boolean | no | Return the rendered HTML of the wiki page |
 | `version` | string | no | Wiki page version sha |

@@ -100,7 +100,7 @@ The short-term goal is detailed in [this epic](https://gitlab.com/groups/gitlab-

 ### Mid-term - Improved feedback, query testing and background migration testing

-Mid-term, we plan to expand the level of detail the testing pipeline reports back to the merge requet and expand its scope to cover query testing, too. By doing so, we use our experience from database code reviews and using thin-clone technology and bring this back closer to the GitLab workflow. Instead of reaching out to different tools (`postgres.ai`, `joe`, Slack, plan visualizations, and so on) we bring this back to GitLab and working directly on the merge request.
+Mid-term, we plan to expand the level of detail the testing pipeline reports back to the merge request and expand its scope to cover query testing, too. By doing so, we use our experience from database code reviews and using thin-clone technology and bring this back closer to the GitLab workflow. Instead of reaching out to different tools (`postgres.ai`, `joe`, Slack, plan visualizations, and so on) we bring this back to GitLab and working directly on the merge request.

 Secondly, we plan to cover background migrations testing, too. These are typically data migrations that are scheduled to run over a long period of time. The success of both the scheduling phase and the job execution phase typically depends a lot on data distribution - which only surfaces when running these migrations on actual production data. In order to become confident about a background migration, we plan to provide the following feedback:

@@ -83,4 +83,4 @@ You can check documentation for your specific OS to learn how to find and displa
 hidden files.

 If there is no `.gitmodules` file, it's possible the submodule settings are in a
-[gitconfig](https://www.atlassian.com/git/tutorials/setting-up-a-repository/git-config) file.
+[`git config`](https://www.atlassian.com/git/tutorials/setting-up-a-repository/git-config) file.
@@ -19,7 +19,7 @@ If you are migrating from another CI/CD tool, view this documentation:
 - [Migrate from CircleCI](../migration/circleci.md).
 - [Migrate from Jenkins](../migration/jenkins.md).

-> - <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> Watch [First time GitLab & CI/CD](https://www.youtube.com/watch?v=kTNfi5z6Uvk&t=553s). This includes a quick introduction to GitLab, the first steps with CI/CD, building a Go project, running tests, using the CI/CD pipeline editor, detecting secrets and security vulnerabilities and offers more exercises for async practice.
+> - <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> Watch [First time GitLab & CI/CD](https://www.youtube.com/watch?v=kTNfi5z6Uvk&t=553s). This includes a quick introduction to GitLab, the first steps with CI/CD, building a Go project, running tests, using the CI/CD pipeline editor, detecting secrets and security vulnerabilities and offers more exercises for asynchronous practice.
 > - <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> Watch [Intro to GitLab CI](https://www.youtube.com/watch?v=l5705U8s_nQ&t=358s). This workshop uses the Web IDE to quickly get going with building source code using CI/CD, and run unit tests.

 ## CI/CD process overview
@@ -43,7 +43,7 @@ The DDL migrations are all migrations that:
 1. Add or remove a column with or without a default value (for example, `add_column`).
 1. Create or drop trigger functions (for example, `create_trigger_function`).
 1. Attach or detach triggers from tables (for example, `track_record_deletions`, `untrack_record_deletions`).
-1. Prepare or not async indexes (for example, `prepare_async_index`, `unprepare_async_index_by_name`).
+1. Prepare or not asynchronous indexes (for example, `prepare_async_index`, `unprepare_async_index_by_name`).

 As such DDL migrations **CANNOT**:

@@ -159,7 +159,7 @@ end

 ### The special purpose of `gitlab_shared`

-As described in [gitlab_schema](multiple_databases.md#the-special-purpose-of-gitlab_shared),
+As described in [`gitlab_schema`](multiple_databases.md#the-special-purpose-of-gitlab_shared),
 the `gitlab_shared` tables are allowed to contain data across all databases. This implies
 that such migrations should run across all databases to modify structure (DDL) or modify data (DML).

@@ -526,7 +526,7 @@ ci_build.update!(updated_at: Time.current) # CI DB
 ci_build.project.update!(updated_at: Time.current) # Main DB
 ```

-##### Async processing
+##### Asynchronous processing

 If we need more guarantee that an operation finishes the work consistently we can execute it
 within a background job. A background job is scheduled asynchronously and retried several times
@ -25,7 +25,7 @@ When there is a case on GitLab.com (SaaS) that necessitates turning off all expe
|
|||
|
||||
You can toggle experiments on SaaS on and off using the `gitlab_experiment` [feature flag](../feature_flags).
|
||||
|
||||
This can be done via chatops:
|
||||
This can be done via ChatOps:
|
||||
|
||||
- [disable](../feature_flags/controls.md#disabling-feature-flags): `/chatops run feature set gitlab_experiment false`
|
||||
- [enable](../feature_flags/controls.md#process): `/chatops run feature delete gitlab_experiment`
|
||||
|
|
|
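For reference, the two ChatOps commands above correspond to GitLab's feature-flag API; a rough Rails console equivalent (a sketch, not the documented SaaS procedure) would be:

```ruby
# Rough console equivalents of the ChatOps commands above (sketch only).
Feature.disable(:gitlab_experiment) # like `/chatops run feature set gitlab_experiment false`
Feature.remove(:gitlab_experiment)  # like `/chatops run feature delete gitlab_experiment`
```
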
@@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w

The migration from Vue 2 to 3 is tracked in epic [&6252](https://gitlab.com/groups/gitlab-org/-/epics/6252).

To ease migration to Vue 3.x, we have added [eslint rules](https://gitlab.com/gitlab-org/frontend/eslint-plugin/-/merge_requests/50)
To ease migration to Vue 3.x, we have added [ESLint rules](https://gitlab.com/gitlab-org/frontend/eslint-plugin/-/merge_requests/50)
that prevent us from using the following deprecated features in the codebase.

## Vue filters

@@ -505,7 +505,7 @@ end
### When to use the helper method

You can **only** use the `with_lock_retries` helper method when the execution is not already inside
an open transaction (using Postgres subtransactions is discouraged). It can be used with
an open transaction (using PostgreSQL subtransactions is discouraged). It can be used with
standard Rails migration helper methods. Calling more than one migration
helper is not a problem if they're executed on the same table.

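A minimal sketch of the intended usage, assuming the standard GitLab migration structure (the `gadgets` table and `archived` column are hypothetical); `disable_ddl_transaction!` keeps the helper from being called inside an already-open transaction:

```ruby
# frozen_string_literal: true

# Sketch: with_lock_retries wrapping standard Rails migration helpers.
# Table and column names are hypothetical.
class AddArchivedToGadgets < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  def up
    with_lock_retries do
      add_column :gadgets, :archived, :boolean, default: false, null: false
    end
  end

  def down
    with_lock_retries do
      remove_column :gadgets, :archived
    end
  end
end
```
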
@@ -53,7 +53,7 @@ In summary:

To identify the minimal set of tests needed, we use the [`test_file_finder` gem](https://gitlab.com/gitlab-org/ci-cd/test_file_finder), with two strategies:

- dynamic mapping from test coverage tracing (generated via the [Crystalball gem](https://github.com/toptal/crystalball))
- dynamic mapping from test coverage tracing (generated via the [`Crystalball` gem](https://github.com/toptal/crystalball))
  ([see where it's used](https://gitlab.com/gitlab-org/gitlab/-/blob/47d507c93779675d73a05002e2ec9c3c467cd698/tooling/bin/find_tests#L15))
- static mapping maintained in the [`tests.yml` file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/tests.yml) for special cases that cannot
  be mapped via coverage tracing ([see where it's used](https://gitlab.com/gitlab-org/gitlab/-/blob/47d507c93779675d73a05002e2ec9c3c467cd698/tooling/bin/find_tests#L12))

@@ -16,8 +16,8 @@ A hand-raise PQL is a user who requests to speak to sales from within the produc
1. Set up CustomersDot to talk to a staging instance of Platypus.

1. Set up CustomersDot using the [normal install instructions](https://gitlab.com/gitlab-org/customers-gitlab-com/-/blob/staging/doc/setup/installation_steps.md).
1. Set the `CUSTOMER_PORTAL_URL` env var to your local (or ngrok) URL of your CustomersDot instance.
1. Place `export CUSTOMER_PORTAL_URL='https://XXX.ngrok.io/'` in your shell rc script (~/.zshrc or ~/.bash_profile or ~/.bashrc) and restart GDK.
1. Set the `CUSTOMER_PORTAL_URL` environment variable to your local (or ngrok) URL of your CustomersDot instance.
1. Place `export CUSTOMER_PORTAL_URL='https://XXX.ngrok.io/'` in your shell rc script (`~/.zshrc` or `~/.bash_profile` or `~/.bashrc`) and restart GDK.
1. Enter the credentials on CustomersDot development to Platypus in your `/config/secrets.yml` and restart. Credentials for the Platypus Staging are in the 1Password Growth vault. The URL for staging is `https://staging.ci.nexus.gitlabenvironment.cloud`.

   ```yaml

@@ -660,7 +660,7 @@ cfg := &tls.Config{
}
```

For **Ruby**, you can use [HTTParty](https://github.com/jnunemaker/httparty) and specify TLS 1.3 version as well as ciphers:
For **Ruby**, you can use [`HTTParty`](https://github.com/jnunemaker/httparty) and specify TLS 1.3 version as well as ciphers:

Whenever possible this example should be **avoided** for security purposes:

@@ -705,7 +705,7 @@ tls.Config{

This example was taken [here](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/871b52dc700f1a66f6644fbb1e78a6d463a6ff83/internal/tool/tlstool/tlstool.go#L72).

For **Ruby**, you can use again [HTTParty](https://github.com/jnunemaker/httparty) and specify this time TLS 1.2 version alongside with the recommended ciphers:
For **Ruby**, you can use again [`HTTParty`](https://github.com/jnunemaker/httparty) and specify this time TLS 1.2 version alongside with the recommended ciphers:

```ruby
response = GitLab::HTTP.perform_request(Net::HTTP::Get, 'https://gitlab.com', ssl_version: :TLSv1_2, ciphers: ['ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-AES256-GCM-SHA384', 'ECDHE-RSA-AES256-GCM-SHA384', 'ECDHE-ECDSA-CHACHA20-POLY1305', 'ECDHE-RSA-CHACHA20-POLY1305'])

@@ -873,7 +873,7 @@ If a vulnerable application extracts an archive file with any of these file name

#### Ruby

For zip files, the [rubyzip](https://rubygems.org/gems/rubyzip) Ruby gem is already patched against the Zip Slip vulnerability and will refuse to extract files that try to perform directory traversal, so for this vulnerable example we will extract a `tar.gz` file with `Gem::Package::TarReader`:
For zip files, the [`rubyzip`](https://rubygems.org/gems/rubyzip) Ruby gem is already patched against the Zip Slip vulnerability and will refuse to extract files that try to perform directory traversal, so for this vulnerable example we will extract a `tar.gz` file with `Gem::Package::TarReader`:

```ruby
# Vulnerable tar.gz extraction example!

@@ -1072,7 +1072,7 @@ Symlink attacks makes it possible for an attacker to read the contents of arbitr

#### Ruby

For zip files, the [rubyzip](https://rubygems.org/gems/rubyzip) Ruby gem is already patched against symlink attacks as it simply ignores symbolic links, so for this vulnerable example we will extract a `tar.gz` file with `Gem::Package::TarReader`:
For zip files, the [`rubyzip`](https://rubygems.org/gems/rubyzip) Ruby gem is already patched against symlink attacks as it simply ignores symbolic links, so for this vulnerable example we will extract a `tar.gz` file with `Gem::Package::TarReader`:

```ruby
# Vulnerable tar.gz extraction example!

@@ -36,7 +36,7 @@ as base:

_\* Undergoes a pseudonymization process at the collector level._

These properties [are overriden](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/get_standard_context.js)
These properties [are overridden](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/get_standard_context.js)
with frontend-specific values, like `source` (`gitlab-javascript`), `google_analytics_id`
and the custom `extra` object. You can modify this object for any subsequent
structured event that fires, although this is not recommended.

@@ -83,7 +83,7 @@ The following example shows `data-track-*` attributes assigned to a button:
| `data-track-action` | true | Action the user is taking. Clicks must be prepended with `click` and activations must be prepended with `activate`. For example, focusing a form field is `activate_form_input` and clicking a button is `click_button`. Replaces `data-track-event`, which was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/290962) in GitLab 13.11. |
| `data-track-label` | false | The specific element or object to act on. This can be: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown in the top bar; or the name or title attribute of a record being created. |
| `data-track-property` | false | Any additional property of the element, or object being acted on. |
| `data-track-value` | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. If omitted, this is the element's `value` property or `undefined`. For checkboxes, the default value is the element's checked attribute or `0` when unchecked. The value is parsed as numeric before sendind the event. |
| `data-track-value` | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. If omitted, this is the element's `value` property or `undefined`. For checkboxes, the default value is the element's checked attribute or `0` when unchecked. The value is parsed as numeric before sending the event. |
| `data-track-extra` | false | A key-value pair object passed as a valid JSON string. This attribute is added to the `extra` property in our [`gitlab_standard`](schemas.md#gitlab_standard) schema. |
| `data-track-context` | false | To append a custom context object, passed as a valid JSON string. |

@@ -153,7 +153,7 @@ On Demand pricing is used in this table for comparisons, but should not be used
| Supporting services such as NGINX, Prometheus, etc | 2 vCPU, 8 GB | | |
| **GitLab Ref Arch Raw Total K8s Node Capacity** | 16 vCPU, 32 GB | | |
| One Node for Overhead and Miscellaneous (EKS Cluster AutoScaler, Grafana, Prometheus, etc) | + 8 vCPU, 16GB | | |
| **Grand Total w/ Overheads**<br />Minimum hosts = 3 | 24 vCPU, 48 GB | **c5.2xlarge** <br />(8vcpu/16GB) x 3 nodes<br />24 vCPU, 48 GB | $1.02/hr |
| **Grand Total w/ Overheads**<br />Minimum hosts = 3 | 24 vCPU, 48 GB | **c5.2xlarge** <br />(8vCPU/16GB) x 3 nodes<br />24 vCPU, 48 GB | $1.02/hr |
| **Idle Configuration (Scaled-In)** | 16 vCPU, 32 GB | **c5.2xlarge** x 2 | $0.68/hr |

NOTE:

@@ -206,7 +206,7 @@ On Demand pricing is used in this table for comparisons, but should not be used
| Supporting services such as NGINX, Prometheus, etc | [2 allocations](../../administration/reference_architectures/3k_users.md#cluster-topology) x ([2 vCPU and 7.5 GB](../../administration/reference_architectures/3k_users.md#cluster-topology)) = <br />4 vCPU, 15 GB | | |
| **GitLab Ref Arch Raw Total K8s Node Capacity** | 32 vCPU, 56 GB | | |
| One Node for Overhead and Miscellaneous (EKS Cluster AutoScaler, Grafana, Prometheus, etc) | + 16 vCPU, 32GB | | |
| **Grand Total w/ Overheads Full Scale**<br />Minimum hosts = 3 | 48 vCPU, 88 GB | **c5.2xlarge** (8vcpu/16GB) x 5 nodes<br />40 vCPU, 80 GB<br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/3K/3k-QuickStart-ARM-RDS-Cache_v13-12-3-ee_2021-07-23_124216/3k-QuickStart-ARM-RDS-Cache_v13-12-3-ee_2021-07-23_124216_results.txt) | $1.70/hr |
| **Grand Total w/ Overheads Full Scale**<br />Minimum hosts = 3 | 48 vCPU, 88 GB | **c5.2xlarge** (8vCPU/16GB) x 5 nodes<br />40 vCPU, 80 GB<br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/3K/3k-QuickStart-ARM-RDS-Cache_v13-12-3-ee_2021-07-23_124216/3k-QuickStart-ARM-RDS-Cache_v13-12-3-ee_2021-07-23_124216_results.txt) | $1.70/hr |
| **Possible Idle Configuration (Scaled-In 75% - round up)**<br />Pod autoscaling must be also adjusted to enable lower idling configuration. | 24 vCPU, 48 GB | c5.2xlarge x 4 | $1.36/hr |

Other combinations of node type and quantity can be used to meet the Grand Total. Due to the properties of pods, hosts that are overly small may have significant unused capacity.

@@ -260,10 +260,10 @@ On Demand pricing is used in this table for comparisons, but should not be used
| Supporting services such as NGINX, Prometheus, etc | [2 allocations](../../administration/reference_architectures/5k_users.md#cluster-topology) x ([2 vCPU and 7.5 GB](../../administration/reference_architectures/5k_users.md#cluster-topology)) = <br />4 vCPU, 15 GB | | |
| **GitLab Ref Arch Raw Total K8s Node Capacity** | 62 vCPU, 96.5 GB | | |
| One Node for Quick Start Overhead and Miscellaneous (EKS Cluster AutoScaler, Grafana, Prometheus, etc) | + 8 vCPU, 16GB | | |
| **Grand Total w/ Overheads Full Scale**<br />Minimum hosts = 3 | 70 vCPU, 112.5 GB | **c5.2xlarge** (8vcpu/16GB) x 9 nodes<br />72 vCPU, 144 GB<br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/5K/5k-QuickStart-ARM-RDS-Redis_v13-12-3-ee_2021-07-23_140128/5k-QuickStart-ARM-RDS-Redis_v13-12-3-ee_2021-07-23_140128_results.txt) | $2.38/hr |
| **Grand Total w/ Overheads Full Scale**<br />Minimum hosts = 3 | 70 vCPU, 112.5 GB | **c5.2xlarge** (8vCPU/16GB) x 9 nodes<br />72 vCPU, 144 GB<br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/5K/5k-QuickStart-ARM-RDS-Redis_v13-12-3-ee_2021-07-23_140128/5k-QuickStart-ARM-RDS-Redis_v13-12-3-ee_2021-07-23_140128_results.txt) | $2.38/hr |
| **Possible Idle Configuration (Scaled-In 75% - round up)**<br />Pod autoscaling must be also adjusted to enable lower idling configuration. | 24 vCPU, 48 GB | c5.2xlarge x 7 | $1.85/hr |

Other combinations of node type and quantity can be used to meet the Grand Total. Due to the cpu and memory requirements of pods, hosts that are overly small may have significant unused capacity.
Other combinations of node type and quantity can be used to meet the Grand Total. Due to the CPU and memory requirements of pods, hosts that are overly small may have significant unused capacity.

NOTE:
If EKS node autoscaling is employed, it is likely that your average loading will run lower than this, especially during non-working hours and weekends.

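To make the node counts in the "Grand Total" rows concrete, they follow from rounding the combined vCPU and memory totals up to whole instances. A quick arithmetic check for the 5,000-user row above (the numbers are taken from the table; the calculation itself is only illustrative):

```ruby
# Quick check of the 5,000-user "Grand Total" row: raw reference-architecture
# capacity plus one overhead node, rounded up to whole c5.2xlarge nodes
# (8 vCPU / 16 GB each).
raw_vcpu, raw_gb           = 62, 96.5 # GitLab Ref Arch Raw Total K8s Node Capacity
overhead_vcpu, overhead_gb = 8, 16    # one node for overhead and miscellaneous
node_vcpu, node_gb         = 8, 16    # c5.2xlarge

total_vcpu = raw_vcpu + overhead_vcpu # => 70
total_gb   = raw_gb + overhead_gb     # => 112.5

nodes = [(total_vcpu / node_vcpu.to_f).ceil, (total_gb / node_gb.to_f).ceil].max
puts nodes # => 9, matching "c5.2xlarge x 9 nodes" in the table
```
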
@@ -313,10 +313,10 @@ On Demand pricing is used in this table for comparisons, but should not be used
| Supporting services such as NGINX, Prometheus, etc | [2 allocations](../../administration/reference_architectures/10k_users.md#cluster-topology) x ([2 vCPU and 7.5 GB](../../administration/reference_architectures/10k_users.md#cluster-topology))<br />4 vCPU, 15 GB | | |
| **GitLab Ref Arch Raw Total K8s Node Capacity** | 128 vCPU, 158 GB | | |
| One Node for Overhead and Miscellaneous (EKS Cluster AutoScaler, Grafana, Prometheus, etc) | + 16 vCPU, 32GB | | |
| **Grand Total w/ Overheads Fully Scaled**<br />Minimum hosts = 3 | 142 vCPU, 190 GB | **c5.4xlarge** (16vcpu/32GB) x 9 nodes<br />144 vCPU, 288GB<br /><br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/10K/GL-CloudNative-10k-RDS-Graviton_v13-12-3-ee_2021-07-08_194647/GL-CloudNative-10k-RDS-Graviton_v13-12-3-ee_2021-07-08_194647_results.txt) | $6.12/hr |
| **Grand Total w/ Overheads Fully Scaled**<br />Minimum hosts = 3 | 142 vCPU, 190 GB | **c5.4xlarge** (16vCPU/32GB) x 9 nodes<br />144 vCPU, 288GB<br /><br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/10K/GL-CloudNative-10k-RDS-Graviton_v13-12-3-ee_2021-07-08_194647/GL-CloudNative-10k-RDS-Graviton_v13-12-3-ee_2021-07-08_194647_results.txt) | $6.12/hr |
| **Possible Idle Configuration (Scaled-In 75% - round up)**<br />Pod autoscaling must be also adjusted to enable lower idling configuration. | 40 vCPU, 80 GB | c5.4xlarge x 7<br /><br />[Elastic Auto Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/10K/GL-CloudNative-10k-AutoScaling-Test_v13-12-3-ee_2021-07-09_115139/GL-CloudNative-10k-AutoScaling-Test_v13-12-3-ee_2021-07-09_115139_results.txt) | $4.76/hr |

Other combinations of node type and quantity can be used to meet the Grand Total. Due to the cpu and memory requirements of pods, hosts that are overly small may have significant unused capacity.
Other combinations of node type and quantity can be used to meet the Grand Total. Due to the CPU and memory requirements of pods, hosts that are overly small may have significant unused capacity.

NOTE:
If EKS node autoscaling is employed, it is likely that your average loading will run lower than this, especially during non-working hours and weekends.

@@ -366,10 +366,10 @@ On Demand pricing is used in this table for comparisons, but should not be used
| Supporting services such as NGINX, Prometheus, etc | [2 allocations](../../administration/reference_architectures/10k_users.md#cluster-topology) x ([2 vCPU and 7.5 GB](../../administration/reference_architectures/10k_users.md#cluster-topology))<br />4 vCPU, 15 GB | | |
| **GitLab Ref Arch Raw Total K8s Node Capacity** | 428 vCPU, 533 GB | | |
| One Node for Overhead and Miscellaneous (EKS Cluster AutoScaler, Grafana, Prometheus, etc) | + 16 vCPU, 32GB | | |
| **Grand Total w/ Overheads Fully Scaled**<br />Minimum hosts = 3 | 444 vCPU, 565 GB | **c5.4xlarge** (16vcpu/32GB) x 28 nodes<br />448 vCPU, 896GB<br /><br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/50K/50k-Fixed-Scale-Test_v13-12-3-ee_2021-08-13_172819/50k-Fixed-Scale-Test_v13-12-3-ee_2021-08-13_172819_results.txt) | $19.04/hr |
| **Grand Total w/ Overheads Fully Scaled**<br />Minimum hosts = 3 | 444 vCPU, 565 GB | **c5.4xlarge** (16vCPU/32GB) x 28 nodes<br />448 vCPU, 896GB<br /><br />[Full Fixed Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/50K/50k-Fixed-Scale-Test_v13-12-3-ee_2021-08-13_172819/50k-Fixed-Scale-Test_v13-12-3-ee_2021-08-13_172819_results.txt) | $19.04/hr |
| **Possible Idle Configuration (Scaled-In 75% - round up)**<br />Pod autoscaling must be also adjusted to enable lower idling configuration. | 40 vCPU, 80 GB | c5.2xlarge x 10<br /><br />[Elastic Auto Scale GPT Test Results](https://gitlab.com/guided-explorations/aws/implementation-patterns/gitlab-cloud-native-hybrid-on-eks/-/blob/master/gitlab-alliances-testing/50K/50k-AutoScale-Test_v13-12-3-ee_2021-08-13_192633/50k-AutoScale-Test_v13-12-3-ee_2021-08-13_192633.txt) | $6.80/hr |

Other combinations of node type and quantity can be used to meet the Grand Total. Due to the cpu and memory requirements of pods, hosts that are overly small may have significant unused capacity.
Other combinations of node type and quantity can be used to meet the Grand Total. Due to the CPU and memory requirements of pods, hosts that are overly small may have significant unused capacity.

NOTE:
If EKS node autoscaling is employed, it is likely that your average loading will run lower than this, especially during non-working hours and weekends.

@@ -425,7 +425,7 @@ an expiration message is displayed to all administrators.
For GitLab self-managed instances, you have a 14-day grace period
before this occurs.

- To resume functionality, acticate a new license.
- To resume functionality, activate a new license.
- To fall back to Free features, delete the expired license.

## Contact Support

@@ -8,7 +8,7 @@ type: reference
# Rate limits on Repository files API **(FREE SELF)**

> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68561) in GitLab 14.3.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75918) in GitLab 14.6. [Feature flag files_api_throttling](https://gitlab.com/gitlab-org/gitlab/-/issues/338903) removed.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75918) in GitLab 14.6. [Feature flag `files_api_throttling`](https://gitlab.com/gitlab-org/gitlab/-/issues/338903) removed.

The [Repository files API](../../../api/repository_files.md) enables you to
fetch, create, update, and delete files in your repository. To improve the security

@@ -60,7 +60,7 @@ To enforce confirmation of the email address used for new sign ups:

1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Settings > General**, and expand **Sign-up restrictions**.
1. Select the **Enable email restrictions for sign ups** checkbox, then select **Save changes**.
1. Select the **Send confirmation email on sign-up** checkbox, then select **Save changes**.

## User cap

@@ -28,10 +28,17 @@ To integrate GitLab with security scanners other than those listed here, see

You can use cluster image scanning through the following methods:

- [The cluster image scanning analyzer](#use-the-cluster-image-scanning-analyzer)
<!--- start_remove The following content will be removed on remove_date: '2022-08-22' -->
- [The cluster image scanning analyzer](#use-the-cluster-image-scanning-analyzer-removed) ([Removed](https://gitlab.com/gitlab-org/gitlab/-/issues/356465) in GitLab 15.0. Use [the GitLab agent](#cluster-image-scanning-with-the-gitlab-agent) instead.)
<!--- end_remove -->
- [The GitLab agent](#cluster-image-scanning-with-the-gitlab-agent)

## Use the cluster image scanning analyzer
<!--- start_remove The following content will be removed on remove_date: '2022-08-22' -->

## Use the cluster image scanning analyzer (removed)

This feature was [removed](https://gitlab.com/gitlab-org/gitlab/-/issues/356465) in GitLab 15.0.
Use [the GitLab agent](#cluster-image-scanning-with-the-gitlab-agent) instead.

You can use the cluster image scanning analyzer to run cluster image scanning with [GitLab CI/CD](../../../ci/index.md).
To enable the cluster image scanning analyzer, [include the CI job](#configuration)

|
|||
}
|
||||
```
|
||||
|
||||
<!--- end_remove -->
|
||||
## Cluster image scanning with the GitLab agent
|
||||
|
||||
You can use the [GitLab agent](../../clusters/agent/index.md) to
|
||||
|
@ -304,9 +312,12 @@ the security vulnerabilities in your groups, projects, and pipelines.
|
|||
After you find a vulnerability, you can address it in the [vulnerability report](../vulnerabilities/index.md)
|
||||
or the [GitLab agent's](../../clusters/agent/vulnerabilities.md)
|
||||
details section.
|
||||
<!--- start_remove The following content will be removed on remove_date: '2022-08-22' -->
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Getting warning message `gl-cluster-image-scanning-report.json: no matching files`
|
||||
|
||||
For information on this error, see the [general Application Security troubleshooting section](../../../ci/pipelines/job_artifacts.md#error-message-no-files-to-upload).
|
||||
|
||||
<!--- end_remove -->
|
||||
|
|
|
@@ -32,4 +32,4 @@ information from the `X-Powered-By` header.
## Links

- [CWE](https://cwe.mitre.org/data/definitions/16.html)
- [PHP expose_php](https://www.php.net/manual/en/ini.core.php#ini.expose-php)
- [PHP `expose_php`](https://www.php.net/manual/en/ini.core.php#ini.expose-php)

@@ -526,7 +526,7 @@ always take the latest dependency scanning artifact available.

> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/4908) in GitLab 14.1 [with a flag](../../../administration/feature_flags.md) named `sec_dependency_scanning_ui_enable`. Enabled by default.
> - [Enabled on self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/282533) in GitLab 14.1.
> - [Feature flag sec_dependency_scanning_ui_enable removed](https://gitlab.com/gitlab-org/gitlab/-/issues/326005) in GitLab 14.2.
> - [Feature flag `sec_dependency_scanning_ui_enable` removed](https://gitlab.com/gitlab-org/gitlab/-/issues/326005) in GitLab 14.2.

To enable Dependency Scanning in a project, you can create a merge request:

@@ -548,7 +548,7 @@ Several passthrouh types generate a configuration for the target analyzer:

- Two `git` passthrough sections pull the head of branch
  `refs/remotes/origin/test` from the `myrules` Git repository, and revision
  `97f7686` from the `sast-rules` Git repostory. From the `sast-rules` Git
  `97f7686` from the `sast-rules` Git repository. From the `sast-rules` Git
  repository, only data from the `go` subdirectory is considered.
- The `sast-rules` entry has a higher precedence because it appears later in
  the configuration.

@@ -78,7 +78,7 @@ This image contains a set of Bash utility scripts to support [Helm v3 releases](
The template contains a [Helmfile](https://github.com/roboll/helmfile) you can use to manage
cluster applications with [Helm v3](https://helm.sh/).

This file has a list of paths to other Helmfiles for each app. They're all commented out by default, so you must uncomment
This file has a list of paths to other Helm files for each app. They're all commented out by default, so you must uncomment
the paths for the apps that you would like to use in your cluster.

By default, each `helmfile.yaml` in these sub-paths has the attribute `installed: true`. This means that every time

@@ -18,7 +18,7 @@ as well as its related [features](#deprecated-features).
The certificate-based Kubernetes integration with GitLab is deprecated.
It had the following issues:

- There were security issues as it required direct access to the Kube API by GitLab.
- There were security issues as it required direct access to the Kubernetes API by GitLab.
- The configuration options weren't flexible.
- The integration was flaky.
- Users were constantly reporting issues with features based on this model.

@@ -270,7 +270,7 @@ to collect test coverage data and generate the report.

With a minimal [`phpunit.xml`](https://phpunit.readthedocs.io/en/9.5/configuration.html) file (you may reference
[this example repository](https://gitlab.com/yookoala/code-coverage-visualization-with-php/)), you can run the test and
generate the coverage xml:
generate the `coverage.xml`:

```yaml
run tests:

@@ -34,7 +34,7 @@ Advanced Search searches default project branches only.
| Use | Description | Example |
|--------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|
| `filename:` | File name | [`filename:*spec.rb`](https://gitlab.com/search?snippets=&scope=blobs&repository_ref=&search=filename%3A*spec.rb&group_id=9970&project_id=278964) |
| `path:` | Repo location | [`path:spec/workers/`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=path%3Aspec%2Fworkers&snippets=) |
| `path:` | Repository location | [`path:spec/workers/`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=path%3Aspec%2Fworkers&snippets=) |
| `extension:` | File extension, without the `.` | [`extension:js`](https://gitlab.com/search?group_id=9970&project_id=278964&repository_ref=&scope=blobs&search=extension%3Ajs&snippets=) |
| `blob:` | Git object ID | [`blob:998707*`](https://gitlab.com/search?snippets=false&scope=blobs&repository_ref=&search=blob%3A998707*&group_id=9970) |

@@ -123,10 +123,19 @@ module API
'Could not find a user for the given key' unless actor.user
end

# TODO: backwards compatibility; remove after https://gitlab.com/gitlab-org/gitlab-shell/-/merge_requests/454 is merged
def two_factor_otp_check
{ success: false, message: 'Feature is not available' }
end

def two_factor_manual_otp_check
{ success: false, message: 'Feature is not available' }
end

def two_factor_push_otp_check
{ success: false, message: 'Feature is not available' }
end

def with_admin_mode_bypass!(actor_id)
return yield unless Gitlab::CurrentSettings.admin_mode

@@ -320,10 +329,23 @@ module API
end
end

# TODO: backwards compatibility; remove after https://gitlab.com/gitlab-org/gitlab-shell/-/merge_requests/454 is merged
post '/two_factor_otp_check', feature_category: :authentication_and_authorization do
status 200

two_factor_otp_check
two_factor_manual_otp_check
end

post '/two_factor_push_otp_check', feature_category: :authentication_and_authorization do
status 200

two_factor_push_otp_check
end

post '/two_factor_manual_otp_check', feature_category: :authentication_and_authorization do
status 200

two_factor_manual_otp_check
end
end
end

@@ -8,7 +8,6 @@ module BulkImports
import_entity = context.entity

data
.then { |data| transform_name(import_entity, data) }
.then { |data| transform_path(import_entity, data) }
.then { |data| transform_full_path(data) }
.then { |data| transform_parent(context, import_entity, data) }
@@ -19,11 +18,6 @@ module BulkImports

private

def transform_name(import_entity, data)
data['name'] = import_entity.destination_name
data
end

def transform_path(import_entity, data)
data['path'] = import_entity.destination_name.parameterize
data