Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-09-15 12:11:13 +00:00
parent 33f96e8df0
commit ae27cd3c88
121 changed files with 2024 additions and 841 deletions

View File

@ -88,3 +88,4 @@ deprecations-doc check:
needs: []
script:
- bundle exec rake gitlab:docs:check_deprecations
allow_failure: true

View File

@ -31,7 +31,6 @@ export default class BlobFileDropzone {
autoProcessQueue: false,
url: form.attr('action'),
// Rails uses a hidden input field for PUT
// http://stackoverflow.com/questions/21056482/how-to-set-method-put-in-form-tag-in-rails
method,
clickable: true,
uploadMultiple: false,

View File

@ -1,6 +1,8 @@
import axios from '~/lib/utils/axios_utils';
import { joinPaths } from '~/lib/utils/url_utility';
export const baseUrl = (projectPath) => `/${projectPath}/ide_terminals`;
export const baseUrl = (projectPath) =>
joinPaths(gon.relative_url_root || '', `/${projectPath}/ide_terminals`);
export const checkConfig = (projectPath, branch) =>
axios.post(`${baseUrl(projectPath)}/check_config`, {

View File

@ -69,6 +69,9 @@ export default {
isIssuableUrlExternal() {
return isExternal(this.webUrl);
},
reference() {
return this.issuable.reference || `${this.issuableSymbol}${this.issuable.iid}`;
},
labels() {
return this.issuable.labels?.nodes || this.issuable.labels || [];
},
@ -201,9 +204,9 @@ export default {
</div>
<div class="issuable-info">
<slot v-if="hasSlotContents('reference')" name="reference"></slot>
<span v-else data-testid="issuable-reference" class="issuable-reference"
>{{ issuableSymbol }}{{ issuable.iid }}</span
>
<span v-else data-testid="issuable-reference" class="issuable-reference">
{{ reference }}
</span>
<span class="issuable-authored gl-display-none gl-sm-display-inline-block! gl-mr-3">
<span aria-hidden="true">&middot;</span>
<span

View File

@ -14,6 +14,7 @@ import getIssuesCountsQuery from 'ee_else_ce/issues_list/queries/get_issues_coun
import createFlash from '~/flash';
import { TYPE_USER } from '~/graphql_shared/constants';
import { convertToGraphQLId, getIdFromGraphQLId } from '~/graphql_shared/utils';
import { ITEM_TYPE } from '~/groups/constants';
import CsvImportExportButtons from '~/issuable/components/csv_import_export_buttons.vue';
import IssuableByEmail from '~/issuable/components/issuable_by_email.vue';
import IssuableList from '~/issuable_list/components/issuable_list_root.vue';
@ -140,11 +141,11 @@ export default {
initialEmail: {
default: '',
},
isSignedIn: {
isProject: {
default: false,
},
issuesPath: {
default: '',
isSignedIn: {
default: false,
},
jiraIntegrationPath: {
default: '',
@ -186,9 +187,11 @@ export default {
variables() {
return this.queryVariables;
},
update: ({ project }) => project?.issues.nodes ?? [],
update(data) {
return data[this.namespace]?.issues.nodes ?? [];
},
result({ data }) {
this.pageInfo = data.project?.issues.pageInfo ?? {};
this.pageInfo = data[this.namespace]?.issues.pageInfo ?? {};
this.exportCsvPathWithQuery = this.getExportCsvPathWithQuery();
},
error(error) {
@ -204,7 +207,9 @@ export default {
variables() {
return this.queryVariables;
},
update: ({ project }) => project ?? {},
update(data) {
return data[this.namespace] ?? {};
},
error(error) {
createFlash({ message: this.$options.i18n.errorFetchingCounts, captureError: true, error });
},
@ -220,8 +225,9 @@ export default {
computed: {
queryVariables() {
return {
isSignedIn: this.isSignedIn,
fullPath: this.fullPath,
isProject: this.isProject,
isSignedIn: this.isSignedIn,
search: this.searchQuery,
sort: this.sortKey,
state: this.state,
@ -229,6 +235,9 @@ export default {
...this.apiFilterParams,
};
},
namespace() {
return this.isProject ? ITEM_TYPE.PROJECT : ITEM_TYPE.GROUP;
},
hasSearch() {
return this.searchQuery || Object.keys(this.urlFilterParams).length;
},
@ -242,7 +251,7 @@ export default {
return this.state === IssuableStates.Opened;
},
showCsvButtons() {
return this.isSignedIn;
return this.isProject && this.isSignedIn;
},
apiFilterParams() {
return convertToApiParams(this.filterTokens);
@ -447,39 +456,41 @@ export default {
return this.$apollo
.query({
query: searchLabelsQuery,
variables: { fullPath: this.fullPath, search },
variables: { fullPath: this.fullPath, search, isProject: this.isProject },
})
.then(({ data }) => data.project.labels.nodes);
.then(({ data }) => data[this.namespace]?.labels.nodes);
},
fetchMilestones(search) {
return this.$apollo
.query({
query: searchMilestonesQuery,
variables: { fullPath: this.fullPath, search },
variables: { fullPath: this.fullPath, search, isProject: this.isProject },
})
.then(({ data }) => data.project.milestones.nodes);
.then(({ data }) => data[this.namespace]?.milestones.nodes);
},
fetchIterations(search) {
const id = Number(search);
const variables =
!search || Number.isNaN(id)
? { fullPath: this.fullPath, search }
: { fullPath: this.fullPath, id };
? { fullPath: this.fullPath, search, isProject: this.isProject }
: { fullPath: this.fullPath, id, isProject: this.isProject };
return this.$apollo
.query({
query: searchIterationsQuery,
variables,
})
.then(({ data }) => data.project.iterations.nodes);
.then(({ data }) => data[this.namespace]?.iterations.nodes);
},
fetchUsers(search) {
return this.$apollo
.query({
query: searchUsersQuery,
variables: { fullPath: this.fullPath, search },
variables: { fullPath: this.fullPath, search, isProject: this.isProject },
})
.then(({ data }) => data.project.projectMembers.nodes.map((member) => member.user));
.then(({ data }) =>
data[this.namespace]?.[`${this.namespace}Members`].nodes.map((member) => member.user),
);
},
getExportCsvPathWithQuery() {
return `${this.exportCsvPath}${window.location.search}`;
@ -560,15 +571,16 @@ export default {
}
return axios
.put(joinPaths(this.issuesPath, issueToMove.iid, 'reorder'), {
.put(joinPaths(issueToMove.webPath, 'reorder'), {
move_before_id: isMovingToBeginning ? null : getIdFromGraphQLId(moveBeforeId),
move_after_id: isMovingToEnd ? null : getIdFromGraphQLId(moveAfterId),
group_full_path: this.isProject ? undefined : this.fullPath,
})
.then(() => {
const serializedVariables = JSON.stringify(this.queryVariables);
return this.$apollo.mutate({
mutation: reorderIssuesMutation,
variables: { oldIndex, newIndex, serializedVariables },
variables: { oldIndex, newIndex, namespace: this.namespace, serializedVariables },
});
})
.catch((error) => {

View File

@ -85,17 +85,17 @@ export function mountIssuesListApp() {
const resolvers = {
Mutation: {
reorderIssues: (_, { oldIndex, newIndex, serializedVariables }, { cache }) => {
reorderIssues: (_, { oldIndex, newIndex, namespace, serializedVariables }, { cache }) => {
const variables = JSON.parse(serializedVariables);
const sourceData = cache.readQuery({ query: getIssuesQuery, variables });
const data = produce(sourceData, (draftData) => {
const issues = draftData.project.issues.nodes.slice();
const issues = draftData[namespace].issues.nodes.slice();
const issueToMove = issues[oldIndex];
issues.splice(oldIndex, 1);
issues.splice(newIndex, 0, issueToMove);
draftData.project.issues.nodes = issues;
draftData[namespace].issues.nodes = issues;
});
cache.writeQuery({ query: getIssuesQuery, variables, data });
@ -128,8 +128,8 @@ export function mountIssuesListApp() {
hasMultipleIssueAssigneesFeature,
importCsvIssuesPath,
initialEmail,
isProject,
isSignedIn,
issuesPath,
jiraIntegrationPath,
markdownHelpPath,
maxAttachmentSize,
@ -158,8 +158,8 @@ export function mountIssuesListApp() {
hasIssueWeightsFeature: parseBoolean(hasIssueWeightsFeature),
hasIterationsFeature: parseBoolean(hasIterationsFeature),
hasMultipleIssueAssigneesFeature: parseBoolean(hasMultipleIssueAssigneesFeature),
isProject: parseBoolean(isProject),
isSignedIn: parseBoolean(isSignedIn),
issuesPath,
jiraIntegrationPath,
newIssuePath,
rssPath,

View File

@ -2,6 +2,7 @@
#import "./issue.fragment.graphql"
query getIssues(
$isProject: Boolean = false
$isSignedIn: Boolean = false
$fullPath: ID!
$search: String
@ -20,7 +21,35 @@ query getIssues(
$firstPageSize: Int
$lastPageSize: Int
) {
project(fullPath: $fullPath) {
group(fullPath: $fullPath) @skip(if: $isProject) {
issues(
includeSubgroups: true
search: $search
sort: $sort
state: $state
assigneeId: $assigneeId
assigneeUsernames: $assigneeUsernames
authorUsername: $authorUsername
labelName: $labelName
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
types: $types
not: $not
before: $beforeCursor
after: $afterCursor
first: $firstPageSize
last: $lastPageSize
) {
pageInfo {
...PageInfo
}
nodes {
...IssueFragment
reference(full: true)
}
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
issues(
search: $search
sort: $sort

View File

@ -1,4 +1,5 @@
query getIssuesCount(
$isProject: Boolean = false
$fullPath: ID!
$search: String
$assigneeId: String
@ -10,7 +11,54 @@ query getIssuesCount(
$types: [IssueType!]
$not: NegatedIssueFilterInput
) {
project(fullPath: $fullPath) {
group(fullPath: $fullPath) @skip(if: $isProject) {
openedIssues: issues(
includeSubgroups: true
state: opened
search: $search
assigneeId: $assigneeId
assigneeUsernames: $assigneeUsernames
authorUsername: $authorUsername
labelName: $labelName
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
types: $types
not: $not
) {
count
}
closedIssues: issues(
includeSubgroups: true
state: closed
search: $search
assigneeId: $assigneeId
assigneeUsernames: $assigneeUsernames
authorUsername: $authorUsername
labelName: $labelName
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
types: $types
not: $not
) {
count
}
allIssues: issues(
includeSubgroups: true
state: all
search: $search
assigneeId: $assigneeId
assigneeUsernames: $assigneeUsernames
authorUsername: $authorUsername
labelName: $labelName
milestoneTitle: $milestoneTitle
milestoneWildcardId: $milestoneWildcardId
types: $types
not: $not
) {
count
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
openedIssues: issues(
state: opened
search: $search

View File

@ -13,6 +13,7 @@ fragment IssueFragment on Issue {
updatedAt
upvotes
userDiscussionsCount @include(if: $isSignedIn)
webPath
webUrl
assignees {
nodes {

View File

@ -0,0 +1,4 @@
fragment Iteration on Iteration {
id
title
}

View File

@ -0,0 +1,6 @@
fragment Label on Label {
id
color
textColor
title
}

View File

@ -0,0 +1,4 @@
fragment Milestone on Milestone {
id
title
}

View File

@ -1,7 +1,13 @@
mutation reorderIssues($oldIndex: Int, $newIndex: Int, $serializedVariables: String) {
mutation reorderIssues(
$oldIndex: Int
$newIndex: Int
$namespace: String
$serializedVariables: String
) {
reorderIssues(
oldIndex: $oldIndex
newIndex: $newIndex
namespace: $namespace
serializedVariables: $serializedVariables
) @client
}

View File

@ -1,9 +1,17 @@
query searchIterations($fullPath: ID!, $search: String, $id: ID) {
project(fullPath: $fullPath) {
iterations(title: $search, id: $id) {
#import "./iteration.fragment.graphql"
query searchIterations($fullPath: ID!, $search: String, $id: ID, $isProject: Boolean = false) {
group(fullPath: $fullPath) @skip(if: $isProject) {
iterations(title: $search, id: $id, includeAncestors: true) {
nodes {
id
title
...Iteration
}
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
iterations(title: $search, id: $id, includeAncestors: true) {
nodes {
...Iteration
}
}
}

View File

@ -1,11 +1,17 @@
query searchLabels($fullPath: ID!, $search: String) {
project(fullPath: $fullPath) {
#import "./label.fragment.graphql"
query searchLabels($fullPath: ID!, $search: String, $isProject: Boolean = false) {
group(fullPath: $fullPath) @skip(if: $isProject) {
labels(searchTerm: $search, includeAncestorGroups: true, includeDescendantGroups: true) {
nodes {
...Label
}
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
labels(searchTerm: $search, includeAncestorGroups: true) {
nodes {
id
color
textColor
title
...Label
}
}
}

View File

@ -1,9 +1,17 @@
query searchMilestones($fullPath: ID!, $search: String) {
project(fullPath: $fullPath) {
#import "./milestone.fragment.graphql"
query searchMilestones($fullPath: ID!, $search: String, $isProject: Boolean = false) {
group(fullPath: $fullPath) @skip(if: $isProject) {
milestones(searchTitle: $search, includeAncestors: true, includeDescendants: true) {
nodes {
...Milestone
}
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
milestones(searchTitle: $search, includeAncestors: true) {
nodes {
id
title
...Milestone
}
}
}

View File

@ -1,12 +1,20 @@
query searchUsers($fullPath: ID!, $search: String) {
project(fullPath: $fullPath) {
#import "./user.fragment.graphql"
query searchUsers($fullPath: ID!, $search: String, $isProject: Boolean = false) {
group(fullPath: $fullPath) @skip(if: $isProject) {
groupMembers(search: $search) {
nodes {
user {
...User
}
}
}
}
project(fullPath: $fullPath) @include(if: $isProject) {
projectMembers(search: $search) {
nodes {
user {
id
avatarUrl
name
username
...User
}
}
}

View File

@ -0,0 +1,6 @@
fragment User on User {
id
avatarUrl
name
username
}

View File

@ -117,7 +117,6 @@ export const handleLocationHash = () => {
};
// Check if element scrolled into viewport from above or below
// Courtesy http://stackoverflow.com/a/7557433/414749
export const isInViewport = (el, offset = {}) => {
const rect = el.getBoundingClientRect();
const { top, left } = offset;
@ -560,8 +559,6 @@ export const addSelectOnFocusBehaviour = (selector = '.js-select-on-focus') => {
* Method to round off values with decimal places
* with provided precision.
*
* Taken from https://stackoverflow.com/a/7343013/414749
*
* Eg; roundOffFloat(3.141592, 3) = 3.142
*
* Refer to spec/frontend/lib/utils/common_utils_spec.js for

View File

@ -125,8 +125,7 @@ export default {
// This method is defined here instead of in `methods`
// because we need to access the .cancel() method
// lodash attaches to the function, which is
// made inaccessible by Vue. More info:
// https://stackoverflow.com/a/52988020/1063392
// made inaccessible by Vue.
this.debouncedSearch = debounce(function search() {
this.search(this.searchQuery);
}, SEARCH_DEBOUNCE_MS);

View File

@ -1,26 +1,30 @@
import IssuableFilteredSearchTokenKeys from 'ee_else_ce/filtered_search/issuable_filtered_search_token_keys';
import issuableInitBulkUpdateSidebar from '~/issuable_bulk_update_sidebar/issuable_init_bulk_update_sidebar';
import { mountIssuablesListApp } from '~/issues_list';
import { mountIssuablesListApp, mountIssuesListApp } from '~/issues_list';
import initManualOrdering from '~/manual_ordering';
import { FILTERED_SEARCH } from '~/pages/constants';
import initFilteredSearch from '~/pages/search/init_filtered_search';
import projectSelect from '~/project_select';
const ISSUE_BULK_UPDATE_PREFIX = 'issue_';
if (gon.features?.vueIssuesList) {
mountIssuesListApp();
} else {
const ISSUE_BULK_UPDATE_PREFIX = 'issue_';
IssuableFilteredSearchTokenKeys.addExtraTokensForIssues();
IssuableFilteredSearchTokenKeys.removeTokensForKeys('release');
issuableInitBulkUpdateSidebar.init(ISSUE_BULK_UPDATE_PREFIX);
IssuableFilteredSearchTokenKeys.addExtraTokensForIssues();
IssuableFilteredSearchTokenKeys.removeTokensForKeys('release');
issuableInitBulkUpdateSidebar.init(ISSUE_BULK_UPDATE_PREFIX);
initFilteredSearch({
page: FILTERED_SEARCH.ISSUES,
isGroupDecendent: true,
useDefaultState: true,
filteredSearchTokenKeys: IssuableFilteredSearchTokenKeys,
});
projectSelect();
initManualOrdering();
initFilteredSearch({
page: FILTERED_SEARCH.ISSUES,
isGroupDecendent: true,
useDefaultState: true,
filteredSearchTokenKeys: IssuableFilteredSearchTokenKeys,
});
projectSelect();
initManualOrdering();
if (gon.features?.vueIssuablesList) {
mountIssuablesListApp();
if (gon.features?.vueIssuablesList) {
mountIssuablesListApp();
}
}

View File

@ -149,8 +149,7 @@ export default {
// This method is defined here instead of in `methods`
// because we need to access the .cancel() method
// lodash attaches to the function, which is
// made inaccessible by Vue. More info:
// https://stackoverflow.com/a/52988020/1063392
// made inaccessible by Vue.
this.debouncedSearch = debounce(function search() {
this.search();
}, SEARCH_DEBOUNCE_MS);

View File

@ -4,7 +4,7 @@ export * from './api/user_api';
export * from './api/markdown_api';
// Note: It's not possible to spy on methods imported from this file in
// Jest tests. See https://stackoverflow.com/a/53307822/1063392.
// Jest tests.
// As a workaround, in Jest tests, import the methods from the file
// in which they are defined:
//

View File

@ -19,7 +19,6 @@ const IGNORE_ERRORS = [
'fb_xd_fragment',
// ISP "optimizing" proxy - `Cache-Control: no-transform` seems to
// reduce this. (thanks @acdha)
// See http://stackoverflow.com/questions/4113268
'bmi_SafeAddOnload',
'EBCallBackMessageReceived',
// See http://toolbar.conduit.com/Developer/HtmlAndGadget/Methods/JSInjection.aspx

View File

@ -18,7 +18,7 @@ Regexp notes:
const identifierInstanceRegex = /((?:\[.+?\]){1}(?:\[\]|\[.+?\])?(?!:))/g;
const isIdentifierInstance = (literal) => {
// Reset lastIndex as global flag in regexp are stateful (https://stackoverflow.com/a/11477448)
// Reset lastIndex because global flags in regexps are stateful
identifierInstanceRegex.lastIndex = 0;
return identifierInstanceRegex.test(literal);
};

View File

@ -505,7 +505,7 @@ $line-removed-dark: #fac5cd !default;
* would hide other layers (selected text, matching brackets).
*
* When the transparent colors get layered on white background, they create their
* full opacity counterparts (computed with https://stackoverflow.com/a/12228643/606571):
* full opacity counterparts:
*
* - white + $line-added-transparent = $line-added
* - white + $line-added-transparent + $line-added-dark-transparent = $line-added-dark

View File

@ -1,7 +1,6 @@
@import 'framework/variables';
// Do not use 3-letter hex codes, bgcolor vs css background-color is problematic in emails
// See https://stackoverflow.com/questions/28551981/why-are-3-digit-hex-color-code-values-interpreted-differently-in-internet-explor
//
// stylelint-disable color-hex-length

View File

@ -40,7 +40,7 @@
position: -webkit-sticky;
position: sticky;
// Unitless zero values are not allowed in calculations https://stackoverflow.com/a/55391061
// Unitless zero values are not allowed in calculations
// stylelint-disable-next-line length-zero-no-unit
top: calc(#{$top-pos} + var(--system-header-height, 0px) + var(--performance-bar-height, 0px));
// stylelint-disable-next-line length-zero-no-unit

View File

@ -33,6 +33,7 @@ class GroupsController < Groups::ApplicationController
before_action do
push_frontend_feature_flag(:vue_issuables_list, @group)
push_frontend_feature_flag(:vue_issues_list, @group, default_enabled: :yaml)
push_frontend_feature_flag(:iteration_cadences, @group, default_enabled: :yaml)
end

View File

@ -43,7 +43,7 @@ class Projects::IssuesController < Projects::ApplicationController
push_frontend_feature_flag(:tribute_autocomplete, @project)
push_frontend_feature_flag(:vue_issuables_list, project)
push_frontend_feature_flag(:improved_emoji_picker, project, default_enabled: :yaml)
push_frontend_feature_flag(:vue_issues_list, project)
push_frontend_feature_flag(:vue_issues_list, project&.group, default_enabled: :yaml)
push_frontend_feature_flag(:iteration_cadences, project&.group, default_enabled: :yaml)
end

View File

@ -0,0 +1,36 @@
# frozen_string_literal: true
module Mutations
module CustomEmoji
class Destroy < BaseMutation
graphql_name 'DestroyCustomEmoji'
authorize :delete_custom_emoji
field :custom_emoji,
Types::CustomEmojiType,
null: true,
description: 'Deleted custom emoji.'
argument :id, ::Types::GlobalIDType[::CustomEmoji],
required: true,
description: 'Global ID of the custom emoji to destroy.'
def resolve(id:)
custom_emoji = authorized_find!(id: id)
custom_emoji.destroy!
{
custom_emoji: custom_emoji
}
end
private
def find_object(id:)
GitlabSchema.object_from_id(id, expected_type: ::CustomEmoji)
end
end
end
end

View File

@ -33,6 +33,7 @@ module Types
mount_mutation Mutations::Branches::Create, calls_gitaly: true
mount_mutation Mutations::Commits::Create, calls_gitaly: true
mount_mutation Mutations::CustomEmoji::Create, feature_flag: :custom_emoji
mount_mutation Mutations::CustomEmoji::Destroy, feature_flag: :custom_emoji
mount_mutation Mutations::CustomerRelations::Organizations::Create
mount_mutation Mutations::Discussions::ToggleResolve
mount_mutation Mutations::DependencyProxy::ImageTtlGroupPolicy::Update

View File

@ -5,7 +5,7 @@ module Types
class CustomEmoji < BasePermissionType
graphql_name 'CustomEmojiPermissions'
abilities :create_custom_emoji, :read_custom_emoji
abilities :create_custom_emoji, :read_custom_emoji, :delete_custom_emoji
end
end
end

View File

@ -203,34 +203,45 @@ module IssuesHelper
}
end
def issues_list_data(project, current_user, finder)
def common_issues_list_data(namespace, current_user)
{
autocomplete_award_emojis_path: autocomplete_award_emojis_path,
calendar_path: url_for(safe_params.merge(calendar_url_options)),
empty_state_svg_path: image_path('illustrations/issues.svg'),
full_path: namespace.full_path,
is_signed_in: current_user.present?.to_s,
jira_integration_path: help_page_url('integration/jira/issues', anchor: 'view-jira-issues'),
rss_path: url_for(safe_params.merge(rss_url_options)),
sign_in_path: new_user_session_path
}
end
def project_issues_list_data(project, current_user, finder)
common_issues_list_data(project, current_user).merge(
can_bulk_update: can?(current_user, :admin_issue, project).to_s,
can_edit: can?(current_user, :admin_project, project).to_s,
can_import_issues: can?(current_user, :import_issues, @project).to_s,
email: current_user&.notification_email,
emails_help_page_path: help_page_path('development/emails', anchor: 'email-namespace'),
empty_state_svg_path: image_path('illustrations/issues.svg'),
export_csv_path: export_csv_project_issues_path(project),
full_path: project.full_path,
has_any_issues: project_issues(project).exists?.to_s,
import_csv_issues_path: import_csv_namespace_project_issues_path,
initial_email: project.new_issuable_address(current_user, 'issue'),
is_signed_in: current_user.present?.to_s,
issues_path: project_issues_path(project),
jira_integration_path: help_page_url('integration/jira/issues', anchor: 'view-jira-issues'),
is_project: true.to_s,
markdown_help_path: help_page_path('user/markdown'),
max_attachment_size: number_to_human_size(Gitlab::CurrentSettings.max_attachment_size.megabytes),
new_issue_path: new_project_issue_path(project, issue: { milestone_id: finder.milestones.first.try(:id) }),
project_import_jira_path: project_import_jira_path(project),
quick_actions_help_path: help_page_path('user/project/quick_actions'),
reset_path: new_issuable_address_project_path(project, issuable_type: 'issue'),
rss_path: url_for(safe_params.merge(rss_url_options)),
show_new_issue_link: show_new_issue_link?(project).to_s,
sign_in_path: new_user_session_path
}
show_new_issue_link: show_new_issue_link?(project).to_s
)
end
def group_issues_list_data(group, current_user, issues)
common_issues_list_data(group, current_user).merge(
has_any_issues: issues.to_a.any?.to_s
)
end
# Overridden in EE

View File

@ -170,8 +170,6 @@ module Ci
scope :with_stale_live_trace, -> { with_live_trace.finished_before(12.hours.ago) }
scope :finished_before, -> (date) { finished.where('finished_at < ?', date) }
scope :with_secure_reports_from_options, -> (job_type) { where('options like :job_type', job_type: "%:artifacts:%:reports:%:#{job_type}:%") }
scope :with_secure_reports_from_config_options, -> (job_types) do
joins(:metadata).where("ci_builds_metadata.config_options -> 'artifacts' -> 'reports' ?| array[:job_types]", job_types: job_types)
end

View File

@ -323,6 +323,13 @@ class Issue < ApplicationRecord
)
end
def self.column_order_id_asc
Gitlab::Pagination::Keyset::ColumnOrderDefinition.new(
attribute_name: 'id',
order_expression: arel_table[:id].asc
)
end
def self.to_branch_name(*args)
branch_name = args.map(&:to_s).each_with_index.map do |arg, i|
arg.parameterize(preserve_case: i == 0).presence

View File

@ -526,6 +526,7 @@ class Project < ApplicationRecord
scope :sorted_by_stars_desc, -> { reorder(self.arel_table['star_count'].desc) }
scope :sorted_by_stars_asc, -> { reorder(self.arel_table['star_count'].asc) }
# Sometimes queries (e.g. using CTEs) require explicit disambiguation with table name
scope :projects_order_id_asc, -> { reorder(self.arel_table['id'].asc) }
scope :projects_order_id_desc, -> { reorder(self.arel_table['id'].desc) }
scope :sorted_by_similarity_desc, -> (search, include_in_select: false) do

View File

@ -2,4 +2,14 @@
class CustomEmojiPolicy < BasePolicy
delegate { @subject.group }
condition(:author) { @subject.creator == @user }
rule { can?(:maintainer_access) }.policy do
enable :delete_custom_emoji
end
rule { author & can?(:create_custom_emoji) }.policy do
enable :delete_custom_emoji
end
end

View File

@ -89,6 +89,7 @@ class GroupPolicy < BasePolicy
rule { guest }.policy do
enable :read_group
enable :upload_file
enable :guest_access
end
rule { admin }.policy do
@ -132,6 +133,7 @@ class GroupPolicy < BasePolicy
enable :create_custom_emoji
enable :create_package
enable :create_package_settings
enable :developer_access
end
rule { reporter }.policy do
@ -161,6 +163,7 @@ class GroupPolicy < BasePolicy
enable :read_deploy_token
enable :create_jira_connect_subscription
enable :update_runners_registration_token
enable :maintainer_access
end
rule { owner }.policy do
@ -176,6 +179,7 @@ class GroupPolicy < BasePolicy
enable :update_default_branch_protection
enable :create_deploy_token
enable :destroy_deploy_token
enable :owner_access
end
rule { can?(:read_nested_project_resources) }.policy do

View File

@ -34,7 +34,6 @@ module Projects
# We need `.connected_to_user` here otherwise when a group has an
# invitee, it would make the following query return 0 rows since a NULL
# user_id would be present in the subquery
# See http://stackoverflow.com/questions/129077/not-in-clause-and-null-values
non_null_user_ids = project.project_members.connected_to_user.select(:user_id)
GroupMembersFinder.new(project.group).execute.where.not(user_id: non_null_user_ids)
end

View File

@ -1,136 +0,0 @@
# frozen_string_literal: true
class IssueRebalancingService
MAX_ISSUE_COUNT = 10_000
BATCH_SIZE = 100
SMALLEST_BATCH_SIZE = 5
RETRIES_LIMIT = 3
TooManyIssues = Class.new(StandardError)
TIMING_CONFIGURATION = [
[0.1.seconds, 0.05.seconds], # short timings, lock_timeout: 100ms, sleep after LockWaitTimeout: 50ms
[0.5.seconds, 0.05.seconds],
[1.second, 0.5.seconds],
[1.second, 0.5.seconds],
[5.seconds, 1.second]
].freeze
def initialize(projects_collection)
@root_namespace = projects_collection.take.root_namespace # rubocop:disable CodeReuse/ActiveRecord
@base = Issue.in_projects(projects_collection)
end
def execute
return unless Feature.enabled?(:rebalance_issues, root_namespace)
raise TooManyIssues, "#{issue_count} issues" if issue_count > MAX_ISSUE_COUNT
start = RelativePositioning::START_POSITION - (gaps / 2) * gap_size
if Feature.enabled?(:issue_rebalancing_optimization)
Issue.transaction do
assign_positions(start, indexed_ids)
.sort_by(&:first)
.each_slice(BATCH_SIZE) do |pairs_with_position|
if Feature.enabled?(:issue_rebalancing_with_retry)
update_positions_with_retry(pairs_with_position, 'rebalance issue positions in batches ordered by id')
else
update_positions(pairs_with_position, 'rebalance issue positions in batches ordered by id')
end
end
end
else
Issue.transaction do
indexed_ids.each_slice(BATCH_SIZE) do |pairs|
pairs_with_position = assign_positions(start, pairs)
if Feature.enabled?(:issue_rebalancing_with_retry)
update_positions_with_retry(pairs_with_position, 'rebalance issue positions')
else
update_positions(pairs_with_position, 'rebalance issue positions')
end
end
end
end
end
private
attr_reader :root_namespace, :base
# rubocop: disable CodeReuse/ActiveRecord
def indexed_ids
base.reorder(:relative_position, :id).pluck(:id).each_with_index
end
# rubocop: enable CodeReuse/ActiveRecord
def assign_positions(start, pairs)
pairs.map do |id, index|
[id, start + (index * gap_size)]
end
end
def update_positions_with_retry(pairs_with_position, query_name)
retries = 0
batch_size = pairs_with_position.size
until pairs_with_position.empty?
begin
update_positions(pairs_with_position.first(batch_size), query_name)
pairs_with_position = pairs_with_position.drop(batch_size)
retries = 0
rescue ActiveRecord::StatementTimeout, ActiveRecord::QueryCanceled => ex
raise ex if batch_size < SMALLEST_BATCH_SIZE
if (retries += 1) == RETRIES_LIMIT
# shrink the batch size in half when RETRIES limit is reached and update still fails perhaps because batch size is still too big
batch_size = (batch_size / 2).to_i
retries = 0
end
retry
end
end
end
def update_positions(pairs_with_position, query_name)
values = pairs_with_position.map do |id, index|
"(#{id}, #{index})"
end.join(', ')
Gitlab::Database::WithLockRetries.new(timing_configuration: TIMING_CONFIGURATION, klass: self.class).run do
run_update_query(values, query_name)
end
end
def run_update_query(values, query_name)
Issue.connection.exec_query(<<~SQL, query_name)
WITH cte(cte_id, new_pos) AS #{Gitlab::Database::AsWithMaterialized.materialized_if_supported} (
SELECT *
FROM (VALUES #{values}) as t (id, pos)
)
UPDATE #{Issue.table_name}
SET relative_position = cte.new_pos
FROM cte
WHERE cte_id = id
SQL
end
def issue_count
@issue_count ||= base.count
end
def gaps
issue_count - 1
end
def gap_size
# We could try to split the available range over the number of gaps we need,
# but IDEAL_DISTANCE * MAX_ISSUE_COUNT is only 0.1% of the available range,
# so we are guaranteed not to exhaust it by using this static value.
#
# If we raise MAX_ISSUE_COUNT or IDEAL_DISTANCE significantly, this may
# change!
RelativePositioning::IDEAL_DISTANCE
end
end

View File

@ -0,0 +1,193 @@
# frozen_string_literal: true
module Issues
class RelativePositionRebalancingService
UPDATE_BATCH_SIZE = 100
PREFETCH_ISSUES_BATCH_SIZE = 10_000
SMALLEST_BATCH_SIZE = 5
RETRIES_LIMIT = 3
TooManyConcurrentRebalances = Class.new(StandardError)
def initialize(projects)
@projects_collection = (projects.is_a?(Array) ? Project.id_in(projects) : projects).projects_order_id_asc
@root_namespace = @projects_collection.take.root_namespace # rubocop:disable CodeReuse/ActiveRecord
@caching = ::Gitlab::Issues::Rebalancing::State.new(@root_namespace, @projects_collection)
end
def execute
return unless Feature.enabled?(:rebalance_issues, root_namespace)
# Given that can_start_rebalance? and track_new_running_rebalance are not atomic,
# it can happen that we end up with more than Rebalancing::State::MAX_NUMBER_OF_CONCURRENT_REBALANCES running.
# Considering the allowed Rebalancing::State::MAX_NUMBER_OF_CONCURRENT_REBALANCES is small, we should be OK,
# but this is something to reconsider if we want to scale this up.
error_message = "#{caching.concurrent_running_rebalances_count} concurrent re-balances currently running"
raise TooManyConcurrentRebalances, error_message unless caching.can_start_rebalance?
block_issue_repositioning! unless root_namespace.issue_repositioning_disabled?
caching.track_new_running_rebalance
index = caching.get_current_index
loop do
issue_ids = get_issue_ids(index, PREFETCH_ISSUES_BATCH_SIZE)
pairs_with_index = assign_indexes(issue_ids, index)
pairs_with_index.each_slice(UPDATE_BATCH_SIZE) do |pairs_batch|
update_positions_with_retry(pairs_batch, 're-balance issue positions in batches ordered by position')
end
index = caching.get_current_index
break if index >= caching.issue_count - 1
end
caching.cleanup_cache
unblock_issue_repositioning!
end
private
attr_reader :root_namespace, :projects_collection, :caching
def block_issue_repositioning!
Feature.enable(:block_issue_repositioning, root_namespace)
end
def unblock_issue_repositioning!
Feature.disable(:block_issue_repositioning, root_namespace)
end
def get_issue_ids(index, limit)
issue_ids = caching.get_cached_issue_ids(index, limit)
# if we have a list of cached issues and no current project id cached,
# then we successfully cached issues for all projects
return issue_ids if issue_ids.any? && caching.get_current_project_id.blank?
# if we got no issue ids at the start of re-balancing then we did not cache any issue ids yet
preload_issue_ids
caching.get_cached_issue_ids(index, limit)
end
# rubocop: disable CodeReuse/ActiveRecord
def preload_issue_ids
index = 0
cached_project_id = caching.get_current_project_id
collection = projects_collection
collection = projects_collection.where(Project.arel_table[:id].gteq(cached_project_id.to_i)) if cached_project_id.present?
collection.each do |project|
caching.cache_current_project_id(project.id)
index += 1
scope = Issue.in_projects(project).reorder(custom_reorder).select(:id, :relative_position)
with_retry(PREFETCH_ISSUES_BATCH_SIZE, 100) do |batch_size|
Gitlab::Pagination::Keyset::Iterator.new(scope: scope).each_batch(of: batch_size) do |batch|
caching.cache_issue_ids(batch)
end
end
end
caching.remove_current_project_id_cache
end
# rubocop: enable CodeReuse/ActiveRecord
def assign_indexes(ids, start_index)
ids.each_with_index.map do |id, idx|
[id, start_index + idx]
end
end
# The method runs in a loop where we try, up to RETRIES_LIMIT=3 times, to run the update statement on
# a number of records (the batch size). The method gets an array of (id, value) pairs as argument, which is used
# to build the update query matching by id and updating relative_position = value. If we get a statement
# timeout, we split the batch size in half and try (again up to 3 times) to batch update a smaller number of records.
# On success, because we know the batch size and we always pick from the beginning of the array param,
# we can drop the first batch_size items from the array and continue with the successful batch_size for the next batches.
# On failures we keep splitting the batch size in half, down to the SMALLEST_BATCH_SIZE limit, which is currently set at 5.
#
# e.g.
# 0. items | previous batch size|new batch size | comment
# 1. 100 | 100 | 100 | 3 failures -> split the batch size in half
# 2. 100 | 100 | 50 | 3 failures -> split the batch size in half again
# 3. 100 | 50 | 25 | 3 succeed -> so we drop 25 items 3 times, 4th fails -> split the batch size in half again
# 5. 25 | 25 | 12 | 3 failures -> split the batch size in half
# 6. 25 | 12 | 6 | 3 failures -> we exit because smallest batch size is 5 and we'll be at 3 if we split again
def update_positions_with_retry(pairs_with_index, query_name)
retry_batch_size = pairs_with_index.size
until pairs_with_index.empty?
with_retry(retry_batch_size, SMALLEST_BATCH_SIZE) do |batch_size|
retry_batch_size = batch_size
update_positions(pairs_with_index.first(batch_size), query_name)
# pairs_with_index[batch_size - 1] - can be nil for last batch
# if last batch is smaller than batch_size, so we just get the last pair.
last_pair_in_batch = pairs_with_index[batch_size - 1] || pairs_with_index.last
caching.cache_current_index(last_pair_in_batch.last + 1)
pairs_with_index = pairs_with_index.drop(batch_size)
end
end
end
def update_positions(pairs_with_position, query_name)
values = pairs_with_position.map do |id, index|
"(#{id}, #{start_position + (index * gap_size)})"
end.join(', ')
run_update_query(values, query_name)
end
def run_update_query(values, query_name)
Issue.connection.exec_query(<<~SQL, query_name)
WITH cte(cte_id, new_pos) AS #{Gitlab::Database::AsWithMaterialized.materialized_if_supported} (
SELECT *
FROM (VALUES #{values}) as t (id, pos)
)
UPDATE #{Issue.table_name}
SET relative_position = cte.new_pos
FROM cte
WHERE cte_id = id
SQL
end
def gaps
caching.issue_count - 1
end
def gap_size
RelativePositioning::MAX_GAP
end
def start_position
@start_position ||= (RelativePositioning::START_POSITION - (gaps / 2) * gap_size).to_i
end
def custom_reorder
::Gitlab::Pagination::Keyset::Order.build([Issue.column_order_relative_position, Issue.column_order_id_asc])
end
def with_retry(initial_batch_size, exit_batch_size)
retries = 0
batch_size = initial_batch_size
begin
yield batch_size
retries = 0
rescue ActiveRecord::StatementTimeout, ActiveRecord::QueryCanceled => ex
raise ex if batch_size < exit_batch_size
if (retries += 1) == RETRIES_LIMIT
# halve the batch size when the RETRIES limit is reached and the update still fails, perhaps because the batch size is still too big
batch_size = (batch_size / 2).to_i
retries = 0
end
retry
end
end
end
end

View File

@ -5,29 +5,34 @@
= content_for :meta_tags do
= auto_discovery_link_tag(:atom, safe_params.merge(rss_url_options).to_h, title: "#{@group.name} issues")
.top-area
= render 'shared/issuable/nav', type: :issues
.nav-controls
= render 'shared/issuable/feed_buttons'
- if @can_bulk_update
= render_if_exists 'shared/issuable/bulk_update_button', type: :issues
= render 'shared/new_project_item_select', path: 'issues/new', label: "New issue", type: :issues, with_feature_enabled: 'issues', with_shared: false, include_projects_in_subgroups: true
= render 'shared/issuable/search_bar', type: :issues
- if @can_bulk_update
= render_if_exists 'shared/issuable/group_bulk_update_sidebar', group: @group, type: :issues
- if Feature.enabled?(:vue_issuables_list, @group) && @issues.to_a.any?
- if use_startup_call?
- add_page_startup_api_call(api_v4_groups_issues_path(id: @group.id, params: startup_call_params))
.js-issuables-list{ data: { endpoint: expose_url(api_v4_groups_issues_path(id: @group.id)),
'can-bulk-edit': @can_bulk_update.to_json,
'empty-state-meta': { svg_path: image_path('illustrations/issues.svg') },
'sort-key': @sort,
type: 'issues',
'scoped-labels-available': scoped_labels_available?(@group).to_json } }
- if Feature.enabled?(:vue_issues_list, @group, default_enabled: :yaml)
.js-issues-list{ data: group_issues_list_data(@group, current_user, @issues) }
- if @can_bulk_update
= render_if_exists 'shared/issuable/group_bulk_update_sidebar', group: @group, type: :issues
- else
= render 'shared/issues', project_select_button: true
.top-area
= render 'shared/issuable/nav', type: :issues
.nav-controls
= render 'shared/issuable/feed_buttons'
- if @can_bulk_update
= render_if_exists 'shared/issuable/bulk_update_button', type: :issues
= render 'shared/new_project_item_select', path: 'issues/new', label: "New issue", type: :issues, with_feature_enabled: 'issues', with_shared: false, include_projects_in_subgroups: true
= render 'shared/issuable/search_bar', type: :issues
- if @can_bulk_update
= render_if_exists 'shared/issuable/group_bulk_update_sidebar', group: @group, type: :issues
- if Feature.enabled?(:vue_issuables_list, @group) && @issues.to_a.any?
- if use_startup_call?
- add_page_startup_api_call(api_v4_groups_issues_path(id: @group.id, params: startup_call_params))
.js-issuables-list{ data: { endpoint: expose_url(api_v4_groups_issues_path(id: @group.id)),
'can-bulk-edit': @can_bulk_update.to_json,
'empty-state-meta': { svg_path: image_path('illustrations/issues.svg') },
'sort-key': @sort,
type: 'issues',
'scoped-labels-available': scoped_labels_available?(@group).to_json } }
- else
= render 'shared/issues', project_select_button: true

View File

@ -13,8 +13,8 @@
issues_path: project_issues_path(@project),
project_path: @project.full_path } }
- if Feature.enabled?(:vue_issues_list, @project)
.js-issues-list{ data: issues_list_data(@project, current_user, finder) }
- if Feature.enabled?(:vue_issues_list, @project&.group, default_enabled: :yaml)
.js-issues-list{ data: project_issues_list_data(@project, current_user, finder) }
- if @can_bulk_update
= render 'shared/issuable/bulk_update_sidebar', type: :issues
- elsif project_issues(@project).exists?

View File

@ -32,12 +32,8 @@ class IssueRebalancingWorker
return
end
# Temporarily disable rebalancing for performance reasons
# For more information check https://gitlab.com/gitlab-com/gl-infra/production/-/issues/4321
return if projects_to_rebalance.take&.root_namespace&.issue_repositioning_disabled? # rubocop:disable CodeReuse/ActiveRecord
IssueRebalancingService.new(projects_to_rebalance).execute
rescue IssueRebalancingService::TooManyIssues => e
Issues::RelativePositionRebalancingService.new(projects_to_rebalance).execute
rescue Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances => e
Gitlab::ErrorTracking.log_exception(e, root_namespace_id: root_namespace_id, project_id: project_id)
end

View File

@ -1,8 +0,0 @@
---
name: issue_rebalancing_optimization
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/53384
rollout_issue_url:
milestone: '13.9'
type: development
group: group::project management
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: issue_rebalancing_with_retry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/59744
rollout_issue_url:
milestone: '13.11'
type: development
group: group::project management
default_enabled: false

View File

@ -1,8 +0,0 @@
---
name: roadmap_daterange_filter
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/55639
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/323917
milestone: '14.3'
type: development
group: group::product planning
default_enabled: false

View File

@ -13,7 +13,7 @@ require 'marginalia'
# matching against the raw SQL, and prepending the comment prevents color
# coding from working in the development log.
Marginalia::Comment.prepend_comment = true if Rails.env.production?
Marginalia::Comment.components = [:application, :correlation_id, :jid, :endpoint_id]
Marginalia::Comment.components = [:application, :correlation_id, :jid, :endpoint_id, :db_config_name]
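# With these components, each SQL query is annotated with a marginalia comment
# along the lines of the following (the values shown are purely illustrative):
#   /*application:web,correlation_id:01FABCDE,endpoint_id:ProjectsController#show,db_config_name:main*/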
# As mentioned in https://github.com/basecamp/marginalia/pull/93/files,
# adding :line has some overhead because a regexp on the backtrace has

View File

@ -3,4 +3,3 @@ filenames:
- ee/app/assets/javascripts/oncall_schedules/graphql/mutations/update_oncall_schedule_rotation.mutation.graphql
- ee/app/assets/javascripts/security_configuration/api_fuzzing/graphql/api_fuzzing_ci_configuration.query.graphql
- ee/app/assets/javascripts/security_configuration/api_fuzzing/graphql/create_api_fuzzing_configuration.mutation.graphql
- ee/app/assets/javascripts/security_configuration/dast_profiles/graphql/dast_failed_site_validations.query.graphql

View File

@ -17,9 +17,8 @@
# 3: high priority
# 5: _super_ high priority, this should only be used for _very_ important queues
#
# As per http://stackoverflow.com/a/21241357/290102 the formula for calculating
# the likelihood of a job being popped off a queue (given all queues have work
# to perform) is:
# The formula for calculating the likelihood of a job being popped off a queue
# (given all queues have work to perform) is:
#
# chance = (queue weight / total weight of all queues) * 100
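#
# For example, with three queues weighted 1, 3, and 5, the weight-5 queue is picked
# roughly 56% of the time (5 / 9 * 100), assuming every queue has jobs waiting.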
---

View File

@ -11,8 +11,8 @@ class CreateVulnerabilityFindingLinks < ActiveRecord::Migration[6.0]
create_table :vulnerability_finding_links, if_not_exists: true do |t|
t.timestamps_with_timezone null: false
t.references :vulnerability_occurrence, index: { name: 'finding_links_on_vulnerability_occurrence_id' }, null: false, foreign_key: { on_delete: :cascade }
t.text :name, limit: 255
t.text :url, limit: 2048, null: false
t.text :name
t.text :url, null: false
end
add_text_limit :vulnerability_finding_links, :name, 255

View File

@ -418,7 +418,7 @@ p.create_wiki ### creates the wiki project on the filesystem
```ruby
p = Project.find_by_full_path('PROJECT PATH')
IssueRebalancingService.new(p.issues.take).execute
Issues::RelativePositionRebalancingService.new(p.root_namespace.all_projects).execute
```
## Imports / Exports

View File

@ -1924,6 +1924,27 @@ Input type: `DestroyContainerRepositoryTagsInput`
| <a id="mutationdestroycontainerrepositorytagsdeletedtagnames"></a>`deletedTagNames` | [`[String!]!`](#string) | Deleted container repository tags. |
| <a id="mutationdestroycontainerrepositorytagserrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during execution of the mutation. |
### `Mutation.destroyCustomEmoji`
Available only when feature flag `custom_emoji` is enabled. This flag is disabled by default, because the feature is experimental and is subject to change without notice.
Input type: `DestroyCustomEmojiInput`
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationdestroycustomemojiclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationdestroycustomemojiid"></a>`id` | [`CustomEmojiID!`](#customemojiid) | Global ID of the custom emoji to destroy. |
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationdestroycustomemojiclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationdestroycustomemojicustomemoji"></a>`customEmoji` | [`CustomEmoji`](#customemoji) | Deleted custom emoji. |
| <a id="mutationdestroycustomemojierrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during execution of the mutation. |
### `Mutation.destroyEpicBoard`
Input type: `DestroyEpicBoardInput`
@ -12526,6 +12547,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="projectdastsitevalidationsnormalizedtargeturls"></a>`normalizedTargetUrls` | [`[String!]`](#string) | Normalized URL of the target to be scanned. |
| <a id="projectdastsitevalidationsstatus"></a>`status` | [`DastSiteValidationStatusEnum`](#dastsitevalidationstatusenum) | Status of the site validation. Ignored if `dast_failed_site_validations` feature flag is disabled. |
##### `Project.environment`
@ -15376,6 +15398,15 @@ Unit for the duration of Dast Profile Cadence.
| <a id="dastsiteprofilevalidationstatusenumpassed_validation"></a>`PASSED_VALIDATION` | Site validation process finished successfully. |
| <a id="dastsiteprofilevalidationstatusenumpending_validation"></a>`PENDING_VALIDATION` | Site validation process has not started. |
### `DastSiteValidationStatusEnum`
| Value | Description |
| ----- | ----------- |
| <a id="dastsitevalidationstatusenumfailed_validation"></a>`FAILED_VALIDATION` | Site validation process finished but failed. |
| <a id="dastsitevalidationstatusenuminprogress_validation"></a>`INPROGRESS_VALIDATION` | Site validation process is in progress. |
| <a id="dastsitevalidationstatusenumpassed_validation"></a>`PASSED_VALIDATION` | Site validation process finished successfully. |
| <a id="dastsitevalidationstatusenumpending_validation"></a>`PENDING_VALIDATION` | Site validation process has not started. |
### `DastSiteValidationStrategyEnum`
| Value | Description |

View File

@ -11,11 +11,13 @@ info: To determine the technical writer assigned to the Stage/Group associated w
When adding new columns that will be used to store strings or other textual information:
1. We always use the `text` data type instead of the `string` data type.
1. `text` columns should always have a limit set, either by using the `create_table_with_constraints` helper
when creating a table, or by using the `add_text_limit` when altering an existing table.
1. `text` columns should always have a limit set, either by using `create_table` with
the `#text ... limit: 100` helper (see below) when creating a table, or by using `add_text_limit`
when altering an existing table.
The `text` data type can not be defined with a limit, so `create_table_with_constraints` and `add_text_limit` enforce
that by adding a [check constraint](https://www.postgresql.org/docs/11/ddl-constraints.html) on the column.
The standard Rails `text` column type cannot be defined with a limit, but we extend `create_table` to
add a `limit: 255` option. Outside of `create_table`, `add_text_limit` can be used to add a [check constraint](https://www.postgresql.org/docs/11/ddl-constraints.html)
to an existing column.
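For example, a minimal sketch of adding a limit to an existing column with `add_text_limit` (the table, column, and the `remove_text_limit` rollback helper are used here purely for illustration):

```ruby
class AddTitleLimitToDbGuides < Gitlab::Database::Migration[1.0]
  disable_ddl_transaction!

  def up
    # Adds the check constraint and validates it, which requires a full table scan.
    add_text_limit :db_guides, :title, 128
  end

  def down
    remove_text_limit :db_guides, :title
  end
end
```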
## Background information
@ -41,34 +43,24 @@ Don't use text columns for `attr_encrypted` attributes. Use a
## Create a new table with text columns
When adding a new table, the limits for all text columns should be added in the same migration as
the table creation.
the table creation. We add a `limit:` attribute to Rails' `#text` method, which allows adding a limit
for this column.
For example, consider a migration that creates a table with two text columns,
`db/migrate/20200401000001_create_db_guides.rb`:
```ruby
class CreateDbGuides < Gitlab::Database::Migration[1.0]
def up
create_table_with_constraints :db_guides do |t|
def change
create_table :db_guides do |t|
t.bigint :stars, default: 0, null: false
t.text :title
t.text :notes
t.text_limit :title, 128
t.text_limit :notes, 1024
t.text :title, limit: 128
t.text :notes, limit: 1024
end
end
def down
# No need to drop the constraints, drop_table takes care of everything
drop_table :db_guides
end
end
```
Note that the `create_table_with_constraints` helper uses the `with_lock_retries` helper
internally, so we don't need to manually wrap the method call in the migration.
## Add a text column to an existing table
Adding a column to an existing table requires an exclusive lock for that table. Even though that lock

View File

@ -108,7 +108,7 @@ the following preparations into account.
- Ensure the down method reverts the changes in `db/structure.sql`.
- Update the migration output whenever you modify the migrations during the review process.
- Add tests for the migration in `spec/migrations` if necessary. See [Testing Rails migrations at GitLab](testing_guide/testing_migrations_guide.md) for more details.
- When [high-traffic](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3) tables are involved in the migration, use the [`with_lock_retries`](migration_style_guide.md#retry-mechanism-when-acquiring-database-locks) helper method. Review the relevant [examples in our documentation](migration_style_guide.md#examples) for use cases and solutions.
- When [high-traffic](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3) tables are involved in the migration, use the [`enable_lock_retries`](migration_style_guide.md#retry-mechanism-when-acquiring-database-locks) method to enable lock-retries. Review the relevant [examples in our documentation](migration_style_guide.md#usage-with-transactional-migrations) for use cases and solutions.
- Ensure RuboCop checks are not disabled unless there's a valid reason to.
- When adding an index to a [large table](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3),
test its execution using `CREATE INDEX CONCURRENTLY` in the `#database-lab` Slack channel and add the execution time to the MR description:

View File

@ -281,79 +281,91 @@ This problem could cause failed application upgrade processes and even applicati
stability issues, since the table may be inaccessible for a short period of time.
To increase the reliability and stability of database migrations, the GitLab codebase
offers a helper method to retry the operations with different `lock_timeout` settings
and wait time between the attempts. Multiple smaller attempts to acquire the necessary
offers a method to retry the operations with different `lock_timeout` settings
and wait time between the attempts. Multiple shorter attempts to acquire the necessary
lock allow the database to process other statements.
### Examples
There are two distinct ways to use lock retries:
1. Inside a transactional migration: use `enable_lock_retries!`.
1. Inside a non-transactional migration: use `with_lock_retries`.
If possible, enable lock-retries for any migration that touches a [high-traffic table](#high-traffic-tables).
### Usage with transactional migrations
Regular migrations execute the full migration in a transaction. We can enable the
lock-retry methodology by calling `enable_lock_retries!` at the migration level.
This controls the lock timeout for this migration and can also retry the full
migration if the lock could not be granted within the timeout.
Note that, while this is currently an opt-in setting, we prefer to use lock-retries for all migrations and
plan to make this the default going forward.
Occasionally a migration may need to acquire multiple locks on different objects.
To prevent catalog bloat, ask for all those locks explicitly before performing any DDL.
A better strategy is to split the migration, so that we only need to acquire one lock at a time.
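As an illustrative sketch of the splitting approach (file names and the `projects` column are made up), ship two single-purpose migrations instead of one migration that locks both tables:

```ruby
# db/migrate/20210915000001_remove_full_name_from_users.rb - locks only the users table
enable_lock_retries!

def change
  remove_column :users, :full_name, :string
end

# db/migrate/20210915000002_remove_bio_from_projects.rb - a separate migration, locks only the projects table
enable_lock_retries!

def change
  remove_column :projects, :bio, :string
end
```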
**Removing a column:**
```ruby
enable_lock_retries!
def up
with_lock_retries do
remove_column :users, :full_name
end
remove_column :users, :full_name
end
def down
with_lock_retries do
add_column :users, :full_name, :string
end
add_column :users, :full_name, :string
end
```
**Multiple changes on the same table:**
The helper `with_lock_retries` wraps all operations into a single transaction. When you have the lock,
With the lock-retry methodology enabled, all operations are wrapped in a single transaction. When you have the lock,
you should do as much as possible inside the transaction rather than trying to get another lock later.
Be careful about running long database statements within the block. The acquired locks are kept until the transaction (block) finishes and, depending on the lock type, they might block other database operations.
```ruby
enable_lock_retries!
def up
with_lock_retries do
add_column :users, :full_name, :string
add_column :users, :bio, :string
end
add_column :users, :full_name, :string
add_column :users, :bio, :string
end
def down
with_lock_retries do
remove_column :users, :full_name
remove_column :users, :bio
end
remove_column :users, :full_name
remove_column :users, :bio
end
```
**Removing a foreign key:**
```ruby
enable_lock_retries!
def up
with_lock_retries do
remove_foreign_key :issues, :projects
end
remove_foreign_key :issues, :projects
end
def down
with_lock_retries do
add_foreign_key :issues, :projects
end
add_foreign_key :issues, :projects
end
```
**Changing default value for a column:**
```ruby
enable_lock_retries!
def up
with_lock_retries do
change_column_default :merge_requests, :lock_version, from: nil, to: 0
end
change_column_default :merge_requests, :lock_version, from: nil, to: 0
end
def down
with_lock_retries do
change_column_default :merge_requests, :lock_version, from: 0, to: nil
end
change_column_default :merge_requests, :lock_version, from: 0, to: nil
end
```
@ -362,19 +374,17 @@ end
We can use the lock-retry methodology with the `create_table` method:
```ruby
enable_lock_retries!
def up
with_lock_retries do
create_table :issues do |t|
t.references :project, index: true, null: false, foreign_key: { on_delete: :cascade }
t.string :title, limit: 255
end
create_table :issues do |t|
t.references :project, index: true, null: false, foreign_key: { on_delete: :cascade }
t.string :title, limit: 255
end
end
def down
with_lock_retries do
drop_table :issues
end
drop_table :issues
end
```
@ -442,16 +452,20 @@ def down
end
```
**Usage with `disable_ddl_transaction!`**
### Usage with non-transactional migrations (`disable_ddl_transaction!`)
Generally the `with_lock_retries` helper should work with `disable_ddl_transaction!`. A custom RuboCop rule ensures that only allowed methods can be placed within the lock retries block.
Only when we disable transactional migrations using `disable_ddl_transaction!` can we use
the `with_lock_retries` helper to guard an individual sequence of steps. It opens a transaction
to execute the given block.
A custom RuboCop rule ensures that only allowed methods can be placed within the lock retries block.
```ruby
disable_ddl_transaction!
def up
with_lock_retries do
add_column :users, :name, :text
add_column :users, :name, :text unless column_exists?(:users, :name)
end
add_text_limit :users, :name, 255 # Includes constraint validation (full table scan)
@ -472,7 +486,8 @@ end
### When to use the helper method
The `with_lock_retries` helper method can be used when you normally use
You can **only** use the `with_lock_retries` helper method when the execution is not already inside
an open transaction (using Postgres subtransactions is discouraged). It can be used with
standard Rails migration helper methods. Calling more than one migration
helper is not a problem if they're executed on the same table.
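For instance, a minimal sketch (with illustrative column names) that calls more than one helper on the same table inside a single `with_lock_retries` block in a non-transactional migration:

```ruby
disable_ddl_transaction!

def up
  with_lock_retries do
    add_column :users, :nickname, :string
    add_column :users, :pronouns, :string
  end
end

def down
  with_lock_retries do
    remove_column :users, :pronouns
    remove_column :users, :nickname
  end
end
```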

View File

@ -18,3 +18,31 @@ pay 25% of what you would have paid previously. This results in substantial savi
If it's not possible for you to participate in quarterly reconciliations, you can opt out of the
process by using a contract amendment. In that case, you default to the annual review.
## Timeline for invoicing and payment
At the end of each subscription quarter, GitLab notifies you about overages.
The date you're notified about the overage is not the same as the date
you are billed.
### GitLab SaaS
Group owners receive an email **on the reconciliation date**.
The email communicates the [overage seat quantity](gitlab_com/index.md#seats-owed-example)
and expected invoice amount.
**Seven days later**, the subscription is updated to include the additional
seats, and an invoice is generated for a prorated amount. If a credit card
is on file, a payment is automatically applied. Otherwise, an invoice is
sent and subject to your payment terms.
### Self-managed instances
Admins receive an email **six days after the reconciliation date**.
This email communicates the [overage seat quantity](self_managed/index.md#users-over-license)
and expected invoice amount.
**Seven days later**, the subscription is updated to include the additional
seats, and an invoice is generated for a prorated amount. If a credit card
is on file, a payment is automatically applied. Otherwise, an invoice is
sent and subject to your payment terms.

View File

@ -50,11 +50,11 @@ The following metrics and visualizations are available on a project or group lev
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/13188) in GitLab 12.4.
You can filter analytics based on a date range. To filter results:

1. Select a group.
1. Optional. Select a project.
1. Select a date range by using the available date pickers.
## Permissions

View File

@ -176,17 +176,14 @@ To perform a one-liner installation, run the command below. Make sure to replace
- `your-agent-token` with the token received from the previous step (identified as `secret` in the JSON output).
- `gitlab-kubernetes-agent` with the namespace you defined in the previous step.
- `wss://kas.gitlab.example.com` with the configured access of the Kubernetes Agent Server (KAS). For GitLab.com users, the KAS is available under `wss://kas.gitlab.com`.
- `--agent-version=vX.Y.Z` with the latest released patch version matching your GitLab installation's major and minor versions. For example, for GitLab v13.9.0, use `--agent-version=v13.9.1`. You can find your GitLab version under the "Help/Help" menu.
```shell
docker run --pull=always --rm registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/cli:stable generate --agent-token=your-agent-token --kas-address=wss://kas.gitlab.example.com --agent-version=vX.Y.Z --namespace gitlab-kubernetes-agent | kubectl apply -f -
```
WARNING:
`--agent-version stable` can be used to refer to the latest stable release at the time when the command runs. It's fine for
testing purposes but for production please make sure to specify a matching version explicitly.
To find out the various options the above Docker container supports, run:
@ -289,7 +286,7 @@ spec:
containers:
- name: agent
# Make sure to specify a matching version for production
image: "registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/agentk:stable"
image: "registry.gitlab.com/gitlab-org/cluster-integration/gitlab-agent/agentk:vX.Y.Z
args:
- --token-file=/config/token
- --kas-address
@ -385,29 +382,16 @@ Each time you push a change to a monitored manifest repository, the Agent logs t
#### Example manifest file
This file creates a minimal `ConfigMap`:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-map
  namespace: gitlab-kubernetes-agent # Can be any namespace managed by you that the agent has access to.
data:
  key: value
```
## Example projects

Binary file not shown.


View File

@ -32,17 +32,20 @@ Management projects are restricted to the following:
group (or descendants) as the cluster's group.
- For instance-level clusters, there are no such restrictions.
## How to create and configure a cluster management project

To use a cluster management project to manage your cluster:

1. Create a new project to serve as the cluster management project
   for your cluster. We recommend that you
   [create this project based on the Cluster Management project template](management_project_template.md#create-a-new-project-based-on-the-cluster-management-template).
1. [Associate the cluster with the management project](#associate-the-cluster-management-project-with-the-cluster).
1. [Configure your cluster's pipelines](#configuring-your-pipeline).
1. [Set the environment scope](#setting-the-environment-scope).

### Associate the cluster management project with the cluster

To associate a cluster management project with your cluster:
1. Navigate to the appropriate configuration page. For a:
- [Project-level cluster](../project/clusters/index.md), go to your project's
@ -50,10 +53,9 @@ To select a cluster management project to use:
- [Group-level cluster](../group/clusters/index.md), go to your group's **Kubernetes**
page.
- [Instance-level cluster](../instance/clusters/index.md), on the top bar, select **Menu > Admin > Kubernetes**.
1. Expand **Advanced settings**.
1. From the **Cluster management project** dropdown, select the cluster management project
   you created in the previous step.
### Configuring your pipeline

View File

@ -4,39 +4,68 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Cluster Management project template **(FREE)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25318) in GitLab 12.10 with Helmfile support via Helm v2.
> - Helm v2 support was [dropped](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/63577) in GitLab 14.0. Use Helm v3 instead.
With a [cluster management project](management_project.md) you can manage
your cluster's deployment and applications through a repository in GitLab.

The Cluster Management project template provides you a baseline to get
started and flexibility to customize your project to your cluster's needs.
For instance, you can:
- Extend the CI/CD configuration.
- Configure the built-in cluster applications.
- Remove the built-in cluster applications you don't need.
- Add other cluster applications using the same structure as the ones already available.
The template contains the following [components](#available-components):
- A predefined [`.gitlab-ci.yml`](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/blob/master/.gitlab-ci.yml)
file, with a CI pipeline already configured.
- A main [`helmfile.yaml`](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/blob/master/helmfile.yaml) to toggle which applications you would like to manage.
- An `applications` directory with a `helmfile.yaml` configured for each application GitLab provides.
WARNING:
If you used [GitLab Managed Apps](applications.md) to manage your
cluster from GitLab, see how to [migrate from GitLab Managed Apps](migrating_from_gma_to_project_template.md) to the Cluster Management
project.
## Set up the management project from the Cluster Management project template
To set up your cluster's management project from the Cluster Management project template:
1. [Create a new project based on the Cluster Management template](#create-a-new-project-based-on-the-cluster-management-template).
1. [Associate the cluster management project with your cluster](management_project.md#associate-the-cluster-management-project-with-the-cluster).
1. Use the [available components](#available-components) to manage your cluster.
### Create a new project based on the Cluster Management template
To get started, create a new project based on the Cluster Management
project template to use as a cluster management project.
You can either create the [new project](../project/working_with_projects.md#create-a-project)
from the template or import the project from the URL. Importing
the project is useful if you are using a GitLab self-managed
instance that may not have the latest version of the template.
To create the new project:
- From the template: select the **GitLab Cluster Management** project template.
- Importing from the URL: use `https://gitlab.com/gitlab-org/project-templates/cluster-management.git`.
## Available components
Use the available components to configure your cluster:
- [A `.gitlab-ci.yml` file](#the-gitlab-ciyml-file).
- [A main `helmfile.yml` file](#the-main-helmfileyml-file).
- [A directory with built-in applications](#built-in-applications).
### The `.gitlab-ci.yml` file
The base image used in your pipeline is built by the [cluster-applications](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications)
project. This image consists of a set of Bash utility scripts to support [Helm v3 releases](https://helm.sh/docs/intro/using_helm/#three-big-concepts):
@ -52,23 +81,21 @@ project. This image consists of a set of Bash utility scripts to support [Helm v
facilitate the GitLab Managed Apps adoption.
- `gl-helmfile {arguments}`: A thin wrapper that triggers the [Helmfile](https://github.com/roboll/helmfile) command.
### The main `helmfile.yml` file
This file has a list of paths to other Helmfiles for each app. They're all commented out by default, so you must uncomment
the paths for the apps that you would like to use in your cluster.
By default, each `helmfile.yaml` in these sub-paths has the attribute `installed: true`. This means that every time
the pipeline runs, Helmfile tries to either install or update your apps according to the current state of your
cluster and Helm releases. If you change this attribute to `installed: false`, Helmfile tries to uninstall this app
from your cluster. [Read more](https://github.com/roboll/helmfile) about how Helmfile works.
Furthermore, each app has an `applications/{app}/values.yaml` file (`applications/{app}/values.yaml.gotmpl` in case of GitLab Runner). This is the
place where you can define default values for your app's Helm chart. Some apps already have defaults
pre-defined by GitLab.
### Built-in applications

The built-in applications are intended to provide an easy way to get started with various Kubernetes oriented GitLab features.
The [built-in supported applications](https://gitlab.com/gitlab-org/project-templates/cluster-management/-/tree/master/applications) are:
@ -83,8 +110,3 @@ The [built-in supported applications](https://gitlab.com/gitlab-org/project-temp
- [Prometheus](../infrastructure/clusters/manage/management_project_applications/prometheus.md)
- [Sentry](../infrastructure/clusters/manage/management_project_applications/sentry.md)
- [Vault](../infrastructure/clusters/manage/management_project_applications/vault.md)
### Migrate from GitLab Managed Apps
If you had GitLab Managed Apps, either One-Click or CI/CD install, read the docs on how to
[migrate from GitLab Managed Apps to project template](migrating_from_gma_to_project_template.md)

View File

@ -4,23 +4,24 @@ group: Configure
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Migrate from GitLab Managed Apps to Cluster Management Projects
The [GitLab Managed Apps](applications.md) were deprecated in GitLab 14.0
in favor of [Cluster Management Projects](management_project.md).
Managing your cluster applications through a project gives you much more
flexibility to manage your cluster than the deprecated GitLab Managed Apps.

To migrate to a cluster management project you need
[GitLab Runners](../../ci/runners/index.md)
available and to be familiar with [Helm](https://helm.sh/).
## Migrate to a Cluster Management Project

If you are using a self-managed GitLab instance older than the latest one, import the cluster management project via URL from `https://gitlab.com/gitlab-org/project-templates/cluster-management.git`.

To migrate from GitLab Managed Apps to a Cluster Management Project,
follow the steps below.
See also [video walk-throughs](#video-walk-throughs) with examples.

1. Create a new project based on the [Cluster Management Project template](management_project_template.md#create-a-new-project-based-on-the-cluster-management-template).
1. [Associate your new Cluster Management Project with your cluster](management_project.md#associate-the-cluster-management-project-with-the-cluster).
1. Detect apps deployed through Helm v2 releases by using the pre-configured [`.gitlab-ci.yml`](management_project_template.md#the-gitlab-ciyml-file) file:
- In case you had overwritten the default GitLab Managed Apps namespace, edit `.gitlab-ci.yml`,
and make sure the script is receiving the correct namespace as an argument:
@ -125,7 +126,9 @@ you want to manage with the Cluster Management Project.
For example, if you found a resource of type `ConfigMap` named `cert-manager-controller`, delete it by executing:
`kubectl delete configmap -n gitlab-managed-apps cert-manager-controller`.
## Video walk-throughs
You can watch these videos with examples on how to migrate from GMA to a Cluster Management project:
- [Migrating from scratch using a brand new cluster management project](https://youtu.be/jCUFGWT0jS0). Also covers Helm v2 apps migration.
- [Migrating from an existing GitLab managed apps CI/CD project](https://youtu.be/U2lbBGZjZmc).

View File

@ -7,7 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Value Stream Analytics **(PREMIUM)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/196455) in [GitLab Premium](https://about.gitlab.com/pricing/) 12.9 for groups.
Value Stream Analytics measures the time spent to go from an
[idea to production](https://about.gitlab.com/blog/2016/08/05/continuous-integration-delivery-and-deployment-with-gitlab/#from-idea-to-production-with-gitlab)

View File

@ -53,9 +53,3 @@ You can customize the installation of cert-manager by defining a
management project. Refer to the
[chart](https://github.com/jetstack/cert-manager) for the
available configuration options.
Support for installing the Cert Manager managed application is provided by the
GitLab Configure group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Configure group](https://about.gitlab.com/handbook/product/categories/#configure-group).

View File

@ -120,9 +120,3 @@ global:
enabled:
- 'flow:sourceContext=namespace;destinationContext=namespace'
```
Support for installing the Cilium managed application is provided by the
GitLab Container Security group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).

View File

@ -27,8 +27,3 @@ You can customize the installation of Elastic Stack by updating the
management project. Refer to the
[chart](https://gitlab.com/gitlab-org/charts/elastic-stack) for all
available configuration options.
Support for installing the Elastic Stack managed application is provided by the
GitLab Monitor group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the [Monitor group](https://about.gitlab.com/handbook/product/categories/#monitor-group).

View File

@ -93,9 +93,3 @@ You can check these logs with the following command:
```shell
kubectl -n gitlab-managed-apps logs -l app=falco
```
Support for installing the Falco managed application is provided by the
GitLab Container Security group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).

View File

@ -28,9 +28,3 @@ for the current development release of Fluentd for all available configuration o
The configuration chart link points to the current development release, which
may differ from the version you have installed. To ensure compatibility, switch
to the specific branch or tag you are using.
Support for installing the Fluentd managed application is provided by the
GitLab Container Security group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Container Security group](https://about.gitlab.com/handbook/product/categories/#container-security-group).

View File

@ -24,8 +24,3 @@ You can customize the installation of Ingress by updating the
management project. Refer to the
[chart](https://github.com/helm/charts/tree/master/stable/nginx-ingress)
for the available configuration options.
Support for installing the Ingress managed application is provided by the GitLab Configure group.
If you run into unknown issues, [open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new),
and ping at least 2 people from the
[Configure group](https://about.gitlab.com/handbook/product/categories/#configure-group).

View File

@ -25,8 +25,3 @@ You can customize the installation of Prometheus by updating the
management project. Refer to the
[Configuration section](https://github.com/helm/charts/tree/master/stable/prometheus#configuration)
of the Prometheus chart's README for the available configuration options.
Support for installing the Prometheus managed application is provided by the
GitLab Monitor group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the [Monitor group](https://about.gitlab.com/handbook/product/categories/#monitor-group).

View File

@ -42,9 +42,3 @@ You can customize the installation of GitLab Runner by defining
management project. Refer to the
[chart](https://gitlab.com/gitlab-org/charts/gitlab-runner) for the
available configuration options.
Support for installing the GitLab Runner managed application is provided by the
GitLab Runner group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Runner group](https://about.gitlab.com/handbook/product/categories/#runner-group).

View File

@ -68,9 +68,3 @@ ingress:
postgresql:
postgresqlPassword: example-postgresql-password
```
Support for installing the Sentry managed application is provided by the
GitLab Monitor group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Monitor group](https://about.gitlab.com/handbook/product/categories/#monitor-group).

View File

@ -100,9 +100,3 @@ kubectl -n gitlab-managed-apps exec -it vault-0 sh
This should give you your unseal keys and initial root token. Make sure to note these down
and keep these safe, as they're required to unseal the Vault throughout its lifecycle.
Support for installing the Vault managed application is provided by the
GitLab Release Management group. If you run into unknown issues,
[open a new issue](https://gitlab.com/gitlab-org/gitlab/-/issues/new), and ping at
least 2 people from the
[Release Management group](https://about.gitlab.com/handbook/product/categories/#release-management-group).

View File

@ -9,11 +9,9 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
WARNING:
Creating a new cluster through cluster certificates
is deprecated and no longer recommended. To create a new cluster use
[Infrastructure as Code](../../infrastructure/iac/index.md#create-a-new-cluster-through-iac).
NOTE:
Every new Google Cloud Platform (GCP) account receives
@ -30,29 +28,38 @@ in a few clicks.
## Create new cluster
> [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/327908) in GitLab 14.0.
As of GitLab 14.0, use [Infrastructure as Code](../../infrastructure/iac/index.md#create-a-new-cluster-through-iac)
to **safely create new clusters from GitLab**.
Creating clusters from GitLab using cluster certificates is still available on the
GitLab UI but was **deprecated** in GitLab 14.0 and is scheduled for removal in
GitLab 15.0. We don't recommend using this method.

You can create a new cluster hosted in EKS, GKE, on premises, and with other
providers using cluster certificates:
- [New cluster hosted on Google Kubernetes Engine (GKE)](add_gke_clusters.md).
- [New cluster hosted on Amazon Elastic Kubernetes Service (EKS)](add_eks_clusters.md).
To host them on premises and with other providers, you can use Terraform
or your preferred tool to create and connect a cluster with GitLab.
The [GitLab Terraform provider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs/resources/project_cluster)
supports connecting existing clusters using the certificate-based connection method.
## Add existing cluster
As of GitLab 14.0, use the [GitLab Kubernetes Agent](../../clusters/agent/index.md)
to connect your cluster to GitLab.

Alternatively, you can [add an existing cluster](add_existing_cluster.md)
through the certificate-based method, but we don't recommend using this method due to its [security implications](index.md#security-implications).
## Configure your cluster
As of GitLab 14.0, use the [GitLab Kubernetes Agent](../../clusters/agent/index.md)
to configure your cluster.
## Disable a cluster

View File

@ -114,7 +114,7 @@ module Gitlab
# If child commit is a direct ancestor, its first parent is also a direct ancestor.
# We assume direct ancestors matches the trail of the target branch over time,
# This assumption is correct most of the time, especially for gitlab managed merges,
# but there are exception cases which can't be solved.
def mark_all_direct_ancestors(commit)
loop do
commit = get_commit(commit.parent_ids.first)

View File

@ -21,7 +21,7 @@
#
# The deploy stage copies the exe and msi from build stage to a network drive
# You need to have the network drive mapped as Local System user for gitlab-runner service to see it
# The best way to persist the mapping is via a scheduled task
# running the following batch command: net use P: \\x.x.x.x\Projects /u:your_user your_pass /persistent:yes
# place project specific paths in variables to make the rest of the script more generic

View File

@ -3,7 +3,6 @@
module Gitlab
module Ci
class Trace
# This was inspired from: http://stackoverflow.com/a/10219411/1520132
class Stream
BUFFER_SIZE = 4096
LIMIT_SIZE = 500.kilobytes

View File

@ -198,14 +198,30 @@ module Gitlab
::ActiveRecord::Base.configurations.configs_for(env_name: Rails.env).map(&:name)
end
def self.db_config_for_connection(connection)
return unless connection
# The LB connection proxy does not have a direct db_config
# that can be referenced
return if connection.is_a?(::Gitlab::Database::LoadBalancing::ConnectionProxy)
# During application init we might receive `NullPool`
return unless connection.respond_to?(:pool) &&
connection.pool.respond_to?(:db_config)
connection.pool.db_config
end
# At the moment, the connection can only be retrieved by
# Gitlab::Database::LoadBalancer#read or #read_write or from the
# ActiveRecord directly. Therefore, if the load balancer doesn't
# recognize the connection, this method returns the primary role
# directly. In future, we may need to check for other sources.
# Expected returned names:
# main, main_replica, ci, ci_replica, unknown
def self.db_config_name(connection)
db_config = db_config_for_connection(connection)
db_config&.name || 'unknown'
end
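# Illustrative usage sketch (not part of this file):
#   Gitlab::Database.db_config_name(ActiveRecord::Base.connection) # => "main"
#   Gitlab::Database.db_config_name(nil)                           # => "unknown"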
def self.read_only?

View File

@ -79,24 +79,12 @@ module Gitlab
].freeze
# Returns the role (primary/replica) of the database the connection is
# connecting to.
def self.db_role_for_connection(connection)
db_config = Database.db_config_for_connection(connection)
return ROLE_UNKNOWN unless db_config
if db_config.name.ends_with?(LoadBalancer::REPLICA_SUFFIX)
ROLE_REPLICA
else
ROLE_PRIMARY

View File

@ -73,6 +73,7 @@ module Gitlab
end
end
# @deprecated Use `create_table` in V2 instead
#
# Creates a new table, optionally allowing the caller to add check constraints to the table.
# Aside from that addition, this method should behave identically to Rails' `create_table` method.

View File

@ -6,6 +6,65 @@ module Gitlab
module V2
include Gitlab::Database::MigrationHelpers
# Superseded by `create_table` override below
def create_table_with_constraints(*_)
raise <<~EOM
#create_table_with_constraints is not supported anymore - use #create_table instead, for example:
create_table :db_guides do |t|
t.bigint :stars, default: 0, null: false
t.text :title, limit: 128
t.text :notes, limit: 1024
t.check_constraint 'stars > 1000', name: 'so_many_stars'
end
See https://docs.gitlab.com/ee/development/database/strings_and_the_text_data_type.html
EOM
end
# Creates a new table, optionally allowing the caller to add text limit constraints to the table.
# This method only extends Rails' `create_table` method
#
# Example:
#
# create_table :db_guides do |t|
# t.bigint :stars, default: 0, null: false
# t.text :title, limit: 128
# t.text :notes, limit: 1024
#
# t.check_constraint 'stars > 1000', name: 'so_many_stars'
# end
#
# See Rails' `create_table` for more info on the available arguments.
#
# When adding foreign keys to other tables, consider wrapping the call into a with_lock_retries block
# to avoid traffic stalls.
def create_table(table_name, *args, **kwargs, &block)
helper_context = self
super do |t|
t.define_singleton_method(:text) do |column_name, **kwargs|
limit = kwargs.delete(:limit)
super(column_name, **kwargs)
if limit
# rubocop:disable GitlabSecurity/PublicSend
name = helper_context.send(:text_limit_name, table_name, column_name)
# rubocop:enable GitlabSecurity/PublicSend
column_name = helper_context.quote_column_name(column_name)
definition = "char_length(#{column_name}) <= #{limit}"
t.check_constraint(definition, name: name)
end
end
t.instance_eval(&block) unless block.nil?
end
end
# Executes the block with a retry mechanism that alters the +lock_timeout+ and +sleep_time+ between attempts.
# The timings can be controlled via the +timing_configuration+ parameter.
# If the lock was not acquired within the retry period, a last attempt is made without using +lock_timeout+.

View File

@ -55,7 +55,7 @@ module Gitlab
private
def create_issue
Issues::CreateService.new(
::Issues::CreateService.new(
project: project,
current_user: author,
params: {

View File

@ -71,7 +71,7 @@ module Gitlab
end
def create_issue!
@issue = Issues::CreateService.new(
@issue = ::Issues::CreateService.new(
project: project,
current_user: User.support_bot,
params: {

View File

@ -5,7 +5,6 @@ require_dependency 'gitlab/encoding_helper'
module Gitlab
module Git
# The ID of empty tree.
# See http://stackoverflow.com/a/40884093/1856239 and
# https://github.com/git/git/blob/3ad8b5bf26362ac67c9020bf8c30eee54a84f56d/cache.h#L1011-L1012
EMPTY_TREE_ID = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
BLANK_SHA = ('0' * 40).freeze

View File

@ -0,0 +1,154 @@
# frozen_string_literal: true
module Gitlab
module Issues
module Rebalancing
class State
REDIS_EXPIRY_TIME = 10.days
MAX_NUMBER_OF_CONCURRENT_REBALANCES = 5
NAMESPACE = 1
PROJECT = 2
def initialize(root_namespace, projects)
@root_namespace = root_namespace
@projects = projects
@rebalanced_container_type = @root_namespace.is_a?(Group) ? NAMESPACE : PROJECT
@rebalanced_container_id = @rebalanced_container_type == NAMESPACE ? @root_namespace.id : projects.take.id # rubocop:disable CodeReuse/ActiveRecord
end
def track_new_running_rebalance
with_redis do |redis|
redis.multi do |multi|
# we trigger re-balance for namespaces(groups) or specific user project
value = "#{rebalanced_container_type}/#{rebalanced_container_id}"
multi.sadd(concurrent_running_rebalances_key, value)
multi.expire(concurrent_running_rebalances_key, REDIS_EXPIRY_TIME)
end
end
end
def concurrent_running_rebalances_count
with_redis { |redis| redis.scard(concurrent_running_rebalances_key).to_i }
end
def rebalance_in_progress?
all_rebalanced_containers = with_redis { |redis| redis.smembers(concurrent_running_rebalances_key) }
is_running = case rebalanced_container_type
when NAMESPACE
namespace_ids = all_rebalanced_containers.map {|string| string.split("#{NAMESPACE}/").second.to_i }.compact
namespace_ids.include?(root_namespace.id)
when PROJECT
project_ids = all_rebalanced_containers.map {|string| string.split("#{PROJECT}/").second.to_i }.compact
project_ids.include?(projects.take.id) # rubocop:disable CodeReuse/ActiveRecord
else
false
end
refresh_keys_expiration if is_running
is_running
end
def can_start_rebalance?
rebalance_in_progress? || too_many_rebalances_running?
end
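# Caches [relative_position, id] pairs for the given issues in a Redis sorted set,
# scored by relative_position, so they can later be read back in position order.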
def cache_issue_ids(issue_ids)
with_redis do |redis|
values = issue_ids.map { |issue| [issue.relative_position, issue.id] }
redis.multi do |multi|
multi.zadd(issue_ids_key, values) unless values.blank?
multi.expire(issue_ids_key, REDIS_EXPIRY_TIME)
end
end
end
def get_cached_issue_ids(index, limit)
with_redis do |redis|
redis.zrange(issue_ids_key, index, index + limit - 1)
end
end
def cache_current_index(index)
with_redis { |redis| redis.set(current_index_key, index, ex: REDIS_EXPIRY_TIME) }
end
def get_current_index
with_redis { |redis| redis.get(current_index_key).to_i }
end
def cache_current_project_id(project_id)
with_redis { |redis| redis.set(current_project_key, project_id, ex: REDIS_EXPIRY_TIME) }
end
def get_current_project_id
with_redis { |redis| redis.get(current_project_key) }
end
def issue_count
@issue_count ||= with_redis { |redis| redis.zcard(issue_ids_key)}
end
def remove_current_project_id_cache
with_redis { |redis| redis.del(current_project_key)}
end
def refresh_keys_expiration
with_redis do |redis|
redis.multi do |multi|
multi.expire(issue_ids_key, REDIS_EXPIRY_TIME)
multi.expire(current_index_key, REDIS_EXPIRY_TIME)
multi.expire(current_project_key, REDIS_EXPIRY_TIME)
multi.expire(concurrent_running_rebalances_key, REDIS_EXPIRY_TIME)
end
end
end
def cleanup_cache
with_redis do |redis|
redis.multi do |multi|
multi.del(issue_ids_key)
multi.del(current_index_key)
multi.del(current_project_key)
multi.srem(concurrent_running_rebalances_key, "#{rebalanced_container_type}/#{rebalanced_container_id}")
end
end
end
private
attr_accessor :root_namespace, :projects, :rebalanced_container_type, :rebalanced_container_id
def too_many_rebalances_running?
concurrent_running_rebalances_count <= MAX_NUMBER_OF_CONCURRENT_REBALANCES
end
def redis_key_prefix
"gitlab:issues-position-rebalances"
end
def issue_ids_key
"#{redis_key_prefix}:#{root_namespace.id}"
end
def current_index_key
"#{issue_ids_key}:current_index"
end
def current_project_key
"#{issue_ids_key}:current_project_id"
end
def concurrent_running_rebalances_key
"#{redis_key_prefix}:running_rebalances"
end
def with_redis(&blk)
Gitlab::Redis::SharedState.with(&blk) # rubocop: disable CodeReuse/ActiveRecord
end
end
end
end
end
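# Illustrative usage sketch (not part of this file); `group` and `issues` are
# assumed to be a Group and a collection of its issues:
#
#   state = Gitlab::Issues::Rebalancing::State.new(group, group.projects)
#   state.track_new_running_rebalance
#   state.cache_issue_ids(issues)
#   state.get_cached_issue_ids(0, 100) # => first 100 issue ids, ordered by relative position
#   state.cleanup_cache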

View File

@ -41,6 +41,10 @@ module Gitlab
def endpoint_id
Labkit::Context.current&.get_attribute(:caller_id)
end
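# Name of the database configuration behind the current connection (for example
# "main" or "ci"); resolves to "unknown" when it cannot be determined.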
def db_config_name
::Gitlab::Database.db_config_name(marginalia_adapter)
end
end
end
end

View File

@ -219,7 +219,7 @@ module Gitlab
column_definition.column_expression.dup.as(column_definition.attribute_name).to_sql
end
scope = scope.reselect(*scope.arel.projections, *additional_projections) unless additional_projections.blank?
scope
end

View File

@ -267,7 +267,7 @@ module Gitlab
private
def zoom_link_service
::Issues::ZoomLinkService.new(project: quick_action_target.project, current_user: current_user, params: { issue: quick_action_target })
end
end
end

View File

@ -29,7 +29,7 @@ module Gitlab
private
def close_issue(issue:)
::Issues::CloseService.new(project: project, current_user: current_user).execute(issue)
end
def presenter(issue)

View File

@ -29,11 +29,11 @@ module Gitlab
return Gitlab::SlashCommands::Presenters::Access.new.not_found
end
new_issue = ::Issues::MoveService.new(project: project, current_user: current_user)
.execute(old_issue, target_project)
presenter(new_issue).present(old_issue)
rescue ::Issues::MoveService::MoveError => e
presenter(old_issue).display_move_error(e.message)
end

View File

@ -33,7 +33,7 @@ module Gitlab
private
def create_issue(title:, description:)
::Issues::CreateService.new(project: project, current_user: current_user, params: { title: title, description: description }, spam_params: nil).execute
end
def presenter(issue)

View File

@ -918,7 +918,7 @@ module Gitlab
jira: count(::JiraImportState.where(time_period)), # rubocop: disable CodeReuse/ActiveRecord
fogbugz: projects_imported_count('fogbugz', time_period),
phabricator: projects_imported_count('phabricator', time_period),
csv: count(::Issues::CsvImport.where(time_period)) # rubocop: disable CodeReuse/ActiveRecord
}
end
@ -934,7 +934,7 @@ module Gitlab
project_imports = distinct_count(::Project.where(time_period).where.not(import_type: nil), :creator_id)
bulk_imports = distinct_count(::BulkImport.where(time_period), :user_id)
jira_issue_imports = distinct_count(::JiraImportState.where(time_period), :user_id)
csv_issue_imports = distinct_count(::Issues::CsvImport.where(time_period), :user_id)
group_imports = distinct_count(::GroupImportState.where(time_period), :user_id)
add(project_imports, bulk_imports, jira_issue_imports, csv_issue_imports, group_imports)

View File

@ -1,5 +1,4 @@
# GitLab logrotate settings
# based on: http://stackoverflow.com/a/4883967
/home/git/gitlab/log/*.log {
su git git

View File

@ -86,9 +86,8 @@ namespace :gitlab do
# 3: high priority
# 5: _super_ high priority, this should only be used for _very_ important queues
#
# The formula for calculating the likelihood of a job being popped off a queue
# (given all queues have work to perform) is:
#
# chance = (queue weight / total weight of all queues) * 100
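# For example, with three queues weighted 1, 3, and 5, the weight-5 queue is
# picked roughly (5 / 9) * 100, or about 56% of the time.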
BANNER

View File

@ -2872,10 +2872,10 @@ msgstr ""
msgid "Admin|Quarterly reconciliation will occur on %{qrtlyDate}"
msgstr ""
msgid "Admin|The number of max seats used for your namespace is currently exceeding the number of seats in your subscription. On %{qrtlyDate}, GitLab will process a quarterly reconciliation and automatically bill you a prorated amount for the overage. There is no action needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice."
msgid "Admin|The number of max seats in your namespace exceeds the number of seats in your subscription. On %{qrtlyDate}, quarterly reconciliation occurs and you are automatically billed a prorated amount for the overage. No action is needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice. For more information about the timing of the invoicing process, view the documentation."
msgstr ""
msgid "Admin|The number of maximum users for your instance is currently exceeding the number of users in license. On %{qrtlyDate}, GitLab will process a quarterly reconciliation and automatically bill you a prorated amount for the overage. There is no action needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice."
msgid "Admin|The number of max users in your instance exceeds the number of users in your license. On %{qrtlyDate}, quarterly reconciliation occurs and you are automatically billed a prorated amount for the overage. No action is needed from you. If you have a credit card on file, it will be charged. Otherwise, you will receive an invoice. For more information about the timing of the invoicing process, view the documentation."
msgstr ""
msgid "Admin|View pending user approvals"

View File

@ -13,8 +13,13 @@ module RuboCop
class AddLimitToTextColumns < RuboCop::Cop::Cop
include MigrationHelpers
TEXT_LIMIT_ATTRIBUTE_ALLOWED_SINCE = 2021_09_10_00_00_00
MSG = 'Text columns should always have a limit set (255 is suggested). ' \
'You can add a limit to a `text` column by using `add_text_limit` or by using `.text... limit: 255` inside `create_table`'
TEXT_LIMIT_ATTRIBUTE_NOT_ALLOWED = 'Text columns should always have a limit set (255 is suggested). Using limit: is not supported in this version. ' \
'You can add a limit to a `text` column by using `add_text_limit` or `.text_limit` inside `create_table`'
def_node_matcher :reverting?, <<~PATTERN
(def :down ...)
@ -37,15 +42,29 @@ module RuboCop
node.each_descendant(:send) do |send_node|
next unless text_operation?(send_node)
if text_operation_with_limit?(send_node)
add_offense(send_node, location: :selector, message: TEXT_LIMIT_ATTRIBUTE_NOT_ALLOWED) if version(node) < TEXT_LIMIT_ATTRIBUTE_ALLOWED_SINCE
else
# We require a limit for the same table and attribute name
if text_limit_missing?(node, *table_and_attribute_name(send_node))
add_offense(send_node, location: :selector)
end
end
end
end
private
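# Returns true when a text column declaration already carries a `limit:`
# attribute, for example `t.text :title, limit: 255`.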
def text_operation_with_limit?(node)
migration_method = node.children[1]
return unless migration_method == :text
if attributes = node.children[3]
attributes.pairs.find { |pair| pair.key.value == :limit }.present?
end
end
def text_operation?(node)
# Don't complain about text arrays
return false if array_column?(node)

View File

@ -2,9 +2,7 @@
* Manage the instance of a custom `window.localStorage`
*
* This only encapsulates the setup / teardown logic so that it can easily be
* reused with different implementations (i.e. a spy or a fake)
*
* @param {() => any} fn Function that returns the object to use for localStorage
*/

View File

@ -2,9 +2,7 @@
* Manage the instance of a custom `window.location`
*
* This only encapsulates the setup / teardown logic so that it can easily be
* reused with different implementations (i.e. a spy or a fake)
*
* @param {() => any} fn Function that returns the object to use for window.location
*/

Some files were not shown because too many files have changed in this diff Show More