Add latest changes from gitlab-org/gitlab@master
This commit is contained in:
parent 26879909dd
commit eb08c8e6f8
82 changed files with 1563 additions and 345 deletions
@@ -30,14 +30,14 @@ If applicable, any groups/projects that are happy to have this feature turned on
 ## Roll Out Steps

-- [ ] Enable on staging
+- [ ] Enable on staging (`/chatops run feature set feature_name true --staging`)
 - [ ] Test on staging
 - [ ] Ensure that documentation has been updated
-- [ ] Enable on GitLab.com for individual groups/projects listed above and verify behaviour
+- [ ] Enable on GitLab.com for individual groups/projects listed above and verify behaviour (`/chatops run feature set --project=gitlab-org/gitlab feature_name true`)
 - [ ] Coordinate a time to enable the flag with `#production` and `#g_delivery` on slack.
 - [ ] Announce on the issue an estimated time this will be enabled on GitLab.com
-- [ ] Enable on GitLab.com by running chatops command in `#production`
-- [ ] Cross post chatops slack command to `#support_gitlab-com` ([more guidance when this is necessary in the dev docs](https://docs.gitlab.com/ee/development/feature_flags/controls.html#where-to-run-commands)) and in your team channel
+- [ ] Enable on GitLab.com by running chatops command in `#production` (`/chatops run feature set feature_name true`)
+- [ ] Cross post chatops Slack command to `#support_gitlab-com` ([more guidance when this is necessary in the dev docs](https://docs.gitlab.com/ee/development/feature_flags/controls.html#where-to-run-commands)) and in your team channel
 - [ ] Announce on the issue that the flag has been enabled
 - [ ] Remove feature flag and add changelog entry
 - [ ] After the flag removal is deployed, [clean up the feature flag](https://docs.gitlab.com/ee/development/feature_flags/controls.html#cleaning-up) by running chatops command in `#production` channel
@@ -6,6 +6,7 @@ query getDesignList($fullPath: ID!, $iid: String!, $atVersion: ID) {
     id
     issue(iid: $iid) {
       designCollection {
+        copyState
         designs(atVersion: $atVersion) {
           nodes {
             ...DesignListItem
@@ -8,7 +8,7 @@ import { DESIGNS_ROUTE_NAME } from '../router/constants';
 export default {
   mixins: [allVersionsMixin],
   apollo: {
-    designs: {
+    designCollection: {
       query: getDesignListQuery,
       variables() {
         return {
@@ -25,10 +25,11 @@ export default {
         'designs',
         'nodes',
       ]);
-      if (designNodes) {
-        return designNodes;
-      }
-      return [];
+      const copyState = propertyOf(data)(['project', 'issue', 'designCollection', 'copyState']);
+      return {
+        designs: designNodes,
+        copyState,
+      };
     },
     error() {
       this.error = true;
@@ -42,13 +43,26 @@ export default {
         );
         this.$router.replace({ name: DESIGNS_ROUTE_NAME, query: { version: undefined } });
       }
+      if (this.designCollection.copyState === 'ERROR') {
+        createFlash(
+          s__(
+            'DesignManagement|There was an error moving your designs. Please upload your designs below.',
+          ),
+          'warning',
+        );
+      }
       },
     },
   },
   data() {
     return {
-      designs: [],
+      designCollection: null,
       error: false,
     };
   },
+  computed: {
+    designs() {
+      return this.designCollection?.designs || [];
+    },
+  },
 };
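The new Apollo `result` handling above reads both the design nodes and the collection's `copyState` out of the query result with lodash's `propertyOf`. A minimal sketch of that null-safe lookup pattern; the shape of `data` here is illustrative, not the exact query payload:

```javascript
// Sketch of the propertyOf-based deep lookup used above (data shape is illustrative).
import { propertyOf } from 'lodash';

const data = {
  project: { issue: { designCollection: { copyState: 'READY', designs: { nodes: [] } } } },
};

// propertyOf(data) returns a getter function; a missing intermediate key
// yields undefined instead of throwing, so no optional chaining is needed.
const designNodes = propertyOf(data)(['project', 'issue', 'designCollection', 'designs', 'nodes']);
const copyState = propertyOf(data)(['project', 'issue', 'designCollection', 'copyState']);

console.log(designNodes, copyState); // [] 'READY'
```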
@@ -76,7 +76,9 @@ export default {
   },
   computed: {
     isLoading() {
-      return this.$apollo.queries.designs.loading || this.$apollo.queries.permissions.loading;
+      return (
+        this.$apollo.queries.designCollection.loading || this.$apollo.queries.permissions.loading
+      );
     },
     isSaving() {
       return this.filesToBeSaved.length > 0;
@@ -110,6 +112,11 @@ export default {
     isDesignListEmpty() {
       return !this.isSaving && !this.hasDesigns;
     },
+    isDesignCollectionCopying() {
+      return (
+        this.designCollection && ['PENDING', 'COPYING'].includes(this.designCollection.copyState)
+      );
+    },
     designDropzoneWrapperClass() {
       return this.isDesignListEmpty
         ? 'col-12'
@@ -360,6 +367,21 @@ export default {
     <gl-alert v-else-if="error" variant="danger" :dismissible="false">
       {{ __('An error occurred while loading designs. Please try again.') }}
     </gl-alert>
+    <header
+      v-else-if="isDesignCollectionCopying"
+      class="card gl-p-3"
+      data-testid="design-collection-is-copying"
+    >
+      <div class="card-header design-card-header border-bottom-0">
+        <div class="card-title gl-my-0 gl-h-7">
+          {{
+            s__(
+              'DesignManagement|Your designs are being copied and are on their way… Please refresh to update.',
+            )
+          }}
+        </div>
+      </div>
+    </header>
     <vue-draggable
       v-else
       :value="designs"
@@ -155,6 +155,7 @@ const addNewDesignToStore = (store, designManagementUpload, query) => {

   const updatedDesigns = {
     __typename: 'DesignCollection',
+    copyState: 'READY',
     designs: {
       __typename: 'DesignConnection',
       nodes: newDesigns,
@@ -65,6 +65,10 @@ export const designUploadOptimisticResponse = files => {
     fullPath: '',
     notesCount: 0,
     event: 'NONE',
+    currentUserTodos: {
+      __typename: 'TodoConnection',
+      nodes: [],
+    },
     diffRefs: {
       __typename: 'DiffRefs',
       baseSha: '',
@@ -1,11 +1,12 @@
 <script>
+import MembersTable from '~/vue_shared/components/members/table/members_table.vue';
+
 export default {
   name: 'GroupMembersApp',
+  components: { MembersTable },
 };
 </script>

 <template>
-  <span>
-    <!-- Temporary empty template -->
-  </span>
+  <members-table />
 </template>
@@ -4,7 +4,7 @@ import App from './components/app.vue';
 import membersModule from '~/vuex_shared/modules/members';
 import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';

-export default el => {
+export const initGroupMembersApp = (el, tableFields) => {
   if (!el) {
     return () => {};
   }
@@ -18,6 +18,7 @@ export default el => {
       members: convertObjectPropsToCamelCase(JSON.parse(members), { deep: true }),
       sourceId: parseInt(groupId, 10),
       currentUserId: gon.current_user_id || null,
+      tableFields,
     }),
   });
@@ -5,10 +5,14 @@ import LabelsSelect from './labels_select';
 import IssuableContext from './issuable_context';
 import Sidebar from './right_sidebar';
 import DueDateSelectors from './due_date_select';
-import { mountSidebarLabels } from '~/sidebar/mount_sidebar';
+import { mountSidebarLabels, getSidebarOptions } from '~/sidebar/mount_sidebar';

 export default () => {
-  const sidebarOptions = JSON.parse(document.querySelector('.js-sidebar-options').innerHTML);
+  const sidebarOptEl = document.querySelector('.js-sidebar-options');
+
+  if (!sidebarOptEl) return;
+
+  const sidebarOptions = getSidebarOptions(sidebarOptEl);

   new MilestoneSelect({
     full_path: sidebarOptions.fullPath,
@@ -5,15 +5,19 @@ import initSortDiscussions from './sort_discussions';
 import { store } from './stores';

 document.addEventListener('DOMContentLoaded', () => {
+  const el = document.getElementById('js-vue-notes');
+
+  if (!el) return;
+
   // eslint-disable-next-line no-new
   new Vue({
-    el: '#js-vue-notes',
+    el,
     components: {
       notesApp,
     },
     store,
     data() {
-      const notesDataset = document.getElementById('js-vue-notes').dataset;
+      const notesDataset = el.dataset;
       const parsedUserData = JSON.parse(notesDataset.currentUserData);
       const noteableData = JSON.parse(notesDataset.noteableData);
       let currentUserData = {};
@@ -4,7 +4,7 @@ import memberExpirationDate from '~/member_expiration_date';
 import UsersSelect from '~/users_select';
 import groupsSelect from '~/groups_select';
 import RemoveMemberModal from '~/vue_shared/components/remove_member_modal.vue';
-import initGroupMembersApp from '~/groups/members';
+import { initGroupMembersApp } from '~/groups/members';

 function mountRemoveMemberModal() {
   const el = document.querySelector('.js-remove-member-modal');
@@ -26,10 +26,24 @@ document.addEventListener('DOMContentLoaded', () => {
   memberExpirationDate('.js-access-expiration-date-groups');
   mountRemoveMemberModal();

-  initGroupMembersApp(document.querySelector('.js-group-members-list'));
-  initGroupMembersApp(document.querySelector('.js-group-linked-list'));
-  initGroupMembersApp(document.querySelector('.js-group-invited-members-list'));
-  initGroupMembersApp(document.querySelector('.js-group-access-requests-list'));
+  const SHARED_FIELDS = ['account', 'expires', 'maxRole', 'expiration', 'actions'];
+
+  initGroupMembersApp(
+    document.querySelector('.js-group-members-list'),
+    SHARED_FIELDS.concat(['source', 'granted']),
+  );
+  initGroupMembersApp(
+    document.querySelector('.js-group-linked-list'),
+    SHARED_FIELDS.concat('granted'),
+  );
+  initGroupMembersApp(
+    document.querySelector('.js-group-invited-members-list'),
+    SHARED_FIELDS.concat('invited'),
+  );
+  initGroupMembersApp(
+    document.querySelector('.js-group-access-requests-list'),
+    SHARED_FIELDS.concat('requested'),
+  );

   new Members(); // eslint-disable-line no-new
   new UsersSelect(); // eslint-disable-line no-new
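Each mount call above now passes a per-list column set built from `SHARED_FIELDS`. A small sketch of why both `concat` call styles work; the values are taken from the hunk above:

```javascript
// Array.prototype.concat flattens array arguments one level and appends scalars,
// so both call styles below produce a flat string array.
const SHARED_FIELDS = ['account', 'expires', 'maxRole', 'expiration', 'actions'];

console.log(SHARED_FIELDS.concat(['source', 'granted']));
// ['account', 'expires', 'maxRole', 'expiration', 'actions', 'source', 'granted']
console.log(SHARED_FIELDS.concat('granted'));
// ['account', 'expires', 'maxRole', 'expiration', 'actions', 'granted']
```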
@@ -18,7 +18,7 @@ export default function() {

   if (issueType === 'incident') {
     initIncidentApp(issuableData);
-  } else {
+  } else if (issueType === 'issue') {
     initIssueApp(issuableData);
   }

@@ -75,6 +75,7 @@ export default {
   },
   methods: {
     fetchFiles() {
+      const originalPath = this.path || '/';
       this.isLoadingFiles = true;

       return this.$apollo
@@ -83,14 +84,14 @@ export default {
           variables: {
             projectPath: this.projectPath,
             ref: this.ref,
-            path: this.path || '/',
+            path: originalPath,
             nextPageCursor: this.nextPageCursor,
             pageSize: this.pageSize,
           },
         })
         .then(({ data }) => {
           if (data.errors) throw data.errors;
-          if (!data?.project?.repository) return;
+          if (!data?.project?.repository || originalPath !== (this.path || '/')) return;

           const pageInfo = this.hasNextPage(data.project.repository.tree);

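The `originalPath` capture above guards against a response arriving for a directory the user has already navigated away from. A minimal sketch of the capture-and-compare pattern; the names here are hypothetical, not the component's real API:

```javascript
// Sketch of the stale-response guard: record the path the request was made for,
// and drop the response if the current path has changed in the meantime.
function fetchFor(component) {
  const requestedPath = component.path || '/';
  return component
    .load(requestedPath) // stand-in for the Apollo query above
    .then(data => {
      if (requestedPath !== (component.path || '/')) return; // response is stale, ignore it
      component.render(data);
    });
}
```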
@@ -5,8 +5,8 @@ import commitsQuery from './queries/commits.query.graphql';
 import projectPathQuery from './queries/project_path.query.graphql';
 import refQuery from './queries/ref.query.graphql';

-let fetchpromise;
-let resolvers = [];
+const fetchpromises = {};
+const resolvers = {};

 export function resolveCommit(commits, path, { resolve, entry }) {
   const commit = commits.find(c => c.filePath === `${path}/${entry.name}` && c.type === entry.type);
@@ -18,15 +18,19 @@ export function resolveCommit(commits, path, { resolve, entry }) {

 export function fetchLogsTree(client, path, offset, resolver = null) {
   if (resolver) {
-    resolvers.push(resolver);
+    if (!resolvers[path]) {
+      resolvers[path] = [resolver];
+    } else {
+      resolvers[path].push(resolver);
+    }
   }

-  if (fetchpromise) return fetchpromise;
+  if (fetchpromises[path]) return fetchpromises[path];

   const { projectPath } = client.readQuery({ query: projectPathQuery });
   const { escapedRef } = client.readQuery({ query: refQuery });

-  fetchpromise = axios
+  fetchpromises[path] = axios
     .get(
       `${gon.relative_url_root}/${projectPath}/-/refs/${escapedRef}/logs_tree/${encodeURIComponent(
         path.replace(/^\//, ''),
@@ -46,16 +50,16 @@ export function fetchLogsTree(client, path, offset, resolver = null) {
         data,
       });

-      resolvers.forEach(r => resolveCommit(data.commits, path, r));
+      resolvers[path].forEach(r => resolveCommit(data.commits, path, r));

-      fetchpromise = null;
+      delete fetchpromises[path];

       if (headerLogsOffset) {
         fetchLogsTree(client, path, headerLogsOffset);
       } else {
-        resolvers = [];
+        delete resolvers[path];
       }
     });

-  return fetchpromise;
+  return fetchpromises[path];
 }
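Replacing the single `fetchpromise`/`resolvers` pair with per-path maps lets requests for different directories proceed independently while still de-duplicating concurrent requests for the same path. A minimal sketch of the keyed promise cache, with a hypothetical `fetchData` loader standing in for the axios call:

```javascript
// Sketch of a per-key in-flight promise cache (fetchData is a hypothetical loader).
const inFlight = {};

function fetchOnce(path, fetchData) {
  // Re-use the pending promise for this path so concurrent callers share one request.
  if (inFlight[path]) return inFlight[path];

  inFlight[path] = fetchData(path).finally(() => {
    delete inFlight[path]; // allow later refetches once this request settles
  });
  return inFlight[path];
}
```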
@@ -22,8 +22,8 @@ Vue.use(Translate);
 Vue.use(VueApollo);
 Vue.use(Vuex);

-function getSidebarOptions() {
-  return JSON.parse(document.querySelector('.js-sidebar-options').innerHTML);
+function getSidebarOptions(sidebarOptEl = document.querySelector('.js-sidebar-options')) {
+  return JSON.parse(sidebarOptEl.innerHTML);
 }

 function mountAssigneesComponent(mediator) {
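Giving `getSidebarOptions` a default parameter keeps existing zero-argument call sites working while letting callers that already hold the element (like the labels bundle earlier in this commit) skip a second DOM query. A short usage sketch:

```javascript
// A default parameter is evaluated on each call, so callers without an argument
// keep the old document lookup; callers with an element avoid the extra query.
function getSidebarOptions(sidebarOptEl = document.querySelector('.js-sidebar-options')) {
  return JSON.parse(sidebarOptEl.innerHTML);
}

const el = document.querySelector('.js-sidebar-options');
if (el) {
  const options = getSidebarOptions(el); // no second DOM query
}
```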
@@ -0,0 +1,55 @@
+import { __ } from '~/locale';
+
+export const FIELDS = [
+  {
+    key: 'account',
+    label: __('Account'),
+  },
+  {
+    key: 'source',
+    label: __('Source'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'granted',
+    label: __('Access granted'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'invited',
+    label: __('Invited'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'requested',
+    label: __('Requested'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'expires',
+    label: __('Access expires'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'maxRole',
+    label: __('Max role'),
+    thClass: 'col-meta',
+    tdClass: 'col-meta',
+  },
+  {
+    key: 'expiration',
+    label: __('Expiration'),
+    thClass: 'col-expiration',
+    tdClass: 'col-expiration',
+  },
+  {
+    key: 'actions',
+    thClass: 'col-actions',
+    tdClass: 'col-actions',
+  },
+];
@@ -0,0 +1,44 @@
+<script>
+import { mapState } from 'vuex';
+import { GlTable } from '@gitlab/ui';
+import { FIELDS } from '../constants';
+import initUserPopovers from '~/user_popovers';
+
+export default {
+  name: 'MembersTable',
+  components: {
+    GlTable,
+  },
+  computed: {
+    ...mapState(['members', 'tableFields']),
+    filteredFields() {
+      return FIELDS.filter(field => this.tableFields.includes(field.key));
+    },
+  },
+  mounted() {
+    initUserPopovers(this.$el.querySelectorAll('.js-user-link'));
+  },
+};
+</script>
+
+<template>
+  <gl-table
+    class="members-table"
+    head-variant="white"
+    stacked="lg"
+    :fields="filteredFields"
+    :items="members"
+    primary-key="id"
+    thead-class="border-bottom"
+    :empty-text="__('No members found')"
+    show-empty
+  >
+    <template #cell(source)>
+      <!-- Temporarily empty -->
+    </template>
+
+    <template #head(actions)="{ label }">
+      <span data-testid="col-actions" class="gl-sr-only">{{ label }}</span>
+    </template>
+  </gl-table>
+</template>
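The component above renders only the columns named in the store's `tableFields`, filtered against the shared `FIELDS` definitions. A minimal sketch of that filtering step, with abbreviated field entries:

```javascript
// Sketch of the filteredFields computed property (FIELDS entries abbreviated).
const FIELDS = [{ key: 'account' }, { key: 'source' }, { key: 'actions' }];
const tableFields = ['account', 'actions']; // per-list column set from the Vuex store

const filteredFields = FIELDS.filter(field => tableFields.includes(field.key));
// [{ key: 'account' }, { key: 'actions' }]
```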
@@ -1,5 +1,6 @@
-export default ({ members, sourceId, currentUserId }) => ({
+export default ({ members, sourceId, currentUserId, tableFields }) => ({
   members,
   sourceId,
   currentUserId,
+  tableFields,
 });
@@ -152,6 +152,10 @@
   }
 }

+.design-card-header {
+  background: transparent;
+}
+
 .design-dropzone-border {
   border: 2px dashed $gray-100;
 }
@@ -209,6 +209,23 @@
   }
 }

+.members-table {
+  @include media-breakpoint-up(lg) {
+    .col-meta {
+      width: px-to-rem(150px);
+    }
+
+    .col-expiration {
+      width: px-to-rem(200px);
+    }
+
+    .col-actions {
+      width: px-to-rem(50px);
+    }
+  }
+}
+
 .card-mobile {
   .content-list.members-list li {
     display: block;
@@ -40,12 +40,13 @@ class Packages::Package < ApplicationRecord
   validates :version, format: { with: Gitlab::Regex.conan_recipe_component_regex }, if: :conan?
   validates :version, format: { with: Gitlab::Regex.maven_version_regex }, if: -> { version? && maven? }
   validates :version, format: { with: Gitlab::Regex.pypi_version_regex }, if: :pypi?
+  validates :version, format: { with: Gitlab::Regex.prefixed_semver_regex }, if: :golang?
   validates :version,
     presence: true,
     format: { with: Gitlab::Regex.generic_package_version_regex },
     if: :generic?

-  enum package_type: { maven: 1, npm: 2, conan: 3, nuget: 4, pypi: 5, composer: 6, generic: 7 }
+  enum package_type: { maven: 1, npm: 2, conan: 3, nuget: 4, pypi: 5, composer: 6, generic: 7, golang: 8 }

   scope :with_name, ->(name) { where(name: name) }
   scope :with_name_like, ->(name) { where(arel_table[:name].matches(name)) }
app/workers/concerns/limited_capacity/job_tracker.rb (new file, 74 lines)
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+module LimitedCapacity
+  class JobTracker # rubocop:disable Scalability/IdempotentWorker
+    include Gitlab::Utils::StrongMemoize
+
+    def initialize(namespace)
+      @namespace = namespace
+    end
+
+    def register(jid)
+      _added, @count = with_redis_pipeline do |redis|
+        register_job_keys(redis, jid)
+        get_job_count(redis)
+      end
+    end
+
+    def remove(jid)
+      _removed, @count = with_redis_pipeline do |redis|
+        remove_job_keys(redis, jid)
+        get_job_count(redis)
+      end
+    end
+
+    def clean_up
+      completed_jids = Gitlab::SidekiqStatus.completed_jids(running_jids)
+      return unless completed_jids.any?
+
+      _removed, @count = with_redis_pipeline do |redis|
+        remove_job_keys(redis, completed_jids)
+        get_job_count(redis)
+      end
+    end
+
+    def count
+      @count ||= with_redis { |redis| get_job_count(redis) }
+    end
+
+    def running_jids
+      with_redis do |redis|
+        redis.smembers(counter_key)
+      end
+    end
+
+    private
+
+    attr_reader :namespace
+
+    def counter_key
+      "worker:#{namespace.to_s.underscore}:running"
+    end
+
+    def get_job_count(redis)
+      redis.scard(counter_key)
+    end
+
+    def register_job_keys(redis, keys)
+      redis.sadd(counter_key, keys)
+    end
+
+    def remove_job_keys(redis, keys)
+      redis.srem(counter_key, keys)
+    end
+
+    def with_redis(&block)
+      Gitlab::Redis::Queues.with(&block) # rubocop: disable CodeReuse/ActiveRecord
+    end
+
+    def with_redis_pipeline(&block)
+      with_redis do |redis|
+        redis.pipelined(&block)
+      end
+    end
+  end
+end
app/workers/concerns/limited_capacity/worker.rb (new file, 164 lines)
@@ -0,0 +1,164 @@
+# frozen_string_literal: true
+
+# Usage:
+#
+# Worker that performs the tasks:
+#
+# class DummyWorker
+#   include ApplicationWorker
+#   include LimitedCapacity::Worker
+#
+#   # For each job that raises any error, a worker instance will be disabled
+#   # until the next schedule-run.
+#   # If you wish to get around this, exceptions must by handled by the implementer.
+#   #
+#   def perform_work(*args)
+#   end
+#
+#   def remaining_work_count(*args)
+#     5
+#   end
+#
+#   def max_running_jobs
+#     25
+#   end
+# end
+#
+# Cron worker to fill the pool of regular workers:
+#
+# class ScheduleDummyCronWorker
+#   include ApplicationWorker
+#   include CronjobQueue
+#
+#   def perform(*args)
+#     DummyWorker.perform_with_capacity(*args)
+#   end
+# end
+#
+
+module LimitedCapacity
+  module Worker
+    extend ActiveSupport::Concern
+    include Gitlab::Utils::StrongMemoize
+
+    included do
+      # Disable Sidekiq retries, log the error, and send the job to the dead queue.
+      # This is done to have only one source that produces jobs and because the slot
+      # would be occupied by a job that will be performed in the distant future.
+      # We let the cron worker enqueue new jobs, this could be seen as our retry and
+      # back off mechanism because the job might fail again if executed immediately.
+      sidekiq_options retry: 0
+      deduplicate :none
+    end
+
+    class_methods do
+      def perform_with_capacity(*args)
+        worker = self.new
+        worker.remove_failed_jobs
+        worker.report_prometheus_metrics(*args)
+        required_jobs_count = worker.required_jobs_count(*args)
+
+        arguments = Array.new(required_jobs_count) { args }
+        self.bulk_perform_async(arguments) # rubocop:disable Scalability/BulkPerformWithContext
+      end
+    end
+
+    def perform(*args)
+      return unless has_capacity?
+
+      job_tracker.register(jid)
+      perform_work(*args)
+    rescue => exception
+      raise
+    ensure
+      job_tracker.remove(jid)
+      report_prometheus_metrics
+      re_enqueue(*args) unless exception
+    end
+
+    def perform_work(*args)
+      raise NotImplementedError
+    end
+
+    def remaining_work_count(*args)
+      raise NotImplementedError
+    end
+
+    def max_running_jobs
+      raise NotImplementedError
+    end
+
+    def has_capacity?
+      remaining_capacity > 0
+    end
+
+    def remaining_capacity
+      [
+        max_running_jobs - running_jobs_count - self.class.queue_size,
+        0
+      ].max
+    end
+
+    def has_work?(*args)
+      remaining_work_count(*args) > 0
+    end
+
+    def remove_failed_jobs
+      job_tracker.clean_up
+    end
+
+    def report_prometheus_metrics(*args)
+      running_jobs_gauge.set(prometheus_labels, running_jobs_count)
+      remaining_work_gauge.set(prometheus_labels, remaining_work_count(*args))
+      max_running_jobs_gauge.set(prometheus_labels, max_running_jobs)
+    end
+
+    def required_jobs_count(*args)
+      [
+        remaining_work_count(*args),
+        remaining_capacity
+      ].min
+    end
+
+    private
+
+    def running_jobs_count
+      job_tracker.count
+    end
+
+    def job_tracker
+      strong_memoize(:job_tracker) do
+        JobTracker.new(self.class.name)
+      end
+    end
+
+    def re_enqueue(*args)
+      return unless has_capacity?
+      return unless has_work?(*args)
+
+      self.class.perform_async(*args)
+    end
+
+    def running_jobs_gauge
+      strong_memoize(:running_jobs_gauge) do
+        Gitlab::Metrics.gauge(:limited_capacity_worker_running_jobs, 'Number of running jobs')
+      end
+    end
+
+    def max_running_jobs_gauge
+      strong_memoize(:max_running_jobs_gauge) do
+        Gitlab::Metrics.gauge(:limited_capacity_worker_max_running_jobs, 'Maximum number of running jobs')
+      end
+    end
+
+    def remaining_work_gauge
+      strong_memoize(:remaining_work_gauge) do
+        Gitlab::Metrics.gauge(:limited_capacity_worker_remaining_work_count, 'Number of jobs waiting to be enqueued')
+      end
+    end
+
+    def prometheus_labels
+      { worker: self.class.name }
+    end
+  end
+end
@@ -153,6 +153,10 @@ class FeatureFlagOptionParser
       end
     end

+    def read_ee_only(options)
+      TYPES.dig(options.type, :ee_only)
+    end
+
     def read_rollout_issue_url(options)
       return unless TYPES.dig(options.type, :rollout_issue)

@@ -204,6 +208,7 @@ class FeatureFlagCreator

     # Read type from $stdin unless is already set
     options.type ||= FeatureFlagOptionParser.read_type
+    options.ee ||= FeatureFlagOptionParser.read_ee_only(options)
     options.group ||= FeatureFlagOptionParser.read_group
     options.introduced_by_url ||= FeatureFlagOptionParser.read_introduced_by_url
     options.rollout_issue_url ||= FeatureFlagOptionParser.read_rollout_issue_url(options)
@@ -224,14 +229,22 @@ class FeatureFlagCreator
   private

   def contents
-    YAML.dump(
+    # Slice is used to ensure that YAML keys
+    # are always ordered in a predictable way
+    config_hash.slice(
+      *::Feature::Shared::PARAMS.map(&:to_s)
+    ).to_yaml
+  end
+
+  def config_hash
+    {
       'name' => options.name,
       'introduced_by_url' => options.introduced_by_url,
       'rollout_issue_url' => options.rollout_issue_url,
-      'group' => options.group.to_s,
+      'group' => options.group,
       'type' => options.type.to_s,
       'default_enabled' => FeatureFlagOptionParser.read_default_enabled(options)
-    ).strip
+    }
   end

   def write
changelogs/unreleased/13426-graphql-fe.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+title: Copy designs to issue when an issue with designs is moved
+merge_request: 42548
+author:
+type: fixed

changelogs/unreleased/add-golang-to-packages.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+title: Add Go(lang) to Packages
+merge_request: 41712
+author: Ethan Reesor (@firelizzard)
+type: added

@@ -0,0 +1,5 @@
+---
+title: 'Feature flags form: Replace fa-chevron-down with GitLab SVG'
+merge_request: 42968
+author:
+type: changed
@@ -69,10 +69,6 @@ Rails.application.routes.draw do
   # Begin of the /-/ scope.
   # Use this scope for all new global routes.
   scope path: '-' do
-    # remove in 13.5
-    get '/instance_statistics', to: redirect('admin/dev_ops_report')
-    get '/instance_statistics/dev_ops_score', to: redirect('admin/dev_ops_report')
-    get '/instance_statistics/cohorts', to: redirect('admin/cohorts')
     # Autocomplete
     get '/autocomplete/users' => 'autocomplete#users'
     get '/autocomplete/users/:id' => 'autocomplete#user'
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+class AddGolangPackageMaxFileSizeToPlanLimits < ActiveRecord::Migration[6.0]
+  DOWNTIME = false
+
+  def change
+    add_column(:plan_limits, :golang_max_file_size, :bigint, default: 100.megabytes, null: false)
+  end
+end
@@ -19,7 +19,6 @@ class CleanupGroupImportStatesWithNullUserId < ActiveRecord::Migration[6.0]
   class Namespace < ActiveRecord::Base
     self.table_name = 'namespaces'

-    belongs_to :parent, class_name: 'CleanupGroupImportStatesWithNullUserId::Namespace'
     belongs_to :owner, class_name: 'CleanupGroupImportStatesWithNullUserId::User'
   end

@@ -39,6 +38,10 @@ class CleanupGroupImportStatesWithNullUserId < ActiveRecord::Migration[6.0]
       owners.first || parent&.default_owner || owner
     end

+    def parent
+      Group.find_by_id(parent_id)
+    end
+
     def owners
       Member.where(type: 'GroupMember', source_type: 'Namespace', source_id: id, requested_at: nil, access_level: OWNER).map(&:user)
     end
db/schema_migrations/20200905013247 (new file, 1 line)
@@ -0,0 +1 @@
+860c45fd6293f2f8f10d7351cb5a2fbab2cc9147e56b538cb62d75469b039ef0
@@ -14364,7 +14364,8 @@ CREATE TABLE plan_limits (
     npm_max_file_size bigint DEFAULT 524288000 NOT NULL,
     nuget_max_file_size bigint DEFAULT 524288000 NOT NULL,
     pypi_max_file_size bigint DEFAULT '3221225472'::bigint NOT NULL,
-    generic_packages_max_file_size bigint DEFAULT '5368709120'::bigint NOT NULL
+    generic_packages_max_file_size bigint DEFAULT '5368709120'::bigint NOT NULL,
+    golang_max_file_size bigint DEFAULT 104857600 NOT NULL
 );

 CREATE SEQUENCE plan_limits_id_seq
@@ -204,6 +204,9 @@ configuration option in `gitlab.yml`. These metrics are served from the
 | `geo_snippet_repositories_synced` | Gauge | 13.4 | Number of syncable snippets synced on secondary | `url` |
 | `geo_snippet_repositories_failed` | Gauge | 13.4 | Number of syncable snippets failed on secondary | `url` |
 | `geo_snippet_repositories_registry` | Gauge | 13.4 | Number of syncable snippets in the registry | `url` |
+| `limited_capacity_worker_running_jobs` | Gauge | 13.5 | Number of running jobs | `worker` |
+| `limited_capacity_worker_max_running_jobs` | Gauge | 13.5 | Maximum number of running jobs | `worker` |
+| `limited_capacity_worker_remaining_work_count` | Gauge | 13.5 | Number of jobs waiting to be enqueued | `worker` |

 ## Database load balancing metrics **(PREMIUM ONLY)**

@@ -369,7 +369,7 @@ Here are the valid connection parameters for Rackspace Cloud, provided by
 | `rackspace_username` | The username of the Rackspace account with access to the container | `joe.smith` |
 | `rackspace_api_key` | The API key of the Rackspace account with access to the container | `ABC123DEF456ABC123DEF456ABC123DE` |
 | `rackspace_region` | The Rackspace storage region to use, a three letter code from the [list of service access endpoints](https://developer.rackspace.com/docs/cloud-files/v1/general-api-info/service-access/) | `iad` |
-| `rackspace_temp_url_key` | The private key you have set in the Rackspace API for temporary URLs. Read more [here](https://developer.rackspace.com/docs/cloud-files/v1/use-cases/public-access-to-your-cloud-files-account/#tempurl) | `ABC123DEF456ABC123DEF456ABC123DE` |
+| `rackspace_temp_url_key` | The private key you have set in the Rackspace API for [temporary URLs](https://developer.rackspace.com/docs/cloud-files/v1/use-cases/public-access-to-your-cloud-files-account/#tempurl). | `ABC123DEF456ABC123DEF456ABC123DE` |

 NOTE: **Note:**
 Regardless of whether the container has public access enabled or disabled, Fog will
@@ -1754,6 +1754,7 @@ On each node perform the following:
 roles ['application_role']
 gitaly['enable'] = false
 nginx['enable'] = true
+sidekiq['enable'] = false

 ## PostgreSQL connection details
 # Disable PostgreSQL on the application node
@@ -1797,7 +1798,6 @@ On each node perform the following:
 # Set the network addresses that the exporters used for monitoring will listen on
 node_exporter['listen_address'] = '0.0.0.0:9100'
 gitlab_workhorse['prometheus_listen_addr'] = '0.0.0.0:9229'
-sidekiq['listen_address'] = "0.0.0.0"
 puma['listen'] = '0.0.0.0'

 # Add the monitoring node's IP address to the monitoring whitelist and allow it to

@@ -1754,6 +1754,7 @@ On each node perform the following:
 roles ['application_role']
 gitaly['enable'] = false
 nginx['enable'] = true
+sidekiq['enable'] = false

 ## PostgreSQL connection details
 # Disable PostgreSQL on the application node
@@ -1797,7 +1798,6 @@ On each node perform the following:
 # Set the network addresses that the exporters used for monitoring will listen on
 node_exporter['listen_address'] = '0.0.0.0:9100'
 gitlab_workhorse['prometheus_listen_addr'] = '0.0.0.0:9229'
-sidekiq['listen_address'] = "0.0.0.0"
 puma['listen'] = '0.0.0.0'

 # Add the monitoring node's IP address to the monitoring whitelist and allow it to

@@ -1754,6 +1754,7 @@ On each node perform the following:
 roles ['application_role']
 gitaly['enable'] = false
 nginx['enable'] = true
+sidekiq['enable'] = false

 ## PostgreSQL connection details
 # Disable PostgreSQL on the application node
@@ -1797,7 +1798,6 @@ On each node perform the following:
 # Set the network addresses that the exporters used for monitoring will listen on
 node_exporter['listen_address'] = '0.0.0.0:9100'
 gitlab_workhorse['prometheus_listen_addr'] = '0.0.0.0:9229'
-sidekiq['listen_address'] = "0.0.0.0"
 puma['listen'] = '0.0.0.0'

 # Add the monitoring node's IP address to the monitoring whitelist and allow it to
@@ -84,8 +84,7 @@ and they will assist you with any issues you are having.

 ## GitLab-specific Kubernetes information

-- Minimal config that can be used to test a Kubernetes Helm chart can be found
-  [here](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/620).
+- Minimal config that can be used to [test a Kubernetes Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/620).

 - Tailing logs of a separate pod. An example for a Webservice pod:

@@ -11536,6 +11536,11 @@ enum PackageTypeEnum {
   """
   GENERIC

+  """
+  Packages from the golang package manager
+  """
+  GOLANG
+
   """
   Packages from the maven package manager
   """
@@ -14301,6 +14306,16 @@ type Query {
     """
     startDate: ISO8601Date!
   ): VulnerabilitiesCountByDayAndSeverityConnection @deprecated(reason: "Use `vulnerabilitiesCountByDay`. Deprecated in 13.3")
+
+  """
+  Find a vulnerability
+  """
+  vulnerability(
+    """
+    The Global ID of the Vulnerability
+    """
+    id: VulnerabilityID!
+  ): Vulnerability
 }

 """
@@ -34436,6 +34436,12 @@
       "description": "Packages from the generic package manager",
       "isDeprecated": false,
       "deprecationReason": null
     },
+    {
+      "name": "GOLANG",
+      "description": "Packages from the golang package manager",
+      "isDeprecated": false,
+      "deprecationReason": null
+    }
     ],
     "possibleTypes": null
@@ -41796,6 +41802,33 @@
       },
       "isDeprecated": true,
       "deprecationReason": "Use `vulnerabilitiesCountByDay`. Deprecated in 13.3"
     },
+    {
+      "name": "vulnerability",
+      "description": "Find a vulnerability",
+      "args": [
+        {
+          "name": "id",
+          "description": "The Global ID of the Vulnerability",
+          "type": {
+            "kind": "NON_NULL",
+            "name": null,
+            "ofType": {
+              "kind": "SCALAR",
+              "name": "VulnerabilityID",
+              "ofType": null
+            }
+          },
+          "defaultValue": null
+        }
+      ],
+      "type": {
+        "kind": "OBJECT",
+        "name": "Vulnerability",
+        "ofType": null
+      },
+      "isDeprecated": false,
+      "deprecationReason": null
+    }
     ],
     "inputFields": null,
@@ -3290,6 +3290,7 @@ Values for sorting projects.
 | `COMPOSER` | Packages from the composer package manager |
 | `CONAN` | Packages from the conan package manager |
 | `GENERIC` | Packages from the generic package manager |
+| `GOLANG` | Packages from the golang package manager |
 | `MAVEN` | Packages from the maven package manager |
 | `NPM` | Packages from the npm package manager |
 | `NUGET` | Packages from the nuget package manager |
@@ -26,7 +26,7 @@ GET /projects/:id/packages
 | `id` | integer/string | yes | ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) |
 | `order_by`| string | no | The field to use as order. One of `created_at` (default), `name`, `version`, or `type`. |
 | `sort` | string | no | The direction of the order, either `asc` (default) for ascending order or `desc` for descending order. |
-| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, or `nuget`. (_Introduced in GitLab 12.9_)
+| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, `nuget`, or `golang`. (_Introduced in GitLab 12.9_)
 | `package_name` | string | no | Filter the project packages with a fuzzy search by name. (_Introduced in GitLab 12.9_)

 ```shell
@@ -73,7 +73,7 @@ GET /groups/:id/packages
 | `exclude_subgroups` | boolean | false | If the parameter is included as true, packages from projects from subgroups are not listed. Default is `false`. |
 | `order_by`| string | no | The field to use as order. One of `created_at` (default), `name`, `version`, `type`, or `project_path`. |
 | `sort` | string | no | The direction of the order, either `asc` (default) for ascending order or `desc` for descending order. |
-| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, or `nuget`. (_Introduced in GitLab 12.9_) |
+| `package_type` | string | no | Filter the returned packages by type. One of `conan`, `maven`, `npm`, `pypi`, `composer`, `nuget`, or `golang`. (_Introduced in GitLab 12.9_) |
 | `package_name` | string | no | Filter the project packages with a fuzzy search by name. (_[Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/30980) in GitLab 13.0_)

 ```shell
@@ -39,11 +39,10 @@ multiple projects.

 If you are using a self-managed instance of GitLab:

-- Your administrator can install and register shared runners by viewing the instructions
-  [here](https://docs.gitlab.com/runner/install/index.html).
+- Your administrator can install and register shared runners by [following the documentation](https://docs.gitlab.com/runner/install/index.html).
   <!-- going to your project's
   <!-- **Settings > CI / CD**, expanding the **Runners** section, and clicking **Show runner installation instructions**.-->
-  <!-- These instructions are also available [here](https://docs.gitlab.com/runner/install/index.html).-->
+  <!-- These instructions are also available [in the documentation](https://docs.gitlab.com/runner/install/index.html).-->
 - The administrator can also configure a maximum number of shared runner [pipeline minutes for
   each group](../../user/admin_area/settings/continuous_integration.md#shared-runners-pipeline-minutes-quota).

@@ -571,7 +571,7 @@ in which you wish to use it.

 ## Where variables can be used

-Click [here](where_variables_can_be_used.md) for a section that describes where and how the different types of variables can be used.
+[This section](where_variables_can_be_used.md) describes where and how the different types of variables can be used.

 ## Advanced use

@@ -18,9 +18,9 @@ automatically create merge requests for updating dependencies of several project
 up-to-date list of projects managed by the renovate bot in the project’s README. Some key dependencies
 updated using renovate are:

-- [`@gitlab/ui`](https://gitlab.com/gitlab-org/gitlab-ui/)
-- [`@gitlab/svgs`](https://gitlab.com/gitlab-org/gitlab-svgs/)
-- [`@gitlab/eslint-config`](https://gitlab.com/gitlab-org/gitlab-eslint-config)
+- [`@gitlab/ui`](https://gitlab.com/gitlab-org/gitlab-ui)
+- [`@gitlab/svgs`](https://gitlab.com/gitlab-org/gitlab-svgs)
+- [`@gitlab/eslint-plugin`](https://gitlab.com/gitlab-org/frontend/eslint-plugin)

 ### Blocked dependencies

@@ -51,16 +51,16 @@ To create a new dashboard from the command line:
   - group: 'Group Title'
     panels:
       - type: area-chart
-        title: "Chart Title"
-        y_label: "Y-Axis"
+        title: 'Chart Title'
+        y_label: 'Y-Axis'
         y_axis:
           format: number
           precision: 0
         metrics:
           - id: my_metric_id
             query_range: 'http_requests_total'
-            label: "Instance: {{instance}}, method: {{method}}"
-            unit: "count"
+            label: 'Instance: {{instance}}, method: {{method}}'
+            unit: 'count'
 ```

 1. Save the file, commit, and push to your repository. The file must be present in your **default** branch.
|
|||
panels:
|
||||
- type: area-chart # or line-chart
|
||||
title: 'Area Chart Title'
|
||||
y_label: "Y-Axis"
|
||||
y_label: 'Y-Axis'
|
||||
y_axis:
|
||||
format: number
|
||||
precision: 0
|
||||
metrics:
|
||||
- id: area_http_requests_total
|
||||
query_range: 'http_requests_total'
|
||||
label: "Instance: {{instance}}, Method: {{method}}"
|
||||
label: 'Instance: {{instance}}, Method: {{method}}'
|
||||
unit: "count"
|
||||
```
|
||||
|
||||
|
@@ -55,23 +55,23 @@ panel_groups:
   - group: 'Group Title'
     panels:
       - type: anomaly-chart
-        title: "Chart Title"
+        title: 'Chart Title'
         y_label: "Y-Axis"
         metrics:
           - id: anomaly_requests_normal
             query_range: 'http_requests_total'
-            label: "# of Requests"
-            unit: "count"
+            label: '# of Requests'
+            unit: 'count'
         metrics:
           - id: anomaly_requests_upper_limit
             query_range: 10000
-            label: "Max # of requests"
-            unit: "count"
+            label: 'Max # of requests'
+            unit: 'count'
         metrics:
           - id: anomaly_requests_lower_limit
             query_range: 2000
-            label: "Min # of requests"
-            unit: "count"
+            label: 'Min # of requests'
+            unit: 'count'
 ```

 Note the following properties:
@@ -93,13 +93,13 @@ panel_groups:
   - group: 'Group title'
     panels:
       - type: bar
-        title: "Http Handlers"
+        title: 'HTTP Handlers'
         x_label: 'Response Size'
         y_axis:
-          name: "Handlers"
+          name: 'Handlers'
         metrics:
           - id: prometheus_http_response_size_bytes_bucket
-            query_range: "sum(increase(prometheus_http_response_size_bytes_bucket[1d])) by (handler)"
+            query_range: 'sum(increase(prometheus_http_response_size_bytes_bucket[1d])) by (handler)'
             unit: 'Bytes'
 ```
@@ -121,13 +121,13 @@ dashboard: 'Dashboard Title'
 panel_groups:
   - group: 'Group title'
     panels:
-      - title: "Column"
-        type: "column"
+      - title: 'Column'
+        type: 'column'
         metrics:
           - id: 1024_memory
             query: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
             unit: MB
-            label: "Memory Usage"
+            label: 'Memory Usage'
 ```

 Note the following properties:
@@ -153,19 +153,19 @@ panel_groups:
     priority: 5
     panels:
       - type: 'stacked-column'
-        title: "Stacked column"
-        y_label: "y label"
+        title: 'Stacked column'
+        y_label: 'y label'
         x_label: 'x label'
         metrics:
           - id: memory_1
             query_range: 'memory_query'
-            label: "memory query 1"
-            unit: "count"
+            label: 'memory query 1'
+            unit: 'count'
             series_name: 'group 1'
           - id: memory_2
             query_range: 'memory_query_2'
-            label: "memory query 2"
-            unit: "count"
+            label: 'memory query 2'
+            unit: 'count'
             series_name: 'group 2'
 ```
@@ -185,13 +185,13 @@ dashboard: 'Dashboard Title'
 panel_groups:
   - group: 'Group Title'
     panels:
-      - title: "Single Stat"
-        type: "single-stat"
+      - title: 'Single Stat'
+        type: 'single-stat'
         metrics:
           - id: 10
             query: 'max(go_memstats_alloc_bytes{job="prometheus"})'
             unit: MB
-            label: "Total"
+            label: 'Total'
 ```

 Note the following properties:
@@ -215,14 +215,14 @@ dashboard: 'Dashboard Title'
 panel_groups:
   - group: 'Group Title'
     panels:
-      - title: "Single Stat"
-        type: "single-stat"
+      - title: 'Single Stat'
+        type: 'single-stat'
         max_value: 100
         metrics:
           - id: 10
             query: 'max(go_memstats_alloc_bytes{job="prometheus"})'
             unit: '%'
-            label: "Total"
+            label: 'Total'
 ```

 For example, if you have a query value of `53.6`, adding `%` as the unit results in a single stat value of `53.6%`, but if the maximum expected value of the query is `120`, the value would be `44.6%`. Adding the `max_value` causes the correct percentage value to display.
@@ -242,15 +242,15 @@ dashboard: 'Dashboard Title'
 panel_groups:
   - group: 'Group Title'
     panels:
-      - title: "Gauge"
-        type: "gauge"
+      - title: 'Gauge'
+        type: 'gauge'
         min_value: 0
         max_value: 1000
         split: 5
         thresholds:
           values: [60, 90]
-          mode: "percentage"
-          format: "kilobytes"
+          mode: 'percentage'
+          format: 'kilobytes'
         metrics:
           - id: 10
             query: 'floor(max(prometheus_http_response_size_bytes_bucket)/1000)'
@@ -289,13 +289,13 @@ dashboard: 'Dashboard Title'
 panel_groups:
   - group: 'Group Title'
     panels:
-      - title: "Heatmap"
-        type: "heatmap"
+      - title: 'Heatmap'
+        type: 'heatmap'
         metrics:
           - id: 10
             query: 'sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[60m])) by (status_code)'
             unit: req/sec
-            label: "Status code"
+            label: 'Status code'
 ```

 Note the following properties:
@@ -103,8 +103,8 @@ When a static label is used and a query returns multiple time series, then all t
 metrics:
   - id: my_metric_id
     query_range: 'http_requests_total'
-    label: "Time Series"
-    unit: "count"
+    label: 'Time Series'
+    unit: 'count'
 ```

 This may render a legend like this:
@@ -117,8 +117,8 @@ For labels to be more explicit, using variables that reflect time series labels
 metrics:
   - id: my_metric_id
     query_range: 'http_requests_total'
-    label: "Instance: {{instance}}, method: {{method}}"
-    unit: "count"
+    label: 'Instance: {{instance}}, method: {{method}}'
+    unit: 'count'
 ```

 The resulting rendered legend will look like this:
@@ -131,8 +131,8 @@ There is also a shorthand value for dynamic dashboard labels that make use of on
 metrics:
   - id: my_metric_id
     query_range: 'http_requests_total'
-    label: "Method"
-    unit: "count"
+    label: 'Method'
+    unit: 'count'
 ```

 This works by lowercasing the value of `label` and, if there are more words separated by spaces, replacing those spaces with an underscore (`_`). The transformed value is then checked against the labels of the time series returned by the Prometheus query. If a time series label is found that is equal to the transformed value, then the label value will be used and rendered in the legend like this:
@@ -1041,7 +1041,7 @@ Be advised that, backup is successfully restored in spite of these warnings.
 The Rake task runs this as the `gitlab` user which does not have the superuser access to the database. When restore is initiated it will also run as `gitlab` user but it will also try to alter the objects it does not have access to.
 Those objects have no influence on the database backup/restore but they give this annoying warning.

-For more information see similar questions on PostgreSQL issue tracker [here](https://www.postgresql.org/message-id/201110220712.30886.adrian.klaver@gmail.com) and [here](https://www.postgresql.org/message-id/2039.1177339749@sss.pgh.pa.us) as well as [stack overflow](https://stackoverflow.com/questions/4368789/error-must-be-owner-of-language-plpgsql).
+For more information see these PostgreSQL issue tracker questions about [not being a superuser](https://www.postgresql.org/message-id/201110220712.30886.adrian.klaver@gmail.com), [having different owners](https://www.postgresql.org/message-id/2039.1177339749@sss.pgh.pa.us), and on stack overflow, about [resulting errors](https://stackoverflow.com/questions/4368789/error-must-be-owner-of-language-plpgsql).

 ### When the secrets file is lost

@@ -48,6 +48,7 @@ GitLab is a Git-based platform that integrates a great number of essential tools
 - Integrating with Docker by using [GitLab Container Registry](packages/container_registry/index.md).
 - Tracking the development lifecycle by using [GitLab Value Stream Analytics](project/cycle_analytics.md).
 - Provide support with [Service Desk](project/service_desk.md).
+- [Export issues as CSV](project/issues/csv_export.md).

 With GitLab Enterprise Edition, you can also:

@@ -60,7 +61,6 @@ With GitLab Enterprise Edition, you can also:
 - Leverage [Elasticsearch](../integration/elasticsearch.md) with [Advanced Search](search/advanced_global_search.md) and [Advanced Search Syntax](search/advanced_search_syntax.md) for faster, more advanced code search across your entire GitLab instance.
 - [Authenticate users with Kerberos](../integration/kerberos.md).
 - [Mirror a repository](project/repository/repository_mirroring.md) from elsewhere on your local server.
-- [Export issues as CSV](project/issues/csv_export.md).
 - View your entire CI/CD pipeline involving more than one project with [Multiple-Project Pipelines](../ci/multi_project_pipeline_graphs.md).
 - [Lock files](project/file_lock.md) to prevent conflicts.
 - View the current health and status of each CI environment running on Kubernetes with [Deploy Boards](project/deploy_boards.md).
@@ -587,7 +587,7 @@ you can use the Container Registry to store Helm Charts. However, due to the way
 and stored by Docker, it is not possible for GitLab to parse this data and meet performance standards.
 [This epic](https://gitlab.com/groups/gitlab-org/-/epics/2313) updates the architecture of the Container Registry to support Helm Charts.

-You can read more about the above challenges [here](https://gitlab.com/gitlab-org/gitlab/-/issues/38047#note_298842890).
+[Read more about the above challenges](https://gitlab.com/gitlab-org/gitlab/-/issues/38047#note_298842890).

 ## Limitations

@@ -19,7 +19,7 @@ over [`git filter-branch`](https://git-scm.com/docs/git-filter-branch) and
 [BFG](https://rtyley.github.io/bfg-repo-cleaner/).

 DANGER: **Danger:**
-Rewriting repository history is a destructive operation. Make sure to backup your repository before
+Rewriting repository history is a destructive operation. Make sure to back up your repository before
 you begin. The best way back up a repository is to
 [export the project](../settings/import_export.md#exporting-a-project-and-its-data).

@@ -266,21 +266,20 @@ You can still:
 - Create new issues.
 - Clone the project.

-If you exceed the repository size limit, you might try to:
+If you exceed the repository size limit, you can:

 1. Remove some data.
 1. Make a new commit.
 1. Push back to the repository.

-Perhaps you might also:
+If these actions are insufficient, you can also:

 - Move some blobs to LFS.
 - Remove some old dependency updates from history.

-Unfortunately, this workflow won't work. Deleting files in a commit doesn't actually reduce the size
-of the repository because the earlier commits and blobs still exist.
-
-What you need to do is rewrite history. We recommend the open-source community-maintained tool
+Unfortunately, this workflow doesn't work. Deleting files in a commit doesn't actually reduce the
+size of the repository, because the earlier commits and blobs still exist. Instead, you must rewrite
+history. We recommend the open-source community-maintained tool
 [`git filter-repo`](https://github.com/newren/git-filter-repo).

 NOTE: **Note:**
@@ -131,8 +131,8 @@ There are two main ways of how you can discover snippets in GitLab.

 For exploring all snippets that are visible to you, you can go to the Snippets
 dashboard of your GitLab instance via the top navigation. For GitLab.com you can
-find it [here](https://gitlab.com/dashboard/snippets). This navigates you to an
-overview that shows snippets you created and allows you to explore all snippets.
+navigate to an [overview](https://gitlab.com/dashboard/snippets) that shows snippets
+you created and allows you to explore all snippets.

 If you want to discover snippets that belong to a specific project, you can navigate
 to the Snippets page via the left side navigation on the project page.
@@ -9,12 +9,14 @@ class Feature
   # optional: defines if a on-disk definition is required for this feature flag type
   # rollout_issue: defines if `bin/feature-flag` asks for rollout issue
   # default_enabled: defines a default state of a feature flag when created by `bin/feature-flag`
+  # ee_only: defines that a feature flag can only be created in a context of EE
   # example: usage being shown when exception is raised
   TYPES = {
     development: {
       description: 'Short lived, used to enable unfinished code to be deployed',
       optional: false,
       rollout_issue: true,
+      ee_only: false,
       default_enabled: false,
       example: <<-EOS
         Feature.enabled?(:my_feature_flag, project)
@@ -26,6 +28,7 @@ class Feature
       description: "Long-lived feature flags that control operational aspects of GitLab's behavior",
       optional: true,
       rollout_issue: false,
+      ee_only: false,
       default_enabled: false,
       example: <<-EOS
         Feature.enabled?(:my_ops_flag, type: ops)
@@ -36,6 +39,7 @@ class Feature
       description: 'Permanent feature flags used to temporarily disable licensed features.',
       optional: true,
       rollout_issue: false,
+      ee_only: true,
      default_enabled: true,
       example: <<-EOS
         project.feature_available?(:my_licensed_feature)
@@ -44,6 +48,8 @@ class Feature
     }
   }.freeze

+  # The ordering of PARAMS defines an order in YAML
+  # This is done to ease the file comparison
   PARAMS = %i[
     name
     introduced_by_url
@@ -1,144 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module Database
    class ConcurrentReindex
      include Gitlab::Utils::StrongMemoize
      include MigrationHelpers

      ReindexError = Class.new(StandardError)

      PG_IDENTIFIER_LENGTH = 63
      TEMPORARY_INDEX_PREFIX = 'tmp_reindex_'
      REPLACED_INDEX_PREFIX = 'old_reindex_'

      attr_reader :index_name, :logger

      def initialize(index_name, logger:)
        @index_name = index_name
        @logger = logger
      end

      def perform
        raise ReindexError, "index #{index_name} does not exist" unless index_exists?

        raise ReindexError, 'UNIQUE indexes are currently not supported' if index_unique?

        logger.debug("dropping dangling index from previous run: #{replacement_index_name}")
        remove_replacement_index

        begin
          create_replacement_index

          unless replacement_index_valid?
            message = 'replacement index was created as INVALID'
            logger.error("#{message}, cleaning up")
            raise ReindexError, "failed to reindex #{index_name}: #{message}"
          end

          swap_replacement_index
        rescue Gitlab::Database::WithLockRetries::AttemptsExhaustedError => e
          logger.error('failed to obtain the required database locks to swap the indexes, cleaning up')
          raise ReindexError, e.message
        rescue ActiveRecord::ActiveRecordError, PG::Error => e
          logger.error("database error while attempting reindex of #{index_name}: #{e.message}")
          raise ReindexError, e.message
        ensure
          logger.info("dropping unneeded replacement index: #{replacement_index_name}")
          remove_replacement_index
        end
      end

      private

      delegate :execute, to: :connection
      def connection
        @connection ||= ActiveRecord::Base.connection
      end

      def replacement_index_name
        @replacement_index_name ||= constrained_index_name(TEMPORARY_INDEX_PREFIX)
      end

      def index
        strong_memoize(:index) do
          find_index(index_name)
        end
      end

      def index_exists?
        !index.nil?
      end

      def index_unique?
        index.indisunique
      end

      def constrained_index_name(prefix)
        "#{prefix}#{index_name}".slice(0, PG_IDENTIFIER_LENGTH)
      end

      def create_replacement_index
        create_replacement_index_statement = index.indexdef
          .sub(/CREATE INDEX/, 'CREATE INDEX CONCURRENTLY')
          .sub(/#{index_name}/, replacement_index_name)

        logger.info("creating replacement index #{replacement_index_name}")
        logger.debug("replacement index definition: #{create_replacement_index_statement}")

        disable_statement_timeout do
          connection.execute(create_replacement_index_statement)
        end
      end

      def replacement_index_valid?
        find_index(replacement_index_name).indisvalid
      end

      def find_index(index_name)
        record = connection.select_one(<<~SQL)
          SELECT
            pg_index.indisunique,
            pg_index.indisvalid,
            pg_indexes.indexdef
          FROM pg_index
          INNER JOIN pg_class ON pg_class.oid = pg_index.indexrelid
          INNER JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
          INNER JOIN pg_indexes ON pg_class.relname = pg_indexes.indexname
          WHERE pg_namespace.nspname = 'public'
          AND pg_class.relname = #{connection.quote(index_name)}
        SQL

        OpenStruct.new(record) if record
      end

      def swap_replacement_index
        replaced_index_name = constrained_index_name(REPLACED_INDEX_PREFIX)

        logger.info("swapping replacement index #{replacement_index_name} with #{index_name}")

        with_lock_retries do
          rename_index(index_name, replaced_index_name)
          rename_index(replacement_index_name, index_name)
          rename_index(replaced_index_name, replacement_index_name)
        end
      end

      def rename_index(old_index_name, new_index_name)
        connection.execute("ALTER INDEX #{old_index_name} RENAME TO #{new_index_name}")
      end

      def remove_replacement_index
        disable_statement_timeout do
          connection.execute("DROP INDEX CONCURRENTLY IF EXISTS #{replacement_index_name}")
        end
      end

      def with_lock_retries(&block)
        arguments = { klass: self.class, logger: logger }

        Gitlab::Database::WithLockRetries.new(arguments).run(raise_on_exhaustion: true, &block)
      end
    end
  end
end

lib/gitlab/database/reindexing/concurrent_reindex.rb (new file, 112 lines)

@@ -0,0 +1,112 @@
# frozen_string_literal: true

module Gitlab
  module Database
    module Reindexing
      class ConcurrentReindex
        include Gitlab::Utils::StrongMemoize
        include MigrationHelpers

        ReindexError = Class.new(StandardError)

        PG_IDENTIFIER_LENGTH = 63
        TEMPORARY_INDEX_PREFIX = 'tmp_reindex_'
        REPLACED_INDEX_PREFIX = 'old_reindex_'

        attr_reader :index, :logger

        def initialize(index, logger: Gitlab::AppLogger)
          @index = index
          @logger = logger
        end

        def perform
          raise ReindexError, 'UNIQUE indexes are currently not supported' if index.unique?

          with_rebuilt_index do |replacement_index|
            swap_index(replacement_index)
          end
        end

        private

        def with_rebuilt_index
          logger.debug("dropping dangling index from previous run (if it exists): #{replacement_index_name}")
          remove_replacement_index

          create_replacement_index_statement = index.definition
            .sub(/CREATE INDEX/, 'CREATE INDEX CONCURRENTLY')
            .sub(/#{index.name}/, replacement_index_name)

          logger.info("creating replacement index #{replacement_index_name}")
          logger.debug("replacement index definition: #{create_replacement_index_statement}")

          disable_statement_timeout do
            connection.execute(create_replacement_index_statement)
          end

          replacement_index = Index.find_with_schema("#{index.schema}.#{replacement_index_name}")

          unless replacement_index.valid?
            message = 'replacement index was created as INVALID'
            logger.error("#{message}, cleaning up")
            raise ReindexError, "failed to reindex #{index}: #{message}"
          end

          yield replacement_index

        rescue Gitlab::Database::WithLockRetries::AttemptsExhaustedError => e
          logger.error('failed to obtain the required database locks to swap the indexes, cleaning up')
          raise ReindexError, e.message
        rescue ActiveRecord::ActiveRecordError, PG::Error => e
          logger.error("database error while attempting reindex of #{index}: #{e.message}")
          raise ReindexError, e.message
        ensure
          logger.info("dropping unneeded replacement index: #{replacement_index_name}")
          remove_replacement_index
        end

        def swap_index(replacement_index)
          replaced_index_name = constrained_index_name(REPLACED_INDEX_PREFIX)

          logger.info("swapping replacement index #{replacement_index} with #{index}")

          with_lock_retries do
            rename_index(index.name, replaced_index_name)
            rename_index(replacement_index.name, index.name)
            rename_index(replaced_index_name, replacement_index.name)
          end
        end

        def rename_index(old_index_name, new_index_name)
          connection.execute("ALTER INDEX #{old_index_name} RENAME TO #{new_index_name}")
        end

        def remove_replacement_index
          disable_statement_timeout do
            connection.execute("DROP INDEX CONCURRENTLY IF EXISTS #{replacement_index_name}")
          end
        end

        def replacement_index_name
          @replacement_index_name ||= constrained_index_name(TEMPORARY_INDEX_PREFIX)
        end

        def constrained_index_name(prefix)
          "#{prefix}#{index.name}".slice(0, PG_IDENTIFIER_LENGTH)
        end

        def with_lock_retries(&block)
          arguments = { klass: self.class, logger: logger }

          Gitlab::Database::WithLockRetries.new(arguments).run(raise_on_exhaustion: true, &block)
        end

        delegate :execute, to: :connection
        def connection
          @connection ||= ActiveRecord::Base.connection
        end
      end
    end
  end
end

lib/gitlab/database/reindexing/index.rb (new file, 52 lines)

@@ -0,0 +1,52 @@
# frozen_string_literal: true

module Gitlab
  module Database
    module Reindexing
      class Index
        def self.find_with_schema(full_name)
          raise ArgumentError, "Index name is not fully qualified with a schema: #{full_name}" unless full_name =~ /^\w+\.\w+$/

          schema, index = full_name.split('.')

          record = ActiveRecord::Base.connection.select_one(<<~SQL)
            SELECT
              pg_index.indisunique as is_unique,
              pg_index.indisvalid as is_valid,
              pg_indexes.indexdef as definition,
              pg_namespace.nspname as schema,
              pg_class.relname as name
            FROM pg_index
            INNER JOIN pg_class ON pg_class.oid = pg_index.indexrelid
            INNER JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid
            INNER JOIN pg_indexes ON pg_class.relname = pg_indexes.indexname
            WHERE pg_namespace.nspname = #{ActiveRecord::Base.connection.quote(schema)}
            AND pg_class.relname = #{ActiveRecord::Base.connection.quote(index)}
          SQL

          return unless record

          new(OpenStruct.new(record))
        end

        delegate :definition, :schema, :name, to: :@attrs

        def initialize(attrs)
          @attrs = attrs
        end

        def unique?
          @attrs.is_unique
        end

        def valid?
          @attrs.is_valid
        end

        def to_s
          name
        end
      end
    end
  end
end

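Putting the two new classes together, usage follows the pattern the reworked rake task below adopts; a minimal sketch (the index name is assumed for illustration):

    # Look up a schema-qualified index, then rebuild it without downtime.
    index = Gitlab::Database::Reindexing::Index.find_with_schema('public.some_index')
    raise ArgumentError, 'index does not exist' unless index

    Gitlab::Database::Reindexing::ConcurrentReindex.new(index).perform
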
@@ -80,6 +80,11 @@ module Gitlab
      @semver_regex ||= Regexp.new("\\A#{::Gitlab::Regex.unbounded_semver_regex.source}\\z", ::Gitlab::Regex.unbounded_semver_regex.options)
    end

    def prefixed_semver_regex
      # identical to semver_regex, except starting with 'v'
      @prefixed_semver_regex ||= Regexp.new("\\Av#{::Gitlab::Regex.unbounded_semver_regex.source}\\z", ::Gitlab::Regex.unbounded_semver_regex.options)
    end

    def go_package_regex
      # A Go package name looks like a URL but is not; it:
      # - Must not have a scheme, such as http:// or https://

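The behaviour of the new matcher, as pinned down by the regex spec later in this diff (a quick sketch):

    Gitlab::Regex.prefixed_semver_regex.match?('v1.2.3') # => true
    Gitlab::Regex.prefixed_semver_regex.match?('1.2.3')  # => false, the leading 'v' is required
    Gitlab::Regex.prefixed_semver_regex.match?('v1.2')   # => false, not a full semver version
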
@@ -170,13 +170,18 @@ namespace :gitlab do
    desc 'reindex a regular (non-unique) index without downtime to eliminate bloat'
    task :reindex, [:index_name] => :environment do |_, args|
      unless Feature.enabled?(:database_reindexing, type: :ops)
        puts "This feature (database_reindexing) is currently disabled.".yellow
        puts "This feature (database_reindexing) is currently disabled.".color(:yellow)
        exit
      end

      raise ArgumentError, 'must give the index name to reindex' unless args[:index_name]

      Gitlab::Database::ConcurrentReindex.new(args[:index_name], logger: Logger.new(STDOUT)).perform
      index = Gitlab::Database::Reindexing::Index.find_with_schema(args[:index_name])

      raise ArgumentError, "Given index does not exist: #{args[:index_name]}" unless index

      puts "Rebuilding index #{index}".color(:green)
      Gitlab::Database::Reindexing::ConcurrentReindex.new(index).perform
    end
  end
end

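With the task reworked, the index argument must now be schema-qualified; an invocation might look like this sketch (index name assumed, mirroring the rake spec further down):

    bundle exec rake 'gitlab:db:reindex[public.some_index]'
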
@@ -8,6 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: gitlab 1.0.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2020-09-18 11:00+1200\n"
"PO-Revision-Date: 2020-09-18 11:00+1200\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"

@@ -1317,9 +1319,15 @@ msgstr ""
msgid "Access expiration date"
msgstr ""

msgid "Access expires"
msgstr ""

msgid "Access forbidden. Check your access level."
msgstr ""

msgid "Access granted"
msgstr ""

msgid "Access requests"
msgstr ""

@@ -8853,6 +8861,9 @@ msgstr ""
msgid "DesignManagement|The maximum number of designs allowed to be uploaded is %{upload_limit}. Please try again."
msgstr ""

msgid "DesignManagement|There was an error moving your designs. Please upload your designs below."
msgstr ""

msgid "DesignManagement|To upload designs, you'll need to enable LFS and have admin enable hashed storage. %{requirements_link_start}More information%{requirements_link_end}"
msgstr ""

@@ -8865,6 +8876,9 @@ msgstr ""
msgid "DesignManagement|Upload skipped."
msgstr ""

msgid "DesignManagement|Your designs are being copied and are on their way… Please refresh to update."
msgstr ""

msgid "DesignManagement|and %{moreCount} more."
msgstr ""

@@ -15468,6 +15482,9 @@ msgstr ""
msgid "Max access level"
msgstr ""

msgid "Max role"
msgstr ""

msgid "Max size 15 MB"
msgstr ""

@@ -17232,6 +17249,9 @@ msgstr ""
msgid "No matching results for \"%{query}\""
msgstr ""

msgid "No members found"
msgstr ""

msgid "No merge requests found"
msgstr ""

@@ -21569,6 +21589,9 @@ msgstr ""
msgid "Request to link SAML account must be authorized"
msgstr ""

msgid "Requested"
msgstr ""

msgid "Requested %{time_ago}"
msgstr ""

@@ -30159,6 +30182,9 @@ msgstr ""
msgid "failed to dismiss associated finding(id=%{finding_id}): %{message}"
msgstr ""

msgid "failed to revert associated finding(id=%{finding_id}) to detected"
msgstr ""

msgid "file"
msgid_plural "files"
msgstr[0] ""

@@ -8,10 +8,6 @@ load File.expand_path('../../bin/feature-flag', __dir__)
RSpec.describe 'bin/feature-flag' do
  using RSpec::Parameterized::TableSyntax

  before do
    skip_feature_flags_yaml_validation
  end

  describe FeatureFlagCreator do
    let(:argv) { %w[feature-flag-name -t development -g group::memory -i https://url -m http://url] }
    let(:options) { FeatureFlagOptionParser.parse(argv) }

@@ -244,5 +240,18 @@ RSpec.describe 'bin/feature-flag' do
        end
      end
    end

    describe '.read_ee_only' do
      where(:type, :is_ee_only) do
        :development | false
        :licensed    | true
      end

      with_them do
        let(:options) { OpenStruct.new(name: 'foo', type: type) }

        it { expect(described_class.read_ee_only(options)).to eq(is_ee_only) }
      end
    end
  end
end

@@ -91,6 +91,12 @@ FactoryBot.define do
    end
  end

  factory :golang_package do
    sequence(:name) { |n| "golang.org/x/pkg-#{n}" }
    sequence(:version) { |n| "v1.0.#{n}" }
    package_type { :golang }
  end

  factory :conan_package do
    conan_metadatum

@@ -43,7 +43,7 @@ describe('Design management pagination component', () => {

  it('renders navigation buttons', () => {
    wrapper.setData({
      designs: [{ id: '1' }, { id: '2' }],
      designCollection: { designs: [{ id: '1' }, { id: '2' }] },
    });

    return wrapper.vm.$nextTick().then(() => {

@@ -54,7 +54,7 @@ describe('Design management pagination component', () => {
  describe('keyboard buttons navigation', () => {
    beforeEach(() => {
      wrapper.setData({
        designs: [{ filename: '1' }, { filename: '2' }, { filename: '3' }],
        designCollection: { designs: [{ filename: '1' }, { filename: '2' }, { filename: '3' }] },
      });
    });

@@ -4,6 +4,7 @@ export const designListQueryResponse = {
    id: '1',
    issue: {
      designCollection: {
        copyState: 'READY',
        designs: {
          nodes: [
            {

@@ -92,6 +92,8 @@ describe('Design management index page', () => {
  const findDesignCheckboxes = () => wrapper.findAll('.design-checkbox');
  const findSelectAllButton = () => wrapper.find('.js-select-all');
  const findToolbar = () => wrapper.find('.qa-selector-toolbar');
  const findDesignCollectionIsCopying = () =>
    wrapper.find('[data-testid="design-collection-is-copying"]');
  const findDeleteButton = () => wrapper.find(DeleteButton);
  const findDropzone = () => wrapper.findAll(DesignDropzone).at(0);
  const dropzoneClasses = () => findDropzone().classes();

@@ -116,8 +118,8 @@ describe('Design management index page', () => {

  function createComponent({
    loading = false,
    designs = [],
    allVersions = [],
    designCollection = { designs: mockDesigns, copyState: 'READY' },
    createDesign = true,
    stubs = {},
    mockMutate = jest.fn().mockResolvedValue(),

@@ -125,7 +127,7 @@ describe('Design management index page', () => {
    mutate = mockMutate;
    const $apollo = {
      queries: {
        designs: {
        designCollection: {
          loading,
        },
        permissions: {

@@ -138,8 +140,8 @@ describe('Design management index page', () => {
    wrapper = shallowMount(Index, {
      data() {
        return {
          designs,
          allVersions,
          designCollection,
          permissions: {
            createDesign,
          },

@@ -201,13 +203,13 @@ describe('Design management index page', () => {
  });

  it('renders a toolbar with buttons when there are designs', () => {
    createComponent({ designs: mockDesigns, allVersions: [mockVersion] });
    createComponent({ allVersions: [mockVersion] });

    expect(findToolbar().exists()).toBe(true);
  });

  it('renders designs list and header with upload button', () => {
    createComponent({ designs: mockDesigns, allVersions: [mockVersion] });
    createComponent({ allVersions: [mockVersion] });

    expect(wrapper.element).toMatchSnapshot();
  });

@@ -237,7 +239,7 @@ describe('Design management index page', () => {

  describe('when has no designs', () => {
    beforeEach(() => {
      createComponent();
      createComponent({ designCollection: { designs: [], copyState: 'READY' } });
    });

    it('renders design dropzone', () =>

@@ -260,6 +262,21 @@ describe('Design management index page', () => {
    }));
  });

  describe('handling design collection copy state', () => {
    it.each`
      copyState    | isRendered | description
      ${'COPYING'} | ${true}    | ${'renders'}
      ${'READY'}   | ${false}   | ${'does not render'}
      ${'ERROR'}   | ${false}   | ${'does not render'}
    `(
      '$description the copying message if design collection copyState is $copyState',
      ({ copyState, isRendered }) => {
        createComponent({ designCollection: { designs: [], copyState } });
        expect(findDesignCollectionIsCopying().exists()).toBe(isRendered);
      },
    );
  });

  describe('uploading designs', () => {
    it('calls mutation on upload', () => {
      createComponent({ stubs: { GlEmptyState } });

@@ -283,6 +300,10 @@ describe('Design management index page', () => {
        {
          __typename: 'Design',
          id: expect.anything(),
          currentUserTodos: {
            __typename: 'TodoConnection',
            nodes: [],
          },
          image: '',
          imageV432x230: '',
          filename: 'test',

@@ -532,13 +553,16 @@ describe('Design management index page', () => {
  });

  it('on latest version when has no designs toolbar buttons are invisible', () => {
    createComponent({ designs: [], allVersions: [mockVersion] });
    createComponent({
      designCollection: { designs: [], copyState: 'READY' },
      allVersions: [mockVersion],
    });
    expect(findToolbar().isVisible()).toBe(false);
  });

  describe('on non-latest version', () => {
    beforeEach(() => {
      createComponent({ designs: mockDesigns, allVersions: [mockVersion] });
      createComponent({ allVersions: [mockVersion] });
    });

    it('does not render design checkboxes', async () => {

@@ -25,7 +25,7 @@ function factory(routeArg) {
    mocks: {
      $apollo: {
        queries: {
          designs: { loading: true },
          designCollection: { loading: true },
          design: { loading: true },
          permissions: { loading: true },
        },

@@ -93,6 +93,10 @@ describe('optimistic responses', () => {
      fullPath: '',
      notesCount: 0,
      event: 'NONE',
      currentUserTodos: {
        __typename: 'TodoConnection',
        nodes: [],
      },
      diffRefs: { __typename: 'DiffRefs', baseSha: '', startSha: '', headSha: '' },
      discussions: { __typename: 'DesignDiscussion', nodes: [] },
      versions: {

@@ -1,5 +1,5 @@
import { createWrapper } from '@vue/test-utils';
import initGroupMembersApp from '~/groups/members';
import { initGroupMembersApp } from '~/groups/members';
import GroupMembersApp from '~/groups/members/components/app.vue';
import { membersJsonString, membersParsed } from './mock_data';

@@ -9,7 +9,7 @@ describe('initGroupMembersApp', () => {
  let wrapper;

  const setup = () => {
    vm = initGroupMembersApp(el);
    vm = initGroupMembersApp(el, ['account']);
    wrapper = createWrapper(vm);
  };

@@ -63,4 +63,10 @@ describe('initGroupMembersApp', () => {

    expect(vm.$store.state.members).toEqual(membersParsed);
  });

  it('sets `tableFields` in Vuex store', () => {
    setup();

    expect(vm.$store.state.tableFields).toEqual(['account']);
  });
});

@@ -84,6 +84,14 @@ describe('fetchLogsTree', () => {
      expect(axios.get.mock.calls.length).toEqual(1);
    }));

  it('calls axios for each path', () =>
    Promise.all([
      fetchLogsTree(client, '', '0', resolver),
      fetchLogsTree(client, '/test', '0', resolver),
    ]).then(() => {
      expect(axios.get.mock.calls.length).toEqual(2);
    }));

  it('calls entry resolver', () =>
    fetchLogsTree(client, '', '0', resolver).then(() => {
      expect(resolver.resolve).toHaveBeenCalledWith(

@@ -0,0 +1,88 @@
import { mount, createLocalVue, createWrapper } from '@vue/test-utils';
import Vuex from 'vuex';
import {
  getByText as getByTextHelper,
  getByTestId as getByTestIdHelper,
} from '@testing-library/dom';
import MembersTable from '~/vue_shared/components/members/table/members_table.vue';
import * as initUserPopovers from '~/user_popovers';

const localVue = createLocalVue();
localVue.use(Vuex);

describe('MemberList', () => {
  let wrapper;

  const createStore = (state = {}) => {
    return new Vuex.Store({
      state: {
        members: [],
        tableFields: [],
        ...state,
      },
    });
  };

  const createComponent = state => {
    wrapper = mount(MembersTable, {
      localVue,
      store: createStore(state),
      stubs: ['member-avatar'],
    });
  };

  const getByText = (text, options) =>
    createWrapper(getByTextHelper(wrapper.element, text, options));

  const getByTestId = (id, options) =>
    createWrapper(getByTestIdHelper(wrapper.element, id, options));

  afterEach(() => {
    wrapper.destroy();
    wrapper = null;
  });

  describe('fields', () => {
    it.each`
      field           | label
      ${'source'}     | ${'Source'}
      ${'granted'}    | ${'Access granted'}
      ${'invited'}    | ${'Invited'}
      ${'requested'}  | ${'Requested'}
      ${'expires'}    | ${'Access expires'}
      ${'maxRole'}    | ${'Max role'}
      ${'expiration'} | ${'Expiration'}
    `('renders the $label field', ({ field, label }) => {
      createComponent({
        tableFields: [field],
      });

      expect(getByText(label, { selector: '[role="columnheader"]' }).exists()).toBe(true);
    });

    it('renders "Actions" field for screen readers', () => {
      createComponent({ tableFields: ['actions'] });

      const actionField = getByTestId('col-actions');

      expect(actionField.exists()).toBe(true);
      expect(actionField.classes('gl-sr-only')).toBe(true);
    });
  });

  describe('when `members` is an empty array', () => {
    it('displays a "No members found" message', () => {
      createComponent();

      expect(getByText('No members found').exists()).toBe(true);
    });
  });

  it('initializes user popovers when mounted', () => {
    const initUserPopoversMock = jest.spyOn(initUserPopovers, 'default');

    createComponent();

    expect(initUserPopoversMock).toHaveBeenCalled();
  });
});

@@ -4,6 +4,6 @@ require 'spec_helper'

RSpec.describe GitlabSchema.types['PackageTypeEnum'] do
  it 'exposes all package types' do
    expect(described_class.values.keys).to contain_exactly(*%w[MAVEN NPM CONAN NUGET PYPI COMPOSER GENERIC])
    expect(described_class.values.keys).to contain_exactly(*%w[MAVEN NPM CONAN NUGET PYPI COMPOSER GENERIC GOLANG])
  end
end

@@ -2,12 +2,13 @@

require 'spec_helper'

RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
  subject { described_class.new(index_name, logger: logger) }
RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
  subject { described_class.new(index, logger: logger) }

  let(:table_name) { '_test_reindex_table' }
  let(:column_name) { '_test_column' }
  let(:index_name) { '_test_reindex_index' }
  let(:index) { double('index', name: index_name, schema: 'public', unique?: false, definition: 'CREATE INDEX _test_reindex_index ON public._test_reindex_table USING btree (_test_column)') }
  let(:logger) { double('logger', debug: nil, info: nil, error: nil) }
  let(:connection) { ActiveRecord::Base.connection }

@@ -17,29 +18,12 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
        id serial NOT NULL PRIMARY KEY,
        #{column_name} integer NOT NULL);

      CREATE INDEX #{index_name} ON #{table_name} (#{column_name});
      CREATE INDEX #{index.name} ON #{table_name} (#{column_name});
    SQL
  end

  context 'when the index does not exist' do
    before do
      connection.execute(<<~SQL)
        DROP INDEX #{index_name}
      SQL
    end

    it 'raises an error' do
      expect { subject.perform }.to raise_error(described_class::ReindexError, /does not exist/)
    end
  end

  context 'when the index is unique' do
    before do
      connection.execute(<<~SQL)
        DROP INDEX #{index_name};
        CREATE UNIQUE INDEX #{index_name} ON #{table_name} (#{column_name})
      SQL
    end
    let(:index) { double('index', name: index_name, unique?: true, definition: 'CREATE INDEX _test_reindex_index ON public._test_reindex_table USING btree (_test_column)') }

    it 'raises an error' do
      expect do

@@ -83,8 +67,8 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
        expect(instance).to receive(:run).with(raise_on_exhaustion: true).and_yield
      end

      expect_to_execute_in_order("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
      expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index_name}")
      expect_to_execute_in_order("ALTER INDEX #{index.name} RENAME TO #{replaced_name}")
      expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index.name}")
      expect_to_execute_in_order("ALTER INDEX #{replaced_name} RENAME TO #{replacement_name}")

      expect_to_execute_concurrently_in_order(drop_index)

@@ -109,8 +93,8 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
        expect(instance).to receive(:run).with(raise_on_exhaustion: true).and_yield
      end

      expect_to_execute_in_order("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
      expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index_name}")
      expect_to_execute_in_order("ALTER INDEX #{index.name} RENAME TO #{replaced_name}")
      expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index.name}")
      expect_to_execute_in_order("ALTER INDEX #{replaced_name} RENAME TO #{replacement_name}")

      expect_to_execute_concurrently_in_order(drop_index)

@@ -141,7 +125,8 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
      expect_to_execute_concurrently_in_order(drop_index)
      expect_to_execute_concurrently_in_order(create_index)

      expect(subject).to receive(:replacement_index_valid?).and_return(false)
      replacement_index = double('replacement index', valid?: false)
      allow(Gitlab::Database::Reindexing::Index).to receive(:find_with_schema).with("public.#{replacement_name}").and_return(replacement_index)

      expect_to_execute_concurrently_in_order(drop_index)

@@ -161,8 +146,8 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
      end

      expect(connection).to receive(:execute).ordered
        .with("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
        .and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
        .with("ALTER INDEX #{index.name} RENAME TO #{replaced_name}")
        .and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')

      expect_to_execute_concurrently_in_order(drop_index)

@@ -209,7 +194,7 @@ RSpec.describe Gitlab::Database::ConcurrentReindex, '#perform' do
        SELECT indexdef
        FROM pg_indexes
        WHERE schemaname = 'public'
        AND indexname = #{ActiveRecord::Base.connection.quote(index_name)}
        AND indexname = #{ActiveRecord::Base.connection.quote(index.name)}
      SQL
    end

spec/lib/gitlab/database/reindexing/index_spec.rb (new file, 86 lines)

@@ -0,0 +1,86 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::Database::Reindexing::Index do
  before do
    ActiveRecord::Base.connection.execute(<<~SQL)
      CREATE INDEX foo_idx ON public.users (name);
      CREATE UNIQUE INDEX bar_key ON public.users (id);

      CREATE TABLE example_table (id serial primary key);
    SQL
  end

  def find(name)
    described_class.find_with_schema(name)
  end

  describe '.find_with_schema' do
    it 'returns an instance of Gitlab::Database::Reindexing::Index when the index is present' do
      expect(find('public.foo_idx')).to be_a(Gitlab::Database::Reindexing::Index)
    end

    it 'returns nil if the index is not present' do
      expect(find('public.idontexist')).to be_nil
    end

    it 'raises ArgumentError if given a non-fully qualified index name' do
      expect { find('foo') }.to raise_error(ArgumentError, /not fully qualified/)
    end
  end

  describe '#unique?' do
    it 'returns true for a unique index' do
      expect(find('public.bar_key')).to be_unique
    end

    it 'returns false for a regular, non-unique index' do
      expect(find('public.foo_idx')).not_to be_unique
    end

    it 'returns true for a primary key index' do
      expect(find('public.example_table_pkey')).to be_unique
    end
  end

  describe '#valid?' do
    it 'returns true if the index is valid' do
      expect(find('public.foo_idx')).to be_valid
    end

    it 'returns false if the index is marked as invalid' do
      ActiveRecord::Base.connection.execute(<<~SQL)
        UPDATE pg_index SET indisvalid=false
        FROM pg_class
        WHERE pg_class.relname = 'foo_idx' AND pg_index.indexrelid = pg_class.oid
      SQL

      expect(find('public.foo_idx')).not_to be_valid
    end
  end

  describe '#to_s' do
    it 'returns the index name' do
      expect(find('public.foo_idx').to_s).to eq('foo_idx')
    end
  end

  describe '#name' do
    it 'returns the name' do
      expect(find('public.foo_idx').name).to eq('foo_idx')
    end
  end

  describe '#schema' do
    it 'returns the index schema' do
      expect(find('public.foo_idx').schema).to eq('public')
    end
  end

  describe '#definition' do
    it 'returns the index definition' do
      expect(find('public.foo_idx').definition).to eq('CREATE INDEX foo_idx ON public.users USING btree (name)')
    end
  end
end

@@ -448,4 +448,17 @@ RSpec.describe Gitlab::Regex do
    it { is_expected.not_to match('my file name') }
    it { is_expected.not_to match('!!()()') }
  end

  describe '.prefixed_semver_regex' do
    subject { described_class.prefixed_semver_regex }

    it { is_expected.to match('v1.2.3') }
    it { is_expected.to match('v1.2.3-beta') }
    it { is_expected.to match('v1.2.3-alpha.3') }
    it { is_expected.not_to match('v1') }
    it { is_expected.not_to match('v1.2') }
    it { is_expected.not_to match('v1./2.3') }
    it { is_expected.not_to match('v../../../../../1.2.3') }
    it { is_expected.not_to match('v%2e%2e%2f1.2.3') }
  end
end

@@ -61,6 +61,32 @@ RSpec.describe CleanupGroupImportStatesWithNullUserId, :migration,
      expect { group_import_state_3.reload }.to raise_error(ActiveRecord::RecordNotFound)
    end
  end

  context 'when group has parent' do
    it 'updates user_id with parent group default owner id' do
      user = users_table.create!(name: 'user4', email: 'user4@example.com', projects_limit: 1)
      group_1 = namespaces_table.create!(name: 'group_1', path: 'group_1', type: 'Group')
      create_member(user_id: user.id, type: 'GroupMember', source_type: 'Namespace', source_id: group_1.id, access_level: described_class::Group::OWNER)
      group_2 = namespaces_table.create!(name: 'group_2', path: 'group_2', type: 'Group', parent_id: group_1.id)
      group_import_state = group_import_states_table.create!(group_id: group_2.id, user_id: nil, status: 0)

      disable_migrations_output { migrate! }

      expect(group_import_state.reload.user_id).to eq(user.id)
    end
  end

  context 'when group has owner_id' do
    it 'updates user_id with owner_id' do
      user = users_table.create!(name: 'user', email: 'user@example.com', projects_limit: 1)
      group = namespaces_table.create!(name: 'group', path: 'group', type: 'Group', owner_id: user.id)
      group_import_state = group_import_states_table.create!(group_id: group.id, user_id: nil, status: 0)

      disable_migrations_output { migrate! }

      expect(group_import_state.reload.user_id).to eq(user.id)
    end
  end
end

def create_member(options)

@@ -71,6 +71,11 @@ RSpec.describe DesignManagement::DesignPolicy do
    end
  end

  shared_examples_for "read-only design abilities" do
    it { is_expected.to be_allowed(*guest_design_abilities) }
    it { is_expected.to be_disallowed(*developer_design_abilities) }
  end

  shared_examples_for "design abilities available for members" do
    context "for owners" do
      let(:current_user) { owner }

@@ -86,8 +91,7 @@ RSpec.describe DesignManagement::DesignPolicy do
      end

      context "when admin mode disabled" do
        it { is_expected.to be_allowed(*guest_design_abilities) }
        it { is_expected.to be_disallowed(*developer_design_abilities) }
        it_behaves_like "read-only design abilities"
      end
    end

@@ -106,16 +110,10 @@ RSpec.describe DesignManagement::DesignPolicy do
    context "for reporters" do
      let(:current_user) { reporter }

      it { is_expected.to be_allowed(*guest_design_abilities) }
      it { is_expected.to be_disallowed(*developer_design_abilities) }
      it_behaves_like "read-only design abilities"
    end
  end

  shared_examples_for "read-only design abilities" do
    it { is_expected.to be_allowed(:read_design) }
    it { is_expected.to be_disallowed(:create_design, :destroy_design) }
  end

  context "when DesignManagement is not enabled" do
    before do
      enable_design_management(false)

@@ -135,15 +133,13 @@ RSpec.describe DesignManagement::DesignPolicy do
    let_it_be(:project) { create(:project, :private) }
    let(:current_user) { guest }

    it { is_expected.to be_allowed(*guest_design_abilities) }
    it { is_expected.to be_disallowed(*developer_design_abilities) }
    it_behaves_like "read-only design abilities"
  end

  context "for anonymous users in public projects" do
    let(:current_user) { nil }

    it { is_expected.to be_allowed(*guest_design_abilities) }
    it { is_expected.to be_disallowed(*developer_design_abilities) }
    it_behaves_like "read-only design abilities"
  end

  context "when the issue is confidential" do

@@ -1,11 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe 'Instance Statistics', 'routing' do
  include RSpec::Rails::RequestExampleGroup

  it "routes '/-/instance_statistics' to dev ops report" do
    expect(get('/-/instance_statistics')).to redirect_to('/admin/dev_ops_report')
  end
end

@@ -66,7 +66,11 @@ RSpec.configure do |config|
  config.display_try_failure_messages = true

  config.infer_spec_type_from_file_location!
  config.full_backtrace = !!ENV['CI']

  # Add :full_backtrace tag to an example if full_backtrace output is desired
  config.before(:each, full_backtrace: true) do |example|
    config.full_backtrace = true
  end

  unless ENV['CI']
    # Re-run failures locally with `--only-failures`

@@ -170,6 +170,7 @@ RSpec.shared_examples 'filters on each package_type' do |is_project: false|
  let_it_be(:package5) { create(:pypi_package, project: project) }
  let_it_be(:package6) { create(:composer_package, project: project) }
  let_it_be(:package7) { create(:generic_package, project: project) }
  let_it_be(:package8) { create(:golang_package, project: project) }

  Packages::Package.package_types.keys.each do |package_type|
    context "for package type #{package_type}" do

@@ -173,16 +173,23 @@ RSpec.describe 'gitlab:db namespace rake task' do
      end
    end

    it 'calls the index rebuilder with the proper arguments' do
      reindex = double('rebuilder')
    context 'with index name given' do
      let(:index) { double('index') }
      let(:reindex) { double('reindex') }

      expect(Gitlab::Database::ConcurrentReindex).to receive(:new)
        .with('some_index_name', logger: instance_of(Logger))
        .and_return(reindex)
      it 'calls the index rebuilder with the proper arguments' do
        expect(Gitlab::Database::Reindexing::Index).to receive(:find_with_schema).with('public.foo_idx').and_return(index)
        expect(Gitlab::Database::Reindexing::ConcurrentReindex).to receive(:new).with(index, any_args).and_return(reindex)
        expect(reindex).to receive(:perform)

        expect(reindex).to receive(:perform)
        run_rake_task('gitlab:db:reindex', '[public.foo_idx]')
      end

      run_rake_task('gitlab:db:reindex', '[some_index_name]')
      it 'raises an error if the index does not exist' do
        expect(Gitlab::Database::Reindexing::Index).to receive(:find_with_schema).with('public.absent_index').and_return(nil)

        expect { run_rake_task('gitlab:db:reindex', '[public.absent_index]') }.to raise_error(ArgumentError, /index does not exist/)
      end
    end
  end

spec/workers/concerns/limited_capacity/job_tracker_spec.rb (new file, 100 lines)

@@ -0,0 +1,100 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe LimitedCapacity::JobTracker, :clean_gitlab_redis_queues do
  let(:job_tracker) do
    described_class.new('namespace')
  end

  describe '#register' do
    it 'adds jid to the set' do
      job_tracker.register('a-job-id')

      expect(job_tracker.running_jids).to contain_exactly('a-job-id')
    end

    it 'updates the counter' do
      expect { job_tracker.register('a-job-id') }
        .to change { job_tracker.count }
        .from(0)
        .to(1)
    end

    it 'does it in only one Redis call' do
      expect(job_tracker).to receive(:with_redis).once.and_call_original

      job_tracker.register('a-job-id')
    end
  end

  describe '#remove' do
    before do
      job_tracker.register(%w[a-job-id other-job-id])
    end

    it 'removes jid from the set' do
      job_tracker.remove('other-job-id')

      expect(job_tracker.running_jids).to contain_exactly('a-job-id')
    end

    it 'updates the counter' do
      expect { job_tracker.remove('other-job-id') }
        .to change { job_tracker.count }
        .from(2)
        .to(1)
    end

    it 'does it in only one Redis call' do
      expect(job_tracker).to receive(:with_redis).once.and_call_original

      job_tracker.remove('other-job-id')
    end
  end

  describe '#clean_up' do
    before do
      job_tracker.register('a-job-id')
    end

    context 'with running jobs' do
      before do
        expect(Gitlab::SidekiqStatus).to receive(:completed_jids)
          .with(%w[a-job-id])
          .and_return([])
      end

      it 'does not remove the jid from the set' do
        expect { job_tracker.clean_up }
          .not_to change { job_tracker.running_jids.include?('a-job-id') }
      end

      it 'does only one Redis call to get the job ids' do
        expect(job_tracker).to receive(:with_redis).once.and_call_original

        job_tracker.clean_up
      end
    end

    context 'with completed jobs' do
      it 'removes the jid from the set' do
        expect { job_tracker.clean_up }
          .to change { job_tracker.running_jids.include?('a-job-id') }
      end

      it 'updates the counter' do
        expect { job_tracker.clean_up }
          .to change { job_tracker.count }
          .from(1)
          .to(0)
      end

      it 'gets the job ids, removes them, and updates the counter with only two Redis calls' do
        expect(job_tracker).to receive(:with_redis).twice.and_call_original

        job_tracker.clean_up
      end
    end
  end
end

spec/workers/concerns/limited_capacity/worker_spec.rb (new file, 285 lines)

@@ -0,0 +1,285 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe LimitedCapacity::Worker, :clean_gitlab_redis_queues, :aggregate_failures do
  let(:worker_class) do
    Class.new do
      def self.name
        'DummyWorker'
      end

      include ApplicationWorker
      include LimitedCapacity::Worker
    end
  end

  let(:worker) { worker_class.new }

  let(:job_tracker) do
    LimitedCapacity::JobTracker.new(worker_class.name)
  end

  before do
    worker.jid = 'my-jid'
    allow(worker).to receive(:job_tracker).and_return(job_tracker)
  end

  describe 'required methods' do
    it { expect { worker.perform_work }.to raise_error(NotImplementedError) }
    it { expect { worker.remaining_work_count }.to raise_error(NotImplementedError) }
    it { expect { worker.max_running_jobs }.to raise_error(NotImplementedError) }
  end

  describe 'Sidekiq options' do
    it 'does not retry failed jobs' do
      expect(worker_class.sidekiq_options['retry']).to eq(0)
    end

    it 'does not deduplicate jobs' do
      expect(worker_class.get_deduplicate_strategy).to eq(:none)
    end
  end

  describe '.perform_with_capacity' do
    subject(:perform_with_capacity) { worker_class.perform_with_capacity(:arg) }

    before do
      expect_next_instance_of(worker_class) do |instance|
        expect(instance).to receive(:remove_failed_jobs)
        expect(instance).to receive(:report_prometheus_metrics)

        allow(instance).to receive(:remaining_work_count).and_return(remaining_work_count)
        allow(instance).to receive(:remaining_capacity).and_return(remaining_capacity)
      end
    end

    context 'when capacity is larger than work' do
      let(:remaining_work_count) { 2 }
      let(:remaining_capacity) { 3 }

      it 'enqueues jobs for remaining work' do
        expect(worker_class)
          .to receive(:bulk_perform_async)
          .with([[:arg], [:arg]])

        perform_with_capacity
      end
    end

    context 'when capacity is lower than work' do
      let(:remaining_work_count) { 5 }
      let(:remaining_capacity) { 3 }

      it 'enqueues jobs for remaining work' do
        expect(worker_class)
          .to receive(:bulk_perform_async)
          .with([[:arg], [:arg], [:arg]])

        perform_with_capacity
      end
    end
  end

  describe '#perform' do
    subject(:perform) { worker.perform(:arg) }

    context 'with capacity' do
      before do
        allow(worker).to receive(:max_running_jobs).and_return(10)
        allow(worker).to receive(:running_jobs_count).and_return(0)
        allow(worker).to receive(:remaining_work_count).and_return(0)
      end

      it 'calls perform_work' do
        expect(worker).to receive(:perform_work).with(:arg)

        perform
      end

      it 're-enqueues itself' do
        allow(worker).to receive(:perform_work)
        expect(worker).to receive(:re_enqueue).with(:arg)

        perform
      end

      it 'registers itself in the running set' do
        allow(worker).to receive(:perform_work)
        expect(job_tracker).to receive(:register).with('my-jid')

        perform
      end

      it 'removes itself from the running set' do
        expect(job_tracker).to receive(:remove).with('my-jid')

        allow(worker).to receive(:perform_work)

        perform
      end

      it 'reports prometheus metrics' do
        allow(worker).to receive(:perform_work)
        expect(worker).to receive(:report_prometheus_metrics)

        perform
      end
    end

    context 'with capacity and without work' do
      before do
        allow(worker).to receive(:max_running_jobs).and_return(10)
        allow(worker).to receive(:running_jobs_count).and_return(0)
        allow(worker).to receive(:remaining_work_count).and_return(0)
        allow(worker).to receive(:perform_work)
      end

      it 'does not re-enqueue itself' do
        expect(worker_class).not_to receive(:perform_async)

        perform
      end
    end

    context 'without capacity' do
      before do
        allow(worker).to receive(:max_running_jobs).and_return(10)
        allow(worker).to receive(:running_jobs_count).and_return(15)
        allow(worker).to receive(:remaining_work_count).and_return(10)
      end

      it 'does not call perform_work' do
        expect(worker).not_to receive(:perform_work)

        perform
      end

      it 'does not re-enqueue itself' do
        expect(worker_class).not_to receive(:perform_async)

        perform
      end

      it 'does not register in the running set' do
        expect(job_tracker).not_to receive(:register)

        perform
      end

      it 'removes itself from the running set' do
        expect(job_tracker).to receive(:remove).with('my-jid')

        perform
      end

      it 'reports prometheus metrics' do
        expect(worker).to receive(:report_prometheus_metrics)

        perform
      end
    end

    context 'when perform_work fails' do
      it 'does not re-enqueue itself' do
        expect(worker).not_to receive(:re_enqueue)

        expect { perform }.to raise_error(NotImplementedError)
      end

      it 'removes itself from the running set' do
        expect(job_tracker).to receive(:remove)

        expect { perform }.to raise_error(NotImplementedError)
      end

      it 'reports prometheus metrics' do
        expect(worker).to receive(:report_prometheus_metrics)

        expect { perform }.to raise_error(NotImplementedError)
      end
    end
  end

  describe '#remaining_capacity' do
    subject(:remaining_capacity) { worker.remaining_capacity }

    before do
      expect(worker).to receive(:max_running_jobs).and_return(max_capacity)
    end

    context 'when changing the capacity to a lower value' do
      let(:max_capacity) { -1 }

      it { expect(remaining_capacity).to eq(0) }
    end

    context 'when registering new jobs' do
      let(:max_capacity) { 2 }

      before do
        job_tracker.register('a-job-id')
      end

      it { expect(remaining_capacity).to eq(1) }
    end

    context 'with jobs in the queue' do
      let(:max_capacity) { 2 }

      before do
        expect(worker_class).to receive(:queue_size).and_return(1)
      end

      it { expect(remaining_capacity).to eq(1) }
    end

    context 'with both running jobs and queued jobs' do
      let(:max_capacity) { 10 }

      before do
        expect(worker_class).to receive(:queue_size).and_return(5)
        expect(worker).to receive(:running_jobs_count).and_return(3)
      end

      it { expect(remaining_capacity).to eq(2) }
    end
  end

  describe '#remove_failed_jobs' do
    subject(:remove_failed_jobs) { worker.remove_failed_jobs }

    before do
      job_tracker.register('a-job-id')
      allow(worker).to receive(:max_running_jobs).and_return(2)

      expect(job_tracker).to receive(:clean_up).and_call_original
    end

    context 'with failed jobs' do
      it 'updates the available capacity' do
        expect { remove_failed_jobs }.to change { worker.remaining_capacity }.by(1)
      end
    end
  end

  describe '#report_prometheus_metrics' do
    subject(:report_prometheus_metrics) { worker.report_prometheus_metrics }

    before do
      allow(worker).to receive(:running_jobs_count).and_return(5)
      allow(worker).to receive(:max_running_jobs).and_return(7)
      allow(worker).to receive(:remaining_work_count).and_return(9)
    end

    it 'reports number of running jobs' do
      labels = { worker: 'DummyWorker' }

      report_prometheus_metrics

      expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_running_jobs).get(labels)).to eq(5)
      expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_max_running_jobs).get(labels)).to eq(7)
      expect(Gitlab::Metrics.registry.get(:limited_capacity_worker_remaining_work_count).get(labels)).to eq(9)
    end
  end
end

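For context, a minimal sketch of a concrete worker satisfying the contract this spec exercises (the class and its work source are hypothetical, not from this diff):

    # Hypothetical worker draining a backlog with bounded parallelism.
    class DummyBacklogWorker
      include ApplicationWorker
      include LimitedCapacity::Worker

      # Process exactly one unit of work per job execution.
      def perform_work(*_args)
        # ... handle a single backlog item ...
      end

      # How many items still wait; drives re-enqueueing after each run.
      def remaining_work_count
        BacklogItem.pending.count # hypothetical model
      end

      # Hard cap on concurrently running jobs.
      def max_running_jobs
        5
      end
    end

    # Enqueue as many jobs as current capacity allows:
    # DummyBacklogWorker.perform_with_capacity(:arg)
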