Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2022-10-06 15:09:39 +00:00
parent 3e9c050126
commit d4806ad7db
107 changed files with 1698 additions and 1239 deletions

View File

@ -98,7 +98,9 @@ variables:
FRONTEND_FIXTURES_MAPPING_PATH: crystalball/frontend_fixtures_mapping.json
RSPEC_CHANGED_FILES_PATH: rspec/changed_files.txt
RSPEC_MATCHING_TESTS_PATH: rspec/matching_tests.txt
RSPEC_MATCHING_TESTS_FOSS_PATH: rspec/matching_tests-foss.txt
RSPEC_LAST_RUN_RESULTS_FILE: rspec/rspec_last_run_results.txt
RSPEC_FOSS_IMPACT_PIPELINE_YML: rspec-foss-impact-pipeline.yml
JUNIT_RESULT_FILE: rspec/junit_rspec.xml
JUNIT_RETRY_FILE: rspec/junit_rspec-retry.xml

View File

@ -1,160 +1,5 @@
#######################
# rspec job base specs
.rails-job-base:
extends:
- .default-retry
- .default-before_script
- .rails-cache
.base-script:
script:
- source ./scripts/rspec_helpers.sh
# Only install knapsack after bundle install! Otherwise oddly some native
# gems could not be found under some circumstance. No idea why, hours wasted.
- run_timed_command "gem install knapsack --no-document"
- echo -e "\e[0Ksection_start:`date +%s`:gitaly-test-spawn[collapsed=true]\r\e[0KStarting Gitaly"
- run_timed_command "scripts/gitaly-test-spawn" # Do not use 'bundle exec' here
- echo -e "\e[0Ksection_end:`date +%s`:gitaly-test-spawn\r\e[0K"
.minimal-rspec-tests:
variables:
RSPEC_TESTS_MAPPING_ENABLED: "true"
.single-db:
variables:
DECOMPOSED_DB: "false"
.single-db-rspec:
extends: .single-db
.praefect-with-db:
variables:
GITALY_PRAEFECT_WITH_DB: '1'
.rspec-base:
extends:
- .rails-job-base
- .base-artifacts
stage: test
variables:
RUBY_GC_MALLOC_LIMIT: 67108864
RUBY_GC_MALLOC_LIMIT_MAX: 134217728
RECORD_DEPRECATIONS: "true"
GEO_SECONDARY_PROXY: 0
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets", "detect-tests"]
script:
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag ~level:migration"
.base-artifacts:
artifacts:
expire_in: 31d
when: always
paths:
- coverage/
- crystalball/
- deprecations/
- knapsack/
- rspec/
- tmp/capybara/
- log/*.log
reports:
junit: ${JUNIT_RESULT_FILE}
.rspec-base-migration:
extends:
- .base-artifacts
- .rails:rules:ee-and-foss-migration
script:
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag level:migration"
.rspec-base-pg11:
extends:
- .rspec-base
- .use-pg11
.rspec-base-pg12:
extends:
- .rspec-base
- .use-pg12
.rspec-base-pg12-as-if-foss:
extends:
- .rspec-base
- .as-if-foss
- .use-pg12
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-foss", "detect-tests"]
.rspec-base-pg13:
extends:
- .rspec-base
- .use-pg13
.rspec-ee-base-pg11:
extends:
- .rspec-base
- .use-pg11-ee
.rspec-ee-base-pg12:
extends:
- .rspec-base
- .use-pg12-ee
.rspec-ee-base-pg12-es8:
extends:
- .rspec-base
- .use-pg12-es8-ee
- .rails:rules:run-search-tests
.rspec-ee-base-pg12-opensearch1:
extends:
- .rspec-base
- .use-pg12-opensearch1-ee
- .rails:rules:run-search-tests
.rspec-ee-base-pg13:
extends:
- .rspec-base
- .use-pg13-ee
.db-job-base:
extends:
- .rails-job-base
- .rails:rules:ee-and-foss-migration
- .use-pg12
stage: test
needs: ["setup-test-env"]
# rspec job base specs
######################
############################
# rspec job parallel configs
.rspec-migration-parallel:
parallel: 12
.rspec-ee-migration-parallel:
parallel: 4
.rspec-unit-parallel:
parallel: 28
.rspec-ee-unit-parallel:
parallel: 18
.rspec-integration-parallel:
parallel: 12
.rspec-ee-integration-parallel:
parallel: 6
.rspec-system-parallel:
parallel: 28
.rspec-ee-system-parallel:
parallel: 10
# rspec job parallel configs
############################
include:
- local: .gitlab/ci/rails/shared.gitlab-ci.yml
###############################################################
# EE/FOSS: default refs (MRs, default branch, schedules) jobs #
@ -927,22 +772,41 @@ rspec fail-fast:
paths:
- tmp/capybara/
rspec foss-impact:
rspec-foss-impact:pipeline-generate:
extends:
- .rspec-base-pg12-as-if-foss
- .rails:rules:rspec-foss-impact
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-foss", "detect-tests"]
stage: prepare
needs: ["detect-tests"]
script:
- !reference [.base-script, script]
- rspec_matched_foss_tests "${RSPEC_MATCHING_TESTS_PATH}" "--tag ~quarantine"
- scripts/generate-rspec-foss-impact-pipeline "${RSPEC_MATCHING_TESTS_FOSS_PATH}" "${RSPEC_FOSS_IMPACT_PIPELINE_YML}"
artifacts:
expire_in: 7d
expire_in: 1 day
paths:
- tmp/capybara/
# Temporary allow failure because of the high rate of failure due to the job
# running a lot more tests since https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96152.
# This should be reverted once https://gitlab.com/gitlab-org/gitlab/-/merge_requests/96797 is merged.
allow_failure: true
- $RSPEC_FOSS_IMPACT_PIPELINE_YML
rspec-foss-impact:trigger:
extends:
- .rails:rules:rspec-foss-impact
stage: test
needs:
- job: "setup-test-env"
artifacts: false
- job: "retrieve-tests-metadata"
artifacts: false
- job: "compile-test-assets as-if-foss"
artifacts: false
- job: "rspec-foss-impact:pipeline-generate"
artifacts: true
variables:
PARENT_PIPELINE_ID: $CI_PIPELINE_ID
trigger:
strategy: depend
forward:
yaml_variables: true
pipeline_variables: true
include:
- artifact: $RSPEC_FOSS_IMPACT_PIPELINE_YML
job: rspec-foss-impact:pipeline-generate
fail-pipeline-early:
extends:

View File

@ -0,0 +1,50 @@
# RSpec FOSS impact pipeline loaded dynamically by script: scripts/generate-rspec-foss-impact-pipeline
include:
- local: .gitlab/ci/rails/shared.gitlab-ci.yml
default:
image: $DEFAULT_CI_IMAGE
tags:
- gitlab-org
# Default job timeout set to 90m https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10520
timeout: 90m
interruptible: true
stages:
- test
dont-interrupt-me:
extends: .rules:dont-interrupt
stage: .pre
interruptible: false
script:
- echo "This jobs makes sure this pipeline won't be interrupted! See https://docs.gitlab.com/ee/ci/yaml/#interruptible."
rspec foss-impact:
extends: .rspec-base-pg12-as-if-foss
needs:
- pipeline: $PARENT_PIPELINE_ID
job: detect-tests
- pipeline: $PARENT_PIPELINE_ID
job: setup-test-env
- pipeline: $PARENT_PIPELINE_ID
job: retrieve-tests-metadata
- pipeline: $PARENT_PIPELINE_ID
job: compile-test-assets as-if-foss
rules:
- when: always
variables:
RSPEC_TESTS_FILTER_FILE: "${RSPEC_MATCHING_TESTS_FOSS_PATH}"
RSPEC_TESTS_MAPPING_ENABLED: "true"
<% if Integer(parallel_value) > 1 %>
parallel: <%= parallel_value %>
<% end %>
script:
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine"
artifacts:
expire_in: 7d
paths:
- "${RSPEC_MATCHING_TESTS_FOSS_PATH}"
- tmp/capybara/

View File

@ -0,0 +1,172 @@
include:
- local: .gitlab/ci/global.gitlab-ci.yml
- local: .gitlab/ci/rules.gitlab-ci.yml
.rules:dont-interrupt:
rules:
- if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
allow_failure: true
- if: $CI_MERGE_REQUEST_IID
when: manual
allow_failure: true
#######################
# rspec job base specs
.rails-job-base:
extends:
- .default-retry
- .default-before_script
- .rails-cache
.base-script:
script:
- source ./scripts/rspec_helpers.sh
# Only install knapsack after bundle install! Otherwise oddly some native
# gems could not be found under some circumstance. No idea why, hours wasted.
- run_timed_command "gem install knapsack --no-document"
- echo -e "\e[0Ksection_start:`date +%s`:gitaly-test-spawn[collapsed=true]\r\e[0KStarting Gitaly"
- run_timed_command "scripts/gitaly-test-spawn" # Do not use 'bundle exec' here
- echo -e "\e[0Ksection_end:`date +%s`:gitaly-test-spawn\r\e[0K"
.minimal-rspec-tests:
variables:
RSPEC_TESTS_MAPPING_ENABLED: "true"
.single-db:
variables:
DECOMPOSED_DB: "false"
.single-db-rspec:
extends: .single-db
.praefect-with-db:
variables:
GITALY_PRAEFECT_WITH_DB: '1'
.rspec-base:
extends:
- .rails-job-base
- .base-artifacts
stage: test
variables:
RUBY_GC_MALLOC_LIMIT: 67108864
RUBY_GC_MALLOC_LIMIT_MAX: 134217728
RECORD_DEPRECATIONS: "true"
GEO_SECONDARY_PROXY: 0
RSPEC_TESTS_FILTER_FILE: "${RSPEC_MATCHING_TESTS_PATH}"
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets", "detect-tests"]
script:
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag ~level:migration"
.base-artifacts:
artifacts:
expire_in: 31d
when: always
paths:
- coverage/
- crystalball/
- deprecations/
- knapsack/
- rspec/
- tmp/capybara/
- log/*.log
reports:
junit: ${JUNIT_RESULT_FILE}
.rspec-base-migration:
extends:
- .base-artifacts
- .rails:rules:ee-and-foss-migration
variables:
RSPEC_TESTS_FILTER_FILE: "${RSPEC_MATCHING_TESTS_PATH}"
script:
- !reference [.base-script, script]
- rspec_paralellized_job "--tag ~quarantine --tag level:migration"
.rspec-base-pg11:
extends:
- .rspec-base
- .use-pg11
.rspec-base-pg12:
extends:
- .rspec-base
- .use-pg12
.rspec-base-pg12-as-if-foss:
extends:
- .rspec-base
- .as-if-foss
- .use-pg12
needs: ["setup-test-env", "retrieve-tests-metadata", "compile-test-assets as-if-foss", "detect-tests"]
.rspec-base-pg13:
extends:
- .rspec-base
- .use-pg13
.rspec-ee-base-pg11:
extends:
- .rspec-base
- .use-pg11-ee
.rspec-ee-base-pg12:
extends:
- .rspec-base
- .use-pg12-ee
.rspec-ee-base-pg12-es8:
extends:
- .rspec-base
- .use-pg12-es8-ee
- .rails:rules:run-search-tests
.rspec-ee-base-pg12-opensearch1:
extends:
- .rspec-base
- .use-pg12-opensearch1-ee
- .rails:rules:run-search-tests
.rspec-ee-base-pg13:
extends:
- .rspec-base
- .use-pg13-ee
.db-job-base:
extends:
- .rails-job-base
- .rails:rules:ee-and-foss-migration
- .use-pg12
stage: test
needs: ["setup-test-env"]
# rspec job base specs
######################
############################
# rspec job parallel configs
.rspec-migration-parallel:
parallel: 12
.rspec-ee-migration-parallel:
parallel: 4
.rspec-unit-parallel:
parallel: 28
.rspec-ee-unit-parallel:
parallel: 18
.rspec-integration-parallel:
parallel: 12
.rspec-ee-integration-parallel:
parallel: 6
.rspec-system-parallel:
parallel: 28
.rspec-ee-system-parallel:
parallel: 10
# rspec job parallel configs
############################

View File

@ -1692,7 +1692,7 @@
rules:
- if: '$LICENSE_MANAGEMENT_DISABLED || $GITLAB_FEATURES !~ /\blicense_scanning\b/'
when: never
- changes: *code-backstage-qa-patterns
- changes: *dependency-patterns
################
# Review rules #

View File

@ -134,14 +134,17 @@ detect-tests:
tooling/bin/find_changes ${RSPEC_CHANGED_FILES_PATH};
tooling/bin/find_tests ${RSPEC_CHANGED_FILES_PATH} ${RSPEC_MATCHING_TESTS_PATH};
tooling/bin/find_changes ${RSPEC_CHANGED_FILES_PATH} ${RSPEC_MATCHING_TESTS_PATH} ${FRONTEND_FIXTURES_MAPPING_PATH};
echo "Changed files: $(cat $RSPEC_CHANGED_FILES_PATH)";
echo "Related rspec tests: $(cat $RSPEC_MATCHING_TESTS_PATH)";
filter_rspec_matched_foss_tests ${RSPEC_MATCHING_TESTS_PATH} ${RSPEC_MATCHING_TESTS_FOSS_PATH};
echoinfo "Changed files: $(cat $RSPEC_CHANGED_FILES_PATH)";
echoinfo "Related RSpec tests: $(cat $RSPEC_MATCHING_TESTS_PATH)";
echoinfo "Related FOSS RSpec tests: $(cat $RSPEC_MATCHING_TESTS_FOSS_PATH)";
fi
artifacts:
expire_in: 7d
paths:
- ${RSPEC_CHANGED_FILES_PATH}
- ${RSPEC_MATCHING_TESTS_PATH}
- ${RSPEC_MATCHING_TESTS_FOSS_PATH}
- ${FRONTEND_FIXTURES_MAPPING_PATH}
detect-previous-failed-tests:

View File

@ -7,26 +7,6 @@ Layout/FirstArrayElementIndentation:
- 'spec/lib/gitlab/search/found_blob_spec.rb'
- 'spec/models/ci/runner_version_spec.rb'
- 'spec/models/repository_spec.rb'
- 'spec/models/user_preference_spec.rb'
- 'spec/models/user_spec.rb'
- 'spec/models/wiki_directory_spec.rb'
- 'spec/policies/concerns/crud_policy_helpers_spec.rb'
- 'spec/presenters/ci/build_runner_presenter_spec.rb'
- 'spec/requests/api/ci/jobs_spec.rb'
- 'spec/requests/api/ci/runner/jobs_request_post_spec.rb'
- 'spec/requests/api/ci/runners_spec.rb'
- 'spec/requests/api/deploy_tokens_spec.rb'
- 'spec/requests/api/graphql/ci/config_spec.rb'
- 'spec/requests/api/graphql/ci/jobs_spec.rb'
- 'spec/requests/api/graphql/mutations/uploads/delete_spec.rb'
- 'spec/requests/api/graphql/project/cluster_agents_spec.rb'
- 'spec/requests/api/graphql/project/issue/designs/designs_spec.rb'
- 'spec/requests/api/graphql/project/milestones_spec.rb'
- 'spec/requests/api/graphql/usage_trends_measurements_spec.rb'
- 'spec/requests/api/graphql/work_item_spec.rb'
- 'spec/requests/api/issues/post_projects_issues_spec.rb'
- 'spec/requests/api/issues/put_projects_issues_spec.rb'
- 'spec/requests/api/merge_requests_spec.rb'
- 'spec/requests/api/task_completion_status_spec.rb'
- 'spec/services/security/merge_reports_service_spec.rb'
- 'spec/simplecov_env.rb'

View File

@ -220,7 +220,7 @@ gem 'acts-as-taggable-on', '~> 9.0'
# Background jobs
gem 'sidekiq', '~> 6.4.0'
gem 'sidekiq-cron', '~> 1.4.0'
gem 'sidekiq-cron', '~> 1.8.0'
gem 'redis-namespace', '~> 1.9.0'
gem 'gitlab-sidekiq-fetcher', '0.8.0', require: 'sidekiq-reliable-fetch'

View File

@ -80,7 +80,7 @@
{"name":"colored2","version":"3.1.2","platform":"ruby","checksum":"b13c2bd7eeae2cf7356a62501d398e72fde78780bd26aec6a979578293c28b4a"},
{"name":"commonmarker","version":"0.23.6","platform":"ruby","checksum":"c8aeaaaff4ba497bf180f762db63a0069794fafb6eff221224c9c8199d337b38"},
{"name":"concurrent-ruby","version":"1.1.10","platform":"ruby","checksum":"244cb1ca0d91ec2c15ca2209507c39fb163336994428e16fbd3f465c87bd8e68"},
{"name":"connection_pool","version":"2.2.5","platform":"ruby","checksum":"13a8fc3921ce4df8e04fb65f1037251decb08d74757b41163688bd1c1feccd39"},
{"name":"connection_pool","version":"2.3.0","platform":"ruby","checksum":"677985be912f33c90f98f229aaa0c0ddb2ef8776f21929a36eeeb25251c944da"},
{"name":"contracts","version":"0.11.0","platform":"ruby","checksum":"df6e438efa89c31dd3095851c3f7a25dfdae36b35ff1d4547f1d92941b3c7286"},
{"name":"cork","version":"0.3.0","platform":"ruby","checksum":"a0a0ac50e262f8514d1abe0a14e95e71c98b24e3378690e5d044daf0013ad4bc"},
{"name":"cose","version":"1.0.0","platform":"ruby","checksum":"520ebaad97b56d2873de02ff4e2c973f5e77ce2f8edbda454af9ee3073643bc0"},
@ -532,7 +532,7 @@
{"name":"shellany","version":"0.0.1","platform":"ruby","checksum":"0e127a9132698766d7e752e82cdac8250b6adbd09e6c0a7fbbb6f61964fedee7"},
{"name":"shoulda-matchers","version":"5.1.0","platform":"ruby","checksum":"a01d20589989e9653ab4a28c67d9db2b82bcf0a2496cf01d5e1a95a4aaaf5b07"},
{"name":"sidekiq","version":"6.4.2","platform":"ruby","checksum":"0d3c05fecb5fbace5ff5efc63da707e02a9c4673fb8e33ceca10b5ec0e9f062c"},
{"name":"sidekiq-cron","version":"1.4.0","platform":"ruby","checksum":"21612ade25ea79b4eeb8eacd7fb559a85a1abf6bf1da1aca1aa0079cafd3376d"},
{"name":"sidekiq-cron","version":"1.8.0","platform":"ruby","checksum":"47da72ca73ce5b71896aaf7e7c4391386ec517dd003f184c50c0b727d82eb0ca"},
{"name":"sigdump","version":"0.2.4","platform":"ruby","checksum":"0bf2176e55c1a262788623fe5ea57caddd6ba2abebe5e349d9d5e7c3a3010ed7"},
{"name":"signet","version":"0.17.0","platform":"ruby","checksum":"1d2831930dc28da32e34bec68cf7ded97ee2867b208f97c500ee293829cb0004"},
{"name":"simple_po_parser","version":"1.1.6","platform":"ruby","checksum":"122687d44d3de516a0e69e2f383a4180f5015e8c5ed5a7f2258f2b376f64cbf3"},

View File

@ -288,7 +288,7 @@ GEM
colored2 (3.1.2)
commonmarker (0.23.6)
concurrent-ruby (1.1.10)
connection_pool (2.2.5)
connection_pool (2.3.0)
contracts (0.11.0)
cork (0.3.0)
colored2 (~> 3.1)
@ -1309,7 +1309,7 @@ GEM
connection_pool (>= 2.2.2)
rack (~> 2.0)
redis (>= 4.2.0)
sidekiq-cron (1.4.0)
sidekiq-cron (1.8.0)
fugit (~> 1)
sidekiq (>= 4.2.1)
sigdump (0.2.4)
@ -1779,7 +1779,7 @@ DEPENDENCIES
settingslogic (~> 2.0.9)
shoulda-matchers (~> 5.1.0)
sidekiq (~> 6.4.0)
sidekiq-cron (~> 1.4.0)
sidekiq-cron (~> 1.8.0)
sigdump (~> 0.2.4)
simple_po_parser (~> 1.1.6)
simplecov (~> 0.21)

View File

@ -9,10 +9,10 @@ export default {
LegacyCiVariableTable,
},
computed: {
...mapState(['isGroup']),
...mapState(['isGroup', 'isProject']),
},
mounted() {
if (!this.isGroup) {
if (this.isProject) {
this.fetchEnvironments();
}
},

View File

@ -81,6 +81,7 @@ const mountLegacyCiVariableListApp = (containerEl) => {
endpoint,
projectId,
isGroup,
isProject,
maskableRegex,
protectedByDefault,
awsLogoSvgPath,
@ -92,6 +93,8 @@ const mountLegacyCiVariableListApp = (containerEl) => {
maskedEnvironmentVariablesLink,
environmentScopeLink,
} = containerEl.dataset;
const parsedIsProject = parseBoolean(isProject);
const parsedIsGroup = parseBoolean(isGroup);
const isProtectedByDefault = parseBoolean(protectedByDefault);
@ -99,6 +102,7 @@ const mountLegacyCiVariableListApp = (containerEl) => {
endpoint,
projectId,
isGroup: parsedIsGroup,
isProject: parsedIsProject,
maskableRegex,
isProtectedByDefault,
awsLogoSvgPath,

View File

@ -2,7 +2,7 @@
import { GlFormGroup } from '@gitlab/ui';
import { __, s__ } from '~/locale';
import ConfirmDanger from '~/vue_shared/components/confirm_danger/confirm_danger.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select_deprecated.vue';
export const i18n = {
confirmationMessage: __(

View File

@ -0,0 +1,51 @@
<script>
import { GlAccordion, GlAccordionItem, GlAlert, GlForm, GlFormCheckbox } from '@gitlab/ui';
export default {
components: {
GlAccordion,
GlAccordionItem,
GlAlert,
GlForm,
GlFormCheckbox,
},
props: {
stages: {
required: true,
type: Array,
},
value: {
required: true,
type: Object,
},
isInitiallyExpanded: {
type: Boolean,
required: false,
default: false,
},
},
};
</script>
<template>
<gl-accordion :header-level="3">
<gl-accordion-item
:title="s__('ImportProjects|Advanced import settings')"
:visible="isInitiallyExpanded"
>
<gl-alert variant="warning" class="gl-mb-5" :dismissible="false">{{
s__('ImportProjects|The more information you select, the longer it will take to import')
}}</gl-alert>
<gl-form>
<gl-form-checkbox
v-for="{ name, label, details } in stages"
:key="name"
:checked="value[name]"
@change="$emit('input', { ...value, [name]: $event })"
>
{{ label }}
<template v-if="details" #help>{{ details }} </template>
</gl-form-checkbox>
</gl-form>
</gl-accordion-item>
</gl-accordion>
</template>

View File

@ -9,10 +9,12 @@ import {
import { mapActions, mapState, mapGetters } from 'vuex';
import { n__, __, sprintf } from '~/locale';
import ProviderRepoTableRow from './provider_repo_table_row.vue';
import AdvancedSettings from './advanced_settings.vue';
export default {
name: 'ImportProjectsTable',
components: {
AdvancedSettings,
ProviderRepoTableRow,
GlLoadingIcon,
GlButton,
@ -35,6 +37,24 @@ export default {
required: false,
default: false,
},
optionalStages: {
type: Array,
required: false,
default: () => [],
},
isAdvancedSettingsPanelInitiallyExpanded: {
type: Boolean,
required: false,
default: true,
},
},
data() {
return {
optionalStagesSelection: Object.fromEntries(
this.optionalStages.map(({ name }) => [name, false]),
),
};
},
computed: {
@ -127,7 +147,7 @@ export default {
modal-id="import-all-modal"
:title="s__('ImportProjects|Import repositories')"
:ok-title="__('Import')"
@ok="importAll"
@ok="importAll({ optionalStages: optionalStagesSelection })"
>
{{
n__(
@ -150,6 +170,13 @@ export default {
/>
</form>
</div>
<advanced-settings
v-if="optionalStages && optionalStages.length"
v-model="optionalStagesSelection"
:stages="optionalStages"
:is-initially-expanded="isAdvancedSettingsPanelInitiallyExpanded"
class="gl-mb-5"
/>
<div v-if="repositories.length" class="gl-w-full">
<table>
<thead class="gl-border-0 gl-border-solid gl-border-t-1 gl-border-gray-100">
@ -171,6 +198,7 @@ export default {
:repo="repo"
:available-namespaces="namespaces"
:user-namespace="defaultTargetNamespace"
:optional-stages="optionalStagesSelection"
/>
</template>
</tbody>

View File

@ -43,6 +43,10 @@ export default {
type: Array,
required: true,
},
optionalStages: {
type: Object,
required: true,
},
},
computed: {
@ -177,7 +181,7 @@ export default {
v-if="isImportNotStarted"
type="button"
data-qa-selector="import_button"
@click="fetchImport(repo.importSource.id)"
@click="fetchImport({ repoId: repo.importSource.id, optionalStages })"
>
{{ importButtonText }}
</gl-button>

View File

@ -42,6 +42,7 @@ export function initPropsFromElement(element) {
providerTitle: element.dataset.provider,
filterable: parseBoolean(element.dataset.filterable),
paginatable: parseBoolean(element.dataset.paginatable),
optionalStages: JSON.parse(element.dataset.optionalStages),
};
}

View File

@ -43,11 +43,14 @@ const restartJobsPolling = () => {
const setImportTarget = ({ commit }, { repoId, importTarget }) =>
commit(types.SET_IMPORT_TARGET, { repoId, importTarget });
const importAll = ({ state, dispatch }) => {
const importAll = ({ state, dispatch }, config = {}) => {
return Promise.all(
state.repositories
.filter(isProjectImportable)
.map((r) => dispatch('fetchImport', r.importSource.id)),
state.repositories.filter(isProjectImportable).map((r) =>
dispatch('fetchImport', {
repoId: r.importSource.id,
optionalStages: config?.optionalStages,
}),
),
);
};
@ -92,7 +95,10 @@ const fetchReposFactory = ({ reposPath = isRequired() }) => ({ state, commit })
});
};
const fetchImportFactory = (importPath = isRequired()) => ({ state, commit, getters }, repoId) => {
const fetchImportFactory = (importPath = isRequired()) => (
{ state, commit, getters },
{ repoId, optionalStages },
) => {
const { ciCdOnly } = state;
const importTarget = getters.getImportTarget(repoId);
@ -105,6 +111,7 @@ const fetchImportFactory = (importPath = isRequired()) => ({ state, commit, gett
ci_cd_only: ciCdOnly,
new_name: newName,
target_namespace: targetNamespace,
...(Object.keys(optionalStages).length ? { optional_stages: optionalStages } : {}),
})
.then(({ data }) => {
commit(types.RECEIVE_IMPORT_SUCCESS, {

View File

@ -1,9 +1,16 @@
<script>
import { GlLoadingIcon, GlAlert } from '@gitlab/ui';
import { s__ } from '~/locale';
import getPipelineSchedulesQuery from '../graphql/queries/get_pipeline_schedules.query.graphql';
import PipelineSchedulesTable from './table/pipeline_schedules_table.vue';
export default {
i18n: {
schedulesFetchError: s__('PipelineSchedules|There was a problem fetching pipeline schedules.'),
},
components: {
GlAlert,
GlLoadingIcon,
PipelineSchedulesTable,
},
inject: {
@ -22,20 +29,39 @@ export default {
update({ project }) {
return project?.pipelineSchedules?.nodes || [];
},
error() {
this.hasError = true;
},
},
},
data() {
return {
schedules: [],
hasError: false,
errorDismissed: false,
};
},
computed: {
isLoading() {
return this.$apollo.queries.schedules.loading;
},
showError() {
return this.hasError && !this.errorDismissed;
},
},
};
</script>
<template>
<div>
<gl-alert v-if="showError" class="gl-mb-2" variant="danger" @dismiss="errorDismissed = true">
{{ $options.i18n.schedulesFetchError }}
</gl-alert>
<gl-loading-icon v-if="isLoading" size="lg" />
<!-- Tabs will be addressed in #371989 -->
<pipeline-schedules-table :schedules="schedules" />
<pipeline-schedules-table v-else :schedules="schedules" />
</div>
</template>

View File

@ -2,7 +2,7 @@
import { GlFormGroup, GlAlert } from '@gitlab/ui';
import { debounce } from 'lodash';
import ConfirmDanger from '~/vue_shared/components/confirm_danger/confirm_danger.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select_deprecated.vue';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import { getTransferLocations } from '~/api/projects_api';
import { parseIntPagination, normalizeHeaders } from '~/lib/utils/common_utils';

View File

@ -119,11 +119,6 @@ export default {
upgradeStatusTokenConfig,
];
},
isBulkDeleteEnabled() {
// Feature flag: admin_runners_bulk_delete
// Rollout issue: https://gitlab.com/gitlab-org/gitlab/-/issues/353981
return this.glFeatures.adminRunnersBulkDelete;
},
isSearchFiltered() {
return isSearchFiltered(this.search);
},
@ -216,18 +211,14 @@ export default {
:filtered-svg-path="emptyStateFilteredSvgPath"
/>
<template v-else>
<runner-bulk-delete
v-if="isBulkDeleteEnabled"
:runners="runners.items"
@deleted="onDeleted"
/>
<runner-bulk-delete :runners="runners.items" @deleted="onDeleted" />
<runner-list
:runners="runners.items"
:loading="runnersLoading"
:checkable="isBulkDeleteEnabled"
:checkable="true"
@checked="onChecked"
>
<template v-if="isBulkDeleteEnabled" #head-checkbox>
<template #head-checkbox>
<runner-bulk-delete-checkbox :runners="runners.items" />
</template>
<template #runner-name="{ runner }">

View File

@ -20,10 +20,6 @@ import typeDefs from './typedefs.graphql';
* localMutations.setRunnerChecked( ... )
* ```
*
* Note: Currently only in use behind a feature flag:
* admin_runners_bulk_delete for the admin list, rollout issue:
* https://gitlab.com/gitlab-org/gitlab/-/issues/353981
*
* @returns {Object} An object to configure an Apollo client:
* contains cacheConfig, typeDefs, localMutations.
*/

View File

@ -2,7 +2,6 @@
import { GlBadge, GlLink, GlSafeHtmlDirective } from '@gitlab/ui';
import Actions from '../action_buttons.vue';
import { generateText } from '../extensions/utils';
import ContentHeader from './widget_content_header.vue';
import ContentRow from './widget_content_row.vue';
export default {
@ -12,7 +11,6 @@ export default {
GlLink,
Actions,
ContentRow,
ContentHeader,
},
directives: {
SafeHtml: GlSafeHtmlDirective,
@ -55,10 +53,12 @@ export default {
</script>
<template>
<content-row :level="level" :status-icon-name="statusIcon" :widget-name="widgetName">
<template #header>
<content-header v-if="data.header" :header="data.header" />
</template>
<content-row
:level="level"
:status-icon-name="statusIcon"
:widget-name="widgetName"
:header="data.header"
>
<template #body>
<div class="gl-display-flex gl-flex-direction-column">
<div>

View File

@ -1,28 +0,0 @@
<script>
import { EXTENSION_ICONS } from '../../constants';
import StatusIcon from './status_icon.vue';
export default {
components: {
StatusIcon,
},
props: {
statusIconName: {
type: String,
default: '',
required: false,
validator: (value) => value === '' || Object.keys(EXTENSION_ICONS).includes(value),
},
widgetName: {
type: String,
required: true,
},
},
};
</script>
<template>
<div class="gl-display-flex gl-align-items-baseline gl-w-full">
<status-icon v-if="statusIconName" :level="2" :name="widgetName" :icon-name="statusIconName" />
<slot name="default"></slot>
</div>
</template>

View File

@ -1,35 +0,0 @@
<script>
import { GlSafeHtmlDirective } from '@gitlab/ui';
import { generateText } from '../extensions/utils';
export default {
directives: {
SafeHtml: GlSafeHtmlDirective,
},
props: {
header: {
type: [String, Array],
default: '',
required: false,
},
},
computed: {
generatedHeader() {
return generateText(Array.isArray(this.header) ? this.header[0] : this.header);
},
generatedSubheader() {
return Array.isArray(this.header) && this.header[1] ? generateText(this.header[1]) : '';
},
},
};
</script>
<template>
<div class="gl-mb-2">
<strong v-safe-html="generatedHeader" class="gl-display-block"></strong
><span
v-if="generatedSubheader"
v-safe-html="generatedSubheader"
class="gl-display-block"
></span>
</div>
</template>

View File

@ -1,10 +1,15 @@
<script>
import { GlSafeHtmlDirective } from '@gitlab/ui';
import { EXTENSION_ICONS } from '../../constants';
import ContentBody from './widget_content_body.vue';
import { generateText } from '../extensions/utils';
import StatusIcon from './status_icon.vue';
export default {
components: {
ContentBody,
StatusIcon,
},
directives: {
SafeHtml: GlSafeHtmlDirective,
},
props: {
level: {
@ -22,6 +27,19 @@ export default {
type: String,
required: true,
},
header: {
type: [String, Array],
default: '',
required: false,
},
},
computed: {
generatedHeader() {
return generateText(Array.isArray(this.header) ? this.header[0] : this.header);
},
generatedSubheader() {
return Array.isArray(this.header) && this.header[1] ? generateText(this.header[1]) : '';
},
},
};
</script>
@ -30,9 +48,22 @@ export default {
class="gl-w-full mr-widget-content-row"
:class="{ 'gl-border-t gl-py-3 gl-pl-7': level === 2 }"
>
<slot name="header"></slot>
<content-body :status-icon-name="statusIconName" :widget-name="widgetName">
<div v-if="header" class="gl-mb-2">
<strong v-safe-html="generatedHeader" class="gl-display-block"></strong
><span
v-if="generatedSubheader"
v-safe-html="generatedSubheader"
class="gl-display-block"
></span>
</div>
<div class="gl-display-flex gl-align-items-baseline gl-w-full">
<status-icon
v-if="statusIconName"
:level="2"
:name="widgetName"
:icon-name="statusIconName"
/>
<slot name="body"></slot>
</content-body>
</div>
</div>
</template>

View File

@ -27,7 +27,7 @@ const filterByName = (data, searchTerm = '') => {
};
export default {
name: 'NamespaceSelect',
name: 'NamespaceSelectDeprecated',
components: {
GlDropdown,
GlDropdownDivider,

View File

@ -1,5 +0,0 @@
.date-time-picker {
.date-time-picker-menu {
width: 400px;
}
}

View File

@ -1,193 +0,0 @@
$design-pin-diameter: 28px;
$design-pin-diameter-sm: 24px;
$t-gray-a-16-design-pin: rgba($black, 0.16);
.layout-page.design-detail-layout {
max-height: 100vh;
}
.design-detail {
background-color: rgba($modal-backdrop-bg, $modal-backdrop-opacity);
.with-performance-bar & {
top: 35px;
}
.comment-indicator {
border-radius: 50%;
}
.comment-indicator,
.frame .design-note-pin {
&:active {
cursor: grabbing;
}
}
}
.design-scaler-wrapper {
bottom: 0;
left: 50%;
transform: translateX(-50%);
}
.design-checkbox {
position: absolute;
top: $gl-padding;
left: 30px;
}
.image-notes {
overflow-y: scroll;
padding: $gl-padding;
padding-top: 50px;
background-color: $white;
flex-shrink: 0;
min-width: 400px;
flex-basis: 28%;
.link-inherit-color {
&:hover,
&:active,
&:focus {
color: inherit;
text-decoration: none;
}
}
.toggle-comments {
line-height: 20px;
border-top: 1px solid $border-color;
&.expanded {
border-bottom: 1px solid $border-color;
}
.toggle-comments-button:focus {
text-decoration: none;
color: $blue-600;
}
}
.design-note-pin {
margin-left: $gl-padding;
}
.design-discussion {
margin: $gl-padding 0;
&::before {
content: '';
border-left: 1px solid $gray-100;
position: absolute;
left: 28px;
top: -17px;
height: 17px;
}
.design-note {
padding: $gl-padding;
list-style: none;
transition: background $gl-transition-duration-medium $general-hover-transition-curve;
border-top-left-radius: $border-radius-default; // same border radius used by .bordered-box
border-top-right-radius: $border-radius-default;
a {
color: inherit;
}
.note-text a {
color: $blue-600;
}
}
.reply-wrapper {
padding: $gl-padding;
}
}
.reply-wrapper {
border-top: 1px solid $border-color;
}
.new-discussion-disclaimer {
line-height: 20px;
}
}
@media (max-width: map-get($grid-breakpoints, lg)) {
.design-detail {
overflow-y: scroll;
}
.image-notes {
overflow-y: auto;
min-width: 100%;
flex-grow: 1;
flex-basis: auto;
}
}
.design-card-header {
background: transparent;
}
.design-note-pin {
display: flex;
height: $design-pin-diameter;
width: $design-pin-diameter;
box-sizing: content-box;
background-color: $purple-500;
color: $white;
font-weight: $gl-font-weight-bold;
border-radius: 50%;
z-index: 1;
padding: 0;
border: 0;
&.draft {
background-color: $orange-500;
}
&.resolved {
background-color: $gray-500;
}
&.on-image {
box-shadow: 0 2px 4px $t-gray-a-08, 0 0 1px $t-gray-a-24;
border: $white 2px solid;
will-change: transform, box-shadow, opacity;
// NOTE: verbose transition property required for Safari
transition: transform $general-hover-transition-duration linear, box-shadow $general-hover-transition-duration linear, opacity $general-hover-transition-duration linear;
transform-origin: 0 0;
transform: translate(-50%, -50%);
&:hover {
transform: scale(1.2) translate(-50%, -50%);
}
&:active {
box-shadow: 0 0 4px $t-gray-a-16-design-pin, 0 4px 12px $t-gray-a-16-design-pin;
}
&.inactive {
@include gl-opacity-5;
&:hover {
@include gl-opacity-10;
}
}
}
&.small {
position: absolute;
border: 1px solid $white;
height: $design-pin-diameter-sm;
width: $design-pin-diameter-sm;
}
&.user-avatar {
top: 25px;
right: 8px;
}
}

View File

@ -1,19 +0,0 @@
.design-list-item {
height: 280px;
text-decoration: none;
.icon-version-status {
position: absolute;
right: 10px;
top: 10px;
}
.card-body {
height: 230px;
}
}
// This is temporary class to be removed after feature flag removal: https://gitlab.com/gitlab-org/gitlab/-/issues/223197
.design-list-item-new {
height: 210px;
}

View File

@ -1209,3 +1209,16 @@ table.code {
@include gl-bg-gray-200;
}
}
// Expansion ("match") rows in the diff grid: draw a 1px separator above and
// below the row using the theme-provided expansion colour, but drop the
// border on the first/last row so it does not double up against the outer
// edge of the diff container.
.diff-grid-row.expansion.match {
  border-top: 1px solid var(--diff-expansion-background-color);
  border-bottom: 1px solid var(--diff-expansion-background-color);

  &:first-child {
    border-top: 0;
  }

  &:last-child {
    border-bottom: 0;
  }
}

View File

@ -175,6 +175,8 @@ $dark-il: #de935f;
}
&.diff-grid-row {
--diff-expansion-background-color: #{$gray-600};
@include dark-diff-expansion-line;
}

View File

@ -168,6 +168,8 @@ $monokai-gh: #75715e;
}
&.diff-grid-row {
--diff-expansion-background-color: #{$gray-600};
@include dark-diff-expansion-line;
}

View File

@ -76,6 +76,10 @@
@include match-line;
}
&.diff-grid-row {
--diff-expansion-background-color: #{$gray-100};
}
.line-coverage {
@include line-coverage-border-color($green-500, $orange-500);
}

View File

@ -171,6 +171,8 @@ $solarized-dark-il: #2aa198;
}
&.diff-grid-row {
--diff-expansion-background-color: #{lighten($solarized-dark-pre-bg, 10%)};
@include dark-diff-expansion-line;
}

View File

@ -156,6 +156,10 @@ $solarized-light-il: #2aa198;
@include match-line;
}
&.diff-grid-row {
--diff-expansion-background-color: #{$gray-100};
}
&.diff-grid-row.expansion .diff-td {
background-color: $solarized-light-matchline-bg;
}

View File

@ -154,6 +154,8 @@ pre.code,
}
&.diff-grid-row {
--diff-expansion-background-color: #{$gray-100};
@include diff-match-line;
}

View File

@ -1,5 +1,9 @@
@import 'mixins_and_variables_and_functions';
$design-pin-diameter: 28px;
$design-pin-diameter-sm: 24px;
$t-gray-a-16-design-pin: rgba($black, 0.16);
.description {
li {
position: relative;
@ -23,6 +27,216 @@
}
}
.design-card-header {
background: transparent;
}
.design-checkbox {
position: absolute;
top: $gl-padding;
left: 30px;
}
.layout-page.design-detail-layout {
max-height: 100vh;
}
.design-detail {
background-color: rgba($modal-backdrop-bg, $modal-backdrop-opacity);
.with-performance-bar & {
top: 35px;
}
.comment-indicator {
border-radius: 50%;
}
.comment-indicator,
.frame .design-note-pin {
&:active {
cursor: grabbing;
}
}
}
.design-list-item {
height: 280px;
text-decoration: none;
.icon-version-status {
position: absolute;
right: 10px;
top: 10px;
}
.card-body {
height: 230px;
}
}
// This is temporary class to be removed after feature flag removal: https://gitlab.com/gitlab-org/gitlab/-/issues/223197
.design-list-item-new {
height: 210px;
}
// Circular pin that anchors a design note/comment. Sized by
// $design-pin-diameter; colours use CSS custom properties with SCSS fallbacks.
.design-note-pin {
  display: flex;
  height: $design-pin-diameter;
  width: $design-pin-diameter;
  box-sizing: content-box;
  background-color: var(--purple-500, $purple-500);
  color: var(--white, $white);
  font-weight: $gl-font-weight-bold;
  border-radius: 50%;
  z-index: 1;
  padding: 0;
  border: 0;

  // Draft (not yet submitted) note pins are orange.
  &.draft {
    background-color: var(--orange-500, $orange-500);
  }

  // Resolved discussion pins are gray.
  &.resolved {
    background-color: var(--gray-500, $gray-500);
  }

  // Pin rendered directly on top of the design image: gets a drop shadow,
  // a white outline, and hover/active affordances.
  &.on-image {
    box-shadow: 0 2px 4px $t-gray-a-08, 0 0 1px $t-gray-a-24;
    border: var(--white, $white) 2px solid;
    will-change: transform, box-shadow, opacity;
    // NOTE: verbose transition property required for Safari
    transition: transform $general-hover-transition-duration linear, box-shadow $general-hover-transition-duration linear, opacity $general-hover-transition-duration linear;
    // Scale from the top-left corner so the translate(-50%, -50%) anchor
    // point stays fixed while the pin grows on hover.
    transform-origin: 0 0;
    transform: translate(-50%, -50%);

    &:hover {
      transform: scale(1.2) translate(-50%, -50%);
    }

    &:active {
      box-shadow: 0 0 4px $t-gray-a-16-design-pin, 0 4px 12px $t-gray-a-16-design-pin;
    }

    // Dimmed pin (gl-opacity-5), restored towards full opacity on hover.
    &.inactive {
      @include gl-opacity-5;

      &:hover {
        @include gl-opacity-10;
      }
    }
  }

  // Small pin variant ($design-pin-diameter-sm) with a thinner outline.
  &.small {
    position: absolute;
    border: 1px solid var(--white, $white);
    height: $design-pin-diameter-sm;
    width: $design-pin-diameter-sm;
  }

  // Offsets applied when the pin is shown over a user avatar.
  &.user-avatar {
    top: 25px;
    right: 8px;
  }
}
.design-scaler-wrapper {
bottom: 0;
left: 50%;
transform: translateX(-50%);
}
.image-notes {
overflow-y: scroll;
padding: $gl-padding;
padding-top: 50px;
background-color: var(--white, $white);
flex-shrink: 0;
min-width: 400px;
flex-basis: 28%;
.link-inherit-color {
&:hover,
&:active,
&:focus {
color: inherit;
text-decoration: none;
}
}
.toggle-comments {
line-height: 20px;
border-top: 1px solid var(--border-color, $border-color);
&.expanded {
border-bottom: 1px solid var(--border-color, $border-color);
}
.toggle-comments-button:focus {
text-decoration: none;
color: var(--blue-600, $blue-600);
}
}
.design-note-pin {
margin-left: $gl-padding;
}
.design-discussion {
margin: $gl-padding 0;
&::before {
content: '';
border-left: 1px solid var(--gray-100, $gray-100);
position: absolute;
left: 28px;
top: -17px;
height: 17px;
}
.design-note {
padding: $gl-padding;
list-style: none;
transition: background $gl-transition-duration-medium $general-hover-transition-curve;
border-top-left-radius: $border-radius-default; // same border radius used by .bordered-box
border-top-right-radius: $border-radius-default;
a {
color: inherit;
}
.note-text a {
color: var(--blue-600, $blue-600);
}
}
.reply-wrapper {
padding: $gl-padding;
}
}
.reply-wrapper {
border-top: 1px solid var(--border-color, $border-color);
}
.new-discussion-disclaimer {
line-height: 20px;
}
}
@media (max-width: map-get($grid-breakpoints, lg)) {
.design-detail {
overflow-y: scroll;
}
.image-notes {
overflow-y: auto;
min-width: 100%;
flex-grow: 1;
flex-basis: auto;
}
}
.is-ghost {
opacity: 0.3;
pointer-events: none;

View File

@ -1,5 +1,11 @@
@import 'mixins_and_variables_and_functions';
.date-time-picker {
.date-time-picker-menu {
width: 400px;
}
}
.prometheus-graphs {
.dropdown-buttons {
> div {

View File

@ -4,9 +4,6 @@ class Admin::RunnersController < Admin::ApplicationController
include RunnerSetupScripts
before_action :runner, except: [:index, :tag_list, :runner_setup_scripts]
before_action only: [:index] do
push_frontend_feature_flag(:admin_runners_bulk_delete)
end
before_action only: [:show] do
push_frontend_feature_flag(:enforce_runner_token_expires_at)

View File

@ -0,0 +1,18 @@
# frozen_string_literal: true

module Projects
  # Event published through Gitlab::EventStore when one or more attributes of
  # a project change. The payload is built from `project.previous_changes`
  # (see Projects::UpdateService#publish_project_attributed_changed_event in
  # the same changeset).
  class ProjectAttributesChangedEvent < ::Gitlab::EventStore::Event
    # JSON schema used by the event store to validate this event's payload.
    # All four keys are required:
    #   project_id        - integer ID of the changed project
    #   namespace_id      - integer ID of the project's namespace
    #   root_namespace_id - integer ID of the top-level (root) namespace
    #   attributes        - array of changed attribute names
    def schema
      {
        'type' => 'object',
        'properties' => {
          'project_id' => { 'type' => 'integer' },
          'namespace_id' => { 'type' => 'integer' },
          'root_namespace_id' => { 'type' => 'integer' },
          'attributes' => { 'type' => 'array' }
        },
        'required' => %w[project_id namespace_id root_namespace_id attributes]
      }
    end
  end
end

View File

@ -54,7 +54,8 @@ module Resolvers
{
last_edited_by: :last_edited_by,
assignees: :assignees,
parent: :work_item_parent
parent: :work_item_parent,
labels: :labels
}
end

View File

@ -23,6 +23,9 @@ module Types
ORPHAN_TYPES
end
# Whenever a new widget is added make sure to update the spec to avoid N + 1 queries in
# spec/requests/api/graphql/project/work_items_spec.rb and add the necessary preloads
# in app/graphql/resolvers/work_items_resolver.rb
def self.resolve_type(object, context)
case object
when ::WorkItems::Widgets::Description

View File

@ -122,7 +122,7 @@ module Projects
update_pending_builds if runners_settings_toggled?
publish_event
publish_events
end
def after_rename_service(project)
@ -212,7 +212,12 @@ module Projects
end
end
def publish_event
def publish_events
publish_project_archived_event
publish_project_attributed_changed_event
end
def publish_project_archived_event
return unless project.archived_previously_changed?
event = Projects::ProjectArchivedEvent.new(data: {
@ -223,6 +228,21 @@ module Projects
Gitlab::EventStore.publish(event)
end
def publish_project_attributed_changed_event
changes = @project.previous_changes
return if changes.blank?
event = Projects::ProjectAttributesChangedEvent.new(data: {
project_id: @project.id,
namespace_id: @project.namespace_id,
root_namespace_id: @project.root_namespace.id,
attributes: changes.keys
})
Gitlab::EventStore.publish(event)
end
end
end

View File

@ -10,7 +10,7 @@
%p.settings-message.text-center
- link_start = '<a href="%{url}">'.html_safe % { url: help_page_path('ci/variables/index', anchor: 'protected-cicd-variables') }
= s_('Environment variables on this GitLab instance are configured to be %{link_start}protected%{link_end} by default.').html_safe % { link_start: link_start, link_end: '</a>'.html_safe }
#js-instance-variables{ data: { endpoint: admin_ci_variables_path, group: 'true', maskable_regex: ci_variable_maskable_regex, protected_by_default: ci_variable_protected_by_default?.to_s} }
#js-instance-variables{ data: { endpoint: admin_ci_variables_path, maskable_regex: ci_variable_maskable_regex, protected_by_default: ci_variable_protected_by_default?.to_s} }
%section.settings.as-ci-cd.no-animate#js-ci-cd-settings{ class: ('expanded' if expanded_by_default?) }
.settings-header

View File

@ -5,6 +5,7 @@
- paginatable = local_assigns.fetch(:paginatable, false)
- default_namespace_path = (local_assigns[:default_namespace] || current_user.namespace).full_path
- provider_title = Gitlab::ImportSources.title(provider)
- optional_stages = local_assigns.fetch(:optional_stages, [])
- header_title _("New project"), new_project_path
- add_to_breadcrumbs s_('ProjectsNew|Import project'), new_project_path(anchor: 'import_project')
@ -18,4 +19,5 @@
default_target_namespace: default_namespace_path,
import_path: url_for([:import, provider, { format: :json }]),
filterable: filterable.to_s,
paginatable: paginatable.to_s }.merge(extra_data) }
paginatable: paginatable.to_s,
optional_stages: optional_stages.to_json }.merge(extra_data) }

View File

@ -1,8 +0,0 @@
---
name: admin_runners_bulk_delete
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81894
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/353981
milestone: '14.9'
type: development
group: group::runner
default_enabled: false

View File

@ -5,4 +5,4 @@ rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/373148
milestone: '15.4'
type: development
group: group::pipeline authoring
default_enabled: false
default_enabled: true

View File

@ -114,4 +114,3 @@ end
Sidekiq::Scheduled::Poller.prepend Gitlab::Patch::SidekiqPoller
Sidekiq::Cron::Poller.prepend Gitlab::Patch::SidekiqPoller
Sidekiq::Cron::Poller.prepend Gitlab::Patch::SidekiqCronPoller

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

View File

@ -22,12 +22,12 @@ We use the following terms to describe components and properties of the Pods arc
### Pod
A Pod is a set of infrastructure components that contains multiple workspaces that belong to different organizations. The components include both datastores (PostgreSQL, Redis etc.) and stateless services (web etc.). The infrastructure components provided within a Pod are shared among workspaces but not shared with other Pods. This isolation of infrastructure components means that Pods are independent from each other.
A Pod is a set of infrastructure components that contains multiple top-level namespaces that belong to different organizations. The components include both datastores (PostgreSQL, Redis etc.) and stateless services (web etc.). The infrastructure components provided within a Pod are shared among organizations and their top-level namespaces but not shared with other Pods. This isolation of infrastructure components means that Pods are independent from each other.
#### Pod properties
- Each pod is independent from the others
- Infrastructure components are shared by workspaces within a Pod
- Infrastructure components are shared by organizations and their top-level namespaces within a Pod
- More Pods can be provisioned to provide horizontal scalability
- A failing Pod does not lead to failure of other Pods
- Noisy neighbor effects are limited to within a Pod
@ -36,23 +36,39 @@ A Pod is a set of infrastructure components that contains multiple workspaces th
Discouraged synonyms: GitLab instance, cluster, shard
### Workspace
### Cluster
A [workspace](../../../user/workspace/index.md) is the name for the top-level namespace that is used by organizations to manage everything GitLab. It will provide similar administrative capabilities to a self-managed instance.
A cluster is a collection of Pods.
See more in the [workspace group overview](https://about.gitlab.com/direction/manage/workspace/#overview).
### Organizations
#### Workspace properties
GitLab references [Organizations in the initial set up](../../../topics/set_up_organization.md) and users can add a (free text) organization to their profile. There is no Organization entity established in the GitLab codebase.
- Workspaces are isolated from each other by default
- A workspace is located on a single Pod
- Workspaces share the resources provided by a Pod
As part of delivering Pods, we propose the introduction of an `organization` entity. Organizations would represent billable entities or customers.
Organizations are a known concept, present for example in [AWS](https://docs.aws.amazon.com/whitepapers/latest/organizing-your-aws-environment/core-concepts.html) and [GCP](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy#organizations).
Organizations work under the following assumptions:
1. Users care about what happens within their organizations.
1. Features need to work within an organization.
1. Only few features need to work across organizations.
1. Users understand that the majority of pages they view are only scoped to a single organization at a time.
#### Organization properties
- Top-level namespaces belong to organizations
- Users can be members of different organizations
- Organizations are isolated from each other by default meaning that cross-namespace features will only work for namespaces that exist within a single organization
- User namespaces must not belong to an organization
Discouraged synonyms: Billable entities, customers
### Top-Level namespace
A top-level namespace is the logical object container in the code that represents all groups, subgroups and projects that belong to an organization.
A top-level namespace is the root of nested collection namespaces and projects. The namespace and its related entities form a tree-like hierarchy: Namespaces are the nodes of the tree, projects are the leaves. An organization usually contains a single top-level namespace, called a workspace.
A top-level namespace is the root of nested collection namespaces and projects. The namespace and its related entities form a tree-like hierarchy: Namespaces are the nodes of the tree, projects are the leaves.
Example:
@ -61,21 +77,28 @@ Example:
- `gitlab-org` is a `top-level namespace`; the root for all groups and projects of an organization
- `gitlab` is a `project`; a project of the organization.
Top-level namespaces may [be replaced by workspaces](https://gitlab.com/gitlab-org/gitlab/-/issues/368237#high-level-goals). This proposal only uses the term top-level namespaces as the workspace definition is ongoing.
Discouraged synonyms: Root-level namespace
#### Top-level namespace properties
Same as workspaces.
- Top-level namespaces belonging to an organization are located on the same Pod
- Top-level namespaces can interact with other top-level namespaces that belong to the same organization
### Users
Users are available globally and not restricted to a single Pod. Users can create multiple workspaces and they may be members of several workspaces and contribute to them. Because users' activity is not limited to an individual Pod, their activity needs to be aggregated across Pods to reflect all their contributions (for example TODOs). This means, the Pods architecture may need to provide a central dashboard.
Users are available globally and not restricted to a single Pod. Users can be members of many different organizations with varying permissions. Inside organizations, users can create multiple top-level namespaces. User activity is not limited to a single organization but their contributions (for example TODOs) are only aggregated within an organization. This avoids the need for aggregating across pods.
#### User properties
- Users are shared globally across all Pods
- Users can create multiple workspaces
- Users can be a member of multiple workspaces
- Users can create multiple top-level namespaces
- Users can be a member of multiple top-level namespaces
- Users can be a member of multiple organizations
- Users can administrate organizations
- User activity is aggregated within an organization
- Every user has one personal namespace
## Goals
@ -87,7 +110,7 @@ Pods provide a horizontally scalable solution because additional Pods can be cre
### Increased availability
A major challenge for shared-infrastructure architectures is a lack of isolation between workspaces. This can lead to noisy neighbor effects. A organization's behavior inside a workspace can impact all other workspaces. This is highly undesirable. Pods provide isolation at the pod level. A group of organizations is fully isolated from other organizations located on a different Pod. This minimizes noisy neighbor effects while still benefiting from the cost-efficiency of shared infrastructure.
A major challenge for shared-infrastructure architectures is a lack of isolation between top-level namespaces. This can lead to noisy neighbor effects. An organization's behavior inside a top-level namespace can impact all other organizations. This is highly undesirable. Pods provide isolation at the pod level. A group of organizations is fully isolated from other organizations located on a different Pod. This minimizes noisy neighbor effects while still benefiting from the cost-efficiency of shared infrastructure.
Additionally, Pods provide a way to implement disaster recovery capabilities. Entire Pods may be replicated to read-only standbys with automatic failover capabilities.
@ -104,13 +127,11 @@ GitLab.com is only hosted within the United States of America. Organizations loc
Pods would provide a solution for organizations in the small to medium business (up to 100 users) and the mid-market segment (up to 2000 users).
(See [segmentation definitions](https://about.gitlab.com/handbook/sales/field-operations/gtm-resources/#segmentation).)
Larger organizations may benefit substantially from [GitLab Dedicated](../../../subscriptions/gitlab_dedicated/index.md).
At this moment, GitLab.com has many more "social-network"-like capabilities that may not fit well into a more isolated workspace model.
Removing them, however, possesses a ton of challenges:
At this moment, GitLab.com has "social-network"-like capabilities that may not fit well into a more isolated organization model. Removing those features, however, poses some challenges:
1. How will existing `gitlab-org` contributors contribute to workspaces?
1. How do we move existing workspaces into the new model (effectively breaking their social features)?
1. How does this affect on-premise installations that by design use many top-level namespaces (workspaces) if we forbid in-between workspace interactions? (on-premise customers or open source projects like [https://salsa.debian.org](https://salsa.debian.org/))
1. How will existing `gitlab-org` contributors contribute to the namespace?
1. How do we move existing top-level namespaces into the new model (effectively breaking their social features)?
We should evaluate if the SMB and mid market segment is interested in these features, or if not having them is acceptable in most cases.
@ -118,75 +139,102 @@ We should evaluate if the SMB and mid market segment is interested in these feat
A number of technical issues need to be resolved to implement Pods (in no particular order). This section will be expanded.
1. How are users of an organization routed to the correct Pod containing their workspace?
1. How are users of an organization routed to the correct Pod?
1. How do users authenticate?
1. How are Pods rebalanced?
1. How are Pods provisioned?
1. How can Pods implement disaster recovery capabilities?
## Iteration 1
## Iteration plan
A Pods architecture should offer the same user experience as a self-managed instance and GitLab dedicated for existing and new users of GitLab.com. In order to get there, we have to ship smaller iterations that already provide value. In the first iteration, we will ship two different user experiences:
We can't ship the entire Pods architecture in one go - it is too large. Instead, we are adopting an iteration plan that provides value along the way.
1. For existing users of GitLab.com
1. For new users of GitLab.com (opt-in)
1. Introduce organizations
1. Migrate existing top-level namespaces to organizations
1. Create new organizations on `pod_0`
1. Migrate existing organizations from `pod_0` to `pod_n`
1. Add additional Pod capabilities (DR, Regions)
### Why should users opt-in? Who can opt-in?
### Iteration 0: Introduce organizations
In order to get adoption, we must offer distinct advantages to Pods even in the first iteration. We could consider supporting specific Premium+ features on Pods already, that we won't be able to support without Pods. Candidates for this are
In the first iteration, we introduce the concept of an organization
as a way to group top-level namespaces together. Support for organizations **does not require any Pods work** but having them will make all subsequent iterations of Pods simpler. This is mainly because we can group top-level namespaces for a single organization onto a Pod. Within an organization all interactions work as normal but we eliminate any cross-organizational interactions except in well defined cases (e.g. forking).
- Disaster Recovery with lower SLOs
- Regional support
- Fewer noisy neighbors (free)
This means that we don't have a large number of cross-pod interactions.
We should likely position this as a push for GitLab workspaces and not talk about the underlying Pods architecture.
Introducing organizations allows GitLab to move towards a multi-tenant system that is similar to Discord's with a single user account but many different "servers" - our organizations - that allow users to switch context. This model harmonizes the UX across self-managed and our SaaS Platforms and is a good fit for Pods.
What are other distinct advantages of workspaces that could be shipped?
Organizations solve the following problems:
- Easier administrator controls
- Better permission management
- Instance-like UX
1. We can group top-level namespaces by organization. This eliminates the difference between self-managed and GitLab.com. It is very similar to the initial concept of "instance groups". For example these two top-level namespaces would belong to the organization `GitLab`:
1. `https://gitlab.com/gitlab-org/`
1. `https://gitlab.com/gitlab-com/`
1. We can isolate organizations from each other. Top-level namespaces of the same organization can interact within organizations but are not allowed to interact with other namespaces in other organizations. This is useful for customers because it means an organization provides clear boundaries - similar to a self-managed instance. This means we don't have to aggregate user dashboards across everything and can locally scope them to organizations.
1. We don't need to define hierarchies inside an organization. It is a container that could be filled with whatever hierarchy / entity set makes sense (workspaces, top-level namespaces etc.)
1. Self-managed instances would set a default organization. One organization per instance.
1. Organizations can control user-profiles in a central way. This could be achieved by having an organization specific user-profile. Such a profile makes it possible for the organization administrators to control the user role in a company, enforce user e-mails, or show a graphical indicator of a user being part of the organization. An example would be a "GitLab Employee stamp" on comments.
![Move to Organizations](2022-10-05-Pods-Organizations-Iteration0.png)
### Why would customers opt-in to Organizations?
By introducing organizations and Pods we can improve the reliability, performance and availability of our SaaS Platforms.
The first iteration of organizations would also have some benefits by providing more isolation. A simple example would be that `@` mentions could be scoped to an organization.
Future iterations would create additional value but are beyond the scope of this blueprint.
Organizations will likely be required in the future as well.
### Initial user experience
1. We create a default `GitLab.com public` organization and assign all public top-level namespaces to it. This allows existing users to access all the data on GitLab.com, exactly as it does now.
1. Any user wanting to opt-in to the benefits of organizations will need to set a single default organization. Any attempts for these users to load a global page like `/dashboard` will end up redirecting to `/-/organizations/<DEFAULT_ORGANIZATION>/dashboard`.
1. New users that opted in to organizations will only ever see data that is related to a single organization. Upon login, data is shown for the default organization. It will be clear to the user how they can switch to a different organization. Users can still navigate to the `GitLab.com` organization but they won't see TODOs from their new organizations in any such views. Instead they'd need to navigate directly to `/organizations/my-company/-/dashboard`.
### Migrating to Organizations
Existing customers could also opt-in to migrate their existing top-level paid namespaces to become part of an organization. In most cases this will be a 1-to-1 mapping. But in some cases it may allow a customer to move multiple top-level namespaces into one organization (for example GitLab).
Migrating to Organizations would be optional. We could even recruit a few beta testers early on to see if this works for them. GitLab itself could dogfood organizations and we'd surface a lot of issues restricting interactions with other namespaces.
## Iteration 1 - Introduce Pod US 0
### GitLab.com as Pod US0
GitLab.com will be treated as the first pod `Pod US0`. It will be unique and much larger compared to newly created pods. All existing users will remain on `Pod US0` in the first iteration.
GitLab.com will be treated as the first pod `Pod US 0`. It will be unique and much larger compared to newly created pods. All existing top-level namespaces and organizations will remain on `Pod US 0` in the first iteration.
### Users are globally available
Users are globally available and the same for all pods. This means that user data needs to be handled separately, for example via decomposition, see [!95941](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/95941).
### Users have a default workspace
### Pod groundwork
1. Existing users on GitLab.com (`Pod US0`) will start by defaulting to no workspace. This allows them to access all the data on GitLab.com, exactly as it does now.
In this iteration, we'll lay all the groundwork to support a second Pod for new organizations. This will be transparent to customers.
1. Any user wanting to opt-in to the benefits of pods will need to set a single default workspace. Workspaces will be located on `Pod US1`. Any attempts for these users to load a global page like `/dashboard` will end up redirecting to `<DEFAULT_WORKSPACE>/-/dashboard`.
## Iteration 2 - Introduce Pod US 1
### User experience is always isolated to a single workspace at a time
### Add new organizations to Pod US 1
1. Existing users whose workspaces are located on `Pod US0` will be able to continue to see aggregated data for all workspaces except for new workspaces that opt-in to be on a new pod. The user experience is the same.
1. New users that opted in to Pods will only ever see data that is related to a single workspace. Upon login, data is shown for the default workspace. It will be clear to the user how they can switch to a different workspace. Users can still navigate to the `GitLab.com` workspace and but they won't see TODOs from their new workspace in any such views. Instead they'd need to navigate directly to /dashboard/-/my-company.
After we are ready to support a second Pod, newly created organizations are located by default on `Pod US 1`. The user experience for organizations is already well established.
### Features are restricted to a workspace
### Migrate existing organizations from Pod US 0 to Pod US 1
Even though some workspaces are on the same Pod, we will not allow features to cross workspace boundaries. As explored in [#330785](https://gitlab.com/gitlab-org/gitlab/-/issues/330785) this will break a number of features that are allowed to work across top-level namespaces today. We assume that
We know that we'll have to move organizations from `Pod US 0` to other pods to reduce its size and ultimately retire the existing GitLab.com architecture.
1. Users care about what happens within a workspace
1. Most features need to only work within a workspace
1. Using features that cut across workspaces are the exception
By introducing organizations early, we should be able to draw strong "boundaries" across organizations and support migrating existing organizations to a new Pod.
Over time, we may have to add back certain features via APIs but if the workspace is similar to a self-managed instance, there are few use cases where features must work across instances. Sometimes isolation may even be preferable.
This is likely going to be GitLab itself - if we can dogfood this, we are likely going to be successful with other organizations as well.
For existing users, `Pod US0` will work as it does
## Iteration 3 - Introduce Regions
For users that opted-in, all features are restricted to interacting with a single workspace at a time and there are no cross-workspace features available to them. They can still interact with projects located in the `GitLab.com` workspace.
We can now leverage the Pods architecture to introduce Regions.
## Iteration 2
## Iteration 4 - Introduce cross-organizational interactions as needed
Based on user research, we may want to change certain features to work across namespaces to allow organizations to interact with each other in specific circumstances.
Based on user research, we may want to change certain features to work across organizations. Examples include:
Additional features:
- Specific features allow for cross-workspace interactions, for example forking, search.
- Specific features allow for cross-organization interactions, for example forking, search.
### Links

View File

@ -442,8 +442,7 @@ first time.
### Requesting a review
When you are ready to have your merge request reviewed,
you should [request an initial review](../user/project/merge_requests/getting_started.md#reviewer) by selecting a reviewer from your group or team.
However, you can also assign it to any reviewer. The list of reviewers can be found on [Engineering projects](https://about.gitlab.com/handbook/engineering/projects/) page.
you should [request an initial review](../user/project/merge_requests/getting_started.md#reviewer) by selecting a reviewer based on the [approval guidelines](#approval-guidelines).
When a merge request has multiple areas for review, it is recommended you specify which area a reviewer should be reviewing, and at which stage (first or second).
This will help team members who qualify as a reviewer for multiple areas to know which area they're being requested to review.

View File

@ -17,11 +17,15 @@ with additions and improvements.
## Merge request reviews
As a merge request (MR) author, you must include _Before_ and _After_
As a merge request (MR) author, you must:
- Include _Before_ and _After_
screenshots (or videos) of your changes in the description, as explained in our
[MR workflow](merge_request_workflow.md). These screenshots/videos are very helpful
for all reviewers and can speed up the review process, especially if the changes
are small.
- Attach the ~UX label to any merge request that impacts the user experience. This will enable Product Designers to [review](https://about.gitlab.com/handbook/engineering/ux/product-designer/mr-reviews/#stage-group-mrs/) any user facing changes.
- Assign the Product Designer suggested by Reviewer Roulette as the reviewer of your merge request. The reviewer does not have to be the domain expert unless this is a community contribution.
## Checklist

View File

@ -59,12 +59,12 @@ To give Vault the application ID and secret generated by GitLab and allow
Vault to authenticate through GitLab, run the following command in the terminal:
```shell
$ vault write auth/oidc/config \
oidc_discovery_url="https://gitlab.com" \
oidc_client_id="<your_application_id>" \
oidc_client_secret="<your_secret>" \
default_role="demo" \
bound_issuer="localhost"
vault write auth/oidc/config \
oidc_discovery_url="https://gitlab.com" \
oidc_client_id="<your_application_id>" \
oidc_client_secret="<your_secret>" \
default_role="demo" \
bound_issuer="localhost"
```
Replace `<your_application_id>` and `<your_secret>` with the application ID

View File

@ -353,10 +353,8 @@ You can also filter runners by status, type, and tag. To filter:
#### Bulk delete runners
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/370241) in GitLab 15.4 [with a flag](../../administration/feature_flags.md) named `admin_runners_bulk_delete`. Disabled by default.
FLAG:
On self-managed GitLab, by default this feature is not available. To make it available, ask an administrator to [enable the feature flag](../../administration/feature_flags.md) named `admin_runners_bulk_delete`. On GitLab.com, this feature is not available but can be enabled by GitLab.com administrators.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/370241) in GitLab 15.4.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/353981) in GitLab 15.5.
You can delete multiple runners at the same time.

View File

@ -13,8 +13,8 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/221047) in GitLab 14.6. [Feature flag `group_iterations`](https://gitlab.com/gitlab-org/gitlab/-/issues/221047) removed.
Iterations are a way to track issues over a period of time. This allows teams
to track velocity and volatility metrics. Iterations can be used with [milestones](../../project/milestones/index.md)
for tracking over different time periods.
to track velocity and volatility metrics. For tracking over different time periods, you can use iterations with [milestones](../../project/milestones/index.md).
You can create and manage various [iteration cadences](#iteration-cadences).
For example, you can use:
@ -24,15 +24,51 @@ For example, you can use:
In GitLab, iterations are similar to milestones, with a few differences:
- Iterations are only available to groups.
- A group can only have one active iteration at a time.
- Iterations are grouped into iteration cadences.
- Iterations require both a start and an end date.
- Iteration date ranges cannot overlap.
- Iteration date ranges cannot overlap within an iteration cadence.
## View the iterations list
## Iteration cadences
To view the iterations list:
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5077) in GitLab 14.1 [with a flag](../../../administration/feature_flags.md), named `iteration_cadences`. Disabled by default.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/354977) in GitLab 15.0: All scheduled iterations must start on the same day of the week as the cadence start day. Start date of cadence cannot be edited after the first iteration starts.
> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/354878) in GitLab 15.0.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/367493) in GitLab 15.4: A new automation start date can be selected for cadence. Upcoming iterations will be scheduled to start on the same day of the week as the changed start date. Iteration cadences can be manually managed by turning off the automatic scheduling feature.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/354878) in GitLab 15.5. Feature flag `iteration_cadences` removed.
1. On the top bar, select **Main menu > Projects** and find your project.
Iteration cadences are containers for iterations and can be used to automate iteration scheduling.
You can use them to automate creating iterations every 1, 2, 3, or 4 weeks. You can also
configure iteration cadences to automatically roll over incomplete issues to the next iteration.
### Create an iteration cadence
> [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
- You must have at least the Reporter role for a group.
To create an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select **New iteration cadence**.
1. Enter the title and description of the iteration cadence.
1. To manually manage the iteration cadence, clear the **Enable automatic scheduling** checkbox and skip the next step.
1. Complete the required fields to use automatic scheduling.
- Select the automation start date of the iteration cadence. Iterations will be scheduled to
begin on the same day of the week as the day of the week of the start date.
- From the **Duration** dropdown list, select how many weeks each iteration should last.
- From the **Upcoming iterations** dropdown list, select how many upcoming iterations should be
created and maintained by GitLab.
- Optional. To move incomplete issues to the next iteration, select **Roll over issues**.
1. Select **Create cadence**. The cadence list page opens.
If you want to manually manage the created cadence, read [Manual Iteration Management](#manual-iteration-management).
### View the iterations list
1. On the top bar, select **Main menu > Groups** and find your group.
1. Select **Issues > Iterations**.
To view all the iterations in a cadence, ordered by descending date, select that iteration cadence.
@ -46,61 +82,132 @@ by going to its URL. To do so, add: `/-/cadences` to your project or group URL.
For example `https://gitlab.com/gitlab-org/sample-data-templates/sample-gitlab-project/-/cadences`.
This is tracked in [issue 339009](https://gitlab.com/gitlab-org/gitlab/-/issues/339009).
## Create an iteration
### Edit an iteration cadence
> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069) in GitLab 14.10.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
WARNING:
Manual iteration management is in its end-of-life process. Creating an iteration is [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069)
in GitLab 14.10, and is planned for removal in GitLab 16.0.
- You must have at least the Developer role for a group.
To edit an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select **Edit iteration cadence**.
When you use automatic scheduling and edit the **Automation start date** field,
you must set a new start date that doesn't overlap with the existing
current or past iterations.
Editing **Upcoming iterations** is a non-destructive action.
If ten upcoming iterations already exist, changing the number under **Upcoming iterations** to `2`
doesn't delete the eight existing upcoming iterations.
#### Turn on and off automatic scheduling for an iteration cadence
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Next to the cadence for which you want to turn on or off automatic scheduling, select the
three-dot menu (**{ellipsis_v}**) **> Edit cadence**.
1. Select or clear the **Enable automatic scheduling** checkbox.
1. If you're turning on automatic scheduling,
complete the required fields **Duration**, **Upcoming iterations**, and **Automation start date**.
For **Automation start date**, you can select any date that doesn't overlap with the existing open iterations.
If you have upcoming iterations, the automatic scheduling adjusts them appropriately to fit
your chosen duration.
1. Select **Save changes**.
#### Example of turning on automatic scheduling for a manual iteration cadence
Suppose it's Friday, April 15, and you have three iterations in a manual iteration cadence:
- Monday, April 4 - Friday, April 8 (closed)
- Tuesday, April 12 - Friday, April 15 (ongoing)
- Tuesday, May 3 - Friday, May 6 (upcoming)
The earliest possible **Automation start date** you can choose
is Saturday, April 16 in this scenario, because April 15 overlaps with
the ongoing iteration.
If you select Monday, April 18 as the automation start date to
automate scheduling iterations every week up to two upcoming iterations,
after the conversion you have the following iterations:
- Monday, April 4 - Friday, April 8 (closed)
- Tuesday, April 12 - Friday, April 15 (ongoing)
- Monday, April 18 - Sunday, April 24 (upcoming)
- Monday, April 25 - Sunday, May 1 (upcoming)
Your existing upcoming iteration "Tuesday, May 3 - Friday, May 6"
is changed to "Monday, April 18 - Sunday, April 24".
An additional upcoming iteration "April 25 - Sunday, May 1" is scheduled
to satisfy the requirement that there are at least two upcoming iterations scheduled.
### Delete an iteration cadence
> [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
- You must have at least the Reporter role for a group.
Deleting an iteration cadence also deletes all iterations within that cadence.
To delete an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select the three-dot menu (**{ellipsis_v}**) > **Delete cadence** for the cadence you want to delete.
1. Select **Delete cadence**.
## Manual iteration management
If you don't want your iterations to be scheduled by iteration cadences,
you can also create and manage them manually.
### Create an iteration
> [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
- You must have at least the Reporter role for a group.
- [Automatic scheduling must be disabled](#turn-on-and-off-automatic-scheduling-for-an-iteration-cadence) for the iteration cadence.
To create an iteration:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. On the left sidebar, select **Issues > Iterations** and select an iteration cadence.
1. Select **New iteration**.
1. Enter the title, a description (optional), a start date, and a due date.
1. Select **Create iteration**. The iteration details page opens.
## Edit an iteration
### Edit an iteration
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/218277) in GitLab 13.2.
> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069) in GitLab 14.10.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
WARNING:
Editing all attributes, with the exception of `description`, is [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069)
in GitLab 14.10, and is planned for removal in GitLab 16.0.
In the future only editing an iteration's `description` will be allowed.
Prerequisites:
- You must have at least the Reporter role for a group.
- [Automatic scheduling must be disabled](#turn-on-and-off-automatic-scheduling-for-an-iteration-cadence) for the iteration cadence.
To edit an iteration, select the three-dot menu (**{ellipsis_v}**) > **Edit**.
## Delete an iteration
### Delete an iteration
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/292268) in GitLab 14.3.
> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069) in GitLab 14.10.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
WARNING:
Manual iteration management is in its end-of-life process. Deleting an iteration is [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/356069)
in GitLab 14.10, and is planned for removal in GitLab 16.0.
Prerequisites:
- You must have at least the Reporter role for a group.
- [Automatic scheduling must be disabled](#turn-on-and-off-automatic-scheduling-for-an-iteration-cadence) for the iteration cadence.
To delete an iteration, select the three-dot menu (**{ellipsis_v}**) > **Delete**.
## Add an issue to an iteration
### Add an issue to an iteration
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/216158) in GitLab 13.2.
@ -176,116 +283,3 @@ To group issues by label:
1. Select the labels you want to group by in the labels dropdown.
You can also search for labels by typing in the search input.
1. Select any area outside the label dropdown list. The page is now grouped by the selected labels.
## Iteration cadences
> - [Introduced](https://gitlab.com/groups/gitlab-org/-/epics/5077) in GitLab 14.1 [with a flag](../../../administration/feature_flags.md), named `iteration_cadences`. Disabled by default.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/354977) in GitLab 15.0: All scheduled iterations must start on the same day of the week as the cadence start day. Start date of cadence cannot be edited after the first iteration starts.
> - [Enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/354878) in GitLab 15.0.
> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/367493) in GitLab 15.4: A new automation start date can be selected for cadence. Upcoming iterations will be scheduled to start on the same day of the week as the changed start date. Iteration cadences can be manually managed by turning off the automatic scheduling feature.
Iteration cadences automate iteration scheduling. You can use them to
automate creating iterations every 1, 2, 3, or 4 weeks. You can also
configure iteration cadences to automatically roll over incomplete issues to the next iteration.
### Create an iteration cadence
> [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
- You must have at least the Reporter role for a group.
To create an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select **New iteration cadence**.
1. Enter the title and description of the iteration cadence.
1. To manually manage the iteration cadence, clear the **Enable automatic scheduling** checkbox and skip the next step.
1. Complete the required fields to use automatic scheduling.
- Select the automation start date of the iteration cadence. Iterations will be scheduled to
begin on the same day of the week as the day of the week of the start date.
- From the **Duration** dropdown list, select how many weeks each iteration should last.
- From the **Upcoming iterations** dropdown list, select how many upcoming iterations should be
created and maintained by GitLab.
- Optional. To move incomplete issues to the next iteration, select **Roll over issues**.
1. Select **Create cadence**. The cadence list page opens.
### Edit an iteration cadence
Prerequisites:
- You must have at least the Developer role for a group.
To edit an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select **Edit iteration cadence**.
When you are using automatic scheduling and edit the **Automation start date** field,
you must set a new start date that doesn't overlap with the existing
current or past iterations.
Editing **Upcoming iterations** is a non-destructive action.
If ten upcoming iterations already exist, changing the number under **Upcoming iterations** to `2`
doesn't delete the eight existing upcoming iterations.
#### Turn on automatic scheduling for manual iterations cadence
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select the three-dot menu (**{ellipsis_v}**) > **Edit cadence** for the cadence for which you want to enable automatic scheduling.
1. Check the **Enable automatic scheduling** checkbox.
1. Complete the required fields **Duration**, **Upcoming iterations**, and **Automation start date**.
For **Automation start date**, you can select any date that doesn't overlap with the existing open iterations.
If you have upcoming iterations, the automatic scheduling adjusts them appropriately to fit
your chosen duration.
1. Select **Save changes**.
When you want to manage your iterations cadence manually again, edit your cadence and uncheck the **Enable automatic scheduling** checkbox.
#### Example of turning on automatic scheduling for manual iterations cadence
Suppose it's Friday, April 15, and you have three iterations in a manual iterations cadence:
- Monday, April 4 - Friday, April 8 (closed)
- Tuesday, April 12 - Friday, April 15 (ongoing)
- Tuesday, May 3 - Friday, May 6 (upcoming)
The earliest possible **Automation start date** you can choose
is Saturday, April 16 in this scenario, because April 15 overlaps with
the ongoing iteration.
If you select Monday, April 18 as the automation start date to
automate scheduling iterations every week up to two upcoming iterations,
after the conversion you have the following iterations:
- Monday, April 4 - Friday, April 8 (closed)
- Tuesday, April 12 - Friday, April 15 (ongoing)
- Monday, April 18 - Sunday, April 24 (upcoming)
- Monday, April 25 - Sunday, May 1 (upcoming)
Your existing upcoming iteration "Tuesday, May 3 - Friday, May 6"
is changed to "Monday, April 18 - Sunday, April 24".
An additional upcoming iteration "April 25 - Sunday, May 1" is scheduled
to satisfy the requirement that there are at least two upcoming iterations scheduled.
### Delete an iteration cadence
> [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/343889) the minimum user role from Developer to Reporter in GitLab 15.0.
Prerequisites:
- You must have at least the Reporter role for a group.
Deleting an iteration cadence also deletes all iterations within that cadence.
To delete an iteration cadence:
1. On the top bar, select **Main menu > Groups** and find your group.
1. On the left sidebar, select **Issues > Iterations**.
1. Select the three-dot menu (**{ellipsis_v}**) > **Delete cadence** for the cadence you want to delete.
1. Select **Delete cadence**.

View File

@ -26,7 +26,7 @@ To view project insights:
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **Analytics > Insights**.
1. To view a report, select the **Select page** dropdown list.
1. To view a report, select the **Select report** dropdown list.
## Configure project insights

View File

@ -286,7 +286,7 @@ module Gitlab
def find_license
request = Gitaly::FindLicenseRequest.new(repository: @gitaly_repo)
GitalyClient.call(@storage, :repository_service, :find_license, request, timeout: GitalyClient.fast_timeout)
GitalyClient.call(@storage, :repository_service, :find_license, request, timeout: GitalyClient.medium_timeout)
end
def calculate_checksum

View File

@ -1,25 +0,0 @@
# frozen_string_literal: true
# Patch to address https://github.com/ondrejbartas/sidekiq-cron/issues/361
# This restores the poll interval to v1.2.0 behavior
# https://github.com/ondrejbartas/sidekiq-cron/blob/v1.2.0/lib/sidekiq/cron/poller.rb#L36-L38
# This patch only applies to v1.4.0
require 'sidekiq/cron/version'
if Gem::Version.new(Sidekiq::Cron::VERSION) != Gem::Version.new('1.4.0')
raise 'New version of sidekiq-cron detected, please remove or update this patch'
end
module Gitlab
  module Patch
    # Prepended/included into Sidekiq::Cron::Poller to restore the v1.2.0
    # poll-interval behavior (see the issue link and explanation at the top
    # of this file). Applies only to sidekiq-cron v1.4.0, enforced by the
    # version guard above.
    module SidekiqCronPoller
      # Only enqueue cron jobs when the configured poll interval is positive;
      # a zero or negative `cron_poll_interval` effectively disables polling.
      def enqueue
        super if poll_interval_average > 0
      end
      # Read the interval from Sidekiq's own configuration rather than the
      # sidekiq-cron default introduced after v1.2.0.
      def poll_interval_average
        Sidekiq.options[:cron_poll_interval]
      end
    end
  end
end

View File

@ -20418,6 +20418,9 @@ msgstr ""
msgid "ImportProjects|%{provider} rate limit exceeded. Try again later"
msgstr ""
msgid "ImportProjects|Advanced import settings"
msgstr ""
msgid "ImportProjects|Blocked import URL: %{message}"
msgstr ""
@ -20442,6 +20445,9 @@ msgstr ""
msgid "ImportProjects|Select the repositories you want to import"
msgstr ""
msgid "ImportProjects|The more information you select, the longer it will take to import"
msgstr ""
msgid "ImportProjects|The remote data could not be imported."
msgstr ""
@ -29484,6 +29490,9 @@ msgstr ""
msgid "PipelineSchedules|Target"
msgstr ""
msgid "PipelineSchedules|There was a problem fetching pipeline schedules."
msgstr ""
msgid "PipelineSchedules|Variables"
msgstr ""

View File

@ -9,7 +9,7 @@ module QA
def self.included(base)
super
base.view "app/assets/javascripts/vue_shared/components/namespace_select/namespace_select.vue" do
base.view "app/assets/javascripts/vue_shared/components/namespace_select/namespace_select_deprecated.vue" do
element :namespaces_list
element :namespaces_list_groups
element :namespaces_list_item

View File

@ -0,0 +1,66 @@
#!/usr/bin/env bash
set -euo pipefail

# Script to generate `rspec foss-impact` test child pipeline with dynamically parallelized jobs.
#
# Arguments:
#   $1 - path to the file listing matching FOSS spec files (whitespace-separated)
#   $2 - path where the generated child pipeline YAML should be written

source scripts/utils.sh

rspec_matching_tests_foss_path="${1}"
pipeline_yml="${2}"

# Count whitespace-separated spec file paths in the matching-tests file.
test_file_count=$(wc -w "${rspec_matching_tests_foss_path}" | awk '{ print $1 }')
echoinfo "test_file_count: ${test_file_count}"

if [[ "${test_file_count}" -eq 0 ]]; then
  skip_pipeline=".gitlab/ci/_skip.yml"

  echo "Using ${skip_pipeline} due to no impacted FOSS rspec tests to run"
  cp "${skip_pipeline}" "${pipeline_yml}"

  exit
fi

# As of 2022-09-01:
# $ find spec -type f | wc -l
# 12825
# and
# $ find ee/spec -type f | wc -l
# 5610
# which gives a total of 18435 test files (`number_of_tests_in_total_in_the_test_suite`).
#
# Total time to run all tests (based on https://gitlab-org.gitlab.io/rspec_profiling_stats/) is 170183 seconds (`duration_of_the_test_suite_in_seconds`).
#
# This gives an approximate 170183 / 18435 = 9.2 seconds per test file (`average_test_file_duration_in_seconds`).
#
# If we want each test job to finish in 10 minutes, given we have 3 minutes of setup (`setup_duration_in_seconds`), then we need to give 7 minutes of testing to each test node (`optimal_test_runtime_duration_in_seconds`).
# (7 * 60) / 9.2 = 45.6
#
# So if we'd want to run the full test suites in 10 minutes (`optimal_test_job_duration_in_seconds`), we'd need to run at max 45 test file per nodes (`optimal_test_file_count_per_node`).
number_of_tests_in_total_in_the_test_suite=18435
duration_of_the_test_suite_in_seconds=170183
optimal_test_job_duration_in_seconds=600 # 10 minutes
setup_duration_in_seconds=180 # 3 minutes

optimal_test_runtime_duration_in_seconds=$(( optimal_test_job_duration_in_seconds - setup_duration_in_seconds ))
echoinfo "optimal_test_runtime_duration_in_seconds: ${optimal_test_runtime_duration_in_seconds}"

average_test_file_duration_in_seconds=$(( duration_of_the_test_suite_in_seconds / number_of_tests_in_total_in_the_test_suite ))
echoinfo "average_test_file_duration_in_seconds: ${average_test_file_duration_in_seconds}"

optimal_test_file_count_per_node=$(( optimal_test_runtime_duration_in_seconds / average_test_file_duration_in_seconds ))
echoinfo "optimal_test_file_count_per_node: ${optimal_test_file_count_per_node}"

# Use ceiling division so that a small number of matching test files still
# yields at least one node: plain floor division would produce `node_count=0`
# for 1..(optimal_test_file_count_per_node - 1) files, which is an invalid
# value for the GitLab CI `parallel` keyword.
node_count=$(( (test_file_count + optimal_test_file_count_per_node - 1) / optimal_test_file_count_per_node ))
echoinfo "node_count: ${node_count}"

echoinfo "Optimal node count for 'rspec foss-impact' jobs is ${node_count}."

MAX_NODES_COUNT=50 # Maximum parallelization allowed by GitLab
if [[ "${node_count}" -gt "${MAX_NODES_COUNT}" ]]; then
  echoinfo "We don't want to parallelize 'rspec foss-impact' to more than ${MAX_NODES_COUNT} jobs for now! Decreasing the parallelization to ${MAX_NODES_COUNT}."
  node_count=${MAX_NODES_COUNT}
fi

ruby -rerb -e "puts ERB.new(File.read('.gitlab/ci/rails/rspec-foss-impact.gitlab-ci.yml.erb')).result_with_hash(parallel_value: ${node_count})" > "${pipeline_yml}"

echosuccess "Generated ${pipeline_yml} pipeline with following content:"
cat "${pipeline_yml}"

View File

@ -247,7 +247,12 @@ function rspec_paralellized_job() {
cp "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" "${KNAPSACK_REPORT_PATH}"
export KNAPSACK_TEST_FILE_PATTERN=$(ruby -r./tooling/quality/test_level.rb -e "puts Quality::TestLevel.new(${spec_folder_prefixes}).pattern(:${test_level})")
export KNAPSACK_TEST_FILE_PATTERN="spec/{,**/}*_spec.rb"
if [[ "${test_level}" != "foss-impact" ]]; then
export KNAPSACK_TEST_FILE_PATTERN=$(ruby -r./tooling/quality/test_level.rb -e "puts Quality::TestLevel.new(${spec_folder_prefixes}).pattern(:${test_level})")
fi
export FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}all_${report_name}_report.json"
export NEW_FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}new_${report_name}_report.json"
export SKIPPED_FLAKY_TESTS_REPORT_PATH="${rspec_flaky_folder_path}skipped_flaky_tests_${report_name}_report.txt"
@ -268,8 +273,8 @@ function rspec_paralellized_job() {
debug_rspec_variables
if [[ -n $RSPEC_TESTS_MAPPING_ENABLED ]]; then
tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" --filter "${RSPEC_MATCHING_TESTS_PATH}" || rspec_run_status=$?
if [[ -n "${RSPEC_TESTS_MAPPING_ENABLED}" ]]; then
tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" --filter "${RSPEC_TESTS_FILTER_FILE}" || rspec_run_status=$?
else
tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" || rspec_run_status=$?
fi
@ -357,41 +362,12 @@ function rspec_fail_fast() {
fi
}
function rspec_matched_foss_tests() {
local test_file_count_threshold=40
local matching_tests_file=${1}
local foss_matching_tests_file="${matching_tests_file}-foss"
function filter_rspec_matched_foss_tests() {
local matching_tests_file="${1}"
local foss_matching_tests_file="${2}"
# Keep only files that exist (i.e. exclude EE specific files)
cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| File.exist?(f) && f.include?("spec/") }.join(" ")' > "${foss_matching_tests_file}"
echo "Matching tests file:"
cat ${matching_tests_file}
echo -e "\n\n"
echo "FOSS matching tests file:"
cat ${foss_matching_tests_file}
echo -e "\n\n"
local rspec_opts=${2}
local test_files="$(cat ${foss_matching_tests_file})"
local test_file_count=$(wc -w "${foss_matching_tests_file}" | awk {'print $1'})
if [[ "${test_file_count}" -gt "${test_file_count_threshold}" ]]; then
echo "This job is intentionally failed because there are more than ${test_file_count_threshold} FOSS test files matched,"
echo "which would take too long to run in this job."
echo "To reduce the likelihood of breaking FOSS pipelines,"
echo "please add ~\"pipeline:run-as-if-foss\" label to the merge request and trigger a new pipeline."
echo "This would run all as-if-foss jobs in this merge request"
echo "and remove this failing job from the pipeline."
exit 1
fi
if [[ -n $test_files ]]; then
rspec_simple_job "${rspec_opts} ${test_files}"
else
echo "No impacted FOSS rspec tests to run"
fi
cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| f.start_with?("spec/") && File.exist?(f) }.join(" ")' > "${foss_matching_tests_file}"
}
function generate_frontend_fixtures_mapping() {

View File

@ -9,11 +9,11 @@ Vue.use(Vuex);
describe('Ci variable table', () => {
let wrapper;
let store;
let isGroup;
let isProject;
const createComponent = (groupState) => {
const createComponent = (projectState) => {
store = createStore();
store.state.isGroup = groupState;
store.state.isProject = projectState;
jest.spyOn(store, 'dispatch').mockImplementation();
wrapper = shallowMount(LegacyCiVariableSettings, {
store,
@ -25,14 +25,14 @@ describe('Ci variable table', () => {
});
it('dispatches fetchEnvironments when mounted', () => {
isGroup = false;
createComponent(isGroup);
isProject = true;
createComponent(isProject);
expect(store.dispatch).toHaveBeenCalledWith('fetchEnvironments');
});
it('does not dispatch fetchenvironments when in group context', () => {
isGroup = true;
createComponent(isGroup);
isProject = false;
createComponent(isProject);
expect(store.dispatch).not.toHaveBeenCalled();
});
});

View File

@ -2,7 +2,7 @@ import { GlAlert, GlSprintf } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import Component from '~/groups/components/transfer_group_form.vue';
import ConfirmDanger from '~/vue_shared/components/confirm_danger/confirm_danger.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select_deprecated.vue';
describe('Transfer group form', () => {
let wrapper;

View File

@ -0,0 +1,60 @@
import { mount } from '@vue/test-utils';
import { GlFormCheckbox } from '@gitlab/ui';
import AdvancedSettingsPanel from '~/import_entities/import_projects/components/advanced_settings.vue';

describe('Import Advanced Settings', () => {
  let wrapper;

  const OPTIONAL_STAGES = [
    { name: 'stage1', label: 'Stage 1' },
    { name: 'stage2', label: 'Stage 2', details: 'Extra details' },
  ];

  // All optional-stage checkboxes rendered by the panel.
  const findAllCheckboxes = () => wrapper.findAllComponents(GlFormCheckbox);

  beforeEach(() => {
    wrapper = mount(AdvancedSettingsPanel, {
      propsData: {
        stages: OPTIONAL_STAGES,
        value: {
          stage1: false,
          stage2: false,
        },
      },
    });
  });

  afterEach(() => {
    wrapper.destroy();
  });

  it('renders GLFormCheckbox for each optional stage', () => {
    expect(findAllCheckboxes()).toHaveLength(OPTIONAL_STAGES.length);
  });

  it('renders label for each optional stage', () => {
    findAllCheckboxes().wrappers.forEach((checkbox, index) => {
      expect(checkbox.text()).toContain(OPTIONAL_STAGES[index].label);
    });
  });

  it('renders details for stage with details', () => {
    expect(findAllCheckboxes().at(1).text()).toContain(OPTIONAL_STAGES[1].details);
  });

  it('emits new stages selection state when checkbox is changed', () => {
    findAllCheckboxes().at(0).vm.$emit('change', true);

    expect(wrapper.emitted('input')[0]).toStrictEqual([
      {
        stage1: true,
        stage2: false,
      },
    ]);
  });
});

View File

@ -5,6 +5,7 @@ import Vuex from 'vuex';
import { STATUSES } from '~/import_entities/constants';
import ImportProjectsTable from '~/import_entities/import_projects/components/import_projects_table.vue';
import ProviderRepoTableRow from '~/import_entities/import_projects/components/provider_repo_table_row.vue';
import AdvancedSettingsPanel from '~/import_entities/import_projects/components/advanced_settings.vue';
import * as getters from '~/import_entities/import_projects/store/getters';
import state from '~/import_entities/import_projects/store/state';
@ -45,6 +46,7 @@ describe('ImportProjectsTable', () => {
slots,
filterable,
paginatable,
optionalStages,
} = {}) {
Vue.use(Vuex);
@ -71,6 +73,7 @@ describe('ImportProjectsTable', () => {
providerTitle,
filterable,
paginatable,
optionalStages,
},
slots,
stubs: {
@ -271,4 +274,23 @@ describe('ImportProjectsTable', () => {
expect(wrapper.text().includes(INCOMPATIBLE_TEXT)).toBe(shouldRenderSlot);
},
);
it('should not render advanced settings panel when no optional steps are passed', () => {
createComponent({ state: { providerRepos: [providerRepo] } });
expect(wrapper.findComponent(AdvancedSettingsPanel).exists()).toBe(false);
});
it('should render advanced settings panel when no optional steps are passed', () => {
const OPTIONAL_STAGES = [{ name: 'step1', label: 'Step 1' }];
createComponent({ state: { providerRepos: [providerRepo] }, optionalStages: OPTIONAL_STAGES });
expect(wrapper.findComponent(AdvancedSettingsPanel).exists()).toBe(true);
expect(wrapper.findComponent(AdvancedSettingsPanel).props('stages')).toStrictEqual(
OPTIONAL_STAGES,
);
expect(wrapper.findComponent(AdvancedSettingsPanel).props('value')).toStrictEqual({
step1: false,
});
});
});

View File

@ -44,7 +44,7 @@ describe('ProviderRepoTableRow', () => {
wrapper = shallowMount(ProviderRepoTableRow, {
store,
propsData: { availableNamespaces, userNamespace, ...props },
propsData: { availableNamespaces, userNamespace, optionalStages: {}, ...props },
});
}
@ -92,10 +92,24 @@ describe('ProviderRepoTableRow', () => {
await nextTick();
const { calls } = fetchImport.mock;
expect(fetchImport).toHaveBeenCalledWith(expect.anything(), {
repoId: repo.importSource.id,
optionalStages: {},
});
});
expect(calls).toHaveLength(1);
expect(calls[0][1]).toBe(repo.importSource.id);
it('includes optionalStages to import', async () => {
const OPTIONAL_STAGES = { stage1: true, stage2: false };
await wrapper.setProps({ optionalStages: OPTIONAL_STAGES });
findImportButton().vm.$emit('click');
await nextTick();
expect(fetchImport).toHaveBeenCalledWith(expect.anything(), {
repoId: repo.importSource.id,
optionalStages: OPTIONAL_STAGES,
});
});
});

View File

@ -198,7 +198,7 @@ describe('import_projects store actions', () => {
return testAction(
fetchImport,
importRepoId,
{ repoId: importRepoId, optionalStages: {} },
localState,
[
{
@ -222,7 +222,7 @@ describe('import_projects store actions', () => {
await testAction(
fetchImport,
importRepoId,
{ repoId: importRepoId, optionalStages: {} },
localState,
[
{
@ -245,7 +245,7 @@ describe('import_projects store actions', () => {
await testAction(
fetchImport,
importRepoId,
{ repoId: importRepoId, optionalStages: {} },
localState,
[
{
@ -366,14 +366,22 @@ describe('import_projects store actions', () => {
describe('importAll', () => {
it('dispatches multiple fetchImport actions', async () => {
const OPTIONAL_STAGES = { stage1: true, stage2: false };
await testAction(
importAll,
null,
{ optionalStages: OPTIONAL_STAGES },
localState,
[],
[
{ type: 'fetchImport', payload: importRepoId },
{ type: 'fetchImport', payload: otherImportRepoId },
{
type: 'fetchImport',
payload: { repoId: importRepoId, optionalStages: OPTIONAL_STAGES },
},
{
type: 'fetchImport',
payload: { repoId: otherImportRepoId, optionalStages: OPTIONAL_STAGES },
},
],
);
});

View File

@ -1,3 +1,4 @@
import { GlAlert, GlLoadingIcon } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import Vue from 'vue';
import VueApollo from 'vue-apollo';
@ -14,6 +15,7 @@ describe('Pipeline schedules app', () => {
let wrapper;
const successHandler = jest.fn().mockResolvedValue(mockGetPipelineSchedulesGraphQLResponse);
const failedHandler = jest.fn().mockRejectedValue(new Error('GraphQL error'));
const createMockApolloProvider = (handler) => {
const requestHandlers = [[getPipelineSchedulesQuery, handler]];
@ -31,24 +33,47 @@ describe('Pipeline schedules app', () => {
};
const findTable = () => wrapper.findComponent(PipelineSchedulesTable);
beforeEach(() => {
createComponent();
});
const findAlert = () => wrapper.findComponent(GlAlert);
const findLoadingIcon = () => wrapper.findComponent(GlLoadingIcon);
afterEach(() => {
wrapper.destroy();
});
it('displays table', () => {
it('displays table', async () => {
createComponent();
await waitForPromises();
expect(findTable().exists()).toBe(true);
expect(findAlert().exists()).toBe(false);
});
it('fetches query and passes an array of pipeline schedules', async () => {
createComponent();
expect(successHandler).toHaveBeenCalled();
await waitForPromises();
expect(findTable().props('schedules')).toEqual(mockPipelineScheduleNodes);
});
it('handles loading state', async () => {
createComponent();
expect(findLoadingIcon().exists()).toBe(true);
await waitForPromises();
expect(findLoadingIcon().exists()).toBe(false);
});
it('shows error alert', async () => {
createComponent(failedHandler);
await waitForPromises();
expect(findAlert().exists()).toBe(true);
});
});

View File

@ -8,7 +8,7 @@ import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import createMockApollo from 'helpers/mock_apollo_helper';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import TransferProjectForm from '~/projects/settings/components/transfer_project_form.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select.vue';
import NamespaceSelect from '~/vue_shared/components/namespace_select/namespace_select_deprecated.vue';
import ConfirmDanger from '~/vue_shared/components/confirm_danger/confirm_danger.vue';
import currentUserNamespaceQuery from '~/projects/settings/graphql/queries/current_user_namespace.query.graphql';
import { getTransferLocations } from '~/api/projects_api';

View File

@ -367,15 +367,10 @@ describe('AdminRunnersApp', () => {
expect(findRunnerPagination().attributes('disabled')).toBe('true');
});
describe('when bulk delete is enabled', () => {
describe('Bulk delete', () => {
describe('Before runners are deleted', () => {
beforeEach(async () => {
await createComponent({
mountFn: mountExtended,
provide: {
glFeatures: { adminRunnersBulkDelete: true },
},
});
await createComponent({ mountFn: mountExtended });
});
it('runner bulk delete is available', () => {
@ -414,12 +409,7 @@ describe('AdminRunnersApp', () => {
describe('When runners are deleted', () => {
beforeEach(async () => {
await createComponent({
mountFn: mountExtended,
provide: {
glFeatures: { adminRunnersBulkDelete: true },
},
});
await createComponent({ mountFn: mountExtended });
});
it('count data is refetched', async () => {

View File

@ -1,8 +1,7 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`~/vue_merge_request_widget/components/widget/dynamic_content.vue renders given data 1`] = `
"<content-row-stub level=\\"2\\" statusiconname=\\"success\\" widgetname=\\"MyWidget\\">
<div class=\\"gl-mb-2\\"><strong class=\\"gl-display-block\\">This is a header</strong><span class=\\"gl-display-block\\">This is a subheader</span></div>
"<content-row-stub level=\\"2\\" statusiconname=\\"success\\" widgetname=\\"MyWidget\\" header=\\"This is a header,This is a subheader\\">
<div class=\\"gl-display-flex gl-flex-direction-column\\">
<div>
<p class=\\"gl-mb-0\\">Main text for the row</p>
@ -16,10 +15,7 @@ exports[`~/vue_merge_request_widget/components/widget/dynamic_content.vue render
</div>
<ul class=\\"gl-m-0 gl-p-0 gl-list-style-none\\">
<li>
<content-row-stub level=\\"3\\" statusiconname=\\"\\" widgetname=\\"MyWidget\\" data-qa-selector=\\"child_content\\">
<div class=\\"gl-mb-2\\"><strong class=\\"gl-display-block\\">Child row header</strong>
<!---->
</div>
<content-row-stub level=\\"3\\" statusiconname=\\"\\" widgetname=\\"MyWidget\\" header=\\"Child row header\\" data-qa-selector=\\"child_content\\">
<div class=\\"gl-display-flex gl-flex-direction-column\\">
<div>
<p class=\\"gl-mb-0\\">This is recursive. It will be listed in level 3.</p>

View File

@ -1,8 +1,6 @@
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import { EXTENSION_ICONS } from '~/vue_merge_request_widget/constants';
import DynamicContent from '~/vue_merge_request_widget/components/widget/dynamic_content.vue';
import ContentHeader from '~/vue_merge_request_widget/components/widget/widget_content_header.vue';
import ContentBody from '~/vue_merge_request_widget/components/widget/widget_content_body.vue';
describe('~/vue_merge_request_widget/components/widget/dynamic_content.vue', () => {
let wrapper;
@ -14,8 +12,6 @@ describe('~/vue_merge_request_widget/components/widget/dynamic_content.vue', ()
...propsData,
},
stubs: {
ContentHeader,
ContentBody,
DynamicContent,
},
});

View File

@ -1,39 +0,0 @@
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import WidgetContentBody from '~/vue_merge_request_widget/components/widget/widget_content_body.vue';
import StatusIcon from '~/vue_merge_request_widget/components/widget/status_icon.vue';
describe('~/vue_merge_request_widget/components/widget/widget_content_body.vue', () => {
let wrapper;
const findStatusIcon = () => wrapper.findComponent(StatusIcon);
const createComponent = ({ propsData, slots } = {}) => {
wrapper = shallowMountExtended(WidgetContentBody, {
propsData: {
widgetName: 'MyWidget',
...propsData,
},
slots,
});
};
it('does not render the status icon when it is not provided', () => {
createComponent();
expect(findStatusIcon().exists()).toBe(false);
});
it('renders the status icon when provided', () => {
createComponent({ propsData: { statusIconName: 'failed' } });
expect(findStatusIcon().exists()).toBe(true);
});
it('renders the default slot', () => {
createComponent({
slots: {
default: 'Hello world',
},
});
expect(wrapper.findByText('Hello world').exists()).toBe(true);
});
});

View File

@ -1,31 +0,0 @@
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import WidgetContentHeader from '~/vue_merge_request_widget/components/widget/widget_content_header.vue';
describe('~/vue_merge_request_widget/components/widget/widget_content_header.vue', () => {
let wrapper;
const createComponent = ({ propsData } = {}) => {
wrapper = shallowMountExtended(WidgetContentHeader, {
propsData: {
widgetName: 'MyWidget',
...propsData,
},
});
};
it('renders an array of header and subheader', () => {
createComponent({ propsData: { header: ['this is a header', 'this is a subheader'] } });
expect(wrapper.findByText('this is a header').exists()).toBe(true);
expect(wrapper.findByText('this is a subheader').exists()).toBe(true);
});
it('renders a string', () => {
createComponent({ propsData: { header: 'this is a header' } });
expect(wrapper.findByText('this is a header').exists()).toBe(true);
});
it('escapes html injection properly', () => {
createComponent({ propsData: { header: '<b role="header">this is a header</b>' } });
expect(wrapper.findByText('<b role="header">this is a header</b>').exists()).toBe(true);
});
});

View File

@ -1,38 +1,63 @@
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import WidgetContentRow from '~/vue_merge_request_widget/components/widget/widget_content_row.vue';
import WidgetContentBody from '~/vue_merge_request_widget/components/widget/widget_content_body.vue';
import StatusIcon from '~/vue_merge_request_widget/components/widget/status_icon.vue';
describe('~/vue_merge_request_widget/components/widget/widget_content_row.vue', () => {
let wrapper;
const findContentBody = () => wrapper.findComponent(WidgetContentBody);
const findStatusIcon = () => wrapper.findComponent(StatusIcon);
const createComponent = ({ propsData, slots } = {}) => {
wrapper = shallowMountExtended(WidgetContentRow, {
propsData: {
widgetName: 'MyWidget',
level: 2,
...propsData,
},
slots,
});
};
it('renders slots properly', () => {
createComponent({
propsData: {
statusIconName: 'success',
level: 2,
},
slots: {
header: '<b>this is a header</b>',
body: '<span>this is a body</span>',
},
describe('body', () => {
it('renders the status icon when provided', () => {
createComponent({ propsData: { statusIconName: 'failed' } });
expect(findStatusIcon().exists()).toBe(true);
});
expect(wrapper.findByText('this is a header').exists()).toBe(true);
expect(findContentBody().props()).toMatchObject({
statusIconName: 'success',
widgetName: 'MyWidget',
it('does not render the status icon when it is not provided', () => {
createComponent();
expect(findStatusIcon().exists()).toBe(false);
});
it('renders slots properly', () => {
createComponent({
propsData: {
statusIconName: 'success',
},
slots: {
body: '<span>this is a body</span>',
},
});
expect(wrapper.findByText('this is a body').exists()).toBe(true);
});
});
describe('header', () => {
it('renders an array of header and subheader', () => {
createComponent({ propsData: { header: ['this is a header', 'this is a subheader'] } });
expect(wrapper.findByText('this is a header').exists()).toBe(true);
expect(wrapper.findByText('this is a subheader').exists()).toBe(true);
});
it('renders a string', () => {
createComponent({ propsData: { header: 'this is a header' } });
expect(wrapper.findByText('this is a header').exists()).toBe(true);
});
it('escapes html injection properly', () => {
createComponent({ propsData: { header: '<b role="header">this is a header</b>' } });
expect(wrapper.findByText('<b role="header">this is a header</b>').exists()).toBe(true);
});
});
});

View File

@ -11,14 +11,14 @@ import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import NamespaceSelect, {
i18n,
EMPTY_NAMESPACE_ID,
} from '~/vue_shared/components/namespace_select/namespace_select.vue';
} from '~/vue_shared/components/namespace_select/namespace_select_deprecated.vue';
import { userNamespaces, groupNamespaces } from './mock_data';
const FLAT_NAMESPACES = [...userNamespaces, ...groupNamespaces];
const EMPTY_NAMESPACE_TITLE = 'Empty namespace TEST';
const EMPTY_NAMESPACE_ITEM = { id: EMPTY_NAMESPACE_ID, humanName: EMPTY_NAMESPACE_TITLE };
describe('Namespace Select', () => {
describe('NamespaceSelectDeprecated', () => {
let wrapper;
const createComponent = (props = {}) =>

View File

@ -343,4 +343,18 @@ RSpec.describe Gitlab::GitalyClient::RepositoryService do
expect(client.full_path).to eq(path)
end
end
describe "#find_license" do
it 'sends a find_license request with medium timeout' do
expect_any_instance_of(Gitaly::RepositoryService::Stub)
.to receive(:find_license) do |_service, _request, headers|
expect(headers[:deadline]).to be_between(
Gitlab::GitalyClient.fast_timeout.seconds.from_now.to_f,
Gitlab::GitalyClient.medium_timeout.seconds.from_now.to_f
)
end
client.find_license
end
end
end

View File

@ -10,20 +10,20 @@ RSpec.describe UserPreference do
using RSpec::Parameterized::TableSyntax
where(color: [
'#000000',
'#123456',
'#abcdef',
'#AbCdEf',
'#ffffff',
'#fFfFfF',
'#000',
'#123',
'#abc',
'#AbC',
'#fff',
'#fFf',
''
])
'#000000',
'#123456',
'#abcdef',
'#AbCdEf',
'#ffffff',
'#fFfFfF',
'#000',
'#123',
'#abc',
'#AbC',
'#fff',
'#fFf',
''
])
with_them do
it { is_expected.to allow_value(color).for(:diffs_deletion_color) }
@ -31,14 +31,14 @@ RSpec.describe UserPreference do
end
where(color: [
'#1',
'#12',
'#1234',
'#12345',
'#1234567',
'123456',
'#12345x'
])
'#1',
'#12',
'#1234',
'#12345',
'#1234567',
'123456',
'#12345x'
])
with_them do
it { is_expected.not_to allow_value(color).for(:diffs_deletion_color) }

View File

@ -7416,9 +7416,10 @@ RSpec.describe User do
let_it_be(:internal_user) { User.alert_bot.tap { |u| u.confirm } }
it 'does not return blocked or banned users' do
expect(described_class.without_forbidden_states).to match_array([
normal_user, admin_user, external_user, unconfirmed_user, omniauth_user, internal_user
])
expect(described_class.without_forbidden_states).to match_array(
[
normal_user, admin_user, external_user, unconfirmed_user, omniauth_user, internal_user
])
end
end

View File

@ -24,31 +24,32 @@ RSpec.describe WikiDirectory do
[toplevel1, toplevel2, toplevel3, child1, child2, child3, grandchild1, grandchild2].sort_by(&:title)
)
expect(entries).to match([
toplevel1,
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent1', entries: [
child1,
child2,
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent1/subparent',
entries: [grandchild1, grandchild2]
expect(entries).to match(
[
toplevel1,
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent1', entries: [
child1,
child2,
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent1/subparent',
entries: [grandchild1, grandchild2]
)
)
)
]
)
),
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent2',
entries: [child3]
)
),
toplevel2,
toplevel3
])
]
)
),
a_kind_of(WikiDirectory).and(
having_attributes(
slug: 'parent2',
entries: [child3]
)
),
toplevel2,
toplevel3
])
end
end

View File

@ -17,34 +17,37 @@ RSpec.describe CrudPolicyHelpers do
describe '.create_read_update_admin_destroy' do
it 'returns an array of the appropriate abilites given a feature name' do
expect(PolicyTestClass.create_read_update_admin_destroy(feature_name)).to eq([
:read_foo,
:create_foo,
:update_foo,
:admin_foo,
:destroy_foo
])
expect(PolicyTestClass.create_read_update_admin_destroy(feature_name)).to eq(
[
:read_foo,
:create_foo,
:update_foo,
:admin_foo,
:destroy_foo
])
end
end
describe '.create_update_admin_destroy' do
it 'returns an array of the appropriate abilites given a feature name' do
expect(PolicyTestClass.create_update_admin_destroy(feature_name)).to eq([
:create_foo,
:update_foo,
:admin_foo,
:destroy_foo
])
expect(PolicyTestClass.create_update_admin_destroy(feature_name)).to eq(
[
:create_foo,
:update_foo,
:admin_foo,
:destroy_foo
])
end
end
describe '.create_update_admin' do
it 'returns an array of the appropriate abilites given a feature name' do
expect(PolicyTestClass.create_update_admin(feature_name)).to eq([
:create_foo,
:update_foo,
:admin_foo
])
expect(PolicyTestClass.create_update_admin(feature_name)).to eq(
[
:create_foo,
:update_foo,
:admin_foo
])
end
end
end

View File

@ -383,10 +383,10 @@ RSpec.describe Ci::BuildRunnerPresenter do
it 'returns expanded and sorted variables' do
is_expected.to eq [
{ key: 'C', value: 'value', public: false, masked: false },
{ key: 'B', value: 'refB-value-$D', public: false, masked: false },
{ key: 'A', value: 'refA-refB-value-$D', public: false, masked: false }
]
{ key: 'C', value: 'value', public: false, masked: false },
{ key: 'B', value: 'refB-value-$D', public: false, masked: false },
{ key: 'A', value: 'refA-refB-value-$D', public: false, masked: false }
]
end
end
end

View File

@ -226,18 +226,19 @@ RSpec.describe API::Ci::Jobs do
expect(json_response.dig('user', 'username')).to eq(api_user.username)
expect(json_response.dig('user', 'roles_in_project')).to match_array %w(guest reporter developer)
expect(json_response).not_to include('environment')
expect(json_response['allowed_agents']).to match_array([
{
'id' => implicit_authorization.agent_id,
'config_project' => hash_including('id' => implicit_authorization.agent.project_id),
'configuration' => implicit_authorization.config
},
{
'id' => group_authorization.agent_id,
'config_project' => hash_including('id' => group_authorization.agent.project_id),
'configuration' => group_authorization.config
}
])
expect(json_response['allowed_agents']).to match_array(
[
{
'id' => implicit_authorization.agent_id,
'config_project' => hash_including('id' => implicit_authorization.agent.project_id),
'configuration' => implicit_authorization.config
},
{
'id' => group_authorization.agent_id,
'config_project' => hash_including('id' => group_authorization.agent.project_id),
'configuration' => group_authorization.config
}
])
end
end

View File

@ -220,14 +220,15 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
expect(json_response['image']).to eq(
{ 'name' => 'image:1.0', 'entrypoint' => '/bin/sh', 'ports' => [], 'pull_policy' => nil }
)
expect(json_response['services']).to eq([
{ 'name' => 'postgres', 'entrypoint' => nil, 'alias' => nil, 'command' => nil, 'ports' => [],
'variables' => nil, 'pull_policy' => nil },
{ 'name' => 'docker:stable-dind', 'entrypoint' => '/bin/sh', 'alias' => 'docker', 'command' => 'sleep 30',
'ports' => [], 'variables' => [], 'pull_policy' => nil },
{ 'name' => 'mysql:latest', 'entrypoint' => nil, 'alias' => nil, 'command' => nil, 'ports' => [],
'variables' => [{ 'key' => 'MYSQL_ROOT_PASSWORD', 'value' => 'root123.' }], 'pull_policy' => nil }
])
expect(json_response['services']).to eq(
[
{ 'name' => 'postgres', 'entrypoint' => nil, 'alias' => nil, 'command' => nil, 'ports' => [],
'variables' => nil, 'pull_policy' => nil },
{ 'name' => 'docker:stable-dind', 'entrypoint' => '/bin/sh', 'alias' => 'docker', 'command' => 'sleep 30',
'ports' => [], 'variables' => [], 'pull_policy' => nil },
{ 'name' => 'mysql:latest', 'entrypoint' => nil, 'alias' => nil, 'command' => nil, 'ports' => [],
'variables' => [{ 'key' => 'MYSQL_ROOT_PASSWORD', 'value' => 'root123.' }], 'pull_policy' => nil }
])
expect(json_response['steps']).to eq(expected_steps)
expect(json_response['artifacts']).to eq(expected_artifacts)
expect(json_response['cache']).to match(expected_cache)
@ -383,23 +384,24 @@ RSpec.describe API::Ci::Runner, :clean_gitlab_redis_shared_state do
expect(response).to have_gitlab_http_status(:created)
expect(response.headers).not_to have_key('X-GitLab-Last-Update')
expect(json_response['steps']).to eq([
{
"name" => "script",
"script" => ["make changelog | tee release_changelog.txt"],
"timeout" => 3600,
"when" => "on_success",
"allow_failure" => false
},
{
"name" => "release",
"script" =>
["release-cli create --name \"Release $CI_COMMIT_SHA\" --description \"Created using the release-cli $EXTRA_DESCRIPTION\" --tag-name \"release-$CI_COMMIT_SHA\" --ref \"$CI_COMMIT_SHA\" --assets-link \"{\\\"url\\\":\\\"https://example.com/assets/1\\\",\\\"name\\\":\\\"asset1\\\"}\""],
"timeout" => 3600,
"when" => "on_success",
"allow_failure" => false
}
])
expect(json_response['steps']).to eq(
[
{
"name" => "script",
"script" => ["make changelog | tee release_changelog.txt"],
"timeout" => 3600,
"when" => "on_success",
"allow_failure" => false
},
{
"name" => "release",
"script" =>
["release-cli create --name \"Release $CI_COMMIT_SHA\" --description \"Created using the release-cli $EXTRA_DESCRIPTION\" --tag-name \"release-$CI_COMMIT_SHA\" --ref \"$CI_COMMIT_SHA\" --assets-link \"{\\\"url\\\":\\\"https://example.com/assets/1\\\",\\\"name\\\":\\\"asset1\\\"}\""],
"timeout" => 3600,
"when" => "on_success",
"allow_failure" => false
}
])
end
end

View File

@ -918,10 +918,11 @@ RSpec.describe API::Ci::Runners do
create(:ci_build, :failed, runner: shared_runner, project: project_with_repo, pipeline: pipeline)
expect_next_instance_of(Repository) do |repo|
expect(repo).to receive(:commits_by).with(oids: %w[
1a0b36b3cdad1d2ee32457c102a8c0b7056fa863
c1c67abbaf91f624347bb3ae96eabe3a1b742478
]).once.and_call_original
expect(repo).to receive(:commits_by).with(oids:
%w[
1a0b36b3cdad1d2ee32457c102a8c0b7056fa863
c1c67abbaf91f624347bb3ae96eabe3a1b742478
]).once.and_call_original
end
get api("/runners/#{shared_runner.id}/jobs", admin), params: { per_page: 2, order_by: 'id', sort: 'desc' }
@ -1124,27 +1125,24 @@ RSpec.describe API::Ci::Runners do
it 'returns all runners' do
get api("/groups/#{group.id}/runners", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Group runner A', 'active' => true, 'paused' => false),
a_hash_including('description' => 'Shared runner', 'active' => true, 'paused' => false)
])
expect(json_response).to match_array(
[
a_hash_including('description' => 'Group runner A', 'active' => true, 'paused' => false),
a_hash_including('description' => 'Shared runner', 'active' => true, 'paused' => false)
])
end
context 'filter by type' do
it 'returns record when valid and present' do
get api("/groups/#{group.id}/runners?type=group_type", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Group runner A')
])
expect(json_response).to match_array([a_hash_including('description' => 'Group runner A')])
end
it 'returns instance runners when instance_type is specified' do
get api("/groups/#{group.id}/runners?type=instance_type", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Shared runner')
])
expect(json_response).to match_array([a_hash_including('description' => 'Shared runner')])
end
# TODO: Remove when REST API v5 is implemented (https://gitlab.com/gitlab-org/gitlab/-/issues/351466)
@ -1167,18 +1165,14 @@ RSpec.describe API::Ci::Runners do
it 'returns runners by paused state' do
get api("/groups/#{group.id}/runners?paused=true", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Inactive group runner')
])
expect(json_response).to match_array([a_hash_including('description' => 'Inactive group runner')])
end
context 'filter runners by status' do
it 'returns runners by valid status' do
get api("/groups/#{group.id}/runners?status=paused", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Inactive group runner')
])
expect(json_response).to match_array([a_hash_including('description' => 'Inactive group runner')])
end
it 'does not filter by invalid status' do
@ -1195,9 +1189,7 @@ RSpec.describe API::Ci::Runners do
get api("/groups/#{group.id}/runners?tag_list=tag1,tag2", user)
expect(json_response).to match_array([
a_hash_including('description' => 'Runner tagged with tag1 and tag2')
])
expect(json_response).to match_array([a_hash_including('description' => 'Runner tagged with tag1 and tag2')])
end
end

View File

@ -44,14 +44,15 @@ RSpec.describe API::DeployTokens do
token_ids = json_response.map { |token| token['id'] }
expect(response).to include_pagination_headers
expect(response).to match_response_schema('public_api/v4/deploy_tokens')
expect(token_ids).to match_array([
deploy_token.id,
revoked_deploy_token.id,
expired_deploy_token.id,
group_deploy_token.id,
revoked_group_deploy_token.id,
expired_group_deploy_token.id
])
expect(token_ids).to match_array(
[
deploy_token.id,
revoked_deploy_token.id,
expired_deploy_token.id,
group_deploy_token.id,
revoked_group_deploy_token.id,
expired_group_deploy_token.id
])
end
context 'and active=true' do
@ -61,10 +62,11 @@ RSpec.describe API::DeployTokens do
token_ids = json_response.map { |token| token['id'] }
expect(response).to have_gitlab_http_status(:ok)
expect(response).to include_pagination_headers
expect(token_ids).to match_array([
deploy_token.id,
group_deploy_token.id
])
expect(token_ids).to match_array(
[
deploy_token.id,
group_deploy_token.id
])
end
end
end
@ -110,11 +112,12 @@ RSpec.describe API::DeployTokens do
subject
token_ids = json_response.map { |token| token['id'] }
expect(token_ids).to match_array([
deploy_token.id,
expired_deploy_token.id,
revoked_deploy_token.id
])
expect(token_ids).to match_array(
[
deploy_token.id,
expired_deploy_token.id,
revoked_deploy_token.id
])
end
context 'and active=true' do

View File

@ -176,22 +176,22 @@ RSpec.describe 'Query.ciConfig' do
"jobs" =>
{
"nodes" => [
{
"name" => "docker",
"groupName" => "docker",
"stage" => "test",
"script" => ["curl http://dockerhub/URL"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "manual",
"except" => { "refs" => ["branches"] },
"environment" => nil,
"tags" => [],
"needs" => { "nodes" => [{ "name" => "spinach" }, { "name" => "rspec 0 1" }] }
}
]
{
"name" => "docker",
"groupName" => "docker",
"stage" => "test",
"script" => ["curl http://dockerhub/URL"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => true,
"only" => { "refs" => %w[branches tags] },
"when" => "manual",
"except" => { "refs" => ["branches"] },
"environment" => nil,
"tags" => [],
"needs" => { "nodes" => [{ "name" => "spinach" }, { "name" => "rspec 0 1" }] }
}
]
}
}
]
@ -209,22 +209,22 @@ RSpec.describe 'Query.ciConfig' do
"jobs" =>
{
"nodes" => [
{
"name" => "deploy_job",
"groupName" => "deploy_job",
"stage" => "deploy",
"script" => ["echo 'done'"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => nil,
"environment" => "production",
"tags" => [],
"needs" => { "nodes" => [] }
}
]
{
"name" => "deploy_job",
"groupName" => "deploy_job",
"stage" => "deploy",
"script" => ["echo 'done'"],
"beforeScript" => ["bundle install", "bundle exec rake db:create"],
"afterScript" => ["echo 'run this after'"],
"allowFailure" => false,
"only" => { "refs" => %w[branches tags] },
"when" => "on_success",
"except" => nil,
"environment" => "production",
"tags" => [],
"needs" => { "nodes" => [] }
}
]
}
}
]

View File

@ -25,11 +25,12 @@ RSpec.describe 'Query.project.pipeline' do
let(:first_n) { var('Int') }
let(:query) do
with_signature([first_n], wrap_fields(query_graphql_path([
[:project, { full_path: project.full_path }],
[:pipeline, { iid: pipeline.iid.to_s }],
[:stages, { first: first_n }]
], stage_fields)))
with_signature([first_n], wrap_fields(query_graphql_path(
[
[:project, { full_path: project.full_path }],
[:pipeline, { iid: pipeline.iid.to_s }],
[:stages, { first: first_n }]
], stage_fields)))
end
let(:stage_fields) do

View File

@ -47,10 +47,11 @@ RSpec.describe 'Delete an upload' do
expect(response).to have_gitlab_http_status(:success)
expect(mutation_response['upload']).to be_nil
expect(mutation_response['errors']).to match_array([
"The resource that you are attempting to access does not "\
"exist or you don't have permission to perform this action."
])
expect(mutation_response['errors']).to match_array(
[
"The resource that you are attempting to access does not "\
"exist or you don't have permission to perform this action."
])
end
end
end

View File

@ -61,11 +61,12 @@ RSpec.describe 'Project.cluster_agents' do
tokens = graphql_data_at(:project, :cluster_agents, :nodes, :tokens, :nodes)
expect(tokens).to match([
a_graphql_entity_for(token_3),
a_graphql_entity_for(token_2),
a_graphql_entity_for(token_1)
])
expect(tokens).to match(
[
a_graphql_entity_for(token_3),
a_graphql_entity_for(token_2),
a_graphql_entity_for(token_1)
])
end
it 'does not suffer from N+1 performance issues' do

View File

@ -245,9 +245,10 @@ RSpec.describe 'Getting designs related to an issue' do
end
it 'only returns one version record for the design (the original version)' do
expect(version_nodes).to eq([
[{ 'node' => { 'id' => global_id(version) } }]
])
expect(version_nodes).to eq(
[
[{ 'node' => { 'id' => global_id(version) } }]
])
end
end
@ -289,10 +290,11 @@ RSpec.describe 'Getting designs related to an issue' do
end
it 'returns the correct versions records for both designs' do
expect(version_nodes).to eq([
[{ 'node' => { 'id' => global_id(design.versions.first) } }],
[{ 'node' => { 'id' => global_id(second_design.versions.first) } }]
])
expect(version_nodes).to eq(
[
[{ 'node' => { 'id' => global_id(design.versions.first) } }],
[{ 'node' => { 'id' => global_id(second_design.versions.first) } }]
])
end
end
@ -341,15 +343,16 @@ RSpec.describe 'Getting designs related to an issue' do
end
it 'returns all versions records for the designs' do
expect(version_nodes).to eq([
expect(version_nodes).to eq(
[
{ 'node' => { 'id' => global_id(design.versions.first) } }
],
[
{ 'node' => { 'id' => global_id(second_design.versions.second) } },
{ 'node' => { 'id' => global_id(second_design.versions.first) } }
]
])
[
{ 'node' => { 'id' => global_id(design.versions.first) } }
],
[
{ 'node' => { 'id' => global_id(second_design.versions.second) } },
{ 'node' => { 'id' => global_id(second_design.versions.first) } }
]
])
end
end
end

View File

@ -25,9 +25,10 @@ RSpec.describe 'getting milestone listings nested in a project' do
graphql_query_for(
:project,
{ full_path: project.full_path },
query_graphql_field(:milestones, search_params, [
query_graphql_field(:nodes, nil, %i[id title])
])
query_graphql_field(:milestones, search_params,
[
query_graphql_field(:nodes, nil, %i[id title])
])
)
end

View File

@ -8,10 +8,19 @@ RSpec.describe 'getting a work item list for a project' do
let_it_be(:group) { create(:group) }
let_it_be(:project) { create(:project, :repository, :public, group: group) }
let_it_be(:current_user) { create(:user) }
let_it_be(:label1) { create(:label, project: project) }
let_it_be(:label2) { create(:label, project: project) }
let_it_be(:item1) { create(:work_item, project: project, discussion_locked: true, title: 'item1') }
let_it_be(:item1) { create(:work_item, project: project, discussion_locked: true, title: 'item1', labels: [label1]) }
let_it_be(:item2) do
create(:work_item, project: project, title: 'item2', last_edited_by: current_user, last_edited_at: 1.day.ago)
create(
:work_item,
project: project,
title: 'item2',
last_edited_by: current_user,
last_edited_at: 1.day.ago,
labels: [label2]
)
end
let_it_be(:confidential_item) { create(:work_item, confidential: true, project: project, title: 'item3') }
@ -40,7 +49,14 @@ RSpec.describe 'getting a work item list for a project' do
expect_graphql_errors_to_be_empty
create_list(:work_item, 3, :task, :last_edited_by_user, last_edited_at: 1.week.ago, project: project)
create_list(
:work_item, 3,
:task,
:last_edited_by_user,
last_edited_at: 1.week.ago,
project: project,
labels: [label1, label2]
)
expect_graphql_errors_to_be_empty
expect { post_graphql(query, current_user: current_user) }.not_to exceed_query_limit(control)
@ -74,6 +90,10 @@ RSpec.describe 'getting a work item list for a project' do
... on WorkItemWidgetHierarchy {
parent { id }
}
... on WorkItemWidgetLabels {
labels { nodes { id } }
allowsScopedLabels
}
}
}
GRAPHQL

View File

@ -17,19 +17,25 @@ RSpec.describe 'UsageTrendsMeasurements' do
end
it 'returns measurement objects' do
expect(graphql_data.dig('usageTrendsMeasurements', 'nodes')).to eq([
{ "count" => 10, 'identifier' => 'PROJECTS' },
{ "count" => 5, 'identifier' => 'PROJECTS' }
])
expect(graphql_data.dig('usageTrendsMeasurements', 'nodes')).to eq(
[
{ "count" => 10, 'identifier' => 'PROJECTS' },
{ "count" => 5, 'identifier' => 'PROJECTS' }
])
end
context 'with recorded_at filters' do
let(:arguments) { %(identifier: PROJECTS, recordedAfter: "#{15.days.ago.to_date}", recordedBefore: "#{5.days.ago.to_date}") }
let(:arguments) do
%(identifier: PROJECTS,
recordedAfter: "#{15.days.ago.to_date}",
recordedBefore: "#{5.days.ago.to_date}")
end
it 'returns filtered measurement objects' do
expect(graphql_data.dig('usageTrendsMeasurements', 'nodes')).to eq([
{ "count" => 10, 'identifier' => 'PROJECTS' }
])
expect(graphql_data.dig('usageTrendsMeasurements', 'nodes')).to eq(
[
{ "count" => 10, 'identifier' => 'PROJECTS' }
])
end
end
end

View File

@ -128,10 +128,11 @@ RSpec.describe 'Query.work_item(id)' do
hash_including(
'type' => 'HIERARCHY',
'parent' => nil,
'children' => { 'nodes' => match_array([
hash_including('id' => child_link1.work_item.to_gid.to_s),
hash_including('id' => child_link2.work_item.to_gid.to_s)
]) }
'children' => { 'nodes' => match_array(
[
hash_including('id' => child_link1.work_item.to_gid.to_s),
hash_including('id' => child_link2.work_item.to_gid.to_s)
]) }
)
)
)
@ -161,9 +162,10 @@ RSpec.describe 'Query.work_item(id)' do
hash_including(
'type' => 'HIERARCHY',
'parent' => nil,
'children' => { 'nodes' => match_array([
hash_including('id' => child_link1.work_item.to_gid.to_s)
]) }
'children' => { 'nodes' => match_array(
[
hash_including('id' => child_link1.work_item.to_gid.to_s)
]) }
)
)
)

View File

@ -274,9 +274,7 @@ RSpec.describe API::Issues do
post api("/projects/#{project.id}/issues", user),
params: { title: 'g' * 256 }
expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response['message']['title']).to eq([
'is too long (maximum is 255 characters)'
])
expect(json_response['message']['title']).to eq(['is too long (maximum is 255 characters)'])
end
context 'resolving discussions' do

View File

@ -381,9 +381,7 @@ RSpec.describe API::Issues do
put api_for_user, params: { title: 'g' * 256 }
expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response['message']['title']).to eq([
'is too long (maximum is 255 characters)'
])
expect(json_response['message']['title']).to eq(['is too long (maximum is 255 characters)'])
end
end

View File

@ -119,10 +119,13 @@ RSpec.describe API::MergeRequests do
it 'returns an array of all merge_requests' do
get api(endpoint_path, user)
expect_paginated_array_response([
merge_request_merged.id, merge_request_locked.id,
merge_request_closed.id, merge_request.id
])
expect_paginated_array_response(
[
merge_request_merged.id,
merge_request_locked.id,
merge_request_closed.id,
merge_request.id
])
expect(json_response.last['title']).to eq(merge_request.title)
expect(json_response.last).to have_key('web_url')
@ -172,10 +175,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request_merged.id, merge_request_locked.id,
merge_request_closed.id, merge_request.id
])
expect_paginated_array_response(
[
merge_request_merged.id,
merge_request_locked.id,
merge_request_closed.id,
merge_request.id
])
expect(json_response.last.keys).to match_array(%w(id iid title web_url created_at description project_id state updated_at))
expect(json_response.last['iid']).to eq(merge_request.iid)
expect(json_response.last['title']).to eq(merge_request.title)
@ -190,10 +196,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request_merged.id, merge_request_locked.id,
merge_request_closed.id, merge_request.id
])
expect_paginated_array_response(
[
merge_request_merged.id,
merge_request_locked.id,
merge_request_closed.id,
merge_request.id
])
expect(json_response.last['title']).to eq(merge_request.title)
end
@ -354,10 +363,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request.id, merge_request_closed.id,
merge_request_locked.id, merge_request_merged.id
])
expect_paginated_array_response(
[
merge_request.id,
merge_request_closed.id,
merge_request_locked.id,
merge_request_merged.id
])
response_dates = json_response.map { |merge_request| merge_request['created_at'] }
expect(response_dates).to eq(response_dates.sort)
end
@ -367,10 +379,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request_merged.id, merge_request_locked.id,
merge_request_closed.id, merge_request.id
])
expect_paginated_array_response(
[
merge_request_merged.id,
merge_request_locked.id,
merge_request_closed.id,
merge_request.id
])
response_dates = json_response.map { |merge_request| merge_request['created_at'] }
expect(response_dates).to eq(response_dates.sort.reverse)
end
@ -398,10 +413,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request.id, merge_request_locked.id,
merge_request_merged.id, merge_request_closed.id
])
expect_paginated_array_response(
[
merge_request.id,
merge_request_locked.id,
merge_request_merged.id,
merge_request_closed.id
])
response_dates = json_response.map { |merge_request| merge_request['updated_at'] }
expect(response_dates).to eq(response_dates.sort.reverse)
end
@ -411,10 +429,13 @@ RSpec.describe API::MergeRequests do
get api(path, user)
expect_paginated_array_response([
merge_request.id, merge_request_closed.id,
merge_request_locked.id, merge_request_merged.id
])
expect_paginated_array_response(
[
merge_request.id,
merge_request_closed.id,
merge_request_locked.id,
merge_request_merged.id
])
response_dates = json_response.map { |merge_request| merge_request['created_at'] }
expect(response_dates).to eq(response_dates.sort)
end

Some files were not shown because too many files have changed in this diff Show More