Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2021-10-25 18:12:16 +00:00
parent 82c63f420c
commit 942229d2b4
69 changed files with 799 additions and 134 deletions

View File

@@ -555,7 +555,7 @@
},
"start_in": {
"type": "string",
"description": "Used in conjunction with 'when: delayed' to set how long to delay before starting a job.",
"description": "Used in conjunction with 'when: delayed' to set how long to delay before starting a job. e.g. '5', 5 seconds, 30 minutes, 1 week, etc. Read more: https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-job-after-a-delay",
"minLength": 1
},
"allow_failure": {
@@ -939,7 +939,7 @@
"stage": {
"type": "string",
"description": "Define what stage the job will run in.",
"default": "test"
"minLength": 1
},
"only": {
"$ref": "#/definitions/filter",

View File

@@ -1,5 +1,5 @@
<script>
import { GlTooltipDirective, GlButton, GlIcon } from '@gitlab/ui';
import { GlTooltipDirective, GlButton, GlIcon, GlSafeHtmlDirective } from '@gitlab/ui';
import { throttle } from 'lodash';
import { mapActions, mapState } from 'vuex';
import { __ } from '../../../locale';
@@ -14,6 +14,7 @@ const scrollPositions = {
export default {
directives: {
GlTooltip: GlTooltipDirective,
SafeHtml: GlSafeHtmlDirective,
},
components: {
GlButton,
@@ -100,8 +101,8 @@ export default {
<pre ref="buildJobLog" class="build-log mb-0 h-100 mr-3" @scroll="scrollBuildLog">
<code
v-show="!detailJob.isLoading"
v-safe-html="jobOutput"
class="bash"
v-html="jobOutput /* eslint-disable-line vue/no-v-html */"
>
</code>
<div

View File

@@ -9,7 +9,12 @@ Vue.use(VueApollo);
Vue.use(GlToast);
const apolloProvider = new VueApollo({
defaultClient: createDefaultClient(),
defaultClient: createDefaultClient(
{},
{
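// Promise Apollo that query results are never mutated in place, so it can skip defensive copies.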
assumeImmutableResults: true,
},
),
});
export default (containerId = 'js-jobs-table') => {

View File

@@ -88,6 +88,7 @@ export default {
<runner-instructions-modal
v-if="instructionsModalOpened"
ref="runnerInstructionsModal"
:registration-token="registrationToken"
data-testid="runner-instructions-modal"
/>
</gl-dropdown-item>

View File

@@ -1,8 +1,10 @@
import { GlToast } from '@gitlab/ui';
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import createDefaultClient from '~/lib/graphql';
import GroupRunnersApp from './group_runners_app.vue';
Vue.use(GlToast);
Vue.use(VueApollo);
export const initGroupRunners = (selector = '#js-group-runners') => {

View File

@@ -339,7 +339,7 @@ export default {
</div>
<div class="gl-display-flex">
<span class="gl-text-gray-600 gl-ml-5">
{{ s__('SetStatusModal|A busy indicator is shown next to your name and avatar.') }}
{{ s__('SetStatusModal|An indicator appears next to your name and avatar') }}
</span>
</div>
</div>

View File

@@ -22,6 +22,7 @@ export default () => {
return object.id || defaultDataIdFromObject(object);
},
},
assumeImmutableResults: true,
});
const { emptyStateImage, projectPath, accessTokensPath, terraformApiUrl, username } = el.dataset;

View File

@@ -2,6 +2,8 @@ import { s__ } from '~/locale';
export const PLATFORMS_WITHOUT_ARCHITECTURES = ['docker', 'kubernetes'];
export const REGISTRATION_TOKEN_PLACEHOLDER = '$REGISTRATION_TOKEN';
export const INSTRUCTIONS_PLATFORMS_WITHOUT_ARCHITECTURES = {
docker: {
instructions: s__(

View File

@@ -16,8 +16,9 @@ import { isEmpty } from 'lodash';
import { __, s__ } from '~/locale';
import ModalCopyButton from '~/vue_shared/components/modal_copy_button.vue';
import {
PLATFORMS_WITHOUT_ARCHITECTURES,
INSTRUCTIONS_PLATFORMS_WITHOUT_ARCHITECTURES,
PLATFORMS_WITHOUT_ARCHITECTURES,
REGISTRATION_TOKEN_PLACEHOLDER,
} from './constants';
import getRunnerPlatformsQuery from './graphql/queries/get_runner_platforms.query.graphql';
import getRunnerSetupInstructionsQuery from './graphql/queries/get_runner_setup.query.graphql';
@@ -44,6 +45,11 @@ export default {
required: false,
default: 'runner-instructions-modal',
},
registrationToken: {
type: String,
required: false,
default: null,
},
},
apollo: {
platforms: {
@@ -118,6 +124,15 @@ export default {
runnerInstallationLink() {
return INSTRUCTIONS_PLATFORMS_WITHOUT_ARCHITECTURES[this.selectedPlatformName]?.link;
},
registerInstructionsWithToken() {
const { registerInstructions } = this.instructions || {};
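// Substitute the $REGISTRATION_TOKEN placeholder only when a token was provided to the modal.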
if (this.registrationToken) {
return registerInstructions.replace(REGISTRATION_TOKEN_PLACEHOLDER, this.registrationToken);
}
return registerInstructions;
},
},
methods: {
show() {
@@ -249,11 +264,11 @@
<pre
class="gl-bg-gray gl-flex-grow-1 gl-white-space-pre-line"
data-testid="register-command"
>{{ instructions.registerInstructions }}</pre
>{{ registerInstructionsWithToken }}</pre
>
<modal-copy-button
:title="$options.i18n.copyInstructions"
:text="instructions.registerInstructions"
:text="registerInstructionsWithToken"
:modal-id="$options.modalId"
css-classes="gl-align-self-start gl-ml-2 gl-mt-2"
category="tertiary"

View File

@@ -9,7 +9,7 @@ module Types
field :install_instructions, GraphQL::Types::String, null: false,
description: 'Instructions for installing the runner on the specified architecture.'
field :register_instructions, GraphQL::Types::String, null: true,
description: 'Instructions for registering the runner.'
description: 'Instructions for registering the runner. The actual registration tokens are not included in the commands. Instead, a placeholder `$REGISTRATION_TOKEN` is shown.'
end
end
end

View File

@@ -13,7 +13,7 @@ class LegacyDiffNote < Note
validates :line_code, presence: true, line_code: true
before_create :set_diff
before_create :set_diff, unless: :skip_setting_st_diff?
def discussion_class(*)
LegacyDiffDiscussion
@@ -90,6 +90,10 @@ class LegacyDiffNote < Note
self.st_diff = diff.to_hash if diff
end
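# Imported notes can arrive with st_diff already serialized, so recomputing
# it in the before_create callback is redundant; skip it in that case
# (guarded by the skip_legacy_diff_note_callback_on_import feature flag).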
def skip_setting_st_diff?
st_diff.present? && importing? && Feature.enabled?(:skip_legacy_diff_note_callback_on_import, default_enabled: :yaml)
end
def diff_for_line_code
attributes = {
noteable_type: noteable_type,

View File

@@ -27,12 +27,17 @@ class ProtectedBranch < ApplicationRecord
# Check if branch name is marked as protected in the system
def self.protected?(project, ref_name)
return true if project.empty_repo? && project.default_branch_protected?
return false if ref_name.blank?
Rails.cache.fetch("protected_ref-#{ref_name}-#{project.cache_key}") do
Rails.cache.fetch(protected_ref_cache_key(project, ref_name)) do
self.matching(ref_name, protected_refs: protected_refs(project)).present?
end
end
def self.protected_ref_cache_key(project, ref_name)
"protected_ref-#{project.cache_key}-#{Digest::SHA1.hexdigest(ref_name)}"
end
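# Note on the key format above: ref names are user-controlled and can be
# arbitrarily long, so hashing them keeps the cache key short and safe for
# the cache store. Illustrative only:
#
#   Digest::SHA1.hexdigest('feature/a-very-long-branch-name')
#   # => always 40 hexadecimal characters, regardless of the ref name length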
def self.allow_force_push?(project, ref_name)
project.protected_branches.allowing_force_push.matching(ref_name).any?
end

View File

@@ -9,6 +9,7 @@ module Users
belongs_to :user
validates :holder_name, length: { maximum: 26 }
validates :network, length: { maximum: 32 }
validates :last_digits, allow_nil: true, numericality: {
greater_than_or_equal_to: 0, less_than_or_equal_to: 9999
}

View File

@@ -12,6 +12,7 @@ module Users
credit_card_validated_at: params.fetch(:credit_card_validated_at),
expiration_date: get_expiration_date(params),
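# credit_card_mask_number arrives as a string holding the card's last
# digits; store it as a base-10 integer.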
last_digits: Integer(params.fetch(:credit_card_mask_number), 10),
network: params.fetch(:credit_card_type),
holder_name: params.fetch(:credit_card_holder_name)
}

View File

@@ -71,7 +71,7 @@
placeholder: s_("Profiles|What's your status?")
.checkbox-icon-inline-wrapper
= status_form.check_box :availability, { data: { testid: "user-availability-checkbox" }, label: s_("Profiles|Busy"), wrapper_class: 'gl-mr-0 gl-font-weight-bold' }, availability["busy"], availability["not_set"]
.gl-text-gray-600.gl-ml-5= s_('Profiles|"Busy" will be shown next to your name')
.gl-text-gray-600.gl-ml-5= s_('Profiles|An indicator appears next to your name and avatar')
.col-lg-12
%hr
.row.user-time-preferences.js-search-settings-section

View File

@@ -5,4 +5,4 @@ rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/329153
milestone: '13.12'
type: development
group: group::source code
default_enabled: false
default_enabled: true

View File

@@ -0,0 +1,8 @@
---
name: skip_legacy_diff_note_callback_on_import
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/72897
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/343666
milestone: '14.5'
type: development
group: group::import
default_enabled: false

View File

@@ -0,0 +1,8 @@
---
name: usage_data_instrumentation
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/68808
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/338029
milestone: '14.5'
type: development
group: group::product intelligence
default_enabled: false

View File

@@ -0,0 +1,9 @@
# frozen_string_literal: true
class AddNetworkToUserCreditCardValidations < Gitlab::Database::Migration[1.0]
# rubocop:disable Migration/AddLimitToTextColumns
def change
add_column :user_credit_card_validations, :network, :text
end
# rubocop:enable Migration/AddLimitToTextColumns
end

View File

@@ -0,0 +1,13 @@
# frozen_string_literal: true
class LimitNetworkOnUserCreditCardValidations < Gitlab::Database::Migration[1.0]
disable_ddl_transaction!
def up
add_text_limit :user_credit_card_validations, :network, 32
end
def down
remove_text_limit :user_credit_card_validations, :network
end
end

View File

@@ -0,0 +1,17 @@
# frozen_string_literal: true
class IndexIncludeNetworkOnUserCreditCardValidations < Gitlab::Database::Migration[1.0]
disable_ddl_transaction!
INDEX_NAME = 'index_user_credit_card_validations_meta_data_partial_match'
def up
add_concurrent_index :user_credit_card_validations,
[:expiration_date, :last_digits, :network, :credit_card_validated_at],
name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :user_credit_card_validations, INDEX_NAME
end
end

View File

@@ -0,0 +1 @@
5c5adaf0f6f053c7e737051fbccf61d1fc36e20360a82d5fca142883d3e3bfdd

View File

@@ -0,0 +1 @@
06d6458f7b85b3e729c3c8a8ae29c29f7c5504ea330ae3a3bcf1e0074ed66cf6

View File

@@ -0,0 +1 @@
a5928cef69626ad5e972e8cb7a570ca83201cdfe7ec4f2401f2aa14c34b9cfb8

View File

@@ -19843,6 +19843,8 @@ CREATE TABLE user_credit_card_validations (
expiration_date date,
last_digits smallint,
holder_name text,
network text,
CONSTRAINT check_1765e2b30f CHECK ((char_length(network) <= 32)),
CONSTRAINT check_3eea080c91 CHECK (((last_digits >= 0) AND (last_digits <= 9999))),
CONSTRAINT check_eafe45d88b CHECK ((char_length(holder_name) <= 26))
);
@@ -26821,6 +26823,8 @@ CREATE UNIQUE INDEX index_user_canonical_emails_on_user_id_and_canonical_email O
CREATE INDEX index_user_credit_card_validations_meta_data_full_match ON user_credit_card_validations USING btree (holder_name, expiration_date, last_digits, credit_card_validated_at);
CREATE INDEX index_user_credit_card_validations_meta_data_partial_match ON user_credit_card_validations USING btree (expiration_date, last_digits, network, credit_card_validated_at);
CREATE INDEX index_user_custom_attributes_on_key_and_value ON user_custom_attributes USING btree (key, value);
CREATE UNIQUE INDEX index_user_custom_attributes_on_user_id_and_key ON user_custom_attributes USING btree (user_id, key);

View File

@@ -140,6 +140,7 @@ exceptions:
- RPM
- RPS
- RSA
- RDS
- RSS
- RVM
- SAAS

View File

@@ -8,7 +8,7 @@ type: howto
# Geo with external PostgreSQL instances **(PREMIUM SELF)**
This document is relevant if you are using a PostgreSQL instance that is *not
managed by Omnibus*. This includes cloud-managed instances like AWS RDS, or
managed by Omnibus*. This includes cloud-managed instances like Amazon RDS, or
manually installed and configured PostgreSQL instances.
NOTE:
@@ -58,7 +58,7 @@ developed and tested. We aim to be compatible with most external
To set up an external database, you can either:
- Set up [streaming replication](https://www.postgresql.org/docs/12/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example AWS RDS, bare metal not managed by Omnibus, and so on).
- Set up [streaming replication](https://www.postgresql.org/docs/12/warm-standby.html#STREAMING-REPLICATION-SLOTS) yourself (for example Amazon RDS, bare metal not managed by Omnibus, and so on).
- Perform the Omnibus configuration manually as follows.
#### Leverage your cloud provider's tools to replicate the primary database
@@ -200,7 +200,7 @@ This is for the installation of extensions during installation and upgrades. As
To setup an external tracking database, follow the instructions below:
NOTE:
If you want to use AWS RDS as a tracking database, make sure it has access to
If you want to use Amazon RDS as a tracking database, make sure it has access to
the secondary database. Unfortunately, just assigning the same security group is not enough as
outbound rules do not apply to RDS PostgreSQL databases. Therefore, you need to explicitly add an inbound
rule to the read-replica's security group allowing any TCP traffic from

View File

@@ -132,6 +132,44 @@ This is a brief overview. Please refer to the above instructions for more contex
1. Remove the `AuthorizedKeysCommand` lines from `/etc/ssh/sshd_config` or from `/assets/sshd_config` if you are using Omnibus Docker.
1. Reload `sshd`: `sudo service sshd reload`.
## Use `gitlab-sshd` instead of OpenSSH
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/299109) in GitLab 14.5.
WARNING:
`gitlab-sshd` is in [**Alpha**](https://about.gitlab.com/handbook/product/gitlab-the-product/#alpha-beta-ga).
It is not ready for production use.
`gitlab-sshd` is [a standalone SSH server](https://gitlab.com/gitlab-org/gitlab-shell/-/tree/main/internal/sshd)
written in Go. It is provided as part of the `gitlab-shell` package. It uses less
memory than OpenSSH and supports
[group access restriction by IP address](../../user/group/index.md) for applications
running behind a proxy.
If you are considering a switch from OpenSSH to `gitlab-sshd`, note the following:
- The `gitlab-sshd` component is only available for
[Cloud Native Helm Charts](https://docs.gitlab.com/charts/) deployments.
- `gitlab-sshd` supports the PROXY protocol. It can run behind proxy servers that rely
on it, such as HAProxy.
- `gitlab-sshd` does not share an SSH port with the system administrator's OpenSSH,
  and requires its own bind to port 22.
- `gitlab-sshd` **does not** support SSH certificates.
To switch from OpenSSH to `gitlab-sshd`:
1. Set the `gitlab-shell` charts `sshDaemon` option to
[`gitlab-sshd`](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/index.html#installation-command-line-options).
For example:
```yaml
gitlab:
gitlab-shell:
sshDaemon: gitlab-sshd
```
1. Perform a Helm upgrade.
## Compiling a custom version of OpenSSH for CentOS 6
Building a custom version of OpenSSH is not necessary for Ubuntu 16.04 users,

View File

@@ -36,7 +36,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -2199,7 +2199,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -36,7 +36,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -2199,7 +2199,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -29,7 +29,7 @@ For a full list of reference architectures, see
| NFS server (optional, not recommended) | 1 | 4 vCPU, 3.6 GB memory | `n1-highcpu-4` | `c5.xlarge` | `F4s v2` |
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run as reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run as reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -1027,7 +1027,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
<!-- markdownlint-enable MD029 -->

View File

@@ -45,7 +45,7 @@ For a full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -2106,7 +2106,7 @@ but with smaller performance requirements, several modifications can be consider
- GitLab Rails and Sidekiq: Stateless services don't have a minimum node count. Two are enough for redundancy.
- Gitaly and Praefect: A quorum is not strictly necessary. Two Gitaly nodes and two Praefect nodes are enough for redundancy.
- Running select components in reputable Cloud PaaS solutions: Select components of the GitLab setup can instead be run on Cloud Provider PaaS solutions. By doing this, additional dependent components can also be removed:
- PostgreSQL: Can be run on reputable Cloud PaaS solutions such as Google Cloud SQL or AWS RDS. In this setup, the PgBouncer and Consul nodes are no longer required:
- PostgreSQL: Can be run on reputable Cloud PaaS solutions such as Google Cloud SQL or Amazon RDS. In this setup, the PgBouncer and Consul nodes are no longer required:
- Consul may still be desired if [Prometheus](../monitoring/prometheus/index.md) auto discovery is a requirement, otherwise you would need to [manually add scrape configurations](../monitoring/prometheus/index.md#adding-custom-scrape-configurations) for all nodes.
- As Redis Sentinel runs on the same box as Consul in this architecture, it may need to be run on a separate box if Redis is still being run via Omnibus.
- Redis: Can be run on reputable Cloud PaaS solutions such as Google Memorystore and AWS ElastiCache. In this setup, the Redis Sentinel is no longer required.
@@ -2170,7 +2170,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -36,7 +36,7 @@ full list of reference architectures, see
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.
@@ -2213,7 +2213,7 @@ services where applicable):
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and AWS RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
1. Can be optionally run on reputable third-party external PaaS PostgreSQL solutions. Google Cloud SQL and Amazon RDS are known to work, however Azure Database for PostgreSQL is [not recommended](https://gitlab.com/gitlab-org/quality/reference-architectures/-/issues/61) due to performance issues. Consul is primarily used for PostgreSQL high availability so can be ignored when using a PostgreSQL PaaS setup. However it is also used optionally by Prometheus for Omnibus auto host discovery.
2. Can be optionally run on reputable third-party external PaaS Redis solutions. Google Memorystore and AWS Elasticache are known to work.
3. Can be optionally run on reputable third-party load balancing services (LB PaaS). AWS ELB is known to work.
4. Should be run on reputable third-party object storage (storage PaaS) for cloud implementations. Google Cloud Storage and AWS S3 are known to work.

View File

@@ -4711,17 +4711,17 @@ Input type: `VulnerabilityCreateInput`
| <a id="mutationvulnerabilitycreateclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationvulnerabilitycreateconfidence"></a>`confidence` | [`VulnerabilityConfidence`](#vulnerabilityconfidence) | Confidence of the vulnerability (defaults to `unknown`). |
| <a id="mutationvulnerabilitycreateconfirmedat"></a>`confirmedAt` | [`Time`](#time) | Timestamp of when the vulnerability state changed to confirmed (defaults to creation time if status is `confirmed`). |
| <a id="mutationvulnerabilitycreatedescription"></a>`description` | [`String!`](#string) | Description of the vulnerability. |
| <a id="mutationvulnerabilitycreatedescription"></a>`description` | [`String!`](#string) | Long text section that describes the vulnerability in more detail. |
| <a id="mutationvulnerabilitycreatedetectedat"></a>`detectedAt` | [`Time`](#time) | Timestamp of when the vulnerability was first detected (defaults to creation time). |
| <a id="mutationvulnerabilitycreatedismissedat"></a>`dismissedAt` | [`Time`](#time) | Timestamp of when the vulnerability state changed to dismissed (defaults to creation time if status is `dismissed`). |
| <a id="mutationvulnerabilitycreateidentifiers"></a>`identifiers` | [`[VulnerabilityIdentifierInput!]!`](#vulnerabilityidentifierinput) | Array of CVE or CWE identifiers for the vulnerability. |
| <a id="mutationvulnerabilitycreatemessage"></a>`message` | [`String`](#string) | Additional information about the vulnerability. |
| <a id="mutationvulnerabilitycreatemessage"></a>`message` | [`String`](#string) | Short text section that describes the vulnerability. This may include the finding's specific information. |
| <a id="mutationvulnerabilitycreatename"></a>`name` | [`String!`](#string) | Name of the vulnerability. |
| <a id="mutationvulnerabilitycreateproject"></a>`project` | [`ProjectID!`](#projectid) | ID of the project to attach the vulnerability to. |
| <a id="mutationvulnerabilitycreateresolvedat"></a>`resolvedAt` | [`Time`](#time) | Timestamp of when the vulnerability state changed to resolved (defaults to creation time if status is `resolved`). |
| <a id="mutationvulnerabilitycreatescanner"></a>`scanner` | [`VulnerabilityScannerInput!`](#vulnerabilityscannerinput) | Information about the scanner used to discover the vulnerability. |
| <a id="mutationvulnerabilitycreateseverity"></a>`severity` | [`VulnerabilitySeverity`](#vulnerabilityseverity) | Severity of the vulnerability (defaults to `unknown`). |
| <a id="mutationvulnerabilitycreatesolution"></a>`solution` | [`String`](#string) | How to fix this vulnerability. |
| <a id="mutationvulnerabilitycreatesolution"></a>`solution` | [`String`](#string) | Instructions for how to fix the vulnerability. |
| <a id="mutationvulnerabilitycreatestate"></a>`state` | [`VulnerabilityState`](#vulnerabilitystate) | State of the vulnerability (defaults to `detected`). |
#### Fields
@@ -13935,7 +13935,7 @@ Counts of requirements by their state.
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="runnersetupinstallinstructions"></a>`installInstructions` | [`String!`](#string) | Instructions for installing the runner on the specified architecture. |
| <a id="runnersetupregisterinstructions"></a>`registerInstructions` | [`String`](#string) | Instructions for registering the runner. |
| <a id="runnersetupregisterinstructions"></a>`registerInstructions` | [`String`](#string) | Instructions for registering the runner. The actual registration tokens are not included in the commands. Instead, a placeholder `$REGISTRATION_TOKEN` is shown. |
### `SastCiConfiguration`

View File

@@ -104,7 +104,7 @@ GitLab also provides [Docker images](https://gitlab.com/gitlab-org/cloud-deploy/
- Use `registry.gitlab.com/gitlab-org/cloud-deploy/aws-ecs:latest` to deploy your application to AWS ECS.
Before getting started with this process, you need a cluster on AWS ECS, as well as related
components, like an ECS service, ECS task definition, a database on AWS RDS, and so on.
components, like an ECS service, ECS task definition, a database on Amazon RDS, and so on.
[Read more about AWS ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html).
The ECS task definition can be:

View File

@@ -37,9 +37,9 @@ And then configure your application to use the database, for example:
```yaml
Host: postgres
User: $PG_USER
Password: $PG_PASSWORD
Database: $PG_DB
User: $POSTGRES_USER
Password: $POSTGRES_PASSWORD
Database: $POSTGRES_DB
```
If you're wondering why we used `postgres` for the `Host`, read more at

View File

@@ -0,0 +1,182 @@
---
stage: Enablement
group: Database
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Loose foreign keys
## Problem statement
In relational databases (including PostgreSQL), foreign keys provide a way to link
two database tables together, and ensure data consistency between them. In GitLab,
[foreign keys](../foreign_keys.md) are a vital part of the database design process.
Most of our database tables have foreign keys.
With the ongoing database [decomposition work](https://gitlab.com/groups/gitlab-org/-/epics/6168),
linked records might be present on two different database servers. Ensuring data consistency
between two databases is not possible with standard PostgreSQL foreign keys: a foreign key
operates within a single database server, and cannot define a link between two database
tables that live on two different database servers over the network.
Example:
- Database "Main": `projects` table
- Database "CI": `ci_pipelines` table
A project can have many pipelines. When a project is deleted, the associated `ci_pipelines`
records (linked by the `project_id` column) must also be deleted.
With a multi-database setup, this cannot be achieved with foreign keys.
## Asynchronous approach
Our preferred approach to this problem is eventual consistency. With the loose foreign keys
feature, we can configure delayed association cleanup without negatively affecting the
application performance.
### How it works
In the previous example, a record in the `projects` table can have multiple `ci_pipelines`
records. To keep the cleanup process separate from the actual parent record deletion,
we can:
1. Create a `DELETE` trigger on the `projects` table that records each
deletion in a separate table (`deleted_records`).
1. A job checks the `deleted_records` table every 5 minutes.
1. For each record in the table, delete the associated `ci_pipelines` records
using the `project_id` column.
NOTE:
For this procedure to work, we must register which tables to clean up asynchronously.
## Example migration and configuration
### Configure the model
First, tell the application that the `projects` table has a new loose foreign key.
You can do this in the `Project` model:
```ruby
class Project < ApplicationRecord
# ...
include LooseForeignKey
loose_foreign_key :ci_pipelines, :project_id, on_delete: :async_delete # or async_nullify
# ...
end
```
This instruction ensures that the asynchronous cleanup process knows about the association and
how to do the cleanup. In this case, the associated `ci_pipelines` records are deleted.
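If the associated records should be kept but detached rather than deleted, declare a nullify
strategy instead. A minimal sketch, assuming a hypothetical `ci_pipeline_schedules` association
cleaned up the same way:

```ruby
# Hypothetical example: when a project is deleted, asynchronously set
# project_id to NULL on the associated rows instead of deleting them.
loose_foreign_key :ci_pipeline_schedules, :project_id, on_delete: :async_nullify
```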
### Track record changes
To know about deletions in the `projects` table, configure a `DELETE` trigger using a database
migration (post-migration). The trigger needs to be configured only once. If the model already has
at least one `loose_foreign_key` definition, then this step can be skipped:
```ruby
class TrackProjectRecordChanges < Gitlab::Database::Migration[1.0]
include Gitlab::Database::MigrationHelpers::LooseForeignKeyHelpers
enable_lock_retries!
def up
track_record_deletions(:projects)
end
def down
untrack_record_deletions(:projects)
end
end
```
### Remove the foreign key
If there is an existing foreign key, then it can be removed from the database. As of GitLab 14.5,
the following foreign key describes the link between the `projects` and `ci_pipelines` tables:
```sql
ALTER TABLE ONLY ci_pipelines
ADD CONSTRAINT fk_86635dbd80
FOREIGN KEY (project_id)
REFERENCES projects(id)
ON DELETE CASCADE;
```
The migration should run after the `DELETE` trigger is installed. If the foreign key is removed
earlier, there is a good chance of introducing data inconsistencies that need manual cleanup:
```ruby
class RemoveProjectsCiPipelineFk < Gitlab::Database::Migration[1.0]
enable_lock_retries!
def up
remove_foreign_key_if_exists(:ci_pipelines, :projects, name: "fk_86635dbd80")
end
def down
add_concurrent_foreign_key(:ci_pipelines, :projects, name: "fk_86635dbd80", column: :project_id, target_column: :id, on_delete: "cascade")
end
end
```
At this point, the setup phase is concluded. The deleted `projects` records should be automatically
picked up by the scheduled cleanup worker job.
## Caveats of loose foreign keys
### Record creation
The feature provides an efficient way of cleaning up associated records after the parent record is
deleted. Without foreign keys, it's the application's responsibility to validate if the parent record
exists when a new associated record is created.
A bad example: record creation with the given ID (`project_id` comes from user input).
In this example, nothing prevents us from passing a random project ID:
```ruby
Ci::Pipeline.create!(project_id: params[:project_id])
```
A good example: record creation with extra check:
```ruby
project = Project.find(params[:project_id])
Ci::Pipeline.create!(project_id: project.id)
```
### Association lookup
Consider the following HTTP request:
```plaintext
GET /projects/5/pipelines/100
```
The controller action ignores the `project_id` parameter and finds the pipeline using the ID:
```ruby
def show
# bad, avoid it
pipeline = Ci::Pipeline.find(params[:id]) # 100
end
```
This endpoint still works when the parent `Project` model is deleted. This can be considered
a data leak, which should not happen under normal circumstances:
```ruby
def show
# good
project = Project.find(params[:project_id])
pipeline = project.pipelines.find(params[:pipeline_id]) # 100
end
```
NOTE:
This example is unlikely in GitLab, because we usually look up the parent models to perform
permission checks.

View File

@@ -524,8 +524,8 @@ Use lowercase for **runners**. These are the agents that run CI/CD jobs. See als
Do not use **(s)** to make a word optionally plural. It can slow down comprehension. For example:
Do: Select the jobs you want.
Do not: Select the job(s) you want.
- Do: Select the jobs you want.
- Do not: Select the job(s) you want.
If you can select multiples of something, then write the word as plural.

View File

@@ -219,7 +219,7 @@ If EKS node autoscaling is employed, it is likely that your average loading will
| **<u>Gitaly Cluster</u>** [Details](gitlab_sre_for_aws.md#gitaly-sre-considerations) | | | | |
| Gitaly Instances (in ASG) | 12 vCPU, 45GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **m5.large** x 3 nodes<br />(12 vCPU, 48 GB) | $0.192 x 3 = $0.58/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect (Instances in ASG with load balancer) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **c5.large** x 3 nodes<br />(6 vCPU, 12 GB) | $0.09 x 3 = $0.21/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect PostgreSQL(1) (AWS RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Praefect PostgreSQL(1) (Amazon RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Internal Load Balancing Node | 2 vCPU, 1.8 GB | AWS ELB | $0.10/hr | $0.10/hr |
### 5K Cloud Native Hybrid on EKS
@@ -273,7 +273,7 @@ If EKS node autoscaling is employed, it is likely that your average loading will
| **<u>Gitaly Cluster</u>** [Details](gitlab_sre_for_aws.md#gitaly-sre-considerations) | | | | |
| Gitaly Instances (in ASG) | 24 vCPU, 90GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **m5.2xlarge** x 3 nodes<br />(24 vCPU, 96GB) | $0.384 x 3 = $1.15/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect (Instances in ASG with load balancer) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **c5.large** x 3 nodes<br />(6 vCPU, 12 GB) | $0.09 x 3 = $0.21/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect PostgreSQL(1) (AWS RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Praefect PostgreSQL(1) (Amazon RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Internal Load Balancing Node | 2 vCPU, 1.8 GB | AWS ELB | $0.10/hr | $0.10/hr |
### 10K Cloud Native Hybrid on EKS
@@ -326,7 +326,7 @@ If EKS node autoscaling is employed, it is likely that your average loading will
| **<u>Gitaly Cluster</u>** [Details](gitlab_sre_for_aws.md#gitaly-sre-considerations) | | | | |
| Gitaly Instances (in ASG) | 48 vCPU, 180GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **m5.4xlarge** x 3 nodes<br />(48 vCPU, 180 GB) | $0.77 x 3 = $2.31/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect (Instances in ASG with load balancer) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | **c5.large** x 3 nodes<br />(6 vCPU, 12 GB) | $0.09 x 3 = $0.21/hr | [Gitaly & Praefect Must Have an Uneven Node Count for HA](gitlab_sre_for_aws.md#gitaly-and-praefect-elections) |
| Praefect PostgreSQL(1) (AWS RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Praefect PostgreSQL(1) (Amazon RDS) | 6 vCPU, 5.4 GB<br />([across 3 nodes](gitlab_sre_for_aws.md#gitaly-and-praefect-elections)) | N/A Reuses GitLab PostgreSQL | $0 | |
| Internal Load Balancing Node | 2 vCPU, 1.8 GB | AWS ELB | $0.10/hr | $0.10/hr |
### 50K Cloud Native Hybrid on EKS

View File

@@ -32,7 +32,7 @@ For the Cloud Native Hybrid architectures there are two Infrastructure as Code o
## Introduction
For the most part, we'll make use of Omnibus GitLab in our setup, but we'll also leverage native AWS services. Instead of using the Omnibus bundled PostgreSQL and Redis, we will use AWS RDS and ElastiCache.
For the most part, we'll make use of Omnibus GitLab in our setup, but we'll also leverage native AWS services. Instead of using the Omnibus bundled PostgreSQL and Redis, we will use Amazon RDS and ElastiCache.
In this guide, we'll go through a multi-node setup where we'll start by
configuring our Virtual Private Cloud and subnets to later integrate

View File

@@ -7,7 +7,7 @@ type: reference, concepts
# Merge request approval rules **(PREMIUM SELF)**
> Introduced in [GitLab Premium](https://gitlab.com/gitlab-org/gitlab/-/issues/39060) 12.8.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/39060) in GitLab 12.8.
Merge request approval rules prevent users from overriding certain settings on the project
level. When enabled at the instance level, these settings are no longer editable on the

View File

@@ -148,7 +148,7 @@ add the line below to `/etc/gitlab/gitlab.rb` before increasing the max attachme
nginx['client_max_body_size'] = "200m"
```
## Customize session duration for Git Operations when 2FA is enabled **(PREMIUM)**
## Customize session duration for Git Operations when 2FA is enabled **(PREMIUM SELF)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/296669) in GitLab 13.9.
> - It's deployed behind a feature flag, disabled by default.
@@ -173,7 +173,7 @@ To set a limit on how long these sessions are valid:
## Limit the lifetime of personal access tokens **(ULTIMATE SELF)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/3649) in GitLab Ultimate 12.6.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/3649) in GitLab 12.6.
Users can optionally specify a lifetime for
[personal access tokens](../../profile/personal_access_tokens.md).
@@ -222,7 +222,7 @@ Disabling SSH key expiration immediately enables all expired SSH keys.
## Allow expired Personal Access Tokens to be used **(ULTIMATE SELF)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/214723) in GitLab Ultimate 13.1.
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/214723) in GitLab 13.1.
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/issues/296881) in GitLab 13.9.
By default, expired personal access tokens (PATs) **are not usable**.

View File

@@ -41,7 +41,7 @@ To access the default page for Admin Area settings:
| [Kroki](../../../administration/integration/kroki.md#enable-kroki-in-gitlab) | Allow rendering of diagrams in AsciiDoc and Markdown documents using [kroki.io](https://kroki.io). |
| [Mailgun](../../../administration/integration/mailgun.md) | Enable your GitLab instance to receive invite email bounce events from Mailgun, if it is your email provider. |
| [PlantUML](../../../administration/integration/plantuml.md) | Allow rendering of PlantUML diagrams in documents. |
| [Slack application](../../../user/project/integrations/gitlab_slack_application.md#configuration) **(FREE SAAS)** | Slack integration allows you to interact with GitLab via slash commands in a chat window. This option is only available on GitLab.com, though it may be [available for self-managed instances in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/28164). |
| [Slack application](../../../user/project/integrations/gitlab_slack_application.md#configuration) | Slack integration allows you to interact with GitLab via slash commands in a chat window. This option is only available on GitLab.com, though it may be [available for self-managed instances in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/28164). |
| [Third party offers](third_party_offers.md) | Control the display of third party offers. |
| [Snowplow](../../../development/snowplow/index.md) | Configure the Snowplow integration. |
| [Google GKE](../../project/clusters/add_gke_clusters.md) | Google GKE integration allows you to provision GKE clusters from GitLab. |
@@ -69,7 +69,7 @@ To access the default page for Admin Area settings:
| Option | Description |
| ------ | ----------- |
| [Continuous Integration and Deployment](continuous_integration.md) | Auto DevOps, runners and job artifacts. |
| [Required pipeline configuration](continuous_integration.md#required-pipeline-configuration) **(PREMIUM SELF)** | Set an instance-wide auto included [pipeline configuration](../../../ci/yaml/index.md). This pipeline configuration is run after the project's own configuration. |
| [Required pipeline configuration](continuous_integration.md#required-pipeline-configuration) | Set an instance-wide auto included [pipeline configuration](../../../ci/yaml/index.md). This pipeline configuration is run after the project's own configuration. |
| [Package Registry](continuous_integration.md#package-registry-configuration) | Settings related to the use and experience of using the GitLab Package Registry. Note there are [risks involved](../../packages/container_registry/index.md#use-with-external-container-registries) in enabling some of these settings. |
## Reporting
@@ -88,7 +88,7 @@ To access the default page for Admin Area settings:
| [Profiling - Performance bar](../../../administration/monitoring/performance/performance_bar.md#enable-the-performance-bar-for-non-administrators) | Enable access to the Performance Bar for non-administrator users in a given group. |
| [Self monitoring](../../../administration/monitoring/gitlab_self_monitoring_project/index.md#create-the-self-monitoring-project) | Enable or disable instance self monitoring. |
| [Usage statistics](usage_statistics.md) | Enable or disable version check and Service Ping. |
| [Pseudonymizer data collection](../../../administration/pseudonymizer.md) **(ULTIMATE)** | Enable or disable the Pseudonymizer data collection. |
| [Pseudonymizer data collection](../../../administration/pseudonymizer.md) | Enable or disable the Pseudonymizer data collection. |
## Network
@@ -105,11 +105,11 @@ To access the default page for Admin Area settings:
| [Incident Management](../../../operations/incident_management/index.md) Limits | Limit the number of inbound alerts that can be sent to a project. |
| [Notes creation limit](rate_limit_on_notes_creation.md)| Set a rate limit on the note creation requests. |
## Geo
## Geo **(PREMIUM SELF)**
| Option | Description |
| ------ | ----------- |
| Geo | Geo allows you to replicate your GitLab instance to other geographical locations. Redirects to **Admin Area > Geo > Settings** are no longer available at **Admin Area > Settings > Geo** in [GitLab 13.0](https://gitlab.com/gitlab-org/gitlab/-/issues/36896). |
| [Geo](../../../administration/geo/index.md) | Geo allows you to replicate your GitLab instance to other geographical locations. Redirects to **Admin Area > Geo > Settings** are no longer available at **Admin Area > Settings > Geo** in [GitLab 13.0](https://gitlab.com/gitlab-org/gitlab/-/issues/36896). |
## Preferences

View File

@@ -7,7 +7,7 @@ type: reference
# Instance template repository **(PREMIUM SELF)**
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/5986) in GitLab Premium 11.3.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/5986) in GitLab 11.3.
In hosted systems, enterprises often have a need to share their own templates
across teams. This feature allows an administrator to pick a project to be the

View File

@@ -125,7 +125,7 @@ If an issue or merge request is locked and closed, you cannot reopen it.
> - [Deployed behind a feature flag](../feature_flags.md), disabled by default.
> - Disabled on GitLab.com.
> - Not recommended for production use.
> - To use in GitLab self-managed instances, ask a GitLab administrator to enable it. **(FREE SELF)**
> - To use in GitLab self-managed instances, ask a GitLab administrator to enable it.
WARNING:
This feature might not be available to you. Check the **version history** note above for details.
@@ -178,7 +178,7 @@ When you reply to a standard comment, you create a thread.
Prerequisites:
- You must have at least the [Guest role](../permissions.md#project-members-permissions).
- You must be in an issue, merge request, or epic. Commits and snippets threads are not supported.
- You must be in an issue, merge request, or epic. Threads in commits and snippets are not supported.
To create a thread by replying to a comment:

View File

Binary image files changed: one image updated in place (8.1 KiB before and after), one removed (4.4 KiB), and one added (6.6 KiB).

View File

@@ -35,7 +35,7 @@ You can also filter the results using the search and filter field, as described
GitLab shows shortcuts to issues and merge requests created by you or assigned to you
in the search field in the upper right corner:
![shortcut to your issues and merge requests](img/issues_mrs_shortcut.png)
![shortcut to your issues and merge requests](img/issues_mrs_shortcut_v14_4.png)
### Filter issue and merge request lists
@@ -290,7 +290,7 @@ To start a search, type into the search bar on the top-right of the screen. You
in all GitLab and may also see the options to search in a group or project if you are in the
group or project dashboard.
![basic search](img/basic_search.png)
![basic search](img/basic_search_v14_4.png)
After the results are returned, you can modify the search, select a different type of data to
search, or choose a specific group or project.

View File

@@ -1062,6 +1062,7 @@ module API
requires :credit_card_expiration_year, type: Integer, desc: 'The year the credit card expires'
requires :credit_card_holder_name, type: String, desc: 'The credit card holder name'
requires :credit_card_mask_number, type: String, desc: 'The last 4 digits of credit card number'
requires :credit_card_type, type: String, desc: 'The credit card network name'
end
put ":user_id/credit_card_validation", feature_category: :users do
authenticated_as_admin!

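For context, a request body this endpoint would now accept might look like the following; the values are illustrative, and the new credit_card_type field is persisted as the network attribute (see the model and service specs later in this commit):

# Illustrative payload only; keys match the Grape params declared above.
payload = {
  credit_card_validated_at: Time.current,
  credit_card_expiration_month: 1,
  credit_card_expiration_year: 2025,
  credit_card_holder_name: 'John Smith',
  credit_card_type: 'AmericanExpress', # new field: the card network name
  credit_card_mask_number: '1111'
}
# Sent via PUT /users/:user_id/credit_card_validation (admin-only).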

@ -25,6 +25,10 @@ module Gitlab
unflatten_key_path(intrumentation_object.instrumentation)
end
def with_suggested_name
unflatten_key_path(intrumentation_object.suggested_name)
end
private
def unflatten_key_path(value)


@ -18,6 +18,10 @@ module Gitlab
private
def instrumentation_metrics
::Gitlab::UsageDataMetrics.suggested_names
end
def count(relation, column = nil, batch: true, batch_size: nil, start: nil, finish: nil)
Gitlab::Usage::Metrics::NameSuggestion.for(:count, column: column, relation: relation)
end


@ -45,23 +45,10 @@ module Gitlab
clear_memoized
with_finished_at(:recording_ce_finished_at) do
license_usage_data
.merge(system_usage_data_license)
.merge(system_usage_data_settings)
.merge(system_usage_data)
.merge(system_usage_data_monthly)
.merge(system_usage_data_weekly)
.merge(features_usage_data)
.merge(components_usage_data)
.merge(object_store_usage_data)
.merge(topology_usage_data)
.merge(usage_activity_by_stage)
.merge(usage_activity_by_stage(:usage_activity_by_stage_monthly, monthly_time_range_db_params))
.merge(analytics_unique_visits_data)
.merge(compliance_unique_visits_data)
.merge(search_unique_visits_data)
.merge(redis_hll_counters)
.deep_merge(aggregated_metrics_data)
usage_data = usage_data_metrics
usage_data = usage_data.with_indifferent_access.deep_merge(instrumentation_metrics.with_indifferent_access) if Feature.enabled?(:usage_data_instrumentation)
usage_data
end
end
@ -729,6 +716,30 @@ module Gitlab
private
def usage_data_metrics
license_usage_data
.merge(system_usage_data_license)
.merge(system_usage_data_settings)
.merge(system_usage_data)
.merge(system_usage_data_monthly)
.merge(system_usage_data_weekly)
.merge(features_usage_data)
.merge(components_usage_data)
.merge(object_store_usage_data)
.merge(topology_usage_data)
.merge(usage_activity_by_stage)
.merge(usage_activity_by_stage(:usage_activity_by_stage_monthly, monthly_time_range_db_params))
.merge(analytics_unique_visits_data)
.merge(compliance_unique_visits_data)
.merge(search_unique_visits_data)
.merge(redis_hll_counters)
.deep_merge(aggregated_metrics_data)
end
def instrumentation_metrics
Gitlab::UsageDataMetrics.uncached_data # rubocop:disable UsageData/LargeTable
end
def metric_time_period(time_period)
time_period.present? ? '28d' : 'none'
end

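The refactor above inverts the previous structure: the legacy merge chain moves into the private usage_data_metrics method, and when the usage_data_instrumentation feature flag is enabled the result is deep-merged with values computed by instrumentation classes. A condensed sketch of the resulting flow, not the full method:

# Condensed sketch; it mirrors the diff above.
def data
  usage_data = usage_data_metrics # legacy merge chain
  if Feature.enabled?(:usage_data_instrumentation)
    usage_data = usage_data.with_indifferent_access
                           .deep_merge(instrumentation_metrics.with_indifferent_access)
  end
  usage_data
end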

@ -5,7 +5,17 @@ module Gitlab
class << self
# Build the Usage Ping JSON payload from metrics YAML definitions which have instrumentation class set
def uncached_data
::Gitlab::Usage::Metric.all.map(&:with_value).reduce({}, :deep_merge)
build_payload(:with_value)
end
def suggested_names
build_payload(:with_suggested_name)
end
private
def build_payload(method_symbol)
::Gitlab::Usage::Metric.all.map(&method_symbol).reduce({}, :deep_merge)
end
end
end

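The build_payload helper relies on each metric emitting a nested hash for its key path; reduce with deep_merge then assembles them into one payload. For example, with two hypothetical metrics:

# Hypothetical metric hashes; deep_merge comes from ActiveSupport.
[{ counts: { issues: 10 } }, { counts: { boards: 2 } }].reduce({}, :deep_merge)
# => { counts: { issues: 10, boards: 2 } }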

@ -6,7 +6,10 @@ module Gitlab
class << self
def uncached_data
super.with_indifferent_access.deep_merge(instrumentation_metrics_queries.with_indifferent_access)
# instrumentation_metrics is already included with feature flag enabled
return super if Feature.enabled?(:usage_data_instrumentation)
super.with_indifferent_access.deep_merge(instrumentation_metrics.with_indifferent_access)
end
def add_metric(metric, time_frame: 'none')
@ -50,7 +53,7 @@ module Gitlab
private
def instrumentation_metrics_queries
def instrumentation_metrics
::Gitlab::Usage::Metric.all.map(&:with_instrumentation).reduce({}, :deep_merge)
end
end


@ -6,7 +6,10 @@ module Gitlab
class UsageDataQueries < UsageData
class << self
def uncached_data
super.with_indifferent_access.deep_merge(instrumentation_metrics_queries.with_indifferent_access)
# instrumentation_metrics is already included with feature flag enabled
return super if Feature.enabled?(:usage_data_instrumentation)
super.with_indifferent_access.deep_merge(instrumentation_metrics.with_indifferent_access)
end
def add_metric(metric, time_frame: 'none')
@ -71,7 +74,7 @@ module Gitlab
private
def instrumentation_metrics_queries
def instrumentation_metrics
::Gitlab::Usage::Metric.all.map(&:with_instrumentation).reduce({}, :deep_merge)
end
end


@ -43,8 +43,13 @@ module Gitlab
HISTOGRAM_FALLBACK = { '-1' => -1 }.freeze
DISTRIBUTED_HLL_FALLBACK = -2
MAX_BUCKET_SIZE = 100
INSTRUMENTATION_CLASS_FALLBACK = -100
def add_metric(metric, time_frame: 'none')
# Results of this method should be overwritten by instrumentation class values
# -100 indicates the metric was not properly merged.
return INSTRUMENTATION_CLASS_FALLBACK if Feature.enabled?(:usage_data_instrumentation)
metric_class = "Gitlab::Usage::Metrics::Instrumentations::#{metric}".constantize
metric_class.new(time_frame: time_frame).value

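The new constant acts as a sentinel: while the flag is enabled, legacy call sites report -100, and the instrumentation-class value is expected to overwrite it during the deep merge shown earlier. A rough illustration with a hypothetical metric:

# A leftover -100 in the final payload signals a metric that was
# not properly merged from its instrumentation class.
legacy       = { counts: { issues: -100 } } # INSTRUMENTATION_CLASS_FALLBACK
instrumented = { counts: { issues: 10 } }
legacy.deep_merge(instrumented)
# => { counts: { issues: 10 } }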

@ -8644,9 +8644,6 @@ msgstr ""
msgid "Configure advanced permissions, Large File Storage, and two-factor authentication settings."
msgstr ""
msgid "Configure approvals by authors and committers on all projects."
msgstr ""
msgid "Configure existing installation"
msgstr ""
@ -10909,12 +10906,6 @@ msgstr ""
msgid "Define a custom pattern with cron syntax"
msgstr ""
msgid "Define approval rules."
msgstr ""
msgid "Define approval rules. %{linkStart}Learn more.%{linkEnd}"
msgstr ""
msgid "Define custom rules for what constitutes spam, independent of Akismet"
msgstr ""
@ -21561,6 +21552,15 @@ msgstr ""
msgid "MergeRequestAnalytics|Time to merge"
msgstr ""
msgid "MergeRequestApprovals|Define approval rules and settings to ensure %{link_start}separation of duties%{link_end} for new merge requests."
msgstr ""
msgid "MergeRequestApprovals|Enforce %{link_start}separation of duties%{link_end} for all projects."
msgstr ""
msgid "MergeRequestApprovals|Enforce %{separationLinkStart}separation of duties%{separationLinkEnd} for all projects. %{learnLinkStart}Learn more.%{learnLinkEnd}"
msgstr ""
msgid "MergeRequestDiffs|Commenting on lines %{selectStart}start%{selectEnd} to %{end}"
msgstr ""
@ -26071,9 +26071,6 @@ msgstr ""
msgid "Profiles| You are going to change the username %{currentUsernameBold} to %{newUsernameBold}. Profile and projects will be redirected to the %{newUsername} namespace but this redirect will expire once the %{currentUsername} namespace is registered by another user or group. Please update your Git repository remotes as soon as possible."
msgstr ""
msgid "Profiles|\"Busy\" will be shown next to your name"
msgstr ""
msgid "Profiles|%{provider} Active"
msgstr ""
@ -26101,6 +26098,9 @@ msgstr ""
msgid "Profiles|An error occurred while updating your username, please try again."
msgstr ""
msgid "Profiles|An indicator appears next to your name and avatar"
msgstr ""
msgid "Profiles|Avatar cropper"
msgstr ""
@ -31294,10 +31294,10 @@ msgstr ""
msgid "SetPasswordToCloneLink|set a password"
msgstr ""
msgid "SetStatusModal|A busy indicator is shown next to your name and avatar."
msgid "SetStatusModal|Add status emoji"
msgstr ""
msgid "SetStatusModal|Add status emoji"
msgid "SetStatusModal|An indicator appears next to your name and avatar"
msgstr ""
msgid "SetStatusModal|Busy"
@ -39888,12 +39888,18 @@ msgstr ""
msgid "can only have one escalation policy"
msgstr ""
msgid "can't be nil"
msgstr ""
msgid "can't be the same as the source project"
msgstr ""
msgid "can't include: %{invalid_storages}"
msgstr ""
msgid "can't reference a branch that does not exist"
msgstr ""
msgid "cannot be a date in the past"
msgstr ""
@ -40269,6 +40275,12 @@ msgstr ""
msgid "container_name cannot be larger than %{max_length} chars"
msgstr ""
msgid "contains URLs that exceed the 1024 character limit (%{urls})"
msgstr ""
msgid "contains invalid URLs (%{urls})"
msgstr ""
msgid "contribute to this project."
msgstr ""
@ -40337,6 +40349,12 @@ msgstr ""
msgid "does not have a supported extension. Only %{extension_list} are supported"
msgstr ""
msgid "does not match dast_site.project"
msgstr ""
msgid "does not match dast_site_validation.project"
msgstr ""
msgid "download it"
msgstr ""
@ -40390,6 +40408,9 @@ msgstr ""
msgid "example.com"
msgstr ""
msgid "exceeds the %{max_value_length} character limit"
msgstr ""
msgid "exceeds the limit of %{bytes} bytes"
msgstr ""
@ -41068,6 +41089,12 @@ msgstr ""
msgid "must be unique by status and elapsed time within a policy"
msgstr ""
msgid "must have a repository"
msgstr ""
msgid "must match %{association}.project_id"
msgstr ""
msgid "my-awesome-group"
msgstr ""


@ -52,7 +52,7 @@ describe('RunnerInstructionsModal component', () => {
const findBinaryInstructions = () => wrapper.findByTestId('binary-instructions');
const findRegisterCommand = () => wrapper.findByTestId('register-command');
const createComponent = (options = {}) => {
const createComponent = ({ props, ...options } = {}) => {
const requestHandlers = [
[getRunnerPlatformsQuery, runnerPlatformsHandler],
[getRunnerSetupInstructionsQuery, runnerSetupInstructionsHandler],
@ -64,6 +64,8 @@ describe('RunnerInstructionsModal component', () => {
shallowMount(RunnerInstructionsModal, {
propsData: {
modalId: 'runner-instructions-modal',
registrationToken: 'MY_TOKEN',
...props,
},
localVue,
apolloProvider: fakeApollo,
@ -119,18 +121,30 @@ describe('RunnerInstructionsModal component', () => {
expect(instructions).toBe(installInstructions);
});
it('register command is shown', () => {
it('register command is shown with a replaced token', () => {
const instructions = findRegisterCommand().text();
expect(instructions).toBe(registerInstructions);
expect(instructions).toBe(
'sudo gitlab-runner register --url http://gdk.test:3000/ --registration-token MY_TOKEN',
);
});
describe('when a register token is not shown', () => {
beforeEach(async () => {
createComponent({ props: { registrationToken: undefined } });
await nextTick();
});
it('register command is shown without a defined registration token', () => {
const instructions = findRegisterCommand().text();
expect(instructions).toBe(registerInstructions);
});
});
});
describe('after a platform and architecture are selected', () => {
const {
installInstructions,
registerInstructions,
} = mockGraphqlInstructionsWindows.data.runnerSetup;
const { installInstructions } = mockGraphqlInstructionsWindows.data.runnerSetup;
beforeEach(async () => {
runnerSetupInstructionsHandler.mockResolvedValue(mockGraphqlInstructionsWindows);
@ -158,7 +172,9 @@ describe('RunnerInstructionsModal component', () => {
it('register command is shown', () => {
const command = findRegisterCommand().text();
expect(command).toBe(registerInstructions);
expect(command).toBe(
'./gitlab-runner.exe register --url http://gdk.test:3000/ --registration-token MY_TOKEN',
);
});
});


@ -45,4 +45,10 @@ RSpec.describe Gitlab::Usage::Metric do
expect(described_class.new(issue_count_metric_definiton).with_instrumentation).to eq({ counts: { issues: "SELECT COUNT(\"issues\".\"id\") FROM \"issues\"" } })
end
end
describe '#with_suggested_name' do
it 'returns key_path metric with the corresponding generated query' do
expect(described_class.new(issue_count_metric_definiton).with_suggested_name).to eq({ counts: { issues: 'count_issues' } })
end
end
end


@ -25,10 +25,30 @@ RSpec.describe Gitlab::Usage::Metrics::NamesSuggestions::Generator do
end
context 'for count with default column metrics' do
it_behaves_like 'name suggestion' do
# corresponding metric is collected with count(Board)
let(:key_path) { 'counts.boards' }
let(:name_suggestion) { /count_boards/ }
context 'with usage_data_instrumentation feature flag' do
context 'when enabled' do
before do
stub_feature_flags(usage_data_instrumentation: true)
end
it_behaves_like 'name suggestion' do
# corresponding metric is collected with ::Gitlab::UsageDataMetrics.suggested_names
let(:key_path) { 'counts.boards' }
let(:name_suggestion) { /count_boards/ }
end
end
context 'when disabled' do
before do
stub_feature_flags(usage_data_instrumentation: false)
end
it_behaves_like 'name suggestion' do
# corresponding metric is collected with count(Board)
let(:key_path) { 'counts.boards' }
let(:name_suggestion) { /count_boards/ }
end
end
end
end

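The convention these specs exercise: a plain count over a relation suggests a metric name of the form count_<table>. Using the helper that appears earlier in this commit (the return shape here is assumed for illustration):

# NameSuggestion.for is shown earlier in this diff; output assumed.
Gitlab::Usage::Metrics::NameSuggestion.for(:count, relation: Board)
# => "count_boards"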

@ -76,4 +76,16 @@ RSpec.describe Gitlab::UsageDataMetrics do
end
end
end
describe '.suggested_names' do
subject { described_class.suggested_names }
let(:suggested_names) do
::Gitlab::Usage::Metric.all.map(&:with_suggested_name).reduce({}, :deep_merge)
end
it 'includes Service Ping suggested names' do
expect(subject).to match_array(suggested_names)
end
end
end


@ -80,6 +80,12 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
end
end
end
it 'allows indifferent access' do
allow(::Gitlab::UsageDataCounters::HLLRedisCounter).to receive(:unique_events).and_return(1)
expect(subject[:search_unique_visits][:search_unique_visits_for_any_target_monthly]).to eq(1)
expect(subject[:search_unique_visits]['search_unique_visits_for_any_target_monthly']).to eq(1)
end
end
describe 'usage_activity_by_stage_package' do
@ -428,7 +434,6 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
end
expect(described_class.usage_activity_by_stage_plan({})).to include(
issues: 3,
notes: 2,
projects: 2,
todos: 2,
@ -439,7 +444,6 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
projects_jira_dvcs_server_active: 2
)
expect(described_class.usage_activity_by_stage_plan(described_class.monthly_time_range_db_params)).to include(
issues: 2,
notes: 1,
projects: 1,
todos: 1,
@ -450,6 +454,44 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
projects_jira_dvcs_server_active: 1
)
end
context 'with usage_data_instrumentation feature flag' do
context 'when enabled' do
it 'merges the data from instrumentation classes' do
stub_feature_flags(usage_data_instrumentation: true)
for_defined_days_back do
user = create(:user)
project = create(:project, creator: user)
create(:issue, project: project, author: user)
create(:issue, project: project, author: User.support_bot)
end
expect(described_class.usage_activity_by_stage_plan({})).to include(issues: Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
expect(described_class.usage_activity_by_stage_plan(described_class.monthly_time_range_db_params)).to include(issues: Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
uncached_data = described_class.uncached_data
expect(uncached_data[:usage_activity_by_stage][:plan]).to include(issues: 3)
expect(uncached_data[:usage_activity_by_stage_monthly][:plan]).to include(issues: 2)
end
end
context 'when disabled' do
it 'does not merge the data from instrumentation classes' do
stub_feature_flags(usage_data_instrumentation: false)
for_defined_days_back do
user = create(:user)
project = create(:project, creator: user)
create(:issue, project: project, author: user)
create(:issue, project: project, author: User.support_bot)
end
expect(described_class.usage_activity_by_stage_plan({})).to include(issues: 3)
expect(described_class.usage_activity_by_stage_plan(described_class.monthly_time_range_db_params)).to include(issues: 2)
end
end
end
end
describe 'usage_activity_by_stage_release' do
@ -466,17 +508,53 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
deployments: 2,
failed_deployments: 2,
releases: 2,
successful_deployments: 2,
releases_with_milestones: 2
successful_deployments: 2
)
expect(described_class.usage_activity_by_stage_release(described_class.monthly_time_range_db_params)).to include(
deployments: 1,
failed_deployments: 1,
releases: 1,
successful_deployments: 1,
releases_with_milestones: 1
successful_deployments: 1
)
end
context 'with usage_data_instrumentation feature flag' do
before do
for_defined_days_back do
user = create(:user)
create(:deployment, :failed, user: user)
release = create(:release, author: user)
create(:milestone, project: release.project, releases: [release])
create(:deployment, :success, user: user)
end
end
context 'when enabled' do
before do
stub_feature_flags(usage_data_instrumentation: true)
end
it 'merges data from instrumentation classes' do
expect(described_class.usage_activity_by_stage_release({})).to include(releases_with_milestones: Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
expect(described_class.usage_activity_by_stage_release(described_class.monthly_time_range_db_params)).to include(releases_with_milestones: Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
uncached_data = described_class.uncached_data
expect(uncached_data[:usage_activity_by_stage][:release]).to include(releases_with_milestones: 2)
expect(uncached_data[:usage_activity_by_stage_monthly][:release]).to include(releases_with_milestones: 1)
end
end
context 'when disabled' do
before do
stub_feature_flags(usage_data_instrumentation: false)
end
it 'does not merge data from instrumentation classes' do
expect(described_class.usage_activity_by_stage_release({})).to include(releases_with_milestones: 2)
expect(described_class.usage_activity_by_stage_release(described_class.monthly_time_range_db_params)).to include(releases_with_milestones: 1)
end
end
end
end
describe 'usage_activity_by_stage_verify' do
@ -525,16 +603,16 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
subject { described_class.data }
it 'gathers usage data' do
expect(subject.keys).to include(*UsageDataHelpers::USAGE_DATA_KEYS)
expect(subject.keys).to include(*UsageDataHelpers::USAGE_DATA_KEYS.map(&:to_s))
end
it 'gathers usage counts', :aggregate_failures do
count_data = subject[:counts]
expect(count_data[:boards]).to eq(1)
expect(count_data[:projects]).to eq(4)
expect(count_data.keys).to include(*UsageDataHelpers::COUNTS_KEYS)
expect(UsageDataHelpers::COUNTS_KEYS - count_data.keys).to be_empty
count_keys = UsageDataHelpers::COUNTS_KEYS.map(&:to_s)
expect(count_data.keys).to include(*count_keys)
expect(count_keys - count_data.keys).to be_empty
expect(count_data.values).to all(be_a_kind_of(Integer))
end
@ -619,7 +697,7 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
external_diffs: { enabled: false },
lfs: { enabled: true, object_store: { enabled: false, direct_upload: true, background_upload: false, provider: "AWS" } },
uploads: { enabled: nil, object_store: { enabled: false, direct_upload: true, background_upload: false, provider: "AWS" } },
packages: { enabled: true, object_store: { enabled: false, direct_upload: false, background_upload: true, provider: "AWS" } } }
packages: { enabled: true, object_store: { enabled: false, direct_upload: false, background_upload: true, provider: "AWS" } } }.with_indifferent_access
)
end
@ -793,12 +871,37 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
subject { described_class.license_usage_data }
it 'gathers license data' do
expect(subject[:uuid]).to eq(Gitlab::CurrentSettings.uuid)
expect(subject[:version]).to eq(Gitlab::VERSION)
expect(subject[:installation_type]).to eq('gitlab-development-kit')
expect(subject[:active_user_count]).to eq(User.active.size)
expect(subject[:recorded_at]).to be_a(Time)
end
context 'with usage_data_instrumentation feature flag' do
context 'when enabled' do
it 'merges uuid and hostname data from instrumentation classes' do
stub_feature_flags(usage_data_instrumentation: true)
expect(subject[:uuid]).to eq(Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
expect(subject[:hostname]).to eq(Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
expect(subject[:active_user_count]).to eq(Gitlab::Utils::UsageData::INSTRUMENTATION_CLASS_FALLBACK)
uncached_data = described_class.data
expect(uncached_data[:uuid]).to eq(Gitlab::CurrentSettings.uuid)
expect(uncached_data[:hostname]).to eq(Gitlab.config.gitlab.host)
expect(uncached_data[:active_user_count]).to eq(User.active.size)
end
end
context 'when disabled' do
it 'does not merge uuid and hostname data from instrumentation classes' do
stub_feature_flags(usage_data_instrumentation: false)
expect(subject[:uuid]).to eq(Gitlab::CurrentSettings.uuid)
expect(subject[:hostname]).to eq(Gitlab.config.gitlab.host)
expect(subject[:active_user_count]).to eq(User.active.size)
end
end
end
end
context 'when not relying on database records' do
@ -1061,18 +1164,46 @@ RSpec.describe Gitlab::UsageData, :aggregate_failures do
expect(subject[:settings][:gitaly_apdex]).to be_within(0.001).of(0.95)
end
it 'reports collected data categories' do
expected_value = %w[standard subscription operational optional]
context 'with usage_data_instrumentation feature flag' do
context 'when enabled' do
before do
stub_feature_flags(usage_data_instrumentation: true)
end
allow_next_instance_of(ServicePing::PermitDataCategoriesService) do |instance|
expect(instance).to receive(:execute).and_return(expected_value)
it 'reports collected data categories' do
expected_value = %w[standard subscription operational optional]
allow_next_instance_of(ServicePing::PermitDataCategoriesService) do |instance|
expect(instance).to receive(:execute).and_return(expected_value)
end
expect(described_class.data[:settings][:collected_data_categories]).to eq(expected_value)
end
it 'gathers service_ping_features_enabled' do
expect(described_class.data[:settings][:service_ping_features_enabled]).to eq(Gitlab::CurrentSettings.usage_ping_features_enabled)
end
end
expect(subject[:settings][:collected_data_categories]).to eq(expected_value)
end
context 'when disabled' do
before do
stub_feature_flags(usage_data_instrumentation: false)
end
it 'gathers service_ping_features_enabled' do
expect(subject[:settings][:service_ping_features_enabled]).to eq(Gitlab::CurrentSettings.usage_ping_features_enabled)
it 'reports collected data categories' do
expected_value = %w[standard subscription operational optional]
allow_next_instance_of(ServicePing::PermitDataCategoriesService) do |instance|
expect(instance).to receive(:execute).and_return(expected_value)
end
expect(subject[:settings][:collected_data_categories]).to eq(expected_value)
end
it 'gathers service_ping_features_enabled' do
expect(subject[:settings][:service_ping_features_enabled]).to eq(Gitlab::CurrentSettings.usage_ping_features_enabled)
end
end
end
it 'gathers user_cap_feature_enabled' do


@ -8,8 +8,26 @@ RSpec.describe Gitlab::Utils::UsageData do
describe '#add_metric' do
let(:metric) { 'UuidMetric'}
it 'computes the metric value for given metric' do
expect(described_class.add_metric(metric)).to eq(Gitlab::CurrentSettings.uuid)
context 'with usage_data_instrumentation feature flag' do
context 'when enabled' do
before do
stub_feature_flags(usage_data_instrumentation: true)
end
it 'returns -100 value to be overridden' do
expect(described_class.add_metric(metric)).to eq(-100)
end
end
context 'when disabled' do
before do
stub_feature_flags(usage_data_instrumentation: false)
end
it 'computes the metric value for given metric' do
expect(described_class.add_metric(metric)).to eq(Gitlab::CurrentSettings.uuid)
end
end
end
end


@ -8,4 +8,58 @@ RSpec.describe LegacyDiffNote do
it { is_expected.to eq('note') }
end
describe 'callbacks' do
describe '#set_diff' do
let(:note) do
build(:legacy_diff_note_on_merge_request, st_diff: '_st_diff_').tap do |record|
record.instance_variable_set(:@diff, {})
end
end
context 'when not importing' do
it 'updates st_diff' do
note.save!(validate: false)
expect(note.st_diff).to eq({})
end
end
context 'when importing' do
before do
note.importing = true
end
it 'does not update st_diff' do
note.save!(validate: false)
expect(note.st_diff).to eq('_st_diff_')
end
context 'when feature flag is false' do
before do
stub_feature_flags(skip_legacy_diff_note_callback_on_import: false)
end
it 'updates st_diff' do
note.save!(validate: false)
expect(note.st_diff).to eq({})
end
end
context 'when st_diff is blank' do
before do
note.st_diff = nil
end
it 'updates st_diff' do
note.save!(validate: false)
expect(note.st_diff).to eq({})
end
end
end
end
end
end

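The specs above imply a guarded callback on LegacyDiffNote: st_diff is recomputed unless the record is importing, already has a serialized diff, and the skip_legacy_diff_note_callback_on_import flag is enabled. A sketch of that guard, reconstructed from the spec expectations rather than copied from the model:

# Reconstructed sketch; the real callback body may differ.
def set_diff
  return if Feature.enabled?(:skip_legacy_diff_note_callback_on_import) &&
    importing? && st_diff.present?

  self.st_diff = diff.to_hash if diff
end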

@ -163,27 +163,32 @@ RSpec.describe ProtectedBranch do
expect(described_class.protected?(project, 'staging/some-branch')).to eq(false)
end
it 'returns false when branch name is nil' do
expect(described_class.protected?(project, nil)).to eq(false)
end
context 'with caching', :use_clean_rails_memory_store_caching do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:protected_branch) { create(:protected_branch, project: project, name: "jawn") }
let_it_be(:protected_branch) { create(:protected_branch, project: project, name: "jawn") }
before do
allow(described_class).to receive(:matching).once.and_call_original
allow(described_class).to receive(:matching).with(protected_branch.name, protected_refs: anything).once.and_call_original
# the original call works and warms the cache
described_class.protected?(project, 'jawn')
described_class.protected?(project, protected_branch.name)
end
it 'correctly invalidates a cache' do
expect(described_class).to receive(:matching).once.and_call_original
expect(described_class).to receive(:matching).with(protected_branch.name, protected_refs: anything).once.and_call_original
create(:protected_branch, project: project, name: "bar")
# the cache is invalidated because the project has been "updated"
expect(described_class.protected?(project, 'jawn')).to eq(true)
expect(described_class.protected?(project, protected_branch.name)).to eq(true)
end
it 'correctly uses the cached version' do
expect(described_class).not_to receive(:matching)
expect(described_class.protected?(project, 'jawn')).to eq(true)
expect(described_class.protected?(project, protected_branch.name)).to eq(true)
end
end
end


@ -6,6 +6,7 @@ RSpec.describe Users::CreditCardValidation do
it { is_expected.to belong_to(:user) }
it { is_expected.to validate_length_of(:holder_name).is_at_most(26) }
it { is_expected.to validate_length_of(:network).is_at_most(32) }
it { is_expected.to validate_numericality_of(:last_digits).is_less_than_or_equal_to(9999) }
describe '.similar_records' do


@ -1464,6 +1464,7 @@ RSpec.describe API::Users do
credit_card_expiration_year: expiration_year,
credit_card_expiration_month: 1,
credit_card_holder_name: 'John Smith',
credit_card_type: 'AmericanExpress',
credit_card_mask_number: '1111'
}
end
@ -1495,6 +1496,7 @@ RSpec.describe API::Users do
credit_card_validated_at: credit_card_validated_time,
expiration_date: Date.new(expiration_year, 1, 31),
last_digits: 1111,
network: 'AmericanExpress',
holder_name: 'John Smith'
)
end


@ -15,6 +15,7 @@ RSpec.describe Users::UpsertCreditCardValidationService do
credit_card_expiration_year: expiration_year,
credit_card_expiration_month: 1,
credit_card_holder_name: 'John Smith',
credit_card_type: 'AmericanExpress',
credit_card_mask_number: '1111'
}
end
@ -30,7 +31,16 @@ RSpec.describe Users::UpsertCreditCardValidationService do
result = service.execute
expect(result.status).to eq(:success)
expect(user.reload.credit_card_validated_at).to eq(credit_card_validated_time)
user.reload
expect(user.credit_card_validation).to have_attributes(
credit_card_validated_at: credit_card_validated_time,
network: 'AmericanExpress',
holder_name: 'John Smith',
last_digits: 1111,
expiration_date: Date.new(expiration_year, 1, 31)
)
end
end
@ -97,6 +107,7 @@ RSpec.describe Users::UpsertCreditCardValidationService do
expiration_date: Date.new(expiration_year, 1, 31),
holder_name: "John Smith",
last_digits: 1111,
network: "AmericanExpress",
user_id: user_id
}