Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2021-01-07 21:10:18 +00:00
parent dec7332357
commit fd320d0858
103 changed files with 1945 additions and 1020 deletions

View File

@ -110,3 +110,4 @@ include:
- local: .gitlab/ci/notify.gitlab-ci.yml
- local: .gitlab/ci/dast.gitlab-ci.yml
- local: .gitlab/ci/workhorse.gitlab-ci.yml
- local: .gitlab/ci/graphql.gitlab-ci.yml

View File

@ -84,16 +84,3 @@ ui-docs-links lint:
needs: []
script:
- bundle exec haml-lint -i DocumentationLinks
graphql-reference-verify:
extends:
- .default-retry
- .rails-cache
- .default-before_script
- .docs:rules:graphql-reference-verify
- .use-pg11
stage: test
needs: ["setup-test-env"]
script:
- bundle exec rake gitlab:graphql:check_docs
- bundle exec rake gitlab:graphql:check_schema

View File

@ -0,0 +1,14 @@
graphql-verify:
variables:
SETUP_DB: "false"
extends:
- .default-retry
- .rails-cache
- .default-before_script
- .graphql:rules:graphql-verify
stage: test
needs: []
script:
- bundle exec rake gitlab:graphql:validate
- bundle exec rake gitlab:graphql:check_docs
- bundle exec rake gitlab:graphql:check_schema

View File

@ -349,7 +349,11 @@
changes: *docs-patterns
when: on_success
.docs:rules:graphql-reference-verify:
##################
# GraphQL rules #
##################
.graphql:rules:graphql-verify:
rules:
- <<: *if-not-ee
when: never

View File

@ -1 +1 @@
fbb56944d4581445d0cee29702dbe9531948ea04
1b2467043dfe265bc4091d2d6e93140d7614898b

View File

@ -242,7 +242,7 @@ gem 'discordrb-webhooks-blackst0ne', '~> 3.3', require: false
gem 'hipchat', '~> 1.5.0'
# Jira integration
gem 'jira-ruby', '~> 2.0.0'
gem 'jira-ruby', '~> 2.1.4'
gem 'atlassian-jwt', '~> 0.2.0'
# Flowdock integration

View File

@ -609,7 +609,7 @@ GEM
jaeger-client (1.1.0)
opentracing (~> 0.3)
thrift
jira-ruby (2.0.0)
jira-ruby (2.1.4)
activesupport
atlassian-jwt
multipart-post
@ -1397,7 +1397,7 @@ DEPENDENCIES
icalendar
invisible_captcha (~> 0.12.1)
ipaddress (~> 0.8.3)
jira-ruby (~> 2.0.0)
jira-ruby (~> 2.1.4)
js_regex (~> 3.4)
json (~> 2.3.0)
json-schema (~> 2.8.0)

View File

@ -681,6 +681,34 @@ export const secondsToHours = (offset) => {
export const nDaysAfter = (date, numberOfDays) =>
new Date(newDate(date)).setDate(date.getDate() + numberOfDays);
/**
* Returns the date n days before the date provided
*
* @param {Date} date the initial date
* @param {Number} numberOfDays number of days before
* @return {Date} the date preceding the date provided
*/
export const nDaysBefore = (date, numberOfDays) => nDaysAfter(date, -numberOfDays);
/**
* Returns the date n months after the date provided
*
* @param {Date} date the initial date
* @param {Number} numberOfMonths number of months after
* @return {Date} the date following the date provided
*/
export const nMonthsAfter = (date, numberOfMonths) =>
new Date(newDate(date)).setMonth(date.getMonth() + numberOfMonths);
/**
* Returns the date n months before the date provided
*
* @param {Date} date the initial date
* @param {Number} numberOfMonths number of months before
* @return {Date} the date preceding the date provided
*/
export const nMonthsBefore = (date, numberOfMonths) => nMonthsAfter(date, -numberOfMonths);
/**
* Returns the date after the date provided
*

View File

@ -1,3 +1,3 @@
query getProjectPath {
projectPath
projectPath @client
}

View File

@ -34,7 +34,6 @@ class ProjectsController < Projects::ApplicationController
before_action only: [:edit] do
push_frontend_feature_flag(:approval_suggestions, @project, default_enabled: true)
push_frontend_feature_flag(:allow_editing_commit_messages, @project)
push_frontend_feature_flag(:search_settings_in_page, @project, default_enabled: false)
end
layout :determine_layout

View File

@ -4,10 +4,10 @@ class DeploymentsFinder
attr_reader :project, :params
ALLOWED_SORT_VALUES = %w[id iid created_at updated_at ref].freeze
DEFAULT_SORT_VALUE = 'id'.freeze
DEFAULT_SORT_VALUE = 'id'
ALLOWED_SORT_DIRECTIONS = %w[asc desc].freeze
DEFAULT_SORT_DIRECTION = 'asc'.freeze
DEFAULT_SORT_DIRECTION = 'asc'
def initialize(project, params = {})
@project = project

View File

@ -404,6 +404,38 @@ class ApplicationSetting < ApplicationRecord
length: { maximum: 255, message: _('is too long (maximum is %{count} characters)') },
allow_blank: true
validates :throttle_unauthenticated_requests_per_period,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_unauthenticated_period_in_seconds,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_authenticated_api_requests_per_period,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_authenticated_api_period_in_seconds,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_authenticated_web_requests_per_period,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_authenticated_web_period_in_seconds,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_protected_paths_requests_per_period,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
validates :throttle_protected_paths_period_in_seconds,
presence: true,
numericality: { only_integer: true, greater_than: 0 }
attr_encrypted :asset_proxy_secret_key,
mode: :per_attribute_iv,
key: Settings.attr_encrypted_db_key_base_truncated,

View File

@ -3,10 +3,10 @@
class AuthenticationEvent < ApplicationRecord
include UsageStatistics
TWO_FACTOR = 'two-factor'.freeze
TWO_FACTOR_U2F = 'two-factor-via-u2f-device'.freeze
TWO_FACTOR_WEBAUTHN = 'two-factor-via-webauthn-device'.freeze
STANDARD = 'standard'.freeze
TWO_FACTOR = 'two-factor'
TWO_FACTOR_U2F = 'two-factor-via-u2f-device'
TWO_FACTOR_WEBAUTHN = 'two-factor-via-webauthn-device'
STANDARD = 'standard'
STATIC_PROVIDERS = [TWO_FACTOR, TWO_FACTOR_U2F, TWO_FACTOR_WEBAUTHN, STANDARD].freeze
belongs_to :user, optional: true

View File

@ -7,8 +7,8 @@ module Ci
extend Gitlab::Ci::Model
TERMINAL_SUBPROTOCOL = 'terminal.gitlab.com'
DEFAULT_SERVICE_NAME = 'build'.freeze
DEFAULT_PORT_NAME = 'default_port'.freeze
DEFAULT_SERVICE_NAME = 'build'
DEFAULT_PORT_NAME = 'default_port'
self.table_name = 'ci_builds_runner_session'

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
class Namespace::RootStorageStatistics < ApplicationRecord
SNIPPETS_SIZE_STAT_NAME = 'snippets_size'.freeze
SNIPPETS_SIZE_STAT_NAME = 'snippets_size'
STATISTICS_ATTRIBUTES = %W(
storage_size
repository_size

View File

@ -3,8 +3,8 @@
class Packages::Conan::FileMetadatum < ApplicationRecord
belongs_to :package_file, inverse_of: :conan_file_metadatum
DEFAULT_PACKAGE_REVISION = '0'.freeze
DEFAULT_RECIPE_REVISION = '0'.freeze
DEFAULT_PACKAGE_REVISION = '0'
DEFAULT_RECIPE_REVISION = '0'
validates :package_file, presence: true

View File

@ -6,7 +6,7 @@ class Packages::Dependency < ApplicationRecord
validates :name, uniqueness: { scope: :version_pattern }
NAME_VERSION_PATTERN_TUPLE_MATCHING = '(name, version_pattern) = (?, ?)'.freeze
NAME_VERSION_PATTERN_TUPLE_MATCHING = '(name, version_pattern) = (?, ?)'
MAX_STRING_LENGTH = 255.freeze
MAX_CHUNKED_QUERIES_COUNT = 10.freeze

View File

@ -1,7 +1,7 @@
# frozen_string_literal: true
class Plan < ApplicationRecord
DEFAULT = 'default'.freeze
DEFAULT = 'default'
has_one :limits, class_name: 'PlanLimits'

View File

@ -3,8 +3,8 @@
class ProjectFeatureUsage < ApplicationRecord
self.primary_key = :project_id
JIRA_DVCS_CLOUD_FIELD = 'jira_dvcs_cloud_last_sync_at'.freeze
JIRA_DVCS_SERVER_FIELD = 'jira_dvcs_server_last_sync_at'.freeze
JIRA_DVCS_CLOUD_FIELD = 'jira_dvcs_cloud_last_sync_at'
JIRA_DVCS_SERVER_FIELD = 'jira_dvcs_server_last_sync_at'
belongs_to :project
validates :project, presence: true

View File

@ -1,10 +1,10 @@
# frozen_string_literal: true
class DatadogService < Service
DEFAULT_SITE = 'datadoghq.com'.freeze
URL_TEMPLATE = 'https://webhooks-http-intake.logs.%{datadog_site}/v1/input/'.freeze
URL_TEMPLATE_API_KEYS = 'https://app.%{datadog_site}/account/settings#api'.freeze
URL_API_KEYS_DOCS = "https://docs.#{DEFAULT_SITE}/account_management/api-app-keys/".freeze
DEFAULT_SITE = 'datadoghq.com'
URL_TEMPLATE = 'https://webhooks-http-intake.logs.%{datadog_site}/v1/input/'
URL_TEMPLATE_API_KEYS = 'https://app.%{datadog_site}/account/settings#api'
URL_API_KEYS_DOCS = "https://docs.#{DEFAULT_SITE}/account_management/api-app-keys/"
SUPPORTED_EVENTS = %w[
pipeline job

View File

@ -31,7 +31,7 @@ class User < ApplicationRecord
INSTANCE_ACCESS_REQUEST_APPROVERS_TO_BE_NOTIFIED_LIMIT = 10
BLOCKED_PENDING_APPROVAL_STATE = 'blocked_pending_approval'.freeze
BLOCKED_PENDING_APPROVAL_STATE = 'blocked_pending_approval'
add_authentication_token_field :incoming_email_token, token_generator: -> { SecureRandom.hex.to_i(16).to_s(36) }
add_authentication_token_field :feed_token

View File

@ -19,7 +19,7 @@ module Packages
metadata: 'Get package metadata.'
}.freeze
VERSION = '3.0.0'.freeze
VERSION = '3.0.0'
PROJECT_LEVEL_SERVICES = %i[download publish].freeze
GROUP_LEVEL_SERVICES = %i[search metadata].freeze

View File

@ -6,7 +6,7 @@ module Ci
TerminalCreationError = Class.new(StandardError)
TERMINAL_NAME = 'terminal'.freeze
TERMINAL_NAME = 'terminal'
attr_reader :terminal

View File

@ -8,8 +8,8 @@ module Ci
JOB_QUEUE_DURATION_SECONDS_BUCKETS = [1, 3, 10, 30, 60, 300, 900, 1800, 3600].freeze
JOBS_RUNNING_FOR_PROJECT_MAX_BUCKET = 5.freeze
METRICS_SHARD_TAG_PREFIX = 'metrics_shard::'.freeze
DEFAULT_METRICS_SHARD = 'default'.freeze
METRICS_SHARD_TAG_PREFIX = 'metrics_shard::'
DEFAULT_METRICS_SHARD = 'default'
Result = Struct.new(:build, :build_json, :valid?)

View File

@ -4,7 +4,7 @@ module Ide
class BaseConfigService < ::BaseService
ValidationError = Class.new(StandardError)
WEBIDE_CONFIG_FILE = '.gitlab/.gitlab-webide.yml'.freeze
WEBIDE_CONFIG_FILE = '.gitlab/.gitlab-webide.yml'
attr_reader :config, :config_content

View File

@ -2,8 +2,8 @@
module Packages
module Maven
class FindOrCreatePackageService < BaseService
MAVEN_METADATA_FILE = 'maven-metadata.xml'.freeze
SNAPSHOT_TERM = '-SNAPSHOT'.freeze
MAVEN_METADATA_FILE = 'maven-metadata.xml'
SNAPSHOT_TERM = '-SNAPSHOT'
def execute
package =

View File

@ -2,7 +2,7 @@
module Serverless
class AssociateDomainService
PLACEHOLDER_HOSTNAME = 'example.com'.freeze
PLACEHOLDER_HOSTNAME = 'example.com'
def initialize(knative, pages_domain_id:, creator:)
@knative = knative

View File

@ -1,9 +1,9 @@
# frozen_string_literal: true
class FeatureFlagStrategiesValidator < ActiveModel::EachValidator
STRATEGY_DEFAULT = 'default'.freeze
STRATEGY_GRADUALROLLOUTUSERID = 'gradualRolloutUserId'.freeze
STRATEGY_USERWITHID = 'userWithId'.freeze
STRATEGY_DEFAULT = 'default'
STRATEGY_GRADUALROLLOUTUSERID = 'gradualRolloutUserId'
STRATEGY_USERWITHID = 'userWithId'
# Order key names alphabetically
STRATEGIES = {
STRATEGY_DEFAULT => [].freeze,

View File

@ -9,7 +9,7 @@
- many_refs = event.ref_count.to_i > 1
%span.event-type.d-inline-block.gl-mr-2.pushed= many_refs ? "#{event.action_name} #{event.ref_count} #{event.ref_type.pluralize}" : "#{event.action_name} #{event.ref_type}"
- unless many_refs
%span.gl-mr-2
%span.gl-mr-2.text-truncate
- commits_link = project_commits_path(project, event.ref_name)
- should_link = event.tag? ? project.repository.tag_exists?(event.ref_name) : project.repository.branch_exists?(event.ref_name)
= link_to_if should_link, event.ref_name, commits_link, class: 'ref-name'

View File

@ -0,0 +1,5 @@
---
title: Remove unnecessary use of .freeze
merge_request: 50963
author: Adam Davies @adamd92
type: other

View File

@ -0,0 +1,5 @@
---
title: Fix branch name overflows in profile activity section
merge_request: 50975
author: Kev @KevSlashNull
type: fixed

View File

@ -0,0 +1,5 @@
---
title: Add RateLimit-* headers to RackAttack responses
merge_request: 50833
author: qmnguyen0711
type: fixed

View File

@ -0,0 +1,4 @@
---
filenames:
- ee/app/assets/javascripts/on_demand_scans/graphql/dast_scan_create.mutation.graphql
- ee/app/assets/javascripts/oncall_schedules/graphql/mutations/update_oncall_schedule_rotation.mutation.graphql

View File

@ -38,8 +38,8 @@ Please consider creating a merge request to
for them.
MARKDOWN
OPTIONAL_REVIEW_TEMPLATE = "%{role} review is optional for %{category}".freeze
NOT_AVAILABLE_TEMPLATE = 'No %{role} available'.freeze
OPTIONAL_REVIEW_TEMPLATE = '%{role} review is optional for %{category}'
NOT_AVAILABLE_TEMPLATE = 'No %{role} available'
def note_for_spins_role(spins, role)
spins.each do |spin|

View File

@ -4,7 +4,7 @@ class RemovePartialIndexFromCiBuildsArtifactsFile < ActiveRecord::Migration[5.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
INDEX_NAME = 'partial_index_ci_builds_on_id_with_legacy_artifacts'.freeze
INDEX_NAME = 'partial_index_ci_builds_on_id_with_legacy_artifacts'
disable_ddl_transaction!

View File

@ -422,9 +422,9 @@ install the necessary dependencies from step 1, and add the
GitLab package repository from step 2. When installing GitLab
in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL primary node
#### PostgreSQL nodes
1. SSH in to the PostgreSQL primary node.
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
and confirmation. Use the value that is output by this command in the next
@ -452,23 +452,33 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab-consul
```
1. On the primary database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
1. On every database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
# Disable all components except PostgreSQL, Patroni, and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Enable Patroni
patroni['enable'] = true
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
patroni['postgresql']['max_wal_senders'] = 4
patroni['postgresql']['max_replication_slots'] = 4
# Incoming recommended value for max connections is 500. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
patroni['postgresql']['max_connections'] = 500
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['enable'] = true
consul['services'] = %w(postgresql)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# START user configuration
# Please set the real values as explained in Required Information section
@ -477,18 +487,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
@ -503,70 +504,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
add the file from your Consul server to this server.
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
</a>
</div>
#### PostgreSQL secondary nodes
1. On both the secondary nodes, add the same configuration specified above for the primary node
with an additional setting (`repmgr['master_on_initialization'] = false`) that will inform `gitlab-ctl` that they are standby nodes initially
and there's no need to attempt to register them as a primary node:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['services'] = %w(postgresql)
# Specify if a node should attempt to be primary on initialization.
repmgr['master_on_initialization'] = false
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace with your network addresses
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
consul['configuration'] = {
retry_join: %w(10.6.0.11 10.6.0.12 10.6.0.13),
}
```
PostgreSQL, with Patroni managing its failover, will default to using `pg_rewind` to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
@ -601,84 +541,25 @@ SSH in to the **primary node**:
1. Exit the database prompt by typing `\q` and Enter.
1. Verify the cluster is initialized with one node:
1. Check the status of the leader and cluster:
```shell
gitlab-ctl repmgr cluster show
gitlab-ctl patroni members
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+----------|----------|----------------------------------------
* master | HOSTNAME | | host=HOSTNAME user=gitlab_repmgr dbname=gitlab_repmgr
| Cluster | Member | Host | Role | State | TL | Lag in MB | Pending restart |
|---------------|-----------------------------------|-----------|--------|---------|-----|-----------|-----------------|
| postgresql-ha | <PostgreSQL primary hostname> | 10.6.0.21 | Leader | running | 175 | | * |
| postgresql-ha | <PostgreSQL secondary 1 hostname> | 10.6.0.22 | | running | 175 | 0 | * |
| postgresql-ha | <PostgreSQL secondary 2 hostname> | 10.6.0.23 | | running | 175 | 0 | * |
```
1. Note down the hostname or IP address in the connection string: `host=HOSTNAME`. We will
refer to the hostname in the next section as `<primary_node_name>`. If the value
is not an IP address, it will need to be a resolvable name (via DNS or
`/etc/hosts`)
SSH in to the **secondary node**:
1. Set up the repmgr standby:
```shell
gitlab-ctl repmgr standby setup <primary_node_name>
```
Do note that this will remove the existing data on the node. The command
has a wait time.
The output should be similar to the following:
```console
Doing this will delete the entire contents of /var/opt/gitlab/postgresql/data
If this is not what you want, hit Ctrl-C now to exit
To skip waiting, rerun with the -w option
Sleeping for 30 seconds
Stopping the database
Removing the data
Cloning the data
Starting the database
Registering the node with the cluster
ok: run: repmgrd: (pid 19068) 0s
```
Before moving on, make sure the databases are configured correctly. Run the
following command on the **primary** node to verify that replication is working
properly and the secondary nodes appear in the cluster:
```shell
gitlab-ctl repmgr cluster show
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+---------|-----------|------------------------------------------------
* master | MASTER | | host=<primary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
```
If the 'Role' column for any node says "FAILED", check the
If the 'State' column for any node doesn't say "running", check the
[Troubleshooting section](troubleshooting.md) before proceeding.
Also, check that the `repmgr-check-master` command works successfully on each node:
```shell
su - gitlab-consul
gitlab-ctl repmgr-check-master || echo 'This node is a standby repmgr node'
```
This command relies on exit codes to tell Consul whether a particular node is a master
or secondary. The most important thing here is that this command does not produce errors.
If there are errors it's most likely due to incorrect `gitlab-consul` database user permissions.
Check the [Troubleshooting section](troubleshooting.md) before proceeding.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
@ -696,7 +577,7 @@ The following IPs will be used as an example:
1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`, and replace
`<consul_password_hash>` and `<pgbouncer_password_hash>` with the
password hashes you [set up previously](#postgresql-primary-node):
password hashes you [set up previously](#postgresql-nodes):
```ruby
# Disable all components except Pgbouncer and Consul agent
@ -704,15 +585,16 @@ The following IPs will be used as an example:
# Configure PgBouncer
pgbouncer['admin_users'] = %w(pgbouncer gitlab-consul)
pgbouncer['users'] = {
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
}
# Incoming recommended value for max db connections is 150. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
pgbouncer['max_db_connections'] = 150
# Configure Consul agent
consul['watchers'] = %w(postgresql)

View File

@ -422,9 +422,9 @@ install the necessary dependencies from step 1, and add the
GitLab package repository from step 2. When installing GitLab
in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL primary node
#### PostgreSQL nodes
1. SSH in to the PostgreSQL primary node.
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
and confirmation. Use the value that is output by this command in the next
@ -452,23 +452,33 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab-consul
```
1. On the primary database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
1. On every database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
# Disable all components except PostgreSQL, Patroni, and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Enable Patroni
patroni['enable'] = true
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
patroni['postgresql']['max_wal_senders'] = 4
patroni['postgresql']['max_replication_slots'] = 4
# Incoming recommended value for max connections is 500. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
patroni['postgresql']['max_connections'] = 500
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['enable'] = true
consul['services'] = %w(postgresql)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# START user configuration
# Please set the real values as explained in Required Information section
@ -477,18 +487,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
@ -503,70 +504,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
add the file from your Consul server to this server.
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
</a>
</div>
#### PostgreSQL secondary nodes
1. On both the secondary nodes, add the same configuration specified above for the primary node
with an additional setting (`repmgr['master_on_initialization'] = false`) that will inform `gitlab-ctl` that they are standby nodes initially
and there's no need to attempt to register them as a primary node:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['services'] = %w(postgresql)
# Specify if a node should attempt to be primary on initialization.
repmgr['master_on_initialization'] = false
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace with your network addresses
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
consul['configuration'] = {
retry_join: %w(10.6.0.11 10.6.0.12 10.6.0.13),
}
```
PostgreSQL, with Patroni managing its failover, will default to using `pg_rewind` to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
@ -601,84 +541,25 @@ SSH in to the **primary node**:
1. Exit the database prompt by typing `\q` and Enter.
1. Verify the cluster is initialized with one node:
1. Check the status of the leader and cluster:
```shell
gitlab-ctl repmgr cluster show
gitlab-ctl patroni members
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+----------|----------|----------------------------------------
* master | HOSTNAME | | host=HOSTNAME user=gitlab_repmgr dbname=gitlab_repmgr
| Cluster | Member | Host | Role | State | TL | Lag in MB | Pending restart |
|---------------|-----------------------------------|-----------|--------|---------|-----|-----------|-----------------|
| postgresql-ha | <PostgreSQL primary hostname> | 10.6.0.21 | Leader | running | 175 | | * |
| postgresql-ha | <PostgreSQL secondary 1 hostname> | 10.6.0.22 | | running | 175 | 0 | * |
| postgresql-ha | <PostgreSQL secondary 2 hostname> | 10.6.0.23 | | running | 175 | 0 | * |
```
1. Note down the hostname or IP address in the connection string: `host=HOSTNAME`. We will
refer to the hostname in the next section as `<primary_node_name>`. If the value
is not an IP address, it will need to be a resolvable name (via DNS or
`/etc/hosts`)
SSH in to the **secondary node**:
1. Set up the repmgr standby:
```shell
gitlab-ctl repmgr standby setup <primary_node_name>
```
Do note that this will remove the existing data on the node. The command
has a wait time.
The output should be similar to the following:
```console
Doing this will delete the entire contents of /var/opt/gitlab/postgresql/data
If this is not what you want, hit Ctrl-C now to exit
To skip waiting, rerun with the -w option
Sleeping for 30 seconds
Stopping the database
Removing the data
Cloning the data
Starting the database
Registering the node with the cluster
ok: run: repmgrd: (pid 19068) 0s
```
Before moving on, make sure the databases are configured correctly. Run the
following command on the **primary** node to verify that replication is working
properly and the secondary nodes appear in the cluster:
```shell
gitlab-ctl repmgr cluster show
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+---------|-----------|------------------------------------------------
* master | MASTER | | host=<primary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
```
If the 'Role' column for any node says "FAILED", check the
If the 'State' column for any node doesn't say "running", check the
[Troubleshooting section](troubleshooting.md) before proceeding.
Also, check that the `repmgr-check-master` command works successfully on each node:
```shell
su - gitlab-consul
gitlab-ctl repmgr-check-master || echo 'This node is a standby repmgr node'
```
This command relies on exit codes to tell Consul whether a particular node is a master
or secondary. The most important thing here is that this command does not produce errors.
If there are errors it's most likely due to incorrect `gitlab-consul` database user permissions.
Check the [Troubleshooting section](troubleshooting.md) before proceeding.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
@ -696,7 +577,7 @@ The following IPs will be used as an example:
1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`, and replace
`<consul_password_hash>` and `<pgbouncer_password_hash>` with the
password hashes you [set up previously](#postgresql-primary-node):
password hashes you [set up previously](#postgresql-nodes):
```ruby
# Disable all components except Pgbouncer and Consul agent
@ -704,15 +585,16 @@ The following IPs will be used as an example:
# Configure PgBouncer
pgbouncer['admin_users'] = %w(pgbouncer gitlab-consul)
pgbouncer['users'] = {
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
}
# Incoming recommended value for max db connections is 150. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
pgbouncer['max_db_connections'] = 150
# Configure Consul agent
consul['watchers'] = %w(postgresql)

View File

@ -271,7 +271,7 @@ further configuration steps.
```ruby
# Disable all components except PostgreSQL
roles ['postgres_role']
repmgr['enable'] = false
patroni['enable'] = false
consul['enable'] = false
prometheus['enable'] = false
alertmanager['enable'] = false

View File

@ -672,9 +672,9 @@ install the necessary dependencies from step 1, and add the
GitLab package repository from step 2. When installing GitLab
in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL primary node
#### PostgreSQL nodes
1. SSH in to the PostgreSQL primary node.
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
and confirmation. Use the value that is output by this command in the next
@ -702,23 +702,33 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab-consul
```
1. On the primary database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
1. On every database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
# Disable all components except PostgreSQL, Patroni, and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Enable Patroni
patroni['enable'] = true
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
patroni['postgresql']['max_wal_senders'] = 4
patroni['postgresql']['max_replication_slots'] = 4
# Incoming recommended value for max connections is 500. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
patroni['postgresql']['max_connections'] = 500
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['enable'] = true
consul['services'] = %w(postgresql)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# START user configuration
# Please set the real values as explained in Required Information section
@ -727,25 +737,13 @@ in the second step, do not supply the `EXTERNAL_URL` value.
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
postgres_exporter['dbname'] = 'gitlabhq_production'
postgres_exporter['password'] = '<postgresql_password_hash>'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
@ -756,95 +754,13 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
1. You can list the current PostgreSQL primary, secondary nodes status via:
PostgreSQL, with Patroni managing its failover, will default to use `pg_rewind` by default to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
```shell
sudo /opt/gitlab/bin/gitlab-ctl repmgr cluster show
```
1. Verify the GitLab services are running:
```shell
sudo gitlab-ctl status
```
The output should be similar to the following:
```plaintext
run: consul: (pid 30593) 77133s; run: log: (pid 29912) 77156s
run: logrotate: (pid 23449) 3341s; run: log: (pid 29794) 77175s
run: node-exporter: (pid 30613) 77133s; run: log: (pid 29824) 77170s
run: postgres-exporter: (pid 30620) 77132s; run: log: (pid 29894) 77163s
run: postgresql: (pid 30630) 77132s; run: log: (pid 29618) 77181s
run: repmgrd: (pid 30639) 77132s; run: log: (pid 29985) 77150s
```
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
</a>
</div>
#### PostgreSQL secondary nodes
1. On both the secondary nodes, add the same configuration specified above for the primary node
with an additional setting that will inform `gitlab-ctl` that they are standby nodes initially
and there's no need to attempt to register them as a primary node:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['services'] = %w(postgresql)
# Specify if a node should attempt to be primary on initialization.
repmgr['master_on_initialization'] = false
# START user configuration
# Please set the real values as explained in Required Information section
#
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
postgres_exporter['dbname'] = 'gitlabhq_production'
postgres_exporter['password'] = '<postgresql_password_hash>'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
consul['configuration'] = {
retry_join: %w(10.6.0.11 10.6.0.12 10.6.0.13),
}
# END user configuration
```
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
add the file from your Consul server to this server.
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
@ -876,84 +792,25 @@ SSH in to the **primary node**:
1. Exit the database prompt by typing `\q` and Enter.
1. Verify the cluster is initialized with one node:
1. Check the status of the leader and cluster:
```shell
gitlab-ctl repmgr cluster show
gitlab-ctl patroni members
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+----------|----------|----------------------------------------
* master | HOSTNAME | | host=HOSTNAME user=gitlab_repmgr dbname=gitlab_repmgr
| Cluster | Member | Host | Role | State | TL | Lag in MB | Pending restart |
|---------------|-----------------------------------|-----------|--------|---------|-----|-----------|-----------------|
| postgresql-ha | <PostgreSQL primary hostname> | 10.6.0.31 | Leader | running | 175 | | * |
| postgresql-ha | <PostgreSQL secondary 1 hostname> | 10.6.0.32 | | running | 175 | 0 | * |
| postgresql-ha | <PostgreSQL secondary 2 hostname> | 10.6.0.33 | | running | 175 | 0 | * |
```
1. Note down the hostname or IP address in the connection string: `host=HOSTNAME`. We will
refer to the hostname in the next section as `<primary_node_name>`. If the value
is not an IP address, it will need to be a resolvable name (via DNS or
`/etc/hosts`)
SSH in to the **secondary node**:
1. Set up the repmgr standby:
```shell
gitlab-ctl repmgr standby setup <primary_node_name>
```
Do note that this will remove the existing data on the node. The command
has a wait time.
The output should be similar to the following:
```console
Doing this will delete the entire contents of /var/opt/gitlab/postgresql/data
If this is not what you want, hit Ctrl-C now to exit
To skip waiting, rerun with the -w option
Sleeping for 30 seconds
Stopping the database
Removing the data
Cloning the data
Starting the database
Registering the node with the cluster
ok: run: repmgrd: (pid 19068) 0s
```
Before moving on, make sure the databases are configured correctly. Run the
following command on the **primary** node to verify that replication is working
properly and the secondary nodes appear in the cluster:
```shell
gitlab-ctl repmgr cluster show
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+---------|-----------|------------------------------------------------
* master | MASTER | | host=<primary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
```
If the 'Role' column for any node says "FAILED", check the
If the 'State' column for any node doesn't say "running", check the
[Troubleshooting section](troubleshooting.md) before proceeding.
Also, check that the `repmgr-check-master` command works successfully on each node:
```shell
su - gitlab-consul
gitlab-ctl repmgr-check-master || echo 'This node is a standby repmgr node'
```
This command relies on exit codes to tell Consul whether a particular node is a master
or secondary. The most important thing here is that this command does not produce errors.
If there are errors it's most likely due to incorrect `gitlab-consul` database user permissions.
Check the [Troubleshooting section](troubleshooting.md) before proceeding.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
@ -971,7 +828,7 @@ The following IPs will be used as an example:
1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`, and replace
`<consul_password_hash>` and `<pgbouncer_password_hash>` with the
password hashes you [set up previously](#postgresql-primary-node):
password hashes you [set up previously](#postgresql-nodes):
```ruby
# Disable all components except Pgbouncer and Consul agent
@ -979,15 +836,16 @@ The following IPs will be used as an example:
# Configure PgBouncer
pgbouncer['admin_users'] = %w(pgbouncer gitlab-consul)
pgbouncer['users'] = {
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
}
# Incoming recommended value for max db connections is 150. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
pgbouncer['max_db_connections'] = 150
# Configure Consul agent
consul['watchers'] = %w(postgresql)

View File

@ -422,9 +422,9 @@ install the necessary dependencies from step 1, and add the
GitLab package repository from step 2. When installing GitLab
in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL primary node
#### PostgreSQL nodes
1. SSH in to the PostgreSQL primary node.
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
and confirmation. Use the value that is output by this command in the next
@ -452,23 +452,33 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab-consul
```
1. On the primary database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
1. On every database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
# Disable all components except PostgreSQL, Patroni, and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Enable Patroni
patroni['enable'] = true
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
patroni['postgresql']['max_wal_senders'] = 4
patroni['postgresql']['max_replication_slots'] = 4
# Incoming recommended value for max connections is 500. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
patroni['postgresql']['max_connections'] = 500
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['enable'] = true
consul['services'] = %w(postgresql)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# START user configuration
# Please set the real values as explained in Required Information section
@ -477,18 +487,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
@ -503,70 +504,9 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
add the file from your Consul server to this server.
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
</a>
</div>
#### PostgreSQL secondary nodes
1. On both the secondary nodes, add the same configuration specified above for the primary node
with an additional setting (`repmgr['master_on_initialization'] = false`) that will inform `gitlab-ctl` that they are standby nodes initially
and there's no need to attempt to register them as a primary node:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['services'] = %w(postgresql)
# Specify if a node should attempt to be primary on initialization.
repmgr['master_on_initialization'] = false
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace with your network addresses
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
consul['configuration'] = {
retry_join: %w(10.6.0.11 10.6.0.12 10.6.0.13),
}
```
PostgreSQL, with Patroni managing its failover, will default to use `pg_rewind` by default to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
@ -601,84 +541,25 @@ SSH in to the **primary node**:
1. Exit the database prompt by typing `\q` and Enter.
1. Verify the cluster is initialized with one node:
1. Check the status of the leader and cluster:
```shell
gitlab-ctl repmgr cluster show
gitlab-ctl patroni members
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+----------|----------|----------------------------------------
* master | HOSTNAME | | host=HOSTNAME user=gitlab_repmgr dbname=gitlab_repmgr
| Cluster | Member | Host | Role | State | TL | Lag in MB | Pending restart |
|---------------|-----------------------------------|-----------|--------|---------|-----|-----------|-----------------|
| postgresql-ha | <PostgreSQL primary hostname> | 10.6.0.21 | Leader | running | 175 | | * |
| postgresql-ha | <PostgreSQL secondary 1 hostname> | 10.6.0.22 | | running | 175 | 0 | * |
| postgresql-ha | <PostgreSQL secondary 2 hostname> | 10.6.0.23 | | running | 175 | 0 | * |
```
1. Note down the hostname or IP address in the connection string: `host=HOSTNAME`. We will
refer to the hostname in the next section as `<primary_node_name>`. If the value
is not an IP address, it will need to be a resolvable name (via DNS or
`/etc/hosts`)
SSH in to the **secondary node**:
1. Set up the repmgr standby:
```shell
gitlab-ctl repmgr standby setup <primary_node_name>
```
Do note that this will remove the existing data on the node. The command
has a wait time.
The output should be similar to the following:
```console
Doing this will delete the entire contents of /var/opt/gitlab/postgresql/data
If this is not what you want, hit Ctrl-C now to exit
To skip waiting, rerun with the -w option
Sleeping for 30 seconds
Stopping the database
Removing the data
Cloning the data
Starting the database
Registering the node with the cluster
ok: run: repmgrd: (pid 19068) 0s
```
Before moving on, make sure the databases are configured correctly. Run the
following command on the **primary** node to verify that replication is working
properly and the secondary nodes appear in the cluster:
```shell
gitlab-ctl repmgr cluster show
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+---------|-----------|------------------------------------------------
* master | MASTER | | host=<primary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
```
If the 'Role' column for any node says "FAILED", check the
If the 'State' column for any node doesn't say "running", check the
[Troubleshooting section](troubleshooting.md) before proceeding.
Also, check that the `repmgr-check-master` command works successfully on each node:
```shell
su - gitlab-consul
gitlab-ctl repmgr-check-master || echo 'This node is a standby repmgr node'
```
This command relies on exit codes to tell Consul whether a particular node is a master
or secondary. The most important thing here is that this command does not produce errors.
If there are errors it's most likely due to incorrect `gitlab-consul` database user permissions.
Check the [Troubleshooting section](troubleshooting.md) before proceeding.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
@ -696,7 +577,7 @@ The following IPs will be used as an example:
1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`, and replace
`<consul_password_hash>` and `<pgbouncer_password_hash>` with the
password hashes you [set up previously](#postgresql-primary-node):
password hashes you [set up previously](#postgresql-nodes):
```ruby
# Disable all components except Pgbouncer and Consul agent
@ -704,15 +585,16 @@ The following IPs will be used as an example:
# Configure PgBouncer
pgbouncer['admin_users'] = %w(pgbouncer gitlab-consul)
pgbouncer['users'] = {
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
}
# Incoming recommended value for max db connections is 150. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
pgbouncer['max_db_connections'] = 150
# Configure Consul agent
consul['watchers'] = %w(postgresql)

View File

@ -671,9 +671,9 @@ install the necessary dependencies from step 1, and add the
GitLab package repository from step 2. When installing GitLab
in the second step, do not supply the `EXTERNAL_URL` value.
#### PostgreSQL primary node
#### PostgreSQL nodes
1. SSH in to the PostgreSQL primary node.
1. SSH in to one of the PostgreSQL nodes.
1. Generate a password hash for the PostgreSQL username/password pair. This assumes you will use the default
username of `gitlab` (recommended). The command will request a password
and confirmation. Use the value that is output by this command in the next
@ -701,23 +701,33 @@ in the second step, do not supply the `EXTERNAL_URL` value.
sudo gitlab-ctl pg-password-md5 gitlab-consul
```
1. On the primary database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
1. On every database node, edit `/etc/gitlab/gitlab.rb` replacing values noted in the `# START user configuration` section:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
# Disable all components except PostgreSQL, Patroni, and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Enable Patroni
patroni['enable'] = true
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
patroni['postgresql']['max_wal_senders'] = 4
patroni['postgresql']['max_replication_slots'] = 4
# Incoming recommended value for max connections is 500. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
patroni['postgresql']['max_connections'] = 500
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['enable'] = true
consul['services'] = %w(postgresql)
## Enable service discovery for Prometheus
consul['monitoring_service_discovery'] = true
# START user configuration
# Please set the real values as explained in Required Information section
@ -726,25 +736,13 @@ in the second step, do not supply the `EXTERNAL_URL` value.
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
postgresql['trust_auth_cidr_addresses'] = %w(10.6.0.0/24)
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
postgres_exporter['dbname'] = 'gitlabhq_production'
postgres_exporter['password'] = '<postgresql_password_hash>'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
@ -755,95 +753,13 @@ in the second step, do not supply the `EXTERNAL_URL` value.
# END user configuration
```
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
1. You can list the current PostgreSQL primary, secondary nodes status via:
PostgreSQL, with Patroni managing its failover, will default to use `pg_rewind` by default to handle conflicts.
Like most failover handling methods, this has a small chance of leading to data loss.
Learn more about the various [Patroni replication methods](../postgresql/replication_and_failover.md#selecting-the-appropriate-patroni-replication-method).
```shell
sudo /opt/gitlab/bin/gitlab-ctl repmgr cluster show
```
1. Verify the GitLab services are running:
```shell
sudo gitlab-ctl status
```
The output should be similar to the following:
```plaintext
run: consul: (pid 30593) 77133s; run: log: (pid 29912) 77156s
run: logrotate: (pid 23449) 3341s; run: log: (pid 29794) 77175s
run: node-exporter: (pid 30613) 77133s; run: log: (pid 29824) 77170s
run: postgres-exporter: (pid 30620) 77132s; run: log: (pid 29894) 77163s
run: postgresql: (pid 30630) 77132s; run: log: (pid 29618) 77181s
run: repmgrd: (pid 30639) 77132s; run: log: (pid 29985) 77150s
```
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
</a>
</div>
#### PostgreSQL secondary nodes
1. On both the secondary nodes, add the same configuration specified above for the primary node
with an additional setting that will inform `gitlab-ctl` that they are standby nodes initially
and there's no need to attempt to register them as a primary node:
```ruby
# Disable all components except PostgreSQL and Repmgr and Consul
roles ['postgres_role']
# PostgreSQL configuration
postgresql['listen_address'] = '0.0.0.0'
postgresql['hot_standby'] = 'on'
postgresql['wal_level'] = 'replica'
postgresql['shared_preload_libraries'] = 'repmgr_funcs'
# Disable automatic database migrations
gitlab_rails['auto_migrate'] = false
# Configure the Consul agent
consul['services'] = %w(postgresql)
# Specify if a node should attempt to be primary on initialization.
repmgr['master_on_initialization'] = false
# START user configuration
# Please set the real values as explained in Required Information section
#
# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
postgresql['pgbouncer_user_password'] = '<pgbouncer_password_hash>'
# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
postgresql['sql_user_password'] = '<postgresql_password_hash>'
# Set `max_wal_senders` to one more than the number of database nodes in the cluster.
# This is used to prevent replication from using up all of the
# available database connections.
postgresql['max_wal_senders'] = 4
postgresql['max_replication_slots'] = 4
# Replace XXX.XXX.XXX.XXX/YY with Network Address
postgresql['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
repmgr['trust_auth_cidr_addresses'] = %w(127.0.0.1/32 10.6.0.0/24)
## Enable service discovery for Prometheus
consul['enable'] = true
consul['monitoring_service_discovery'] = true
# Set the network addresses that the exporters will listen on for monitoring
node_exporter['listen_address'] = '0.0.0.0:9100'
postgres_exporter['listen_address'] = '0.0.0.0:9187'
postgres_exporter['dbname'] = 'gitlabhq_production'
postgres_exporter['password'] = '<postgresql_password_hash>'
## The IPs of the Consul server nodes
## You can also use FQDNs and intermix them with IPs
consul['configuration'] = {
retry_join: %w(10.6.0.11 10.6.0.12 10.6.0.13),
}
# END user configuration
```
1. Copy the `/etc/gitlab/gitlab-secrets.json` file from your Consul server, and replace
the file of the same name on this server. If that file is not on this server,
add the file from your Consul server to this server.
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
@ -874,84 +790,25 @@ SSH in to the **primary node**:
1. Exit the database prompt by typing `\q` and Enter.
1. Verify the cluster is initialized with one node:
1. Check the status of the leader and cluster:
```shell
gitlab-ctl repmgr cluster show
gitlab-ctl patroni members
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+----------|----------|----------------------------------------
* master | HOSTNAME | | host=HOSTNAME user=gitlab_repmgr dbname=gitlab_repmgr
| Cluster | Member | Host | Role | State | TL | Lag in MB | Pending restart |
|---------------|-----------------------------------|-----------|--------|---------|-----|-----------|-----------------|
| postgresql-ha | <PostgreSQL primary hostname> | 10.6.0.31 | Leader | running | 175 | | * |
| postgresql-ha | <PostgreSQL secondary 1 hostname> | 10.6.0.32 | | running | 175 | 0 | * |
| postgresql-ha | <PostgreSQL secondary 2 hostname> | 10.6.0.33 | | running | 175 | 0 | * |
```
1. Note down the hostname or IP address in the connection string: `host=HOSTNAME`. We will
refer to the hostname in the next section as `<primary_node_name>`. If the value
is not an IP address, it will need to be a resolvable name (via DNS or
`/etc/hosts`)
SSH in to the **secondary node**:
1. Set up the repmgr standby:
```shell
gitlab-ctl repmgr standby setup <primary_node_name>
```
Do note that this will remove the existing data on the node. The command
has a wait time.
The output should be similar to the following:
```console
Doing this will delete the entire contents of /var/opt/gitlab/postgresql/data
If this is not what you want, hit Ctrl-C now to exit
To skip waiting, rerun with the -w option
Sleeping for 30 seconds
Stopping the database
Removing the data
Cloning the data
Starting the database
Registering the node with the cluster
ok: run: repmgrd: (pid 19068) 0s
```
Before moving on, make sure the databases are configured correctly. Run the
following command on the **primary** node to verify that replication is working
properly and the secondary nodes appear in the cluster:
```shell
gitlab-ctl repmgr cluster show
```
The output should be similar to the following:
```plaintext
Role | Name | Upstream | Connection String
----------+---------|-----------|------------------------------------------------
* master | MASTER | | host=<primary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
standby | STANDBY | MASTER | host=<secondary_node_name> user=gitlab_repmgr dbname=gitlab_repmgr
```
If the 'Role' column for any node says "FAILED", check the
If the 'State' column for any node doesn't say "running", check the
[Troubleshooting section](troubleshooting.md) before proceeding.
Also, check that the `repmgr-check-master` command works successfully on each node:
```shell
su - gitlab-consul
gitlab-ctl repmgr-check-master || echo 'This node is a standby repmgr node'
```
This command relies on exit codes to tell Consul whether a particular node is a master
or secondary. The most important thing here is that this command does not produce errors.
If there are errors it's most likely due to incorrect `gitlab-consul` database user permissions.
Check the [Troubleshooting section](troubleshooting.md) before proceeding.
<div align="right">
<a type="button" class="btn btn-default" href="#setup-components">
Back to setup components <i class="fa fa-angle-double-up" aria-hidden="true"></i>
@ -969,7 +826,7 @@ The following IPs will be used as an example:
1. On each PgBouncer node, edit `/etc/gitlab/gitlab.rb`, and replace
`<consul_password_hash>` and `<pgbouncer_password_hash>` with the
password hashes you [set up previously](#postgresql-primary-node):
password hashes you [set up previously](#postgresql-nodes):
```ruby
# Disable all components except Pgbouncer and Consul agent
@ -977,15 +834,16 @@ The following IPs will be used as an example:
# Configure PgBouncer
pgbouncer['admin_users'] = %w(pgbouncer gitlab-consul)
pgbouncer['users'] = {
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
'gitlab-consul': {
password: '<consul_password_hash>'
},
'pgbouncer': {
password: '<pgbouncer_password_hash>'
}
}
# Incoming recommended value for max db connections is 150. See https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/5691.
pgbouncer['max_db_connections'] = 150
# Configure Consul agent
consul['watchers'] = %w(postgresql)

View File

@ -514,39 +514,24 @@ See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
## Troubleshooting PostgreSQL
## Troubleshooting PostgreSQL with Patroni
In case you are experiencing any issues connecting through PgBouncer, the first place to check is always the logs:
In case you are experiencing any issues connecting through PgBouncer, the first place to check is always the logs for PostgreSQL (which is run through Patroni):
```shell
sudo gitlab-ctl tail postgresql
sudo gitlab-ctl tail patroni
```
### Consul and PostgreSQL changes not taking effect
### Consul and PostgreSQL with Patroni changes not taking effect
Due to the potential impacts, `gitlab-ctl reconfigure` only reloads Consul and PostgreSQL, it will not restart the services. However, not all changes can be activated by reloading.
To restart either service, run `gitlab-ctl restart SERVICE`
To restart either service, run `gitlab-ctl restart consul` or `gitlab-ctl restart patroni` respectively.
For PostgreSQL, it is usually safe to restart the master node by default. Automatic failover defaults to a 1 minute timeout. Provided the database returns before then, nothing else needs to be done. To be safe, you can stop `repmgrd` on the standby nodes first with `gitlab-ctl stop repmgrd`, then start afterwards with `gitlab-ctl start repmgrd`.
For PostgreSQL with Patroni, to prevent the primary node from being failed over automatically, it's safest to stop all secondaries first, then restart the primary and finally restart the secondaries again.
On the Consul server nodes, it is important to restart the Consul service in a controlled fashion. Read our [Consul documentation](../consul.md#restart-consul) for instructions on how to restart the service.
### `gitlab-ctl repmgr-check-master` command produces errors
If this command displays errors about database permissions it is likely that something failed during
install, resulting in the `gitlab-consul` database user getting incorrect permissions. Follow these
steps to fix the problem:
1. On the master database node, connect to the database prompt - `gitlab-psql -d template1`
1. Delete the `gitlab-consul` user - `DROP USER "gitlab-consul";`
1. Exit the database prompt - `\q`
1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) and the user will be re-added with the proper permissions.
1. Change to the `gitlab-consul` user - `su - gitlab-consul`
1. Try the check command again - `gitlab-ctl repmgr-check-master`.
Now there should not be errors. If errors still occur then there is another problem.
### PgBouncer error `ERROR: pgbouncer cannot connect to server`
You may get this error when running `gitlab-rake gitlab:db:configure` or you

View File

@ -60,8 +60,8 @@ PUT /application/appearance
| `favicon` | mixed | no | Instance favicon in `.ico` or `.png` format
| `new_project_guidelines` | string | no | Markdown text shown on the new project page
| `profile_image_guidelines` | string | no | Markdown text shown on the profile page below Public Avatar
| `header_message` | string | no | Message within the system header bar
| `footer_message` | string | no | Message within the system footer bar
| `header_message` | string | no | Message in the system header bar
| `footer_message` | string | no | Message in the system footer bar
| `message_background_color` | string | no | Background color for the system header / footer bar
| `message_font_color` | string | no | Font color for the system header / footer bar
| `email_header_and_footer_enabled` | boolean | no | Add header and footer to all outgoing emails if enabled

View File

@ -30,7 +30,7 @@ Parameters:
| Attribute | Type | Required | Description |
|:----------|:--------|:---------|:----------------------------------------------------------------------------------------------------------------------------------------|
| `email` | string | yes | Public email address of the user. |
| `size` | integer | no | Single pixel dimension (since images are squares). Only used for avatar lookups at `Gravatar` or at the configured `Libravatar` server. |
| `size` | integer | no | Single pixel dimension (because images are squares). Only used for avatar lookups at `Gravatar` or at the configured `Libravatar` server. |
Example request:

View File

@ -426,7 +426,7 @@ POST /projects/:id/boards/:board_id/lists
NOTE:
Label, assignee and milestone arguments are mutually exclusive,
that is, only one of them are accepted in a request.
Check the [Issue Board docs](../user/project/issue_board.md)
Check the [Issue Board documentation](../user/project/issue_board.md)
for more information regarding the required license for each list type.
```shell

View File

@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/55978) in GitLab 11.8.
This is the API docs of the [GitLab Container Registry](../user/packages/container_registry/index.md).
This is the API documentation of the [GitLab Container Registry](../user/packages/container_registry/index.md).
## List registry repositories

View File

@ -33,7 +33,7 @@ To use this in a [`script` definition](../ci/yaml/README.md#script) inside
- The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable.
For example, the following job downloads the artifacts of the job with ID
`42`. Note that the command is wrapped into single quotes since it contains a
`42`. Note that the command is wrapped into single quotes because it contains a
colon (`:`):
```yaml
@ -99,7 +99,7 @@ To use this in a [`script` definition](../ci/yaml/README.md#script) inside
- The `JOB-TOKEN` header with the GitLab-provided `CI_JOB_TOKEN` variable.
For example, the following job downloads the artifacts of the `test` job
of the `master` branch. Note that the command is wrapped into single quotes
since it contains a colon (`:`):
because it contains a colon (`:`):
```yaml
artifact_download:
@ -130,7 +130,7 @@ Possible response status codes:
> Introduced in GitLab 10.0
Download a single artifact file from a job with a specified ID from within
Download a single artifact file from a job with a specified ID from inside
the job's artifacts zipped archive. The file is extracted from the archive and
streamed to the client.
@ -165,7 +165,7 @@ Possible response status codes:
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/23538) in GitLab 11.5.
Download a single artifact file for a specific job of the latest successful
pipeline for the given reference name from within the job's artifacts archive.
pipeline for the given reference name from inside the job's artifacts archive.
The file is extracted from the archive and streamed to the client.
In [GitLab 13.5](https://gitlab.com/gitlab-org/gitlab/-/issues/201784) and later, artifacts

View File

@ -468,7 +468,7 @@ DELETE /projects/:id/members/:user_id
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project or group](README.md#namespaced-path-encoding) owned by the authenticated user |
| `user_id` | integer | yes | The user ID of the member |
| `unassign_issuables` | boolean | false | Flag indicating if the removed member should be unassigned from any issues or merge requests within given group or project |
| `unassign_issuables` | boolean | false | Flag indicating if the removed member should be unassigned from any issues or merge requests inside a given group or project |
```shell
curl --request DELETE --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/:id/members/:user_id"

View File

@ -19,12 +19,12 @@ documentation. This functionality is based on the
GitLab currently supports the following authorization flows:
- **Authorization code with [Proof Key for Code Exchange (PKCE)](https://tools.ietf.org/html/rfc7636):**
- **Authorization code with [Proof Key for Code Exchange (PKCE)](https://tools.ietf.org/html/rfc7636):**
Most secure. Without PKCE, you'd have to include client secrets on mobile clients,
and is recommended for both client and server apps.
- **Authorization code:** Secure and common flow. Recommended option for secure
- **Authorization code:** Secure and common flow. Recommended option for secure
server-side apps.
- **Implicit grant:** Originally designed for user-agent only apps, such as
- **Implicit grant:** Originally designed for user-agent only apps, such as
single page web apps running on GitLab Pages.
The [IETF](https://tools.ietf.org/html/draft-ietf-oauth-security-topics-09#section-2.1.2)
recommends against Implicit grant flow.
@ -159,7 +159,7 @@ The authorization code flow is essentially the same as
You should then use `code` to request an access token.
1. Once you have the authorization code you can request an `access_token` using the
1. After you have the authorization code you can request an `access_token` using the
code. You can do that by using any HTTP client. In the following example,
we are using Ruby's `rest-client`:
@ -189,7 +189,7 @@ You can now make requests to the API with the access token returned.
### Implicit grant flow
NOTE:
For a detailed flow diagram, see the [RFC specification](https://tools.ietf.org/html/rfc6749#section-4.2).
For a detailed flow diagram, see the [RFC specification](https://tools.ietf.org/html/rfc6749#section-4.2).
WARNING:
The Implicit grant flow is inherently insecure. The IETF plans to remove it in

View File

@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Packages API
This is the API docs of [GitLab Packages](../administration/packages/index.md).
This is the API documentation of [GitLab Packages](../administration/packages/index.md).
## List packages
@ -68,7 +68,7 @@ Example response:
]
```
By default, the `GET` request returns 20 results, since the API is [paginated](README.md#pagination).
By default, the `GET` request returns 20 results, because the API is [paginated](README.md#pagination).
### Within a group
@ -159,7 +159,7 @@ Example response:
]
```
By default, the `GET` request returns 20 results, since the API is [paginated](README.md#pagination).
By default, the `GET` request returns 20 results, because the API is [paginated](README.md#pagination).
The `_links` object contains the following properties:
@ -316,7 +316,7 @@ Example response:
]
```
By default, the `GET` request returns 20 results, since the API is [paginated](README.md#pagination).
By default, the `GET` request returns 20 results, because the API is [paginated](README.md#pagination).
## Delete a project package

View File

@ -69,11 +69,12 @@ It picks reviewers and maintainers from the list at the
[engineering projects](https://about.gitlab.com/handbook/engineering/projects/)
page, with these behaviors:
1. It doesn't pick people whose [GitLab status](../user/profile/index.md#current-status)
contains the string 'OOO', or the emoji is `:palm_tree:` or `:beach:`.
1. It doesn't pick people whose Slack or [GitLab status](../user/profile/index.md#current-status):
- contains the string 'OOO', 'PTO', 'Parental Leave', or 'Friends and Family'
- emoji is `:palm_tree:`, `:beach:`, `:beach_umbrella:`, `:beach_with_umbrella:`, `:ferris_wheel:`, `:thermometer:`, `:face_with_thermometer:`, `:red_circle:`, `:bulb:`, `:sun_with_face:`.
1. [Trainee maintainers](https://about.gitlab.com/handbook/engineering/workflow/code-review/#trainee-maintainer)
are three times as likely to be picked as other reviewers.
1. People whose [GitLab status](../user/profile/index.md#current-status) emoji
1. Team members whose Slack or [GitLab status](../user/profile/index.md#current-status) emoji
is `:large_blue_circle:` are more likely to be picked. This applies to both reviewers and trainee maintainers.
- Reviewers with `:large_blue_circle:` are two times as likely to be picked as other reviewers.
- Trainee maintainers with `:large_blue_circle:` are four times as likely to be picked as other reviewers.

View File

@ -0,0 +1,73 @@
---
stage: Growth
group: Product Intelligence
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#assignments
---
# Metrics Dictionary Guide
This guide describes the Metrics Dictionary and how it's implemented.
## Metrics Definition and validation
We are using [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json) to validate the metrics definition.
This process is meant to ensure consistent and valid metrics defined for Usage Ping. All metrics *must*:
- Comply with the defined [JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
- Have a unique `full_path`.
- Have an owner.
All metrics are stored in YAML files:
- [`config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/metrics)
Each metric is defined in a separate YAML file consisting of a number of fields:
| Field | Required | Additional information |
|---------------------|----------|----------------------------------------------------------------|
| `name` | yes | |
| `description` | yes | |
| `value_type` | yes | |
| `status` | yes | |
| `default_generation`| yes | Default generation path of the metric. One full_path value. (1) |
| `full_path` | yes | Full path of the metric for one or multiple generations. Path of the metric in Usage Ping payload. (1) |
| `group` | yes | The [group](https://about.gitlab.com/handbook/product/categories/#devops-stages) that owns the metric. |
| `time_frame` | yes | `string`; may be set to a value like "7d" |
| `data_source` | yes | `string`; may be set to a value like `database` or `redis_hll`. |
| `distribution` | yes | The [distribution](https://about.gitlab.com/handbook/marketing/strategic-marketing/tiers/#definitions) where the metric applies. |
| `tier` | yes | The [tier]( https://about.gitlab.com/handbook/marketing/strategic-marketing/tiers/) where the metric applies. |
| `product_category` | no | The [product category](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/categories.yml) for the metric. |
| `stage` | no | The [stage](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) for the metric. |
| `milestone` | no | The milestone when the metric is introduced. |
| `milestone_removed` | no | The milestone when the metric is removed. |
| `introduced_by_url` | no | The URL to the Merge Request that introduced the metric. |
1. The default generation path is the location of the metric in the Usage Ping payload.
The `full_path` lists the locations of the metric for multiple Usage Ping generations.
### Example metric definition
The linked [`uuid`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/license/uuid.yml)
YAML file includes an example metric definition, where the `uuid` metric is the GitLab
instance unique identifier.
```yaml
name: uuid
description: GitLab instance unique identifier
value_type: string
product_category: collection
stage: growth
status: data_available
default_generation: generation_1
full_path:
generation_1: uuid
generation_2: license.uuid
milestone: 9.1
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1521
group: group::product intelligence
time_frame: none
data_source: database
distribution: [ee, ce]
tier: ['free', 'starter', 'premium', 'ultimate', 'bronze', 'silver', 'gold']
```

View File

@ -54,11 +54,7 @@ By default, protected paths are:
- `/import/github/personal_access_token`
- `/admin/session`
This header is included in responses to blocked requests:
```plaintext
Retry-After: 60
```
See [User and IP rate limits](../user/admin_area/settings/user_and_ip_rate_limits.md#response-headers) for the headers returned in responses to blocked requests.
For example, the following are limited to a maximum 10 requests per minute:

View File

@ -7,6 +7,14 @@ type: reference
# Account and limit settings **(CORE ONLY)**
## Default projects limit
You can change the default maximum number of projects that users can create in their personal namespace.
Navigate to **Admin Area > Settings > General**, then expand **Account and Limit**.
You can increase or decrease that `Default projects limit` value.
- If you set `Default projects limit` to 0, users are not allowed to create projects in their personal namespace. However, projects can still be created within a group.
## Max attachment size
You can change the maximum file size for attachments in comments and replies in GitLab.

View File

@ -28,11 +28,7 @@ GitLab rate limits the following paths with Rack Attack by default:
GitLab responds with HTTP status code `429` to POST requests at protected paths
that exceed 10 requests per minute per IP address.
This header is included in responses to blocked requests:
```plaintext
Retry-After: 60
```
See [User and IP rate limits](../../admin_area/settings/user_and_ip_rate_limits.md#response-headers) for the headers returned in responses to blocked requests.
For example, the following are limited to a maximum 10 requests per minute:

View File

@ -36,6 +36,25 @@ Retry later
It is possible to customize this response text in the admin area.
## Response headers
> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/731) in GitLab 13.8, the `Rate-Limit` headers. `Retry-After` was introduced in an earlier version.
When a client exceeds the associated rate limit, the following requests are
blocked. The server may respond with rate-limiting information allowing the
requester to retry after a specific period of time. This information is
returned in the response headers.
| Header | Example | Description |
|:----------------------|:--------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `RateLimit-Limit` | `60` | The request quota for the client **each minute**. If the rate limit period set in the admin area is different from 1 minute, the value of this header is adjusted to approximately the nearest 60-minute period. |
| `RateLimit-Name` | `throttle_authenticated_web` | Name of the throttle blocking the requests. |
| `RateLimit-Observed` | `67` | Number of requests associated to the client in the time window. |
| `RateLimit-Remaining` | `0` | Remaining quota in the time window. The result of `RateLimit-Limit` - `RateLimit-Observed`. |
| `RateLimit-Reset` | `30` | An alias of the `Retry-After` header. |
| `RateLimit-ResetTime` | `Tue, 05 Jan 2021 11:00:00 GMT` | [RFC2616](https://tools.ietf.org/html/rfc2616#section-3.3.1)-formatted date and time when the request quota is reset. |
| `Retry-After` | `30` | Remaining duration **in seconds** until the quota is reset. This is a [standard HTTP header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After). |
## Use an HTTP header to bypass rate limiting
> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/622) in GitLab 13.6.

View File

@ -157,6 +157,7 @@ You can use various tools to generate HAR files:
- [Insomnia Core](https://insomnia.rest/): API client
- [Chrome](https://www.google.com/chrome/): Browser
- [Firefox](https://www.mozilla.org/en-US/firefox/): Browser
- [GitLab HAR Recorder](https://gitlab.com/gitlab-org/security-products/har-recorder): Command line
WARNING:
HAR files may contain sensitive information such as authentication tokens, API keys, and session

View File

@ -704,6 +704,49 @@ security reports without requiring internet access.
Alternatively, you can use the variable `SECURE_ANALYZERS_PREFIX` to override the base registry address of the `dast` image.
## On-demand scans
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/218465) in GitLab 13.2.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/218465) in GitLab 13.3.
An on-demand DAST scan runs outside the DevOps life cycle. Changes in your repository don't trigger
the scan. You must start it manually.
An on-demand DAST scan:
- Uses settings in the site profile and scanner profile you select when you run the scan,
instead of those in the `.gitlab-ci.yml` file.
- Is associated with your project's default branch.
### On-demand scan modes
An on-demand scan can be run in active or passive mode:
- _Passive mode_ is the default and runs a ZAP Baseline Scan.
- _Active mode_ runs a ZAP Full Scan which is potentially harmful to the site being scanned. To
minimize the risk of accidental damage, running an active scan requires a [validated site
profile](#site-profile-validation).
### Run an on-demand DAST scan
NOTE:
You must have permission to run an on-demand DAST scan against a protected branch.
The default branch is automatically protected. For more information, see
[Pipeline security on protected branches](../../../ci/pipelines/index.md#pipeline-security-on-protected-branches).
To run an on-demand DAST scan, you need:
- A [scanner profile](#create-a-scanner-profile).
- A [site profile](#create-a-site-profile).
- If you are running an active scan, the site profile must be [validated](#validate-a-site-profile).
1. From your project's home page, go to **Security & Compliance > On-demand Scans** in the left sidebar.
1. In **Scanner profile**, select a scanner profile from the dropdown.
1. In **Site profile**, select a site profile from the dropdown.
1. Click **Run scan**.
The on-demand DAST scan runs and the project's dashboard shows the results.
## Site profile
A site profile describes the attributes of a web site to scan on demand with DAST. A site profile is
@ -714,31 +757,115 @@ A site profile contains the following:
- **Profile name**: A name you assign to the site to be scanned.
- **Target URL**: The URL that DAST runs against.
## Site profile validation
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/233020) in GitLab 13.8.
Site profile validation reduces the risk of running an active scan against the wrong website. A site
must be validated before an active scan can run against it. The site validation methods are as
follows:
- _Text file validation_ requires a text file be uploaded to the target site. The text file is
allocated a name and content that is unique to the project. The validation process checks the
file's content.
- _Header validation_ requires the header `Gitlab-On-Demand-DAST` be added to the target site,
with a value unique to the project. The validation process checks that the header is present, and
checks its value.
Both methods are equivalent in functionality. Use whichever is feasible.
### Create a site profile
To create a site profile:
1. From your project's home page, go to **Security & Compliance > Configuration**.
1. Click **Manage** in the **DAST Profiles** row.
1. Click **New Profile > Site Profile**.
1. Type in a unique **Profile name** and **Target URL** then click **Save profile**.
1. Select **Manage** in the **DAST Profiles** row.
1. Select **New Profile > Site Profile**.
1. Type in a unique **Profile name** and **Target URL** then select **Save profile**.
### Edit a site profile
To edit an existing site profile:
1. From your project's home page, go to **Security & Compliance > Configuration**.
1. Click **Manage** in the **DAST Profiles** row.
1. Click **Edit** in the row of the profile to edit.
1. Edit the **Profile name** and **Target URL**, then click **Save profile**.
1. Select **Manage** in the **DAST Profiles** row.
1. Select **Edit** in the row of the profile to edit.
1. Edit the **Profile name** and **Target URL**, then select **Save profile**.
### Delete a site profile
To delete an existing site profile:
1. From your project's home page, go to **Security & Compliance > Configuration**.
1. Click **Manage** in the **DAST Profiles** row.
1. Click **{remove}** (Delete profile) in the row of the profile to delete.
1. Select **Manage** in the **DAST Profiles** row.
1. Select **{remove}** (Delete profile) in the row of the profile to delete.
### Validate a site profile
To validate a site profile:
1. From your project's home page, go to **Security & Compliance > Configuration**.
1. Select **Manage** in the **DAST Profiles** row.
1. Select **Validate target site** beside the profile to validate.
1. Select the validation method.
1. For **Text file validation**:
1. Download the validation file listed in **Step 2**.
1. Upload the validation file to the host. You can upload the file to the location in
**Step 3** or any location you prefer.
1. Select **Validate**.
1. For **Header validation**:
1. Select the clipboard icon in **Step 2**.
1. Edit the header of the site to validate, and paste the clipboard content.
1. Select the input field in **Step 3** and enter the location of the header.
1. Select **Validate**.
The site is validated and an active scan can run against it.
If a validated site profile's target URL is edited, the site is no longer validated.
#### Validated site profile headers
The following are code samples of how you could provide the required site profile header in your
application.
##### Ruby on Rails example for on-demand scan
Here's how you can add a custom header in a Ruby on Rails application:
```ruby
class DastWebsiteTargetController < ActionController::Base
def dast_website_target
response.headers['Gitlab-On-Demand-DAST'] = '0dd79c9a-7b29-4e26-a815-eaaf53fcab1c'
head :ok
end
end
```
##### Django example for on-demand scan
Here's how you can add a
[custom header in Django](https://docs.djangoproject.com/en/2.2/ref/request-response/#setting-header-fields):
```python
class DastWebsiteTargetView(View):
def head(self, *args, **kwargs):
response = HttpResponse()
response['Gitlab-On-Demand-DAST'] = '0dd79c9a-7b29-4e26-a815-eaaf53fcab1c'
return response
```
##### Node (with Express) example for on-demand scan
Here's how you can add a
[custom header in Node (with Express)](http://expressjs.com/en/5x/api.html#res.append):
```javascript
app.get('/dast-website-target', function(req, res) {
res.append('Gitlab-On-Demand-DAST', '0dd79c9a-7b29-4e26-a815-eaaf53fcab1c')
res.send('Respond to DAST ping')
})
```
## Scanner profile
@ -782,40 +909,6 @@ To delete a scanner profile:
1. Click **Manage** in the **DAST Profiles** row.
1. Click **{remove}** (Delete profile) in the scanner profile's row.
## On-demand scans
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/218465) in GitLab 13.2.
> - [Improved](https://gitlab.com/gitlab-org/gitlab/-/issues/218465) in GitLab 13.3.
An on-demand DAST scan runs outside the DevOps life cycle. Changes in your repository don't trigger
the scan. You must start it manually.
An on-demand DAST scan:
- Uses settings in the site profile and scanner profile you select when you run the scan,
instead of those in the `.gitlab-ci.yml` file.
- Is associated with your project's default branch.
### Run an on-demand DAST scan
NOTE:
You must have permission to run an on-demand DAST scan against a protected branch.
The default branch is automatically protected. For more information, see
[Pipeline security on protected branches](../../../ci/pipelines/index.md#pipeline-security-on-protected-branches).
To run an on-demand DAST scan, you need:
- A [scanner profile](#create-a-scanner-profile).
- A [site profile](#create-a-site-profile).
1. From your project's home page, go to **Security & Compliance > On-demand Scans** in the left sidebar.
1. Click **Create new DAST scan**.
1. In **Scanner profile**, select a scanner profile from the dropdown.
1. In **Site profile**, select a site profile from the dropdown.
1. Click **Run scan**.
The on-demand DAST scan runs and the project's dashboard shows the results.
## Reports
The DAST tool outputs a report file in JSON format by default. However, this tool can also generate reports in

View File

@ -532,13 +532,10 @@ endpoints](../../user/admin_area/settings/rate_limits_on_raw_endpoints.md).
### Rate limiting responses
The [`Retry-After`
header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After)
indicates when the client should retry.
For information on rate limiting responses, see:
Rate limits applied by HAProxy (instead of Cloudflare or the
GitLab application) have `RateLimit-Reset` and `RateLimit-ResetTime`
headers.
- [List of headers on responses to blocked requests](../admin_area/settings/user_and_ip_rate_limits.md#response-headers).
- [Customizable response text](../admin_area/settings/user_and_ip_rate_limits.md#response-text).
### Protected paths throttle
@ -548,11 +545,7 @@ paths that exceed 10 requests per **minute** per IP address.
See the source below for which paths are protected. This includes user creation,
user confirmation, user sign in, and password reset.
This header is included in responses to blocked requests:
```plaintext
Retry-After: 60
```
[User and IP rate limits](../admin_area/settings/user_and_ip_rate_limits.md#response-headers) includes a list of the headers sent in responses to blocked requests.
See [Protected Paths](../admin_area/settings/protected_paths.md) for more details.

View File

@ -21,6 +21,11 @@ If you do not have a Composer package, create one and check it in to
a repository. This example shows a GitLab repository, but the repository
can be any public or private repository.
WARNING:
If you are using a GitLab repository, the project must have been created from
a group's namespace, rather than a user's namespace. Composer packages
[can't be published to projects created from a user's namespace](https://gitlab.com/gitlab-org/gitlab/-/issues/235467).
1. Create a directory called `my-composer-package` and change to that directory:
```shell

View File

@ -94,7 +94,10 @@ Some features such as [publishing](#publish-an-npm-package) a package is only av
## Authenticate to the Package Registry
To authenticate to the Package Registry, you must use one of the following:
You must authenticate with the Package Registry when the project
is private. Public projects do not require authentication.
To authenticate, use one of the following:
- A [personal access token](../../../user/profile/personal_access_tokens.md)
(required for two-factor authentication (2FA)), with the scope set to `api`.

View File

@ -1347,7 +1347,14 @@ Note that `deployable_id` is the ID of the CI job.
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/260347) in GitLab 13.7.
Triggered when a user is added as a group member.
Member events are triggered when:
- A user is added as a group member
- The access level of a user has changed
- The expiration date for user access has been updated
- A user has been removed from the group
#### Add member to group
**Request Header**:
@ -1375,6 +1382,62 @@ X-Gitlab-Event: Member Hook
}
```
#### Update member access level or expiration date
**Request Header**:
```plaintext
X-Gitlab-Event: Member Hook
```
**Request Body**:
```json
{
"created_at": "2020-12-11T04:57:22Z",
"updated_at": "2020-12-12T08:48:19Z",
"group_name": "webhook-test",
"group_path": "webhook-test",
"group_id": 100,
"user_username": "test_user",
"user_name": "Test User",
"user_email": "testuser@webhooktest.com",
"user_id": 64,
"group_access": "Developer",
"group_plan": null,
"expires_at": "2020-12-20T00:00:00Z",
"event_name": "user_update_for_group"
}
```
#### Remove member from group
**Request Header**:
```plaintext
X-Gitlab-Event: Member Hook
```
**Request Body**:
```json
{
"created_at": "2020-12-11T04:57:22Z",
"updated_at": "2020-12-12T08:52:34Z",
"group_name": "webhook-test",
"group_path": "webhook-test",
"group_id": 100,
"user_username": "test_user",
"user_name": "Test User",
"user_email": "testuser@webhooktest.com",
"user_id": 64,
"group_access": "Guest",
"group_plan": null,
"expires_at": "2020-12-14T00:00:00Z",
"event_name": "user_remove_from_group"
}
```
### Feature Flag events
Triggered when a feature flag is turned on or off.

View File

@ -72,10 +72,26 @@ Let's consider the following scenario:
## Job token
A unique job token is generated for each job and provides the user read
access all projects that would be normally accessible to the user creating that
job. The unique job token does not have any write permissions, but there
is a [proposal to add support](https://gitlab.com/groups/gitlab-org/-/epics/3559).
When a pipeline job is about to run, GitLab generates a unique token and injects it as the
[`CI_JOB_TOKEN` predefined variable](../../ci/variables/predefined_variables.md).
This token can authenticate [API requests](../../api/README.md)
from the job script (Runner) that needs to access the project's resources (for example, when
fetching a job artifact).
Once the token is authenticated, GitLab identifies the user who triggered the job and uses this user
to authorize access to the resource. Therefore, this user must be assigned to
[a role that has the required privileges](../permissions.md).
The job token has these limitations:
- Not all APIs allow job tokens for authentication. See [this list](../../api/README.md#gitlab-ci-job-token)
for available endpoints.
- The token is valid only while the pipeline job runs. Once the job finishes, the token can't be
used for authentication.
Although a job token is handy to quickly access a project's resources without any configuration, it
sometimes gives extra permissions that aren't necessary. There is [a proposal](https://gitlab.com/groups/gitlab-org/-/epics/3559)
to redesign the feature for more strategic control of the access permissions.
If you need your CI pipeline to push to the Package Registry, consider using [deploy tokens](deploy_tokens/index.md).

View File

@ -0,0 +1,4 @@
fragment AuthorF on Author {
name
handle
}

View File

@ -0,0 +1,4 @@
fragment BadF on Blog {
wibble
wobble
}

View File

@ -0,0 +1,5 @@
query($bad: String) {
blog(title: $bad) {
description
}
}

View File

@ -0,0 +1,3 @@
query {
thingy @client
}

View File

@ -0,0 +1,7 @@
#import "./thingy.fragment.graphql"
query($slug: String!, $foo: String) {
thingy(someArg: $foo) @client {
...ThingyF
}
}

View File

@ -0,0 +1,9 @@
query($slug: String!) {
post(slug: $slug) {
author {
posts @connection(key: "posts") {
title
}
}
}
}

View File

@ -0,0 +1,7 @@
# import "../author.fragment.graphql"
query($slug: String!) {
post(slug: $slug) {
author { ...AuthorF }
}
}

View File

@ -0,0 +1,7 @@
# import "../../author.fragment.graphql"
query($slug: String!) {
post(slug: $slug) {
author { ...AuthorF }
}
}

View File

@ -0,0 +1,10 @@
# import "./author.fragment.graphql"
# import "./post.fragment.graphql"
query($title: String!) {
blog(title: $title) {
description
mainAuthor { ...AuthorF }
posts { ...PostF }
}
}

View File

@ -0,0 +1,5 @@
fragment AuthorF on Author {
name
handle
verified
}

View File

@ -0,0 +1,9 @@
#import "ee_else_ce/author.fragment.graphql"
query {
post(slug: "validating-queries") {
title
content
author { ...AuthorF }
}
}

View File

@ -0,0 +1,8 @@
query {
blog {
title
posts {
title
}
}
}

View File

@ -0,0 +1,7 @@
query {
thingy @client
post(slug: "validating-queries") {
title
otherThing @client
}
}

View File

@ -0,0 +1,7 @@
query {
thingy @client
post(slug: "validating-queries") {
titlz
otherThing @client
}
}

View File

@ -0,0 +1,11 @@
query($slug: String!, $foo: String) {
thingy(someArg: $foo) @client {
x
y
z
}
post(slug: $slug) {
title
otherThing @client
}
}

View File

@ -0,0 +1,11 @@
#import "./thingy.fragment.graphql"
query($slug: String!, $foo: String) {
thingy(someArg: $foo) @client {
...ThingyF
}
post(slug: $slug) {
title
otherThing @client
}
}

View File

@ -0,0 +1,8 @@
# import "./author.fragment.graphql"
fragment PostF on Post {
name
title
content
author { ...AuthorF }
}

View File

@ -0,0 +1,7 @@
query {
post(slug: "validating-queries") {
title
content
author { name }
}
}

View File

@ -0,0 +1,9 @@
#import "./author.fragment.graphql"
query {
post(slug: "validating-queries") {
title
content
author { ...AuthorF }
}
}

View File

@ -0,0 +1,9 @@
# import "./auther.fragment.graphql"
query {
post(slug: "validating-queries") {
title
content
author { ...AuthorF }
}
}

View File

@ -0,0 +1,5 @@
query }
blog(title: "boom") {
description
}
}

View File

@ -0,0 +1,3 @@
fragment ThingyF on Thingy {
x y z
}

View File

@ -0,0 +1,6 @@
# import "does-not-exist.graphql"
fragment AuthorF on Author {
name
handle
}

View File

@ -0,0 +1,9 @@
# import "./transitive_bad_import.fragment.graphql"
query($slug: String!) {
post(slug: $slug) {
title
content
author { ...AuthorF }
}
}

View File

@ -0,0 +1,3 @@
type Author {
name: String
}

View File

@ -0,0 +1,8 @@
# import "./author.fragment.graphql"
query($slug: String!) {
post(slug: $slug) {
title
content
}
}

View File

@ -0,0 +1,7 @@
query {
blog(title: "A history of GraphQL") {
title
createdAt
categories { name }
}
}

View File

@ -0,0 +1,7 @@
# import "./bad.fragment.graphql"
query($title: String!) {
blog(title: $title) {
...BadF
}
}

View File

@ -0,0 +1,286 @@
# frozen_string_literal: true
require 'find'
module Gitlab
module Graphql
module Queries
# Matches a `# import "<path>"` directive at the start of a line.
IMPORT_RE = /^#\s*import "(?<path>[^"]+)"$/m.freeze
# Import paths starting with `ee_else_ce` resolve to the EE or CE home.
EE_ELSE_CE = /^ee_else_ce/.freeze
HOME_RE = /^~/.freeze          # `~`   -> fragments home directory
HOME_EE = %r{^ee/}.freeze      # `ee/` -> EE home directory
DOTS_RE = %r{^(\.\./)+}.freeze # leading `../` runs (relative imports)
DOT_RE = %r{^\./}.freeze       # leading `./` (same-directory import)
IMPLICIT_ROOT = %r{^app/}.freeze # `app/` resolves from Rails.root
# Client-side Apollo @connection directive; stripped before validation.
CONN_DIRECTIVE = /@connection\(key: "\w+"\)/.freeze
# Adapts an exception to the error interface the validation report
# expects (#message and #path).
class WrappedError
  def initialize(error)
    @error = error
  end

  # The wrapped exception's message, verbatim.
  def message
    @error.message
  end

  # Wrapped exceptions carry no query-path information.
  def path
    []
  end
end
# Error object reported when an imported query or fragment file is
# missing from disk.
class FileNotFound
  def initialize(file)
    @file = file
  end

  # Human-readable description of the missing file.
  def message
    format('File not found: %s', @file)
  end

  # File-level errors have no query path.
  def path
    []
  end
end
# We need to re-write queries to remove all @client fields. Ideally we
# would do that as a source-to-source transformation of the AST, but doing it using a
# printer is much simpler.
class ClientFieldRedactor < GraphQL::Language::Printer
  attr_reader :fields_printed, :skipped_arguments, :printed_arguments, :used_fragments

  # skips: when true (the default), @client fields are redacted. Skipped
  # subtrees are re-printed with a non-skipping instance so we can record
  # the arguments and fragments they would have used.
  #
  # Fix: @used_fragments was previously initialized twice; the duplicate
  # assignment has been removed.
  def initialize(skips = true)
    @skips = skips
    @fields_printed = 0
    @in_operation = false
    @skipped_arguments = [].to_set
    @printed_arguments = [].to_set
    @skipped_fragments = [].to_set
    @used_fragments = [].to_set
  end

  # Records every variable reference that survives redaction, so unused
  # variable definitions can be dropped from the operation signature.
  def print_variable_identifier(variable_identifier)
    @printed_arguments << variable_identifier.name
    super
  end

  # Records every fragment spread that survives redaction.
  def print_fragment_spread(fragment_spread, indent: "")
    @used_fragments << fragment_spread.name
    super
  end

  # Prints an operation, omitting variable definitions that are only used
  # inside skipped (@client) fields.
  def print_operation_definition(op, indent: "")
    @in_operation = true
    out = +"#{indent}#{op.operation_type}"
    out << " #{op.name}" if op.name

    # Do these first, so that we detect any skipped arguments
    dirs = print_directives(op.directives)
    sels = print_selections(op.selections, indent: indent)

    # remove variable definitions only used in skipped (client) fields
    vars = op.variables.reject do |v|
      @skipped_arguments.include?(v.name) && !@printed_arguments.include?(v.name)
    end

    if vars.any?
      out << "(#{vars.map { |v| print_variable_definition(v) }.join(", ")})"
    end

    out + dirs + sels
  ensure
    @in_operation = false
  end

  # Redacts fields carrying the @client directive; otherwise prints the
  # field and counts it (only fields inside an operation contribute to
  # +fields_printed+).
  def print_field(field, indent: '')
    if skips? && field.directives.any? { |d| d.name == 'client' }
      # Re-print the skipped subtree with a non-skipping printer so we
      # learn which fragments/arguments it referenced.
      skipped = self.class.new(false)
      skipped.print_node(field)
      @skipped_fragments |= skipped.used_fragments
      @skipped_arguments |= skipped.printed_arguments

      return ''
    end

    ret = super

    @fields_printed += 1 if @in_operation && ret != ''

    ret
  end

  # Drops fragment definitions that were only referenced from skipped
  # (@client) fields.
  def print_fragment_definition(fragment_def, indent: "")
    if skips? && @skipped_fragments.include?(fragment_def.name) && !@used_fragments.include?(fragment_def.name)
      return ''
    end

    super
  end

  # Whether this printer redacts @client fields.
  def skips?
    @skips
  end
end
# A single .graphql query file, plus its (transitively) imported
# fragments. Parsing is memoized; errors are collected rather than
# raised so that validation can report them all.
class Definition
  attr_reader :file, :imports

  # path:      filesystem path of the query file
  # fragments: shared Fragments store used to resolve imports
  def initialize(path, fragments)
    @file = path
    @fragments = fragments
    @imports = []
    @errors = []
    @ee_else_ce = []
  end

  # The full query text: this file plus all imported fragments, joined.
  # Returns nil when, after removing @client fields, no fields remain
  # (i.e. the query is purely client-side).
  def text(mode: :ce)
    qs = [query] + all_imports(mode: mode).uniq.sort.map { |p| fragment(p).query }
    t = qs.join("\n\n").gsub(/\n\n+/, "\n\n")

    return t unless /@client/.match?(t)

    # Redact @client fields via the printer; keep the result only if any
    # server-side fields survived.
    doc = ::GraphQL.parse(t)
    printer = ClientFieldRedactor.new
    redacted = doc.dup.to_query_string(printer: printer)

    return redacted if printer.fields_printed > 0
  end

  # The file's own source with import directives stripped out and
  # recorded (into @imports / @ee_else_ce). Memoized; nil if the file is
  # missing (a FileNotFound error is recorded instead).
  def query
    return @query if defined?(@query)

    # CONN_DIRECTIVEs are purely client-side constructs
    @query = File.read(file).gsub(CONN_DIRECTIVE, '').gsub(IMPORT_RE) do
      path = $~[:path]

      if EE_ELSE_CE.match?(path)
        @ee_else_ce << path.gsub(EE_ELSE_CE, '')
      else
        @imports << fragment_path(path)
      end

      ''
    end
  rescue Errno::ENOENT
    @errors << FileNotFound.new(file)
    @query = nil
  end

  # All fragment paths this query depends on, transitively, with
  # ee_else_ce imports resolved for the given mode (:ce or :ee).
  def all_imports(mode: :ce)
    return [] if query.nil?

    home = mode == :ee ? @fragments.home_ee : @fragments.home
    eithers = @ee_else_ce.map { |p| home + p }

    (imports + eithers).flat_map { |p| [p] + @fragments.get(p).all_imports(mode: mode) }
  end

  # The union of this file's errors and those of every imported fragment
  # (ee_else_ce imports are checked in both homes).
  def all_errors
    return @errors.to_set if query.nil?

    paths = imports + @ee_else_ce.flat_map { |p| [@fragments.home + p, @fragments.home_ee + p] }

    paths.map { |p| fragment(p).all_errors }.reduce(@errors.to_set) { |a, b| a | b }
  end

  # Validates this query against the given schema. Returns a pair of
  # [summary, errors]: summary is :client_query for purely client-side
  # queries (which cannot be validated), :validated otherwise.
  def validate(schema)
    return [:client_query, []] if query.present? && text.nil?

    errs = all_errors.presence || schema.validate(text)
    if @ee_else_ce.present?
      # Queries with ee_else_ce imports must be valid in both modes.
      errs += schema.validate(text(mode: :ee))
    end

    [:validated, errs]
  rescue ::GraphQL::ParseError => e
    [:validated, [WrappedError.new(e)]]
  end

  private

  def fragment(path)
    @fragments.get(path)
  end

  # Resolves an import path to an absolute filesystem path, handling the
  # `~`, `ee/`, `./`, `../` and `app/` prefixes in turn.
  def fragment_path(import_path)
    frag_path = import_path.gsub(HOME_RE, @fragments.home)
    frag_path = frag_path.gsub(HOME_EE, @fragments.home_ee + '/')
    frag_path = frag_path.gsub(DOT_RE) do
      Pathname.new(file).parent.to_s + '/'
    end
    frag_path = frag_path.gsub(DOTS_RE) do |dots|
      rel_dir(dots.split('/').count)
    end
    frag_path = frag_path.gsub(IMPLICIT_ROOT) do
      (Rails.root / 'app').to_s + '/'
    end

    frag_path
  end

  # Directory n_steps_up levels above this file, with a trailing slash.
  def rel_dir(n_steps_up)
    path = Pathname.new(file).parent
    while n_steps_up > 0
      path = path.parent
      n_steps_up -= 1
    end

    path.to_s + '/'
  end
end
# Caching store of fragment Definitions, shared between queries so each
# fragment file is read and parsed at most once.
class Fragments
  def initialize(root, dir = 'app/assets/javascripts')
    @root = root
    @dir = dir
    @store = {}
  end

  # Absolute path (String) of the CE fragment home directory.
  def home
    @home ||= join_from_root(@dir)
  end

  # Absolute path (String) of the EE fragment home directory.
  def home_ee
    @home_ee ||= join_from_root('ee', @dir)
  end

  # Memoized lookup: the same path always yields the same Definition.
  def get(frag_path)
    @store.fetch(frag_path) { @store[frag_path] = Definition.new(frag_path, self) }
  end

  private

  def join_from_root(*segments)
    segments.reduce(@root) { |path, segment| path / segment }.to_s
  end
end
# Finds every query definition under the given root path (file or
# directory). Returns [] when the root does not exist.
def self.find(root)
  definitions = []

  ::Find.find(root.to_s) do |path|
    definitions << Definition.new(path, fragments) if query?(path)
  end

  definitions
rescue Errno::ENOENT
  [] # root does not exist
end

# Shared, memoized fragment store rooted at Rails.root.
def self.fragments
  @fragments ||= Fragments.new(Rails.root)
end

# All query definitions in both the CE and EE asset directories.
def self.all
  ['.', 'ee'].flat_map do |prefix|
    find(Rails.root / prefix / 'app/assets/javascripts')
  end
end

# Whether the path appears in the known-invalid-queries allowlist
# (config/known_invalid_graphql_queries.yml); matched by suffix.
def self.known_failure?(path)
  @known_failures ||= YAML.safe_load(File.read(Rails.root.join('config', 'known_invalid_graphql_queries.yml')))

  @known_failures.fetch('filenames', []).any? { |known_failure| path.to_s.ends_with?(known_failure) }
end

# A query file is any .graphql file that is not a fragment or a typedef.
def self.query?(path)
  path.ends_with?('.graphql') &&
    !path.ends_with?('.fragment.graphql') &&
    !path.ends_with?('typedefs.graphql')
end
end
end
end

View File

@ -4,7 +4,7 @@ module Gitlab
module Jira
# Gitlab JIRA HTTP client to be used with jira-ruby gem, this subclasses JIRA::HTTPClient.
# Uses Gitlab::HTTP to make requests to JIRA REST API.
# The parent class implementation can be found at: https://github.com/sumoheavy/jira-ruby/blob/v1.7.0/lib/jira/http_client.rb
# The parent class implementation can be found at: https://github.com/sumoheavy/jira-ruby/blob/master/lib/jira/http_client.rb
class HttpClient < JIRA::HttpClient
extend ::Gitlab::Utils::Override
@ -43,6 +43,8 @@ module Gitlab
result
end
private
def auth_params
return {} unless @options[:username] && @options[:password]
@ -54,8 +56,6 @@ module Gitlab
}
end
private
def get_cookies
cookie_array = @cookies.values.map { |cookie| "#{cookie.name}=#{cookie.value[0]}" }
cookie_array += Array(@options[:additional_cookies]) if @options.key?(:additional_cookies)

View File

@ -13,12 +13,10 @@ module Gitlab
# This is Rack::Attack::DEFAULT_THROTTLED_RESPONSE, modified to allow a custom response
Rack::Attack.throttled_response = lambda do |env|
# Send the Retry-After header so clients (e.g. python-gitlab) can make good choices about delays
match_data = env['rack.attack.match_data']
now = match_data[:epoch_time]
retry_after = match_data[:period] - (now % match_data[:period])
[429, { 'Content-Type' => 'text/plain', 'Retry-After' => retry_after.to_s }, [Gitlab::Throttle.rate_limiting_response_text]]
throttled_headers = Gitlab::RackAttack.throttled_response_headers(
env['rack.attack.matched'], env['rack.attack.match_data']
)
[429, { 'Content-Type' => 'text/plain' }.merge(throttled_headers), [Gitlab::Throttle.rate_limiting_response_text]]
end
# Configure the throttles
@ -27,6 +25,55 @@ module Gitlab
configure_user_allowlist
end
# Rate Limit HTTP headers are not standardized anywhere. This is the latest
# draft submitted to IETF:
# https://github.com/ietf-wg-httpapi/ratelimit-headers/blob/main/draft-ietf-httpapi-ratelimit-headers.md
#
# This method implements the most viable parts of the headers. Those headers
# will be sent back to the client when it gets throttled.
#
# - RateLimit-Limit: indicates the request quota associated to the client
# in 60 seconds. The time window for the quota here is supposed to be
# mirrored to throttle_*_period_in_seconds application settings. However,
# our HAProxy as well as some ecosystem libraries are using a fixed
# 60-second window. Therefore, the returned limit is approximately rounded
# up to fit into that window.
#
# - RateLimit-Observed: indicates the current request amount associated to
# the client within the time window.
#
# - RateLimit-Remaining: indicates the remaining quota within the time
# window. It is the result of RateLimit-Limit - RateLimit-Observed
#
# - Retry-After: the remaining duration in seconds until the quota is
# reset. This is a standardized HTTP header:
# https://tools.ietf.org/html/rfc7231#page-69
#
# - RateLimit-Reset: Similar to Retry-After.
#
# - RateLimit-ResetTime: the point in time at which the request quota is reset.
# Builds the rate-limit response headers for a throttled request.
#
# matched:    name of the Rack::Attack throttle that matched
# match_data: Rack::Attack match data for the request
#
# Returns a Hash of header name => String value.
def self.throttled_response_headers(matched, match_data)
  # Match data example:
  # {:discriminator=>"127.0.0.1", :count=>12, :period=>60 seconds, :limit=>1, :epoch_time=>1609833930}
  # Source: https://github.com/rack/rack-attack/blob/v6.3.0/lib/rack/attack/throttle.rb#L33
  period = match_data[:period]
  limit = match_data[:limit]
  observed = match_data[:count]
  now = match_data[:epoch_time]

  # Scale the configured limit up to the fixed 60-second window the
  # headers advertise (see the class-level comment).
  rounded_limit = (limit.to_f * 1.minute / period).ceil
  # Seconds remaining until the current window resets.
  retry_after = period - (now % period)
  # Epoch time at which the window resets.
  reset_time = now + retry_after

  {
    'RateLimit-Name' => matched.to_s,
    'RateLimit-Limit' => rounded_limit.to_s,
    'RateLimit-Observed' => observed.to_s,
    'RateLimit-Remaining' => (limit > observed ? limit - observed : 0).to_s,
    'RateLimit-Reset' => retry_after.to_s,
    'RateLimit-ResetTime' => Time.at(reset_time).httpdate,
    'Retry-After' => retry_after.to_s
  }
end
def self.configure_user_allowlist
@user_allowlist = nil
user_allowlist

View File

@ -82,3 +82,5 @@ module Gitlab
end
end
end
Gitlab::Usage::MetricDefinition.prepend_if_ee('EE::Gitlab::Usage::MetricDefinition')

View File

@ -33,6 +33,44 @@ namespace :gitlab do
)
namespace :graphql do
# Fix: desc used 'Gitlab', inconsistent with the sibling tasks' 'GitLab'.
desc 'GitLab | GraphQL | Validate queries'
task validate: [:environment, :enable_feature_flags] do |t, args|
  # Validate the query files passed as task arguments, or every query in
  # the repository (CE and EE) when no arguments are given.
  queries = if args.to_a.present?
              args.to_a.flat_map { |path| Gitlab::Graphql::Queries.find(path) }
            else
              Gitlab::Graphql::Queries.all
            end

  failed = queries.flat_map do |defn|
    summary, errs = defn.validate(GitlabSchema)

    case summary
    when :client_query
      # Purely client-side queries cannot be validated against the schema.
      warn("SKIP #{defn.file}: client query")
    else
      warn("OK #{defn.file}") if errs.empty?
      errs.each do |err|
        warn(<<~MSG)
          ERROR #{defn.file}: #{err.message} (at #{err.path.join('.')})
        MSG
      end
    end

    errs.empty? ? [] : [defn.file]
  end

  if failed.present?
    format_output(
      "#{failed.count} GraphQL #{'query'.pluralize(failed.count)} out of #{queries.count} failed validation:",
      *failed.map do |name|
        known_failure = Gitlab::Graphql::Queries.known_failure?(name)
        "- #{name}" + (known_failure ? ' (known failure)' : '')
      end
    )
    # Only fail the task when at least one failure is not on the
    # known-failures allowlist.
    abort unless failed.all? { |name| Gitlab::Graphql::Queries.known_failure?(name) }
  end
end
desc 'GitLab | GraphQL | Generate GraphQL docs'
task compile_docs: [:environment, :enable_feature_flags] do
renderer = Gitlab::Graphql::Docs::Renderer.new(GitlabSchema.graphql_definition, render_options)
@ -78,11 +116,11 @@ def render_options
}
end
def format_output(str)
# Prints the given message lines framed by a '#' banner, each message
# prefixed with '# '.
def format_output(*strs)
  banner = '#' * 10
  body = strs.map { |line| "# #{line}" }
  puts [banner, '#', *body, '#', banner]
end

View File

@ -7,7 +7,7 @@ const matchExtensions = ['js', 'vue', 'graphql'];
// This will improve glob performance by excluding certain directories.
// The .prettierignore file will also be respected, but after the glob has executed.
const globIgnore = ['**/node_modules/**', 'vendor/**', 'public/**'];
const globIgnore = ['**/node_modules/**', 'vendor/**', 'public/**', 'fixtures/**'];
const readFileAsync = (file, options) =>
new Promise((resolve, reject) => {

View File

@ -608,6 +608,92 @@ describe('secondsToDays', () => {
});
});
// Covers positive, negative, zero and fractional day offsets.
// NOTE(review): per the cases below, a fractional 0.9 appears to behave
// like 0 — confirm against the nDaysAfter implementation.
describe('nDaysAfter', () => {
  const date = new Date('2019-07-16T00:00:00.000Z');

  it.each`
    numberOfDays | expectedResult
    ${1}         | ${new Date('2019-07-17T00:00:00.000Z').valueOf()}
    ${90}        | ${new Date('2019-10-14T00:00:00.000Z').valueOf()}
    ${-1}        | ${new Date('2019-07-15T00:00:00.000Z').valueOf()}
    ${0}         | ${date.valueOf()}
    ${0.9}       | ${date.valueOf()}
  `('returns $numberOfDays day(s) after the provided date', ({ numberOfDays, expectedResult }) => {
    expect(datetimeUtility.nDaysAfter(date, numberOfDays)).toBe(expectedResult);
  });
});
// Covers positive, negative, zero and fractional day offsets.
// NOTE(review): unlike nDaysAfter, a fractional 0.9 here behaves like a
// full day — confirm this asymmetry is intended.
describe('nDaysBefore', () => {
  const date = new Date('2019-07-16T00:00:00.000Z');

  it.each`
    numberOfDays | expectedResult
    ${1}         | ${new Date('2019-07-15T00:00:00.000Z').valueOf()}
    ${90}        | ${new Date('2019-04-17T00:00:00.000Z').valueOf()}
    ${-1}        | ${new Date('2019-07-17T00:00:00.000Z').valueOf()}
    ${0}         | ${date.valueOf()}
    ${0.9}       | ${new Date('2019-07-15T00:00:00.000Z').valueOf()}
  `('returns $numberOfDays day(s) before the provided date', ({ numberOfDays, expectedResult }) => {
    expect(datetimeUtility.nDaysBefore(date, numberOfDays)).toBe(expectedResult);
  });
});
// Exercises months of every length (28/29/30/31 days) plus negative,
// zero and fractional offsets.
describe('nMonthsAfter', () => {
  // February has 28 days
  const feb2019 = new Date('2019-02-15T00:00:00.000Z');
  // Except in 2020, it had 29 days
  const feb2020 = new Date('2020-02-15T00:00:00.000Z');
  // April has 30 days
  const apr2020 = new Date('2020-04-15T00:00:00.000Z');
  // May has 31 days
  const may2020 = new Date('2020-05-15T00:00:00.000Z');

  it.each`
    date       | numberOfMonths | expectedResult
    ${feb2019} | ${1}           | ${new Date('2019-03-15T00:00:00.000Z').valueOf()}
    ${feb2020} | ${1}           | ${new Date('2020-03-15T00:00:00.000Z').valueOf()}
    ${apr2020} | ${1}           | ${new Date('2020-05-15T00:00:00.000Z').valueOf()}
    ${may2020} | ${1}           | ${new Date('2020-06-15T00:00:00.000Z').valueOf()}
    ${may2020} | ${12}          | ${new Date('2021-05-15T00:00:00.000Z').valueOf()}
    ${may2020} | ${-1}          | ${new Date('2020-04-15T00:00:00.000Z').valueOf()}
    ${may2020} | ${0}           | ${may2020.valueOf()}
    ${may2020} | ${0.9}         | ${may2020.valueOf()}
  `(
    'returns $numberOfMonths month(s) after the provided date',
    ({ date, numberOfMonths, expectedResult }) => {
      expect(datetimeUtility.nMonthsAfter(date, numberOfMonths)).toBe(expectedResult);
    },
  );
});
// Mirror of the nMonthsAfter cases: previous months of every length plus
// negative, zero and fractional offsets.
describe('nMonthsBefore', () => {
  // The previous month (February) has 28 days
  const march2019 = new Date('2019-03-15T00:00:00.000Z');
  // Except in 2020, it had 29 days
  const march2020 = new Date('2020-03-15T00:00:00.000Z');
  // The previous month (April) has 30 days
  const may2020 = new Date('2020-05-15T00:00:00.000Z');
  // The previous month (May) has 31 days
  const june2020 = new Date('2020-06-15T00:00:00.000Z');

  it.each`
    date         | numberOfMonths | expectedResult
    ${march2019} | ${1}           | ${new Date('2019-02-15T00:00:00.000Z').valueOf()}
    ${march2020} | ${1}           | ${new Date('2020-02-15T00:00:00.000Z').valueOf()}
    ${may2020}   | ${1}           | ${new Date('2020-04-15T00:00:00.000Z').valueOf()}
    ${june2020}  | ${1}           | ${new Date('2020-05-15T00:00:00.000Z').valueOf()}
    ${june2020}  | ${12}          | ${new Date('2019-06-15T00:00:00.000Z').valueOf()}
    ${june2020}  | ${-1}          | ${new Date('2020-07-15T00:00:00.000Z').valueOf()}
    ${june2020}  | ${0}           | ${june2020.valueOf()}
    ${june2020}  | ${0.9}         | ${new Date('2020-05-15T00:00:00.000Z').valueOf()}
  `(
    'returns $numberOfMonths month(s) before the provided date',
    ({ date, numberOfMonths, expectedResult }) => {
      expect(datetimeUtility.nMonthsBefore(date, numberOfMonths)).toBe(expectedResult);
    },
  );
});
describe('approximateDuration', () => {
it.each`
seconds

View File

@ -1,4 +1,3 @@
import waitForPromises from 'helpers/wait_for_promises';
import { setHTMLFixture, resetHTMLFixture } from 'helpers/fixtures';
import initSearch from '~/search_settings';
import mountSearchSettings from '~/pages/projects/edit/mount_search_settings';
@ -7,24 +6,19 @@ jest.mock('~/search_settings');
describe('pages/projects/edit/mount_search_settings', () => {
afterEach(() => {
initSearch.mockReset();
resetHTMLFixture();
});
it('initializes search settings when js-search-settings-app is available', async () => {
setHTMLFixture('<div class="js-search-settings-app"></div>');
mountSearchSettings();
await waitForPromises();
await mountSearchSettings();
expect(initSearch).toHaveBeenCalled();
});
it('does not initialize search settings when js-search-settings-app is unavailable', async () => {
mountSearchSettings();
await waitForPromises();
await mountSearchSettings();
expect(initSearch).not.toHaveBeenCalled();
});

View File

@ -0,0 +1,343 @@
# frozen_string_literal: true
require 'fast_spec_helper'
require "test_prof/recipes/rspec/let_it_be"
RSpec.describe Gitlab::Graphql::Queries do
# Both shared examples expect the including context to define `subject`
# (a Queries::Definition) and `schema`.
shared_examples 'a valid GraphQL query for the blog schema' do
  it 'is valid' do
    expect(subject.validate(schema).second).to be_empty
  end
end

shared_examples 'an invalid GraphQL query for the blog schema' do
  # The including context must also define an `errors` matcher.
  it 'is invalid' do
    expect(subject.validate(schema).second).to match errors
  end
end
# Toy schema to validate queries against
let_it_be(:schema) do
  author = Class.new(GraphQL::Schema::Object) do
    graphql_name 'Author'
    field :name, GraphQL::STRING_TYPE, null: true
    field :handle, GraphQL::STRING_TYPE, null: false
    field :verified, GraphQL::BOOLEAN_TYPE, null: false
  end

  post = Class.new(GraphQL::Schema::Object) do
    graphql_name 'Post'
    field :name, GraphQL::STRING_TYPE, null: false
    field :title, GraphQL::STRING_TYPE, null: false
    field :content, GraphQL::STRING_TYPE, null: true
    field :author, author, null: false
  end

  # Added after both classes exist, to close the Author <-> Post cycle.
  author.field :posts, [post], null: false do
    argument :blog_title, GraphQL::STRING_TYPE, required: false
  end

  blog = Class.new(GraphQL::Schema::Object) do
    graphql_name 'Blog'
    field :title, GraphQL::STRING_TYPE, null: false
    field :description, GraphQL::STRING_TYPE, null: false
    field :main_author, author, null: false
    field :posts, [post], null: false
    field :post, post, null: true do
      argument :slug, GraphQL::STRING_TYPE, required: true
    end
  end

  Class.new(GraphQL::Schema) do
    query(Class.new(GraphQL::Schema::Object) do
      graphql_name 'Query'
      field :blog, blog, null: true do
        argument :title, GraphQL::STRING_TYPE, required: true
      end
      field :post, post, null: true do
        argument :slug, GraphQL::STRING_TYPE, required: true
      end
    end)
  end
end
# All fixture query files used by these specs live under this directory.
let(:root) do
  Rails.root / 'fixtures/lib/gitlab/graphql/queries'
end
describe Gitlab::Graphql::Queries::Fragments do
  subject { described_class.new(root) }

  it 'has the right home' do
    expect(subject.home).to eq (root / 'app/assets/javascripts').to_s
  end

  it 'has the right EE home' do
    expect(subject.home_ee).to eq (root / 'ee/app/assets/javascripts').to_s
  end

  # `get` must memoize, so each fragment file is parsed at most once.
  it 'caches query definitions' do
    fragment = subject.get('foo')

    expect(fragment).to be_a(::Gitlab::Graphql::Queries::Definition)
    expect(subject.get('foo')).to be fragment
  end
end
describe '.all' do
  # `.all` should simply concatenate the CE and EE search roots.
  it 'is the combination of finding queries in CE and EE' do
    expect(described_class)
      .to receive(:find).with(Rails.root / 'app/assets/javascripts').and_return([:ce])
    expect(described_class)
      .to receive(:find).with(Rails.root / 'ee/app/assets/javascripts').and_return([:ee])

    expect(described_class.all).to eq([:ce, :ee])
  end
end
describe '.find' do
  # Matcher for a Definition whose file attribute is the given path.
  def definition_of(path)
    be_a(::Gitlab::Graphql::Queries::Definition)
      .and(have_attributes(file: path.to_s))
  end

  it 'find a single specific file' do
    path = root / 'post_by_slug.graphql'

    expect(described_class.find(path)).to contain_exactly(definition_of(path))
  end

  it 'ignores files that do not exist' do
    path = root / 'not_there.graphql'

    expect(described_class.find(path)).to be_empty
  end

  # Fragments and typedefs are not standalone queries, so .find skips them.
  it 'ignores fragments' do
    path = root / 'author.fragment.graphql'

    expect(described_class.find(path)).to be_empty
  end

  it 'ignores typedefs' do
    path = root / 'typedefs.graphql'

    expect(described_class.find(path)).to be_empty
  end

  it 'finds all query definitions under a root directory' do
    found = described_class.find(root)

    expect(found).to include(
      definition_of(root / 'post_by_slug.graphql'),
      definition_of(root / 'post_by_slug.with_import.graphql'),
      definition_of(root / 'post_by_slug.with_import.misspelled.graphql'),
      definition_of(root / 'duplicate_imports.graphql'),
      definition_of(root / 'deeply/nested/query.graphql')
    )
    expect(found).not_to include(
      definition_of(root / 'typedefs.graphql'),
      definition_of(root / 'author.fragment.graphql')
    )
  end
end
describe Gitlab::Graphql::Queries::Definition do
  let(:fragments) { Gitlab::Graphql::Queries::Fragments.new(root, '.') }

  # The definition under test is built from a fixture file named by
  # each context's `let(:path)`.
  subject { described_class.new(root / path, fragments) }

  # -- Valid definitions: should pass validation against the blog schema --

  context 'a simple query' do
    let(:path) { 'post_by_slug.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  context 'a query with an import' do
    let(:path) { 'post_by_slug.with_import.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  context 'a query with duplicate imports' do
    let(:path) { 'duplicate_imports.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  context 'a query importing from ee_else_ce' do
    let(:path) { 'ee_else_ce.import.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'

    it 'can resolve the ee fields' do
      # The ee-only 'verified' field must only appear in EE mode output.
      expect(subject.text(mode: :ce)).not_to include('verified')

      expect(subject.text(mode: :ee)).to include('verified')
    end
  end

  context 'a query refering to parent directories' do
    let(:path) { 'deeply/nested/query.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  # -- Broken imports: missing files surface as FileNotFound errors --

  context 'a query refering to parent directories, incorrectly' do
    let(:path) { 'deeply/nested/bad_import.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          be_a(::Gitlab::Graphql::Queries::FileNotFound)
            .and(have_attributes(message: include('deeply/author.fragment.graphql')))
        )
      end
    end
  end

  context 'a query with a broken import' do
    let(:path) { 'post_by_slug.with_import.misspelled.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          be_a(::Gitlab::Graphql::Queries::FileNotFound)
            .and(have_attributes(message: include('auther.fragment.graphql')))
        )
      end
    end
  end

  context 'a query which imports a file with a broken import' do
    let(:path) { 'transitive_bad_import.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          be_a(::Gitlab::Graphql::Queries::FileNotFound)
            .and(have_attributes(message: include('does-not-exist.graphql')))
        )
      end
    end
  end

  # -- @client directives: client-only queries are tagged, mixed ones are not --

  context 'a query containing a client directive' do
    let(:path) { 'client.query.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'

    it 'is tagged as a client query' do
      expect(subject.validate(schema).first).to eq :client_query
    end
  end

  context 'a mixed client query, valid' do
    let(:path) { 'mixed_client.query.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'

    it 'is not tagged as a client query' do
      expect(subject.validate(schema).first).not_to eq :client_query
    end
  end

  context 'a mixed client query, with skipped argument' do
    let(:path) { 'mixed_client_skipped_argument.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  context 'a mixed client query, with unused fragment' do
    let(:path) { 'mixed_client_unused_fragment.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  context 'a client query, with unused fragment' do
    let(:path) { 'client_unused_fragment.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'

    it 'is tagged as a client query' do
      expect(subject.validate(schema).first).to eq :client_query
    end
  end

  context 'a mixed client query, invalid' do
    let(:path) { 'mixed_client_invalid.query.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(have_attributes(message: include('titlz')))
      end
    end
  end

  context 'a query containing a connection directive' do
    let(:path) { 'connection.query.graphql' }

    it_behaves_like 'a valid GraphQL query for the blog schema'
  end

  # -- Schema violations: caught by validation against the blog schema --

  context 'a query which mentions an incorrect field' do
    let(:path) { 'wrong_field.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          have_attributes(message: /'createdAt' doesn't exist/),
          have_attributes(message: /'categories' doesn't exist/)
        )
      end
    end
  end

  context 'a query which has a missing argument' do
    let(:path) { 'missing_argument.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          have_attributes(message: include('blog'))
        )
      end
    end
  end

  context 'a query which has a bad argument' do
    let(:path) { 'bad_argument.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          have_attributes(message: include('Nullability mismatch on variable $bad'))
        )
      end
    end
  end

  context 'a query which has a syntax error' do
    let(:path) { 'syntax-error.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          have_attributes(message: include('Parse error'))
        )
      end
    end
  end

  context 'a query which has an unused import' do
    let(:path) { 'unused_import.graphql' }

    it_behaves_like 'an invalid GraphQL query for the blog schema' do
      let(:errors) do
        contain_exactly(
          have_attributes(message: include('AuthorF was defined, but not used'))
        )
      end
    end
  end
end
end

Some files were not shown because too many files have changed in this diff Show More