Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2020-01-20 18:08:44 +00:00
parent 364f6f2e33
commit e0ab7eda1b
44 changed files with 654 additions and 324 deletions

View file

@@ -190,6 +190,7 @@
min-width: 0;
border: 1px solid $border-color;
background-color: $white-light;
border-radius: $border-radius-default 0 0 $border-radius-default;
@include media-breakpoint-down(sm) {
flex: 1 1 auto;
@@ -287,7 +288,7 @@
.filtered-search-history-dropdown-toggle-button {
flex: 1;
width: auto;
border-radius: 0;
border-radius: $border-radius-default 0 0 $border-radius-default;
border: 0;
border-right: 1px solid $border-color;
color: $gl-text-color-secondary;
@@ -296,8 +297,7 @@
&:hover,
&:focus {
color: $gl-text-color;
border-color: $blue-300;
outline: none;
border-color: $border-color;
}
svg {

View file

@@ -73,7 +73,9 @@ module ErrorTracking
end
def sentry_client
Sentry::Client.new(api_url, token)
strong_memoize(:sentry_client) do
Sentry::Client.new(api_url, token)
end
end
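# A sketch of what the memoization above achieves: repeated calls reuse one
# client instance. It is roughly equivalent to the following, except that
# strong_memoize also caches nil/false results:
#
#   @sentry_client ||= Sentry::Client.new(api_url, token)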
def sentry_external_url

View file

@@ -6,9 +6,15 @@ class SentryIssue < ApplicationRecord
validates :issue, uniqueness: true, presence: true
validates :sentry_issue_identifier, presence: true
after_create_commit :enqueue_sentry_sync_job
def self.for_project_and_identifier(project, identifier)
joins(:issue)
.where(issues: { project_id: project.id })
.find_by_sentry_issue_identifier(identifier)
end
def enqueue_sentry_sync_job
ErrorTrackingIssueLinkWorker.perform_async(issue.id)
end
end
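# Usage sketch (hypothetical values): look up the link record for a project
# and a Sentry issue identifier, then navigate to the linked GitLab issue.
#
#   sentry_issue = SentryIssue.for_project_and_identifier(project, 1234567)
#   sentry_issue&.issue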

View file

@@ -47,7 +47,7 @@
.filtered-search-box
= dropdown_tag(_('Recent searches'),
options: { wrapper_class: 'filtered-search-history-dropdown-wrapper',
toggle_class: 'filtered-search-history-dropdown-toggle-button',
toggle_class: 'btn filtered-search-history-dropdown-toggle-button',
dropdown_class: 'filtered-search-history-dropdown',
content_class: 'filtered-search-history-dropdown-content' }) do
.js-filtered-search-history-dropdown{ data: { full_path: admin_runners_path } }

View file

@@ -21,7 +21,7 @@
- if type != :boards_modal && type != :boards
= dropdown_tag(_('Recent searches'),
options: { wrapper_class: "filtered-search-history-dropdown-wrapper",
toggle_class: "filtered-search-history-dropdown-toggle-button",
toggle_class: "btn filtered-search-history-dropdown-toggle-button",
dropdown_class: "filtered-search-history-dropdown",
content_class: "filtered-search-history-dropdown-content" }) do
.js-filtered-search-history-dropdown{ data: { full_path: search_history_storage_prefix } }

View file

@@ -143,6 +143,7 @@
- delete_user
- email_receiver
- emails_on_push
- error_tracking_issue_link
- expire_build_instance_artifacts
- git_garbage_collect
- gitlab_shell

View file

@@ -0,0 +1,85 @@
# frozen_string_literal: true
# Creates a link in Sentry between a Sentry issue and a GitLab issue.
# If the link already exists, no changes will occur.
# If a link to a different GitLab issue exists, a new link
# will still be created, but will not be visible in Sentry
# until the prior link is deleted.
class ErrorTrackingIssueLinkWorker
include ApplicationWorker
include ExclusiveLeaseGuard
include Gitlab::Utils::StrongMemoize
feature_category :error_tracking
worker_has_external_dependencies!
LEASE_TIMEOUT = 15.minutes
attr_reader :issue
def perform(issue_id)
@issue = Issue.find_by_id(issue_id)
return unless issue && error_tracking && sentry_issue_id
try_obtain_lease do
logger.info("Linking Sentry issue #{sentry_issue_id} to GitLab issue #{issue.id}")
if integration_id.nil?
logger.info("Sentry integration unavailable for #{error_tracking.api_url}")
break
end
sentry_client.create_issue_link(integration_id, sentry_issue_id, issue)
rescue Sentry::Client::Error
logger.info("Failed to link Sentry issue #{sentry_issue_id} to GitLab issue #{issue.id}")
end
end
private
def error_tracking
strong_memoize(:error_tracking) do
issue.project.error_tracking_setting
end
end
def sentry_issue_id
strong_memoize(:sentry_issue_id) do
issue.sentry_issue.sentry_issue_identifier
end
end
def sentry_client
error_tracking.sentry_client
end
def integration_id
strong_memoize(:integration_id) do
repo&.integration_id
end
end
def repo
sentry_client
.repos(organization_slug)
.find { |repo| repo.project_id == issue.project_id && repo.status == 'active' }
end
def organization_slug
error_tracking.organization_slug
end
def project_url
::Gitlab::Routing.url_helpers.project_url(issue.project)
end
def lease_key
"link_sentry_issue_#{sentry_issue_id}_gitlab_#{issue.id}"
end
def lease_timeout
LEASE_TIMEOUT
end
end
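# Usage sketch: the SentryIssue after_create_commit callback enqueues this
# worker automatically; a manual run would look like this (assumed id):
#
#   ErrorTrackingIssueLinkWorker.perform_async(issue.id)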

View file

@@ -0,0 +1,5 @@
---
title: Add border radius and remove blue outline on recent searches filter
merge_request: 23266
author:
type: fixed

View file

@@ -0,0 +1,5 @@
---
title: Add cilium to the managed cluster apps template
merge_request: 22557
author:
type: added

View file

@@ -0,0 +1,5 @@
---
title: Rename Custom hooks to Server hooks
merge_request: 23064
author:
type: changed

View file

@@ -0,0 +1,5 @@
---
title: Sync GitLab issue back to Sentry when created in GitLab
merge_request: 23007
author:
type: added

View file

@@ -101,6 +101,7 @@
- [group_export, 1]
- [self_monitoring_project_create, 2]
- [self_monitoring_project_delete, 2]
- [error_tracking_issue_link, 2]
# EE-specific queues
- [analytics, 1]

View file

@@ -1,126 +1,5 @@
# Custom server-side Git hooks **(CORE ONLY)**
---
redirect_to: 'server_hooks.md'
---
NOTE: **Note:**
Custom Git hooks must be configured on the filesystem of the GitLab
server. Only GitLab server administrators will be able to complete these tasks.
Please explore [webhooks] and [CI] as options if you do not
have filesystem access. For a user configurable Git hook interface, see
[Push Rules](../push_rules/push_rules.md),
available in GitLab Enterprise Edition.
NOTE: **Note:**
Custom Git hooks won't be replicated to secondary nodes if you use [GitLab Geo](geo/replication/index.md).
Git natively supports hooks that are executed on different actions.
Examples of server-side Git hooks include pre-receive, post-receive, and update.
See [Git SCM Server-Side Hooks][hooks] for more information about each hook type.
As of GitLab Shell version 2.2.0 (which requires GitLab 7.5+), GitLab
administrators can add custom Git hooks to any GitLab project.
## Create a custom Git hook for a repository
Server-side Git hooks are typically placed in the repository's `hooks`
subdirectory. In GitLab, hook directories are symlinked to the GitLab Shell
`hooks` directory for ease of maintenance between GitLab Shell upgrades.
Custom hooks are implemented differently, but the behavior is exactly the same
once the hook is created. Follow the steps below to set up a custom hook for a
repository:
1. Pick a project that needs a custom Git hook.
1. On the GitLab server, navigate to the project's repository directory.
For an installation from source the path is usually
`/home/git/repositories/<group>/<project>.git`. For Omnibus installs the path is
usually `/var/opt/gitlab/git-data/repositories/<group>/<project>.git`.
1. Create a new directory in this location called `custom_hooks`.
1. Inside the new `custom_hooks` directory, create a file with a name matching
the hook type. For a pre-receive hook the file name should be `pre-receive`
with no extension.
1. Make the hook file executable and make sure it's owned by Git.
1. Write the code to make the Git hook function as expected. Hooks can be
in any language. Ensure the 'shebang' at the top properly reflects the language
type. For example, if the script is in Ruby the shebang will probably be
`#!/usr/bin/env ruby`.
That's it! Assuming the hook code is properly implemented the hook will fire
as appropriate.
## Set a global Git hook for all repositories
To create a Git hook that applies to all of your repositories in
your instance, set a global Git hook. Since GitLab will look inside the GitLab Shell
`hooks` directory for global hooks, adding any hook there will apply it to all repositories.
Follow the steps below to properly set up a custom hook for all repositories:
1. On the GitLab server, navigate to the configured custom hook directory. The
default is in the GitLab Shell directory. For an installation from source, the
GitLab Shell `hooks` directory path is usually
`/home/git/gitlab-shell/hooks`. For Omnibus installs the path is usually
`/opt/gitlab/embedded/service/gitlab-shell/hooks`.
To look in a different directory for the global custom hooks,
set `custom_hooks_dir` in the GitLab Shell config. For
Omnibus installations, this can be set in `gitlab.rb`; and in source
installations, this can be set in `gitlab-shell/config.yml`.
1. Create a new directory in this location. Depending on your hook, it will be
either a `pre-receive.d`, `post-receive.d`, or `update.d` directory.
1. Inside this new directory, add your hook. Hooks can be
in any language. Ensure the 'shebang' at the top properly reflects the language
type. For example, if the script is in Ruby the shebang will probably be
`#!/usr/bin/env ruby`.
1. Make the hook file executable and make sure it's owned by Git.
Now test the hook to see that it's functioning properly.
## Chained hooks support
> [Introduced][93] in GitLab Shell 4.1.0 and GitLab 8.15.
Hooks can also be global or set per project directory, and support chained
execution of the hooks.
NOTE: **Note:**
`<hook_name>.d` would need to be either `pre-receive.d`,
`post-receive.d`, or `update.d` to work properly. Any other names will be ignored.
NOTE: **Note:**
Files in `.d` directories need to be executable and not match the backup file
pattern (`*~`).
The hooks are searched and executed in this order:
1. `gitlab-shell/hooks` directory as known to Gitaly
1. `<project>.git/hooks/<hook_name>` - executed by `git` itself, this is symlinked to `gitlab-shell/hooks/<hook_name>`
1. `<project>.git/custom_hooks/<hook_name>` - per project hook (this is already existing behavior)
1. `<project>.git/custom_hooks/<hook_name>.d/*` - per project hooks
1. `<project>.git/hooks/<hook_name>.d/*` OR `<custom_hooks_dir>/<hook_name.d>/*` - global hooks: all executable files (minus editor backup files)
The hooks of the same type are executed in order and execution stops on the
first script exiting with a non-zero value.
## Custom error messages
> [Introduced][5073] in GitLab 8.10.
To have custom error messages appear in GitLab's UI when the commit is
declined or an error occurs during the Git hook, your script should:
- Send the custom error messages to either the script's `stdout` or `stderr`.
- Prefix each message with `GL-HOOK-ERR:` with no characters appearing before the prefix.
### Example custom error message
This hook script, written in shell, generates the following message in GitLab's UI:
```bash
#!/bin/sh
echo "GL-HOOK-ERR: My custom error message.";
exit 1
```
![Custom message from custom Git hook](img/custom_hooks_error_msg.png)
[CI]: ../ci/README.md
[hooks]: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks#Server-Side-Hooks
[webhooks]: ../user/project/integrations/webhooks.md
[5073]: https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/5073
[93]: https://gitlab.com/gitlab-org/gitlab-shell/merge_requests/93
This document was moved to [another location](server_hooks.md).

View file

@@ -352,9 +352,9 @@ coming in. One sure way to trigger a Gitaly request is to clone a repository
from your GitLab server over HTTP.
DANGER: **Danger:**
If you have [custom server-side Git hooks](../custom_hooks.md) configured,
If you have [Server hooks](../server_hooks.md) configured,
either per repository or globally, you must move these to the Gitaly node.
If you have multiple Gitaly nodes, copy your custom hook(s) to all nodes.
If you have multiple Gitaly nodes, copy your server hook(s) to all nodes.
### Disabling the Gitaly service in a cluster environment

View file

@@ -47,8 +47,8 @@ complexity.
- Redis - Key/Value store (User sessions, cache, queue for Sidekiq)
- Sentinel - Redis health check/failover manager
- Gitaly - Provides high-level storage and RPC access to Git repositories
- S3 Object Storage service[^3] and / or NFS storage servers[^4] for entities such as Uploads, Artifacts, LFS Objects, etc...
- Load Balancer[^2] - Main entry point and handles load balancing for the GitLab application nodes.
- S3 Object Storage service[^4] and / or NFS storage servers[^5] for entities such as Uploads, Artifacts, LFS Objects, etc...
- Load Balancer[^6] - Main entry point and handles load balancing for the GitLab application nodes.
- Monitor - Prometheus and Grafana monitoring with auto discovery.
## Scalable Architecture Examples
@@ -72,9 +72,9 @@ larger one.
- 1 PostgreSQL node
- 1 Redis node
- 1 Gitaly node
- 1 or more Object Storage services[^3] and / or NFS storage server[^4]
- 1 or more Object Storage services[^4] and / or NFS storage server[^5]
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 or more Load Balancer nodes[^2]
- 1 or more Load Balancer nodes[^6]
- 1 Monitoring node (Prometheus, Grafana)
#### Installation Instructions
@@ -83,13 +83,13 @@ Complete the following installation steps in order. A link at the end of each
section will bring you back to the Scalable Architecture Examples section so
you can continue with the next step.
1. [Load Balancer(s)](load_balancer.md)[^2]
1. [Load Balancer(s)](load_balancer.md)[^6]
1. [Consul](consul.md)
1. [PostgreSQL](database.md#postgresql-in-a-scaled-environment) with [PgBouncer](https://docs.gitlab.com/ee/administration/high_availability/pgbouncer.html)
1. [PostgreSQL](database.md#postgresql-in-a-scaled-environment) with [PgBouncer](pgbouncer.md)
1. [Redis](redis.md#redis-in-a-scaled-environment)
1. [Gitaly](gitaly.md) (recommended) and / or [NFS](nfs.md)[^4]
1. [Gitaly](gitaly.md) (recommended) and / or [NFS](nfs.md)[^5]
1. [GitLab application nodes](gitlab.md)
- With [Object Storage service enabled](../gitaly/index.md#eliminating-nfs-altogether)[^3]
- With [Object Storage service enabled](../gitaly/index.md#eliminating-nfs-altogether)[^4]
1. [Monitoring node (Prometheus and Grafana)](monitoring_node.md)
### Full Scaling
@@ -103,10 +103,10 @@ in size, indicating that there is contention or there are not enough resources.
- 1 or more PostgreSQL nodes
- 1 or more Redis nodes
- 1 or more Gitaly storage servers
- 1 or more Object Storage services[^3] and / or NFS storage server[^4]
- 1 or more Object Storage services[^4] and / or NFS storage server[^5]
- 2 or more Sidekiq nodes
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 or more Load Balancer nodes[^2]
- 1 or more Load Balancer nodes[^6]
- 1 Monitoring node (Prometheus, Grafana)
## High Availability Architecture Examples
@@ -117,17 +117,17 @@ page mentions, there is a tradeoff between cost/complexity and uptime. Be sure
this complexity is absolutely required before taking the step into full
high availability.
For all examples below, we recommend running Consul and Redis Sentinel on
dedicated nodes. If Consul is running on PostgreSQL nodes or Sentinel on
For all examples below, we recommend running Consul and Redis Sentinel separately
from the services they monitor. If Consul is running on PostgreSQL nodes or Sentinel on
Redis nodes, there is a potential that high resource usage by PostgreSQL or
Redis could prevent communication between the other Consul and Sentinel nodes.
This may lead to the other nodes believing a failure has occurred and initiating
automated failover. Isolating Redis and Consul from the services they monitor
automated failover. Isolating Consul and Redis Sentinel from the services they monitor
reduces the chances of a false positive that a failure has occurred.
The examples below do not address high availability of NFS for objects. We recommend that an
S3 Object Storage service[^3] is used where possible over NFS but it's still required in
certain cases[^4]. Where NFS is to be used some enterprises have access to NFS appliances
S3 Object Storage service[^4] is used where possible over NFS but it's still required in
certain cases[^5]. Where NFS is to be used some enterprises have access to NFS appliances
that manage availability and this would be best case scenario.
There are many options in between each of these examples. Work with GitLab Support
@@ -147,12 +147,12 @@ moving to a hybrid or fully distributed architecture depending on what is causin
the contention.
- 3 PostgreSQL nodes
- 2 Redis nodes
- 3 Consul/Sentinel nodes
- 3 Redis nodes
- 3 Consul / Sentinel nodes
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 Gitaly storage servers
- 1 Object Storage service[^3] and / or NFS storage server[^4]
- 1 or more Load Balancer nodes[^2]
- 1 Object Storage service[^4] and / or NFS storage server[^5]
- 1 or more Load Balancer nodes[^6]
- 1 Monitoring node (Prometheus, Grafana)
![Horizontal architecture diagram](img/horizontal.png)
@@ -166,13 +166,13 @@ contention due to certain workloads.
- 3 PostgreSQL nodes
- 1 PgBouncer node
- 2 Redis nodes
- 3 Consul/Sentinel nodes
- 3 Redis nodes
- 3 Consul / Sentinel nodes
- 2 or more Sidekiq nodes
- 2 or more GitLab application nodes (Unicorn / Puma, Workhorse, Sidekiq)
- 1 Gitaly storage servers
- 1 Object Storage service[^3] and / or NFS storage server[^4]
- 1 or more Load Balancer nodes[^2]
- 1 Object Storage service[^4] and / or NFS storage server[^5]
- 1 or more Load Balancer nodes[^6]
- 1 Monitoring node (Prometheus, Grafana)
![Hybrid architecture diagram](img/hybrid.png)
@@ -194,8 +194,8 @@ with the added complexity of many more nodes to configure, manage, and monitor.
- 2 or more API nodes (All requests to `/api`)
- 2 or more Web nodes (All other web requests)
- 2 or more Gitaly storage servers
- 1 or more Object Storage services[^3] and / or NFS storage servers[^4]
- 1 or more Load Balancer nodes[^2]
- 1 or more Object Storage services[^4] and / or NFS storage servers[^5]
- 1 or more Load Balancer nodes[^6]
- 1 Monitoring node (Prometheus, Grafana)
![Fully Distributed architecture diagram](img/fully-distributed.png)
@@ -216,9 +216,12 @@ per 1000 users:
- Web: 2 RPS
- Git: 2 RPS
Note that your exact needs may be more, depending on your workload. Your
workload is influenced by factors such as - but not limited to - how active your
users are, how much automation you use, mirroring, and repo/change size.
NOTE: **Note:** Depending on your workflow, the recommended reference
architectures below may need to be adapted accordingly. Your workload
is influenced by factors such as - but not limited to - how active your users are,
how much automation you use, mirroring, and repo/change size. For example, a
10,000-user instance at these rates needs to sustain roughly 20 RPS each of Web
and Git traffic. Additionally, the memory values shown are given directly by
[GCP machine types](https://cloud.google.com/compute/docs/machine-types).
On different cloud vendors, a best-effort like-for-like match can be used.
### 2,000 User Configuration
@@ -229,22 +232,18 @@ users are, how much automation you use, mirroring, and repo/change size.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 8 threads | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 |
| GitLab Rails[^1] | 3 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Redis Persistent + Sentinel | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Gitaly[^2] [^7] | X | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis[^3] | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul + Sentinel[^3] | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] . | - | - | - |
| S3 Object Storage[^4] | - | - | - |
| NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best effort like for like can be used.
| External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
### 5,000 User Configuration
@@ -255,22 +254,18 @@ vendors a best effort like for like can be used.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| GitLab Rails[^1] | 3 | 16 vCPU, 14.4GB Memory | n1-highcpu-16 |
| PostgreSQL | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 8 vCPU, 30GB Memory | n1-standard-8 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Redis Persistent + Sentinel | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Gitaly[^2] [^7] | X | 8 vCPU, 30GB Memory | n1-standard-8 |
| Redis[^3] | 3 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul + Sentinel[^3] | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Sidekiq | 4 | 2 vCPU, 7.5GB Memory | n1-standard-2 |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] . | - | - | - |
| S3 Object Storage[^4] | - | - | - |
| NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| Monitoring node | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best effort like for like can be used.
| External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
### 10,000 User Configuration
@@ -281,22 +276,21 @@ vendors a best effort like for like can be used.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 3 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| GitLab Rails[^1] | 3 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| PostgreSQL | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 16 vCPU, 60GB Memory | n1-standard-16 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Persistent + Sentinel | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Gitaly[^2] [^7] | X | 16 vCPU, 60GB Memory | n1-standard-16 |
| Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Redis Sentinel[^3] - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] . | - | - | - |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| S3 Object Storage[^4] | - | - | - |
| NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best effort like for like can be used.
| External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
### 25,000 User Configuration
@@ -307,22 +301,21 @@ vendors a best effort like for like can be used.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 7 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| GitLab Rails[^1] | 7 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 32 vCPU, 120GB Memory | n1-standard-32 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Persistent + Sentinel | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Gitaly[^2] [^7] | X | 32 vCPU, 120GB Memory | n1-standard-32 |
| Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Redis Sentinel[^3] - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] . | - | - | - |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| S3 Object Storage[^4] | - | - | - |
| NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best effort like for like can be used.
| External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^6] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
### 50,000 User Configuration
@@ -333,35 +326,42 @@ vendors a best effort like for like can be used.
| Service | Nodes | Configuration | GCP type |
| ----------------------------|-------|-----------------------|---------------|
| GitLab Rails <br> - Puma workers on each node set to 90% of available CPUs with 16 threads | 15 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| GitLab Rails[^1] | 15 | 32 vCPU, 28.8GB Memory | n1-highcpu-32 |
| PostgreSQL | 3 | 8 vCPU, 30GB Memory | n1-standard-8 |
| PgBouncer | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Gitaly <br> - Gitaly Ruby workers on each node set to 20% of available CPUs | X[^1] . | 64 vCPU, 240GB Memory | n1-standard-64 |
| Redis Cache + Sentinel <br> - Cache maxmemory set to 90% of available memory | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Persistent + Sentinel | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Gitaly[^2] [^7] | X | 64 vCPU, 240GB Memory | n1-standard-64 |
| Redis[^3] - Cache | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis[^3] - Queues / Shared State | 3 | 4 vCPU, 15GB Memory | n1-standard-4 |
| Redis Sentinel[^3] - Cache | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Redis Sentinel[^3] - Queues / Shared State | 3 | 1 vCPU, 1.7GB Memory | g1-small |
| Consul | 3 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| NFS Server[^4] . | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^3] . | - | - | - |
| Sidekiq | 4 | 4 vCPU, 15GB Memory | n1-standard-4 |
| NFS Server[^5] [^7] | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| S3 Object Storage[^4] | - | - | - |
| Monitoring node | 1 | 4 vCPU, 3.6GB Memory | n1-highcpu-4 |
| External load balancing node[^2] . | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^2] . | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 |
| External load balancing node[^6] | 1 | 2 vCPU, 1.8GB Memory | n1-highcpu-2 |
| Internal load balancing node[^6] | 1 | 8 vCPU, 7.2GB Memory | n1-highcpu-8 |
NOTE: **Note:** Memory values are given directly by GCP machine sizes. On different cloud
vendors a best effort like for like can be used.
[^1]: In our architectures we run each GitLab Rails node using the Puma webserver
and have its number of workers set to 90% of available CPUs along with 4 threads.
[^1]: Gitaly node requirements are dependent on customer data, specifically the number of
[^2]: Gitaly node requirements are dependent on customer data, specifically the number of
projects and their sizes. We recommend 2 nodes as an absolute minimum for HA environments
and at least 4 nodes when supporting 50,000 or more users.
We recommend that each Gitaly node should store no more than 5TB of data.
Additional nodes should be considered in conjunction with a review of expected
data size and spread based on the recommendations above.
We also recommend that each Gitaly node should store no more than 5TB of data
and have the number of [`gitaly-ruby` workers](../gitaly/index.md#gitaly-ruby)
set to 20% of available CPUs. Additional nodes should be considered in conjunction
with a review of expected data size and spread based on the recommendations above.
[^2]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
as the load balancer. However, other reputable load balancers with similar feature
sets should also work, but be aware that these aren't validated.
[^3]: Recommended Redis setup differs depending on the size of the architecture.
For smaller architectures (up to 5,000 users) we suggest one Redis cluster for all
classes and that Redis Sentinel is hosted alongside Consul.
For larger architectures (10,000 users or more) we suggest running a separate
[Redis Cluster](redis.md#running-multiple-redis-clusters) for the Cache class
and another for the Queues and Shared State classes. We also recommend
running a separate Redis Sentinel cluster for each Redis Cluster.
[^3]: For data objects such as LFS, Uploads, Artifacts, etc., we recommend an S3 Object Storage
[^4]: For data objects such as LFS, Uploads, Artifacts, etc., we recommend an S3 Object Storage
where possible over NFS due to better performance and availability. Several types of objects
are supported for S3 storage - [Job artifacts](../job_artifacts.md#using-object-storage),
[LFS](../lfs/lfs_administration.md#storing-lfs-objects-in-remote-object-storage),
@@ -370,6 +370,17 @@ vendors a best effort like for like can be used.
[Packages](../packages/index.md#using-object-storage) (Optional Feature),
[Dependency Proxy](../packages/dependency_proxy.md#using-object-storage) (Optional Feature).
[^4]: NFS storage server is still required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196)
[^5]: NFS storage server is still required for [GitLab Pages](https://gitlab.com/gitlab-org/gitlab-pages/issues/196)
and optionally for CI Job Incremental Logging
([can be switched to use Redis instead](https://docs.gitlab.com/ee/administration/job_logs.html#new-incremental-logging-architecture)).
([can be switched to use Redis instead](../job_logs.md#new-incremental-logging-architecture)).
[^6]: Our architectures have been tested and validated with [HAProxy](https://www.haproxy.org/)
as the load balancer. However, other reputable load balancers with similar feature
sets should also work, but be aware that these aren't validated.
[^7]: We strongly recommend that the Gitaly and / or NFS nodes are set up with SSD disks over
HDD with a throughput of at least 8,000 IOPS for read operations and 2,000 IOPS for writes,
as these components have heavy I/O. These IOPS values are recommended only as a starting
point; over time they may be adjusted higher or lower depending on the scale of your
environment's workload. If you're running the environment on a Cloud provider
you may need to refer to their documentation on how to configure IOPS correctly.

View file

@@ -166,7 +166,7 @@ Learn how to install, configure, update, and maintain your GitLab instance.
## Git configuration options
- [Custom Git hooks](custom_hooks.md): Custom Git hooks (on the filesystem) for when webhooks aren't enough.
- [Server hooks](server_hooks.md): Server hooks (on the filesystem) for when webhooks aren't enough.
- [Git LFS configuration](lfs/lfs_administration.md): Learn how to configure LFS for GitLab.
- [Housekeeping](housekeeping.md): Keep your Git repositories tidy and fast.
- [Configuring Git Protocol v2](git_protocol.md): Git protocol version 2 support.

View file

@@ -0,0 +1,120 @@
---
type: reference, howto
disqus_identifier: 'https://docs.gitlab.com/ee/administration/custom_hooks.html'
---
# Server hooks **(CORE ONLY)**
> **Notes:**
>
> - Server hooks were [introduced](https://gitlab.com/gitlab-org/gitlab/issues/196051) in GitLab 12.8, replacing custom hooks.
> - Server hooks must be configured on the filesystem of the GitLab server. Only GitLab server administrators will be able to complete these tasks. Please explore [webhooks](../user/project/integrations/webhooks.md) and [GitLab CI/CD](../ci/README.md) as options if you do not have filesystem access. For a user-configurable Git hook interface, see [Push Rules](../push_rules/push_rules.md), available in GitLab Starter **(STARTER)**.
> - Server hooks won't be replicated to secondary nodes if you use [GitLab Geo](geo/replication/index.md).
Git natively supports hooks that are executed on different actions.
Examples of server-side Git hooks include pre-receive, post-receive, and update.
See [Git SCM Server-Side Hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks#Server-Side-Hooks) for more information about each hook type.
As of GitLab Shell version 2.2.0 (which requires GitLab 7.5+), GitLab
administrators can add custom Git hooks to any GitLab project.
## Create a server hook for a repository
Server-side Git hooks are typically placed in the repository's `hooks`
subdirectory. In GitLab, hook directories are symlinked to the GitLab Shell
`hooks` directory for ease of maintenance between GitLab Shell upgrades.
Server hooks are implemented differently, but the behavior is exactly the same
once the hook is created. Follow the steps below to set up a server hook for a
repository:
1. Pick a project that needs a server hook.
1. On the GitLab server, navigate to the project's repository directory.
For an installation from source the path is usually
`/home/git/repositories/<group>/<project>.git`. For Omnibus installs the path is
usually `/var/opt/gitlab/git-data/repositories/<group>/<project>.git`.
1. Create a new directory in this location called `custom_hooks`.
1. Inside the new `custom_hooks` directory, create a file with a name matching
the hook type. For a pre-receive hook the file name should be `pre-receive`
with no extension.
1. Make the hook file executable and make sure it's owned by Git.
1. Write the code to make the server hook function as expected. Hooks can be
in any language. Ensure the 'shebang' at the top properly reflects the language
type. For example, if the script is in Ruby the shebang will probably be
`#!/usr/bin/env ruby`.
That's it! Assuming the hook code is properly implemented the hook will fire
as appropriate.
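As the steps note, hooks can be written in any language. For illustration, here is a minimal `pre-receive` hook sketch in Ruby, implementing a hypothetical policy that rejects branch deletions (this example is not part of GitLab itself). Save it as `custom_hooks/pre-receive` and make it executable:

```ruby
#!/usr/bin/env ruby
# Git passes one line per updated ref on stdin: "<old-sha> <new-sha> <ref-name>".
ZERO_SHA = '0' * 40

$stdin.each_line do |line|
  _old_sha, new_sha, ref = line.split
  if new_sha == ZERO_SHA
    # The GL-HOOK-ERR: prefix surfaces the message in GitLab's UI
    # (see "Custom error messages" below).
    puts "GL-HOOK-ERR: Deleting #{ref} is not allowed."
    exit 1
  end
end
```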
## Set a global server hook for all repositories
To create a Git hook that applies to all of your repositories in
your instance, set a global server hook. Since GitLab will look inside the GitLab Shell
`hooks` directory for global hooks, adding any hook there will apply it to all repositories.
Follow the steps below to properly set up a server hook for all repositories:
1. On the GitLab server, navigate to the configured custom hook directory. The
default is in the GitLab Shell directory. For an installation from source, the
GitLab Shell `hooks` directory path is usually
`/home/git/gitlab-shell/hooks`. For Omnibus installs the path is usually
`/opt/gitlab/embedded/service/gitlab-shell/hooks`.
To look in a different directory for the global custom hooks,
set `custom_hooks_dir` in the GitLab Shell config. For
Omnibus installations, this can be set in `gitlab.rb`; and in source
installations, this can be set in `gitlab-shell/config.yml`.
1. Create a new directory in this location. Depending on your hook, it will be
either a `pre-receive.d`, `post-receive.d`, or `update.d` directory.
1. Inside this new directory, add your hook. Hooks can be
in any language. Ensure the 'shebang' at the top properly reflects the language
type. For example, if the script is in Ruby the shebang will probably be
`#!/usr/bin/env ruby`.
1. Make the hook file executable and make sure it's owned by Git.
Now test the hook to check whether it is functioning properly.
## Chained hooks support
> [Introduced](https://gitlab.com/gitlab-org/gitlab-shell/merge_requests/93) in GitLab Shell 4.1.0 and GitLab 8.15.
Hooks can also be global or set per project directory, and support chained
execution of the hooks.
NOTE: **Note:**
`<hook_name>.d` would need to be either `pre-receive.d`,
`post-receive.d`, or `update.d` to work properly. Any other names will be ignored.
NOTE: **Note:**
Files in `.d` directories need to be executable and not match the backup file
pattern (`*~`).
The hooks are searched and executed in this order:
1. `gitlab-shell/hooks` directory as known to Gitaly.
1. `<project>.git/hooks/<hook_name>` - executed by `git` itself, this is symlinked to `gitlab-shell/hooks/<hook_name>`.
1. `<project>.git/custom_hooks/<hook_name>` - per-project hook (this was kept as the already existing behavior).
1. `<project>.git/custom_hooks/<hook_name>.d/*` - per-project hooks.
1. `<project>.git/hooks/<hook_name>.d/*` OR `<custom_hooks_dir>/<hook_name.d>/*` - global hooks: all executable files (except editor backup files).
The hooks of the same type are executed in order and execution stops on the
first script exiting with a non-zero value.
## Custom error messages
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/5073) in GitLab 8.10.
To have custom error messages appear in GitLab's UI when the commit is
declined or an error occurs during the Git hook, your script should:
- Send the custom error messages to either the script's `stdout` or `stderr`.
- Prefix each message with `GL-HOOK-ERR:` with no characters appearing before the prefix.
### Example custom error message
This hook script, written in shell, generates the following message in GitLab's UI:
```bash
#!/bin/sh
echo "GL-HOOK-ERR: My custom error message.";
exit 1
```
![Custom message from custom Git hook](img/custom_hooks_error_msg.png)

View file

@@ -256,12 +256,18 @@ Do not include the same information in multiple places. [Link to a SSOT instead.
Some features are also objects. For example, "GitLab's Merge Requests support X" and
"Create a new merge request for Z."
- Use common contractions when it helps create a friendly and informal tone, especially in tutorials and [UIs](https://design.gitlab.com/content/punctuation/#contractions).
- Do use contractions like: _it's_, _can't_, _wouldn't_, _you're_, _you've_, _haven't_, _don't_, _we're_, _that's_, and _won't_. Contractions in instructional documentation such as tutorials can help create a friendly and informal tone.
- Avoid less common contractions such as: _he'd_, _it'll_, _should've_, and _there'd_.
- Do not use contractions in reference documentation. Examples:
- You cannot set a limit higher than 1000.
- For `parameter1`, the default is 10.
- Do not use contractions with a proper noun and a verb, such as _GitLab's creating X_.
- Avoid using contractions when you need to emphasize a negative, such as "Do **not** install X with Y."
- Avoid use of the future tense:
- Instead of "after you execute this command, GitLab will display the result", use "after you execute this command, GitLab displays the result".
- Only use the future tense to convey when the action or result will actually occur at a future time.
- Do not use contractions:
- Instead of "don't," "can't," "doesn't," and so on, use "do not," "cannot," or "does not."
- Possible exceptions are cases when a more familiar tone is desired, such as a blog post or other casual context.
- Do not use slashes to clump different words together or as a replacement for the word "or":
- Instead of "and/or," consider using "or," or use another sensible construction.
- Other examples include "clone/fetch," "author/assignee," and "namespace/repository name." Break apart any such instances in an appropriate way.

View file

@@ -1,7 +1,7 @@
---
redirect_to: '../administration/custom_hooks.md'
redirect_to: '../administration/server_hooks.md'
---
# Custom Git Hooks
This document was moved to [administration/custom_hooks.md](../administration/custom_hooks.md).
This document was moved to [administration/server_hooks.md](../administration/server_hooks.md).

View file

@@ -4,57 +4,31 @@ You can integrate your GitLab instance with GitHub.com as well as GitHub Enterpr
## Enabling GitHub OAuth
To enable GitHub OmniAuth provider, you must use GitHub's credentials for your GitLab instance.
To get the credentials (a pair of Client ID and Client Secret), you must register an application as an OAuth App on GitHub.
To enable the GitHub OmniAuth provider, you'll need an OAuth 2 Client ID and Client Secret from GitHub. To get these credentials, sign into GitHub and follow their procedure for [Creating an OAuth App](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/).
1. Sign in to GitHub.
When you create an OAuth 2 app in GitHub, you'll need the following information:
1. Navigate to your individual user or organization settings, depending on how you want the application registered. It does not matter if the application is registered as an individual or an organization - that is entirely up to you.
- The URL of your GitLab instance, such as `https://gitlab.example.com`.
- The authorization callback URL; in this case, `https://gitlab.example.com/users/auth`. Include the port number if your GitLab instance uses a non-default port.
- For individual accounts, select **Developer settings** from the left menu, then select **OAuth Apps**.
- For organization accounts, directly select **OAuth Apps** from the left menu.
NOTE: **Note:**
To prevent an [OAuth2 covert redirect](http://tetraph.com/covert_redirect/) vulnerability, append `/users/auth` to the end of the GitHub authorization callback URL.
1. Select **Register an application** (if you don't have any OAuth App) or **New OAuth App** (if you already have OAuth Apps).
![Register OAuth App](img/github_app_entry.png)
See [Initial OmniAuth Configuration](omniauth.md#initial-omniauth-configuration) for initial settings.
1. Provide the required details.
- Application name: This can be anything. Consider something like `<Organization>'s GitLab` or `<Your Name>'s GitLab` or something else descriptive.
- Homepage URL: The URL of your GitLab installation. For example, `https://gitlab.example.com`.
- Application description: Fill this in if you wish.
- Authorization callback URL: `http(s)://${YOUR_DOMAIN}/users/auth`. Please make sure the port is included if your GitLab instance is not configured on the default port.
![Register OAuth App](img/github_register_app.png)
Once you have configured the GitHub provider, you'll need the following information, which must be substituted in the GitLab configuration file in the steps shown next.
NOTE: Be sure to append `/users/auth` to the end of the callback URL
to prevent an [OAuth2 covert
redirect](http://tetraph.com/covert_redirect/) vulnerability.
| Setting from GitHub | Substitute in the GitLab configuration file | Description |
|:---------------------|:-----------------------------------------------|:------------|
| Client ID | `YOUR_APP_ID` | OAuth 2 Client ID |
| Client Secret | `YOUR_APP_SECRET` | OAuth 2 Client Secret |
| URL | `https://github.example.com/` | GitHub Deployment URL |
1. Select **Register application**.
Follow these steps to incorporate the GitHub OAuth 2 app in your GitLab server:
1. You should now see a pair of **Client ID** and **Client Secret** near the top right of the page (see screenshot).
Keep this page open as you continue configuration.
![GitHub app](img/github_app.png)
**For Omnibus installations**
1. On your GitLab server, open the configuration file.
For Omnibus package:
```sh
sudo editor /etc/gitlab/gitlab.rb
```
For installations from source:
```sh
cd /home/git/gitlab
sudo -u git -H editor config/gitlab.yml
```
1. See [Initial OmniAuth Configuration](omniauth.md#initial-omniauth-configuration) for initial settings.
1. Add the provider configuration:
For Omnibus package:
1. Edit `/etc/gitlab/gitlab.rb`:
For GitHub.com:
@@ -83,7 +57,15 @@ To get the credentials (a pair of Client ID and Client Secret), you must registe
]
```
For installation from source:
**Replace `https://github.example.com/` with your GitHub URL.**
1. Save the file and [reconfigure](../administration/restart_gitlab.html#omnibus-gitlab-reconfigure) GitLab for the changes to take effect.
---
**For installations from source**
1. Navigate to your repository and edit `config/gitlab.yml`:
For GitHub.com:
@@ -102,20 +84,15 @@ To get the credentials (a pair of Client ID and Client Secret), you must registe
args: { scope: 'user:email' } }
```
__Replace `https://github.example.com/` with your GitHub URL.__
**Replace `https://github.example.com/` with your GitHub URL.**
1. Change `YOUR_APP_ID` to the Client ID from the GitHub application page from step 6.
1. Save the file and [restart](../administration/restart_gitlab.html#installations-from-source) GitLab for the changes to take effect.
1. Change `YOUR_APP_SECRET` to the Client Secret from the GitHub application page from step 6.
---
1. Save the configuration file.
1. Refresh the GitLab sign in page. You should now see a GitHub icon below the regular sign in form.
1. [Reconfigure GitLab][] or [restart GitLab][] for the changes to take effect if you
installed GitLab via Omnibus or from source respectively.
On the sign in page there should now be a GitHub icon below the regular sign in form.
Click the icon to begin the authentication process. GitHub will ask the user to sign in and authorize the GitLab application.
If everything goes well, the user will be returned to GitLab and signed in.
1. Click the icon to begin the authentication process. GitHub will ask the user to sign in and authorize the GitLab application.
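For reference, here is a sketch of the resulting Omnibus provider block in `/etc/gitlab/gitlab.rb` (all values are placeholders; the `url` key applies to GitHub Enterprise and is omitted for GitHub.com):

```ruby
gitlab_rails['omniauth_providers'] = [
  {
    name: "github",
    app_id: "YOUR_APP_ID",         # Client ID from the GitHub OAuth App page
    app_secret: "YOUR_APP_SECRET", # Client Secret from the GitHub OAuth App page
    url: "https://github.example.com/",
    args: { scope: "user:email" }
  }
]
```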
## GitHub Enterprise with self-signed Certificate

Binary files not shown (three deleted images; sizes 41 KiB, 26 KiB, and 39 KiB).

View file

@@ -52,9 +52,9 @@ will get rejected.
### Custom Push Rules **(CORE ONLY)**
It's possible to create custom push rules rather than the push rules available in
**Admin Area > Push Rules** by using more advanced server-side Git hooks.
**Admin Area > Push Rules** by using more advanced server hooks.
See [custom server-side Git hooks](../administration/custom_hooks.md) for more information.
See [server hooks](../administration/server_hooks.md) for more information.
## Enabling push rules

View file

@@ -713,6 +713,56 @@ workers:
terminationGracePeriodSeconds: 60
```
#### Network Policy
> [Introduced](https://gitlab.com/gitlab-org/charts/auto-deploy-app/merge_requests/30) in GitLab 12.7.
By default, all Kubernetes pods are
[non-isolated](https://kubernetes.io/docs/concepts/services-networking/network-policies/#isolated-and-non-isolated-pods)
and accept traffic from any source. You can use
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
to restrict connections to selected pods or namespaces.
NOTE: **Note:**
You must use a Kubernetes network plugin that implements support for
`NetworkPolicy`; the default network plugin for Kubernetes (`kubenet`)
[doesn't implement](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#kubenet)
support for it. The [Cilium](https://cilium.io/) network plugin can be
installed as a [cluster application](../../user/clusters/applications.md#install-cilium-using-gitlab-ci)
to enable support for network policies.
You can enable deployment of a network policy by setting the following
in the `.gitlab/auto-deploy-values.yaml` file:
```yml
networkPolicy:
enabled: true
```
The default policy deployed by the auto deploy pipeline allows
traffic within the local namespace and from the `gitlab-managed-apps`
namespace; all other inbound connections are blocked. Outbound
traffic is not affected by the default policy.
You can also provide a custom [policy specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#networkpolicyspec-v1-networking-k8s-io)
via the `.gitlab/auto-deploy-values.yaml` file, for example:
```yml
networkPolicy:
enabled: true
spec:
podSelector:
matchLabels:
app.gitlab.com/env: staging
ingress:
- from:
- podSelector:
matchLabels: {}
- namespaceSelector:
matchLabels:
app.gitlab.com/managed_by: gitlab
```
#### Running commands in the container
Applications built with [Auto Build](#auto-build) using Herokuish, the default

View file

@@ -69,7 +69,7 @@ If you have problems with Git, the following may help:
The following are advanced topics for those who want to get the most out of Git:
- [Custom Git Hooks](../../administration/custom_hooks.md)
- [Server Hooks](../../administration/server_hooks.md)
- [Git Attributes](../../user/project/git_attributes.md)
- Git Submodules: [Using Git submodules with GitLab CI](../../ci/git_submodules.md#using-git-submodules-with-gitlab-ci)
- [Partial Clone](partial_clone.md)

View file

@@ -180,15 +180,14 @@ any downgrades would result to all sessions being invalidated and users are logg
In 12.0.0 we made various database-related changes. These changes require that
users first upgrade to the latest 11.11 patch release. Once upgraded to 11.11.x,
users can upgrade to 12.x. Failure to do so may result in database migrations
users can upgrade to 12.0.x. Failure to do so may result in database migrations
not being applied, which could lead to application errors.
Example 1: you are currently using GitLab 11.11.3, which is the latest patch
release for 11.11.x. You can upgrade as usual to 12.0.0, 12.1.0, etc.
release for 11.11.x. You can upgrade as usual to 12.0.x.
Example 2: you are currently using a version of GitLab 10.x. To upgrade, first
upgrade to 11.11.3. Once upgraded to 11.11.3 you can safely upgrade to 12.0.0
or future versions.
upgrade to 11.11.3. Once upgraded to 11.11.3 you can safely upgrade to 12.0.x.
## Miscellaneous

View file

@@ -477,6 +477,7 @@ Supported applications:
- [cert-manager](#install-cert-manager-using-gitlab-ci)
- [Sentry](#install-sentry-using-gitlab-ci)
- [GitLab Runner](#install-gitlab-runner-using-gitlab-ci)
- [Cilium](#install-cilium-using-gitlab-ci)
### Usage
@@ -661,6 +662,65 @@ management project. Refer to the
[chart](https://gitlab.com/gitlab-org/charts/gitlab-runner) for the
available configuration options.
### Install Cilium using GitLab CI
> [Introduced](https://gitlab.com/gitlab-org/cluster-integration/cluster-applications/merge_requests/22) in GitLab 12.7.
[Cilium](https://cilium.io/) is a networking plugin for Kubernetes
that you can use to implement support for
[NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
resources.
Enable Cilium in the `.gitlab/managed-apps/config.yaml` file to install it:
```yaml
# possible values are gke, eks or you can leave it blank
clusterType: gke
cilium:
installed: true
```
The `clusterType` variable enables the recommended Helm variables for
a corresponding cluster type; the default value is blank. You can
check the recommended variables for each cluster type in the official
documentation:
- [Google GKE](https://cilium.readthedocs.io/en/stable/gettingstarted/k8s-install-gke/#prepare-deploy-cilium)
- [AWS EKS](https://cilium.readthedocs.io/en/stable/gettingstarted/k8s-install-eks/#prepare-deploy-cilium)
You can customize Cilium's Helm variables by defining the
`.gitlab/managed-apps/cilium/values.yaml` file in your cluster
management project. Refer to the
[Cilium chart](https://github.com/cilium/cilium/tree/master/install/kubernetes/cilium)
for the available configuration options.
CAUTION: **Caution:**
Installation and removal of Cilium [requires a restart](https://cilium.readthedocs.io/en/stable/gettingstarted/k8s-install-gke/#restart-remaining-pods)
of all affected pods in all namespaces to ensure that they are
[managed](https://cilium.readthedocs.io/en/stable/troubleshooting/#ensure-pod-is-managed-by-cilium)
by the correct networking plugin.
NOTE: **Note:**
Major upgrades might require additional setup steps; please consult
the official [upgrade guide](https://docs.cilium.io/en/stable/install/upgrade/) for more
information.
By default, dropped traffic is logged by the
`cilium-monitor` sidecar container. You can check these logs via:
```bash
kubectl -n gitlab-managed-apps logs cilium-XXXX cilium-monitor
```
Drop logging can be disabled via `.gitlab/managed-apps/cilium/values.yaml`:
```yml
agent:
monitor:
enabled: false
```
## Upgrading applications
> [Introduced](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/24789) in GitLab 11.8.

View file

@@ -10,7 +10,7 @@ Error tracking allows developers to easily discover and view the errors that the
### Deploying Sentry
You may sign up to the cloud hosted <https://sentry.io> or deploy your own [on-premise instance](https://docs.sentry.io/server/installation/).
You may sign up to the cloud-hosted <https://sentry.io>, deploy your own [on-premise instance](https://docs.sentry.io/server/installation/), or use GitLab to [install Sentry to a Kubernetes cluster](../../clusters/applications.md#install-sentry-using-gitlab-ci).
### Enabling Sentry

View file

@@ -341,7 +341,7 @@ the upstream Git repository. In this configuration one Git repository acts as
the authoritative upstream, and the other as downstream. The `pre-receive` hook
will be installed on the downstream repository.
Read about [configuring custom Git hooks](../../../administration/custom_hooks.md) on the GitLab server.
Read about [configuring Server hooks](../../../administration/server_hooks.md) on the GitLab server.
A sample `pre-receive` hook is provided below.

View file

@@ -1,6 +1,6 @@
apply:
stage: deploy
image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.5.0"
image: "registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.6.0"
environment:
name: production
variables:
@@ -10,6 +10,7 @@ apply:
CERT_MANAGER_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/cert-manager/values.yaml
SENTRY_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/sentry/values.yaml
GITLAB_RUNNER_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/gitlab-runner/values.yaml
CILIUM_VALUES_FILE: $CI_PROJECT_DIR/.gitlab/managed-apps/cilium/values.yaml
script:
- gitlab-managed-apps /usr/local/share/gitlab-managed-apps/helmfile.yaml
only:

View file

@@ -16,7 +16,7 @@ module Gitlab
if fork_merge_request? && @diff_head_sha
@merge_request.source_project_id = @relation_hash['project_id']
fetch_ref unless branch_exists?(@merge_request.source_branch)
create_source_branch unless branch_exists?(@merge_request.source_branch)
create_target_branch unless branch_exists?(@merge_request.target_branch)
end
@@ -34,17 +34,18 @@ module Gitlab
@merge_request
end
def create_target_branch
@project.repository.create_branch(@merge_request.target_branch, @merge_request.target_branch_sha)
# When the exported MR was in a fork, the source branch does not exist in
# the imported bundle - although the commits usually do - so it must be
# created manually. Ignore failures so we get the merge request itself if
# the commits are missing.
def create_source_branch
@project.repository.create_branch(@merge_request.source_branch, @diff_head_sha)
rescue => err
Rails.logger.warn("Import/Export warning: Failed to create source branch #{@merge_request.source_branch} => #{@diff_head_sha} for MR #{@merge_request.iid}: #{err}") # rubocop:disable Gitlab/RailsLogger
end
# Gitaly migration: https://gitlab.com/gitlab-org/gitaly/issues/1295
def fetch_ref
target_ref = Gitlab::Git::BRANCH_REF_PREFIX + @merge_request.source_branch
unless @project.repository.fetch_source_branch!(@project.repository, @diff_head_sha, target_ref)
Rails.logger.warn("Import/Export warning: Failed to create #{target_ref} for MR: #{@merge_request.iid}") # rubocop:disable Gitlab/RailsLogger
end
def create_target_branch
@project.repository.create_branch(@merge_request.target_branch, @merge_request.target_branch_sha)
end
def branch_exists?(branch_name)

View file

@ -67,6 +67,16 @@ bundle exec bin/qa Test::Instance::All http://localhost:3000
Note: If you want to run tests requiring SSH against GDK, you
will need to [modify your GDK setup](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/run_qa_against_gdk.md).
#### Running EE tests
When running EE tests, you'll need a license available. GitLab engineers can [request a license](https://about.gitlab.com/handbook/developer-onboarding/#working-on-gitlab-ee).
Once you have the license file, export it as an environment variable so the framework can use it. When the variable is set, the license is installed automatically.
```bash
export EE_LICENSE=$(cat /path/to/gitlab_license)
```
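For example, a hypothetical end-to-end invocation (the license path is a placeholder) could combine the export with the suite run shown earlier:

```bash
# Export the license (placeholder path), then run the full suite against a
# local GitLab instance; the framework installs the license automatically.
export EE_LICENSE=$(cat /path/to/gitlab_license)
bundle exec bin/qa Test::Instance::All http://localhost:3000
```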
### Writing tests
- [Writing tests from scratch tutorial](../doc/development/testing_guide/end_to_end/quick_start_guide.md)

View file

@ -1,9 +1,8 @@
# frozen_string_literal: true
module QA
# https://gitlab.com/gitlab-org/gitlab/issues/26952
context 'Manage', :github, :quarantine do
context 'Manage', :github, quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/26952' do
describe 'Project import from GitHub' do
let(:imported_project) do
Resource::ProjectImportedFromGithub.fabricate! do |project|

View file

@ -1,9 +1,8 @@
# frozen_string_literal: true
module QA
# Failure issue: https://gitlab.com/gitlab-org/gitlab/issues/36817
context 'Create', :quarantine do
context 'Create', quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/36817' do
describe 'Merge request rebasing' do
it 'user rebases source branch of merge request' do
Flow::Login.sign_in

View file

@ -2,9 +2,8 @@
module QA
# Git protocol v2 is temporarily disabled
# https://gitlab.com/gitlab-org/gitlab/issues/27828
context 'Create', :quarantine do
context 'Create', quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/27828' do
describe 'Push over HTTP using Git protocol version 2', :requires_git_protocol_v2 do
it 'user pushes to the repository' do
Flow::Login.sign_in

View file

@ -2,9 +2,8 @@
module QA
# Git protocol v2 is temporarily disabled
# https://gitlab.com/gitlab-org/gitlab/issues/27828
context 'Create', :quarantine do
context 'Create', quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/27828' do
describe 'Push over SSH using Git protocol version 2', :requires_git_protocol_v2 do
# Note: If you run this test against GDK make sure you've enabled sshd and
# enabled setting the Git protocol by adding `AcceptEnv GIT_PROTOCOL` to

View file

@ -50,8 +50,7 @@ module QA
end
end
# Failure issue: https://gitlab.com/gitlab-org/gitlab/issues/118481
describe 'Auto DevOps support', :orchestrated, :kubernetes, :quarantine do
describe 'Auto DevOps support', :orchestrated, :kubernetes, quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/118481' do
context 'when rbac is enabled' do
before(:all) do
@cluster = Service::KubernetesCluster.new.create!

View file

@ -56,7 +56,7 @@ describe 'Merge request > User posts diff notes', :js do
end
context 'with an unchanged line on the left and an unchanged line on the right' do
it 'allows commenting on the left side' do
it 'allows commenting on the left side', quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/196826' do
should_allow_commenting(find('[id="2f6fcd96b88b36ce98c38da085c795a27d92a3dd_7_7"]', match: :first).find(:xpath, '..'), 'left')
end

View file

@ -76,7 +76,7 @@ describe 'Merge request > User sees diff', :js do
end
context 'as user who needs to fork' do
it 'shows fork/cancel confirmation', :sidekiq_might_not_need_inline do
it 'shows fork/cancel confirmation', :sidekiq_might_not_need_inline, quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/196749' do
sign_in(user)
visit diffs_project_merge_request_path(project, merge_request)

View file

@ -40,8 +40,6 @@ describe Gitlab::ImportExport::MergeRequestParser do
allow(instance).to receive(:branch_exists?).with(merge_request.source_branch).and_return(false)
allow(instance).to receive(:fork_merge_request?).and_return(true)
end
allow(Gitlab::GitalyClient).to receive(:migrate).and_call_original
allow(Gitlab::GitalyClient).to receive(:migrate).with(:fetch_ref).and_return([nil, 0])
expect(parsed_merge_request).to eq(merge_request)
end

View file

@ -15,6 +15,18 @@ describe SentryIssue do
it { is_expected.to validate_presence_of(:sentry_issue_identifier) }
end
describe 'callbacks' do
context 'after create commit' do
it 'updates Sentry with a reciprocal link on creation' do
issue = create(:issue)
expect(ErrorTrackingIssueLinkWorker).to receive(:perform_async).with(issue.id)
create(:sentry_issue, issue: issue)
end
end
end
describe '.for_project_and_identifier' do
let!(:sentry_issue) { create(:sentry_issue) }
let(:project) { sentry_issue.issue.project }

View file

@ -269,7 +269,7 @@ shared_examples 'thread comments' do |resource_name|
end
end
it 'has "Comment" selected when opening the menu' do
it 'has "Comment" selected when opening the menu', quarantine: 'https://gitlab.com/gitlab-org/gitlab/issues/196825' do
find(toggle_selector).click
find("#{menu_selector} li", match: :first)

View file

@ -0,0 +1,96 @@
# frozen_string_literal: true
require 'spec_helper'
describe ErrorTrackingIssueLinkWorker do
let_it_be(:error_tracking) { create(:project_error_tracking_setting) }
let_it_be(:project) { error_tracking.project }
let_it_be(:issue) { create(:issue, project: project) }
let_it_be(:sentry_issue) { create(:sentry_issue, issue: issue) }
let(:repo) do
Gitlab::ErrorTracking::Repo.new(
status: 'active',
integration_id: 66666,
project_id: project.id
)
end
subject { described_class.new.perform(issue.id) }
describe '#perform' do
it 'creates a link between an issue and a Sentry issue in Sentry' do
expect_next_instance_of(Sentry::Client) do |client|
expect(client).to receive(:repos).with('sentry-org').and_return([repo])
expect(client)
.to receive(:create_issue_link)
.with(66666, sentry_issue.sentry_issue_identifier, issue)
.and_return(true)
end
expect(subject).to be true
end
shared_examples_for 'makes no external API requests' do
it 'takes no action' do
expect_any_instance_of(Sentry::Client).not_to receive(:repos)
expect_any_instance_of(Sentry::Client).not_to receive(:create_issue_link)
expect(subject).to be nil
end
end
shared_examples_for 'terminates after one API request' do
it 'does not create a link' do
expect_next_instance_of(Sentry::Client) do |client|
expect(client).to receive(:repos).with('sentry-org').and_return([repo])
end
expect_any_instance_of(Sentry::Client).not_to receive(:create_issue_link)
expect(subject).to be nil
end
end
context 'when issue is unavailable' do
let(:issue) { double('issue', id: -3) }
it_behaves_like 'makes no external API requests'
end
context 'when project does not have error tracking configured' do
let(:issue) { build(:issue) }
it_behaves_like 'makes no external API requests'
end
context 'when the issue is not linked to a Sentry issue in GitLab' do
let(:issue) { build(:issue, project: project) }
it_behaves_like 'makes no external API requests'
end
context 'when Sentry disabled the GitLab integration' do
let(:repo) do
Gitlab::ErrorTracking::Repo.new(
status: 'inactive',
integration_id: 66666,
project_id: project.id
)
end
it_behaves_like 'terminates after one API request'
end
context 'when the Sentry GitLab integration is for another project' do
let(:repo) do
Gitlab::ErrorTracking::Repo.new(
status: 'active',
integration_id: 66666,
project_id: -3
)
end
it_behaves_like 'terminates after one API request'
end
end
end